Skip to content

Commit 7984fbf

Browse files
max-montes and Copilot committed
Fix OllamaChatClient passing unsupported kwargs to ollama.AsyncClient.chat()
Stop forwarding **kwargs from _inner_get_response() to ollama.AsyncClient.chat(). When the orchestration layer (e.g. HandoffBuilder) injects kwargs like allow_multiple_tool_calls=True, these were passed through unfiltered, causing a TypeError since ollama.AsyncClient.chat() does not accept them. This aligns with how other clients (e.g. OpenAIChatClient) handle kwargs — they only pass the prepared options_dict to the provider's API, not the raw **kwargs from the middleware chain. Fixes #4402 Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
1 parent 3b4eed2 commit 7984fbf

File tree

2 files changed

+86
-5
lines changed

2 files changed

+86
-5
lines changed

python/packages/ollama/agent_framework_ollama/_chat_client.py

Lines changed: 4 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -349,7 +349,7 @@ def _inner_get_response(
349349
messages: Sequence[Message],
350350
options: Mapping[str, Any],
351351
stream: bool = False,
352-
**kwargs: Any,
352+
**kwargs: Any, # noqa: ARG002 — accepted for interface compatibility; not forwarded to Ollama
353353
) -> Awaitable[ChatResponse] | ResponseStream[ChatResponseUpdate, ChatResponse]:
354354
if stream:
355355
# Streaming mode
@@ -360,7 +360,6 @@ async def _stream() -> AsyncIterable[ChatResponseUpdate]:
360360
response_object: AsyncIterable[OllamaChatResponse] = await self.client.chat( # type: ignore[misc]
361361
stream=True,
362362
**options_dict,
363-
**kwargs,
364363
)
365364
except Exception as ex:
366365
raise ChatClientException(f"Ollama streaming chat request failed : {ex}", ex) from ex
@@ -378,7 +377,6 @@ async def _get_response() -> ChatResponse:
378377
response: OllamaChatResponse = await self.client.chat( # type: ignore[misc]
379378
stream=False,
380379
**options_dict,
381-
**kwargs,
382380
)
383381
except Exception as ex:
384382
raise ChatClientException(f"Ollama chat request failed : {ex}", ex) from ex
@@ -395,8 +393,9 @@ def _prepare_options(self, messages: Sequence[Message], options: Mapping[str, An
395393

396394
messages = prepend_instructions_to_messages(list(messages), instructions, role="system")
397395

398-
# Keys to exclude from processing
399-
exclude_keys = {"instructions", "tool_choice"}
396+
# Keys to exclude from processing — these are either handled separately
397+
# or not supported by the Ollama API.
398+
exclude_keys = {"instructions", "tool_choice", "allow_multiple_tool_calls"}
400399

401400
# Build run_options and model_options separately
402401
run_options: dict[str, Any] = {}

python/packages/ollama/tests/test_ollama_chat_client.py

Lines changed: 82 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -248,6 +248,88 @@ async def test_cmc(
248248
assert result.text == "test"
249249

250250

251+
@patch.object(AsyncClient, "chat", new_callable=AsyncMock)
252+
async def test_cmc_ignores_unsupported_kwargs(
253+
mock_chat: AsyncMock,
254+
ollama_unit_test_env: dict[str, str],
255+
chat_history: list[Message],
256+
mock_chat_completion_response: OllamaChatResponse,
257+
) -> None:
258+
"""Verify that unsupported kwargs (e.g. allow_multiple_tool_calls) are
259+
silently filtered out and never forwarded to ollama.AsyncClient.chat().
260+
261+
Regression test for: https://github.com/microsoft/agent-framework/issues/4402
262+
"""
263+
mock_chat.return_value = mock_chat_completion_response
264+
chat_history.append(Message(text="hello world", role="user"))
265+
266+
ollama_client = OllamaChatClient()
267+
result = await ollama_client.get_response(
268+
messages=chat_history,
269+
allow_multiple_tool_calls=True,
270+
)
271+
272+
assert result.text == "test"
273+
mock_chat.assert_called_once()
274+
call_kwargs = mock_chat.call_args.kwargs
275+
assert "allow_multiple_tool_calls" not in call_kwargs
276+
277+
278+
@patch.object(AsyncClient, "chat", new_callable=AsyncMock)
279+
async def test_cmc_streaming_ignores_unsupported_kwargs(
280+
mock_chat: AsyncMock,
281+
ollama_unit_test_env: dict[str, str],
282+
chat_history: list[Message],
283+
mock_streaming_chat_completion_response: AsyncStream[OllamaChatResponse],
284+
) -> None:
285+
"""Verify that unsupported kwargs are filtered in streaming mode too.
286+
287+
Regression test for: https://github.com/microsoft/agent-framework/issues/4402
288+
"""
289+
mock_chat.return_value = mock_streaming_chat_completion_response
290+
chat_history.append(Message(text="hello world", role="user"))
291+
292+
ollama_client = OllamaChatClient()
293+
result = ollama_client.get_response(
294+
messages=chat_history,
295+
stream=True,
296+
allow_multiple_tool_calls=True,
297+
)
298+
299+
async for chunk in result:
300+
assert chunk.text == "test"
301+
302+
mock_chat.assert_called_once()
303+
call_kwargs = mock_chat.call_args.kwargs
304+
assert "allow_multiple_tool_calls" not in call_kwargs
305+
306+
307+
@patch.object(AsyncClient, "chat", new_callable=AsyncMock)
308+
async def test_cmc_ignores_unsupported_options(
309+
mock_chat: AsyncMock,
310+
ollama_unit_test_env: dict[str, str],
311+
chat_history: list[Message],
312+
mock_chat_completion_response: OllamaChatResponse,
313+
) -> None:
314+
"""Verify that unsupported keys inside the options dict are also stripped
315+
before reaching ollama.AsyncClient.chat().
316+
317+
Regression test for: https://github.com/microsoft/agent-framework/issues/4402
318+
"""
319+
mock_chat.return_value = mock_chat_completion_response
320+
chat_history.append(Message(text="hello world", role="user"))
321+
322+
ollama_client = OllamaChatClient()
323+
await ollama_client.get_response(
324+
messages=chat_history,
325+
options={"allow_multiple_tool_calls": True},
326+
)
327+
328+
mock_chat.assert_called_once()
329+
call_kwargs = mock_chat.call_args.kwargs
330+
assert "allow_multiple_tool_calls" not in call_kwargs
331+
332+
251333
@patch.object(AsyncClient, "chat", new_callable=AsyncMock)
252334
async def test_cmc_reasoning(
253335
mock_chat: AsyncMock,

0 commit comments

Comments (0)