diff --git a/openhands-sdk/openhands/sdk/llm/options/chat_options.py b/openhands-sdk/openhands/sdk/llm/options/chat_options.py
index 7f29acd8db..bb3c8fef5f 100644
--- a/openhands-sdk/openhands/sdk/llm/options/chat_options.py
+++ b/openhands-sdk/openhands/sdk/llm/options/chat_options.py
@@ -39,9 +39,14 @@ def select_chat_options(
         if llm.reasoning_effort is not None:
             out["reasoning_effort"] = llm.reasoning_effort
-        # All reasoning models ignore temp/top_p
-        out.pop("temperature", None)
-        out.pop("top_p", None)
+        # OpenAI reasoning models (o1, o3) ignore temp/top_p
+        # Gemini models DO respect temperature, so we only pop for OpenAI
+        model_lower = llm.model.lower()
+        # Normalize to basename so provider-prefixed IDs like "openai/o1" are handled
+        model_name = model_lower.split("/")[-1]
+        if model_name.startswith(("o1", "o3")):
+            out.pop("temperature", None)
+            out.pop("top_p", None)
 
     # Gemini 2.5-pro default to low if not set
     if "gemini-2.5-pro" in llm.model: