diff --git a/litellm/llms/ollama/chat/transformation.py b/litellm/llms/ollama/chat/transformation.py
index 3b755e79330c..b740eb122fda 100644
--- a/litellm/llms/ollama/chat/transformation.py
+++ b/litellm/llms/ollama/chat/transformation.py
@@ -184,9 +184,12 @@ def map_openai_params(
         ):
             if value.get("json_schema") and value["json_schema"].get("schema"):
                 optional_params["format"] = value["json_schema"]["schema"]
-        ### FUNCTION CALLING LOGIC ###
         if param == "reasoning_effort" and value is not None:
-            optional_params["think"] = True
+            if model.startswith("gpt-oss"):
+                optional_params["think"] = value
+            else:
+                optional_params["think"] = True
+        ### FUNCTION CALLING LOGIC ###
         if param == "tools":
             ## CHECK IF MODEL SUPPORTS TOOL CALLING ##
             try:
@@ -281,6 +284,7 @@ def transform_request(
         stream = optional_params.pop("stream", False)
         format = optional_params.pop("format", None)
         keep_alive = optional_params.pop("keep_alive", None)
+        think = optional_params.pop("think", None)
         function_name = optional_params.pop("function_name", None)
         litellm_params["function_name"] = function_name
         tools = optional_params.pop("tools", None)
@@ -344,6 +348,8 @@ def transform_request(
             data["tools"] = tools
         if keep_alive is not None:
             data["keep_alive"] = keep_alive
+        if think is not None:
+            data["think"] = think
 
         return data
 
diff --git a/litellm/llms/ollama/completion/transformation.py b/litellm/llms/ollama/completion/transformation.py
index 981a987ec915..b476e5c8a631 100644
--- a/litellm/llms/ollama/completion/transformation.py
+++ b/litellm/llms/ollama/completion/transformation.py
@@ -180,7 +180,10 @@ def map_openai_params(
             elif param == "stop":
                 optional_params["stop"] = value
             elif param == "reasoning_effort" and value is not None:
-                optional_params["think"] = True
+                if model.startswith("gpt-oss"):
+                    optional_params["think"] = value
+                else:
+                    optional_params["think"] = True
             elif param == "response_format" and isinstance(value, dict):
                 if value["type"] == "json_object":
                     optional_params["format"] = "json"
@@ -412,6 +415,7 @@ def transform_request(
         stream = optional_params.pop("stream", False)
         format = optional_params.pop("format", None)
         images = optional_params.pop("images", None)
+        think = optional_params.pop("think", None)
         data = {
             "model": model,
             "prompt": ollama_prompt,
@@ -425,6 +429,8 @@ def transform_request(
             data["images"] = [
                 _convert_image(convert_to_ollama_image(image)) for image in images
             ]
+        if think is not None:
+            data["think"] = think
 
         return data
 
diff --git a/litellm/llms/ollama_chat.py b/litellm/llms/ollama_chat.py
index 082312d28f21..e186636de99e 100644
--- a/litellm/llms/ollama_chat.py
+++ b/litellm/llms/ollama_chat.py
@@ -59,6 +59,7 @@ def get_ollama_response(  # noqa: PLR0915
     stream = optional_params.pop("stream", False)
     format = optional_params.pop("format", None)
     keep_alive = optional_params.pop("keep_alive", None)
+    think = optional_params.pop("think", None)
     function_name = optional_params.pop("function_name", None)
     tools = optional_params.pop("tools", None)
 
@@ -98,6 +99,8 @@ def get_ollama_response(  # noqa: PLR0915
             data["tools"] = tools
         if keep_alive is not None:
             data["keep_alive"] = keep_alive
+        if think is not None:
+            data["think"] = think
     ## LOGGING
     logging_obj.pre_call(
         input=None,