|
10 | 10 | import pytest
|
11 | 11 |
|
12 | 12 | sys.path.insert(
|
13 |
| - 0, os.path.abspath("../../../../..") |
14 |
| -) # Adds the parent directory to the system path |
| 13 | + 0, os.path.abspath("../../..") |
| 14 | +) # Adds the parent directory to the system-path |
15 | 15 | import litellm
|
16 |
| -from litellm.completion_extras.litellm_responses_transformation.transformation import ( |
17 |
| - LiteLLMResponsesTransformationHandler, |
18 |
| - OpenAiResponsesToChatCompletionStreamIterator, |
19 |
| -) |
20 |
| -from litellm.types.llms.openai import Reasoning |
21 |
| -from litellm.types.utils import Delta, ModelResponseStream, StreamingChoices |
22 |
| - |
23 |
| - |
24 |
class TestLiteLLMResponsesTransformation:
    """Tests for the Responses-API <-> chat-completion transformation handler."""

    def setup_method(self):
        # Fresh handler and a mocked logging object for every test.
        self.handler = LiteLLMResponsesTransformationHandler()
        self.model = "responses-api-model"
        self.logging_obj = MagicMock()

    def _transform(self, optional_params):
        """Run transform_request with the shared fixtures and the given optional_params."""
        return self.handler.transform_request(
            model=self.model,
            messages=[],
            optional_params=optional_params,
            litellm_params={},
            headers={},
            litellm_logging_obj=self.logging_obj,
        )

    def test_transform_request_reasoning_effort(self):
        """
        Test that reasoning_effort is mapped to reasoning parameter correctly.
        """
        # (optional_params, expected "reasoning" value) pairs covering:
        # high / medium / low, key absent, and an explicit None.
        cases = [
            ({"reasoning_effort": "high"}, Reasoning(effort="high", summary="detailed")),
            ({"reasoning_effort": "medium"}, Reasoning(effort="medium", summary="auto")),
            ({"reasoning_effort": "low"}, Reasoning(effort="low", summary="auto")),
            ({}, Reasoning(summary="auto")),
            ({"reasoning_effort": None}, Reasoning(summary="auto")),
        ]
        for optional_params, expected in cases:
            result = self._transform(optional_params)
            assert "reasoning" in result
            assert result["reasoning"] == expected

    def test_convert_chat_completion_messages_to_responses_api_image_input(self):
        """
        Test that chat completion messages with image inputs are converted correctly.
        """
        user_content = "What's in this image?"
        user_image = "https://w7.pngwing.com/pngs/666/274/png-transparent-image-pictures-icon-photo-thumbnail.png"

        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": user_content},
                    {"type": "image_url", "image_url": {"url": user_image}},
                ],
            },
        ]

        converted, _ = self.handler.convert_chat_completion_messages_to_responses_api(messages)

        # Both the text and the image URL must survive the conversion somewhere
        # in the serialized payload.
        serialized = json.dumps(converted)
        assert user_content in serialized
        assert user_image in serialized

        print("response: ", converted)
        # The image part is flattened to a bare URL string in the second slot.
        assert converted[0]["content"][1]["image_url"] == user_image

    def test_openai_responses_chunk_parser_reasoning_summary(self):
        """
        Test that OpenAI responses chunk parser handles reasoning summary correctly.
        """
        iterator = OpenAiResponsesToChatCompletionStreamIterator(
            streaming_response=None, sync_stream=True
        )

        chunk = {
            "delta": "**Compar",
            "item_id": "rs_686d544208748198b6912e27b7c299c00e24bd875d35bade",
            "output_index": 0,
            "sequence_number": 4,
            "summary_index": 0,
            "type": "response.reasoning_summary_text.delta",
        }

        parsed = iterator.chunk_parser(chunk)

        # One streaming choice whose delta carries the text as reasoning_content,
        # with every other delta field left unset.
        assert isinstance(parsed, ModelResponseStream)
        assert len(parsed.choices) == 1
        choice = parsed.choices[0]
        assert isinstance(choice, StreamingChoices)
        assert choice.index == 0
        delta = choice.delta
        assert isinstance(delta, Delta)
        assert delta.content is None
        assert delta.reasoning_content == "**Compar"
        assert delta.tool_calls is None
        assert delta.function_call is None
| 16 | + |
| 17 | + |
def test_convert_chat_completion_messages_to_responses_api_image_input():
    """A user message mixing text and an image_url part converts losslessly."""
    from litellm.completion_extras.litellm_responses_transformation.transformation import (
        LiteLLMResponsesTransformationHandler,
    )

    handler = LiteLLMResponsesTransformationHandler()

    prompt_text = "What's in this image?"
    image_link = "https://w7.pngwing.com/pngs/666/274/png-transparent-image-pictures-icon-photo-thumbnail.png"

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": prompt_text},
                {"type": "image_url", "image_url": {"url": image_link}},
            ],
        },
    ]

    converted, _ = handler.convert_chat_completion_messages_to_responses_api(messages)

    # Both pieces of the user content must appear somewhere in the converted payload.
    serialized = json.dumps(converted)
    assert prompt_text in serialized
    assert image_link in serialized

    print("response: ", converted)
    # The image part is flattened to a bare URL string in the second content slot.
    assert converted[0]["content"][1]["image_url"] == image_link
| 53 | + |
| 54 | + |
def test_openai_responses_chunk_parser_reasoning_summary():
    """A reasoning_summary_text.delta chunk becomes a Delta with reasoning_content."""
    from litellm.completion_extras.litellm_responses_transformation.transformation import (
        OpenAiResponsesToChatCompletionStreamIterator,
    )
    from litellm.types.utils import Delta, ModelResponseStream, StreamingChoices

    parser = OpenAiResponsesToChatCompletionStreamIterator(
        streaming_response=None, sync_stream=True
    )

    reasoning_chunk = {
        "delta": "**Compar",
        "item_id": "rs_686d544208748198b6912e27b7c299c00e24bd875d35bade",
        "output_index": 0,
        "sequence_number": 4,
        "summary_index": 0,
        "type": "response.reasoning_summary_text.delta",
    }

    parsed = parser.chunk_parser(reasoning_chunk)

    # Exactly one streaming choice at index 0.
    assert isinstance(parsed, ModelResponseStream)
    assert len(parsed.choices) == 1
    only_choice = parsed.choices[0]
    assert isinstance(only_choice, StreamingChoices)
    assert only_choice.index == 0

    # The delta text lands in reasoning_content; every other field stays unset.
    streamed_delta = only_choice.delta
    assert isinstance(streamed_delta, Delta)
    assert streamed_delta.content is None
    assert streamed_delta.reasoning_content == "**Compar"
    assert streamed_delta.tool_calls is None
    assert streamed_delta.function_call is None
0 commit comments