 
 from __future__ import annotations
 
-from unittest.mock import MagicMock, patch
-
 import pytest
-from pydantic import BaseModel
 
-from toolsgen.core.client import create_openai_client, create_structured_completion
+from toolsgen.core.client import create_openai_client
 from toolsgen.core.config import ModelConfig
 
 
@@ -70,96 +67,3 @@ def test_create_openai_client_with_openai_params( |
 
     assert client.timeout == 120.0
     assert client.max_retries == 3
-
-
-@patch("toolsgen.core.client.OpenAI")
-def test_create_structured_completion_success(mock_openai_class: MagicMock) -> None:
-    """Test successful structured completion."""
-
-    # Define a test response model
-    class TestResponse(BaseModel):
-        message: str
-        count: int
-
-    # Mock OpenAI client and response
-    mock_client = MagicMock()
-    mock_response = MagicMock()
-    mock_response.choices[0].message.content = '{"message": "hello", "count": 42}'
-    mock_client.chat.completions.create.return_value = mock_response
-
-    messages = [{"role": "user", "content": "Test"}]
-    result = create_structured_completion(
-        client=mock_client,
-        model="gpt-4",
-        messages=messages,
-        response_model=TestResponse,
-        temperature=0.7,
-        max_tokens=100,
-    )
-
-    assert isinstance(result, TestResponse)
-    assert result.message == "hello"
-    assert result.count == 42
-
-    # Verify the API was called correctly
-    mock_client.chat.completions.create.assert_called_once()
-    call_kwargs = mock_client.chat.completions.create.call_args.kwargs
-    assert call_kwargs["model"] == "gpt-4"
-    assert call_kwargs["temperature"] == 0.7
-    assert call_kwargs["max_tokens"] == 100
-    assert call_kwargs["response_format"]["type"] == "json_schema"
-
-
-@patch("toolsgen.core.client.OpenAI")
-def test_create_structured_completion_empty_content(
-    mock_openai_class: MagicMock,
-) -> None:
-    """Test structured completion fails with empty content."""
-
-    class TestResponse(BaseModel):
-        message: str
-
-    mock_client = MagicMock()
-    mock_response = MagicMock()
-    mock_response.choices[0].message.content = None
-    mock_client.chat.completions.create.return_value = mock_response
-
-    messages = [{"role": "user", "content": "Test"}]
-
-    with pytest.raises(ValueError, match="LLM returned empty content"):
-        create_structured_completion(
-            client=mock_client,
-            model="gpt-4",
-            messages=messages,
-            response_model=TestResponse,
-        )
-
-
-@patch("toolsgen.core.client.OpenAI")
-def test_create_structured_completion_with_schema(mock_openai_class: MagicMock) -> None:
-    """Test that structured completion uses correct JSON schema."""
-
-    class TestResponse(BaseModel):
-        value: str
-
-    mock_client = MagicMock()
-    mock_response = MagicMock()
-    mock_response.choices[0].message.content = '{"value": "test"}'
-    mock_client.chat.completions.create.return_value = mock_response
-
-    messages = [{"role": "user", "content": "Test"}]
-    create_structured_completion(
-        client=mock_client,
-        model="gpt-4",
-        messages=messages,
-        response_model=TestResponse,
-    )
-
-    # Verify schema was passed correctly
-    call_kwargs = mock_client.chat.completions.create.call_args.kwargs
-    assert "response_format" in call_kwargs
-    response_format = call_kwargs["response_format"]
-    assert response_format["type"] == "json_schema"
-    assert response_format["json_schema"]["name"] == "TestResponse"
-    assert response_format["json_schema"]["strict"] is True
-    assert "schema" in response_format["json_schema"]