
Commit c122993

Merge branch 'develop' of https://github.com/atasoglu/toolsgen

2 parents: 3d861cb + 9a85ae6

5 files changed: 9 additions, 147 deletions

CHANGELOG.md

Lines changed: 6 additions & 0 deletions

@@ -8,6 +8,12 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.
 
 Nothing yet.
 
+## [0.5.1] - 2025-01-11
+### Removed
+- Removed redundant `create_structured_completion()` function from `core.client` module
+  - Function was unused in the codebase; structured outputs are implemented directly in `judge.py`
+- Cleaned up unused imports and test cases
+
 ## [0.5.0] - 2025-01-11
 ### Added
 - Hugging Face Hub integration for direct dataset uploads

pyproject.toml

Lines changed: 1 addition & 1 deletion

@@ -13,7 +13,7 @@ toolsgen = ["prompts/*.txt"]
 
 [project]
 name = "toolsgen"
-version = "0.5.0"
+version = "0.5.1"
 description = "Generate tool-calling datasets from OpenAI-compatible tool specs"
 readme = "README.md"
 requires-python = ">=3.9"

src/toolsgen/core/__init__.py

Lines changed: 1 addition & 2 deletions

@@ -1,6 +1,6 @@
 """Core dataset generation package."""
 
-from .client import create_openai_client, create_structured_completion
+from .client import create_openai_client
 from .config import GenerationConfig, ModelConfig, RoleBasedModelConfig
 from .generator import generate_dataset
 from .io import append_record_jsonl, load_tool_specs, write_dataset_jsonl
@@ -12,7 +12,6 @@
     "RoleBasedModelConfig",
     # Client
     "create_openai_client",
-    "create_structured_completion",
     # Generator
     "generate_dataset",
     # I/O

src/toolsgen/core/client.py

Lines changed: 0 additions & 47 deletions

@@ -3,11 +3,7 @@
 from __future__ import annotations
 
 import os
-from typing import Any, Dict, List, Optional
-
 from openai import OpenAI
-from pydantic import BaseModel
-
 from .config import ModelConfig
 
 
@@ -40,46 +36,3 @@ def create_openai_client(model_config: ModelConfig) -> OpenAI:
     params.update(model_config.openai_params)
 
     return OpenAI(**params)
-
-
-def create_structured_completion(
-    client: OpenAI,
-    model: str,
-    messages: List[Dict[str, Any]],
-    response_model: type[BaseModel],
-    temperature: float = 0.7,
-    max_tokens: Optional[int] = None,
-) -> BaseModel:
-    """Create a chat completion with structured output.
-
-    Args:
-        client: OpenAI client instance.
-        model: Model name to use.
-        messages: List of message dictionaries.
-        response_model: Pydantic model class for response structure.
-        temperature: Sampling temperature.
-        max_tokens: Maximum tokens to generate.
-
-    Returns:
-        Instance of response_model populated with API response.
-    """
-    response = client.chat.completions.create(
-        model=model,
-        messages=messages,
-        temperature=temperature,
-        max_tokens=max_tokens,
-        response_format={
-            "type": "json_schema",
-            "json_schema": {
-                "name": response_model.__name__,
-                "schema": response_model.model_json_schema(),
-                "strict": True,
-            },
-        },
-    )
-
-    content = response.choices[0].message.content
-    if not content:
-        raise ValueError("LLM returned empty content")
-
-    return response_model.model_validate_json(content)
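For callers that still need structured output, the pattern the deleted helper wrapped can be inlined at the call site. The sketch below mirrors the removed function body; the Verdict model, prompt, and model name are hypothetical stand-ins, and the actual implementation the changelog points to lives in judge.py, which is not part of this diff.

from openai import OpenAI
from pydantic import BaseModel


class Verdict(BaseModel):
    # Hypothetical response model, for illustration only.
    score: int
    reason: str


client = OpenAI()  # reads OPENAI_API_KEY from the environment
response = client.chat.completions.create(
    model="gpt-4o-mini",  # assumption: any model that supports JSON-schema outputs
    messages=[{"role": "user", "content": "Rate this tool call from 1-5."}],
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": Verdict.__name__,
            "schema": Verdict.model_json_schema(),
            "strict": True,
        },
    },
)

content = response.choices[0].message.content
if not content:
    raise ValueError("LLM returned empty content")
verdict = Verdict.model_validate_json(content)  # parse and validate the JSON reply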

tests/test_client.py

Lines changed: 1 addition & 97 deletions

@@ -2,12 +2,9 @@
 
 from __future__ import annotations
 
-from unittest.mock import MagicMock, patch
-
 import pytest
-from pydantic import BaseModel
 
-from toolsgen.core.client import create_openai_client, create_structured_completion
+from toolsgen.core.client import create_openai_client
 from toolsgen.core.config import ModelConfig
 
 
@@ -70,96 +67,3 @@ def test_create_openai_client_with_openai_params(
 
     assert client.timeout == 120.0
     assert client.max_retries == 3
-
-
-@patch("toolsgen.core.client.OpenAI")
-def test_create_structured_completion_success(mock_openai_class: MagicMock) -> None:
-    """Test successful structured completion."""
-
-    # Define a test response model
-    class TestResponse(BaseModel):
-        message: str
-        count: int
-
-    # Mock OpenAI client and response
-    mock_client = MagicMock()
-    mock_response = MagicMock()
-    mock_response.choices[0].message.content = '{"message": "hello", "count": 42}'
-    mock_client.chat.completions.create.return_value = mock_response
-
-    messages = [{"role": "user", "content": "Test"}]
-    result = create_structured_completion(
-        client=mock_client,
-        model="gpt-4",
-        messages=messages,
-        response_model=TestResponse,
-        temperature=0.7,
-        max_tokens=100,
-    )
-
-    assert isinstance(result, TestResponse)
-    assert result.message == "hello"
-    assert result.count == 42
-
-    # Verify the API was called correctly
-    mock_client.chat.completions.create.assert_called_once()
-    call_kwargs = mock_client.chat.completions.create.call_args.kwargs
-    assert call_kwargs["model"] == "gpt-4"
-    assert call_kwargs["temperature"] == 0.7
-    assert call_kwargs["max_tokens"] == 100
-    assert call_kwargs["response_format"]["type"] == "json_schema"
-
-
-@patch("toolsgen.core.client.OpenAI")
-def test_create_structured_completion_empty_content(
-    mock_openai_class: MagicMock,
-) -> None:
-    """Test structured completion fails with empty content."""
-
-    class TestResponse(BaseModel):
-        message: str
-
-    mock_client = MagicMock()
-    mock_response = MagicMock()
-    mock_response.choices[0].message.content = None
-    mock_client.chat.completions.create.return_value = mock_response
-
-    messages = [{"role": "user", "content": "Test"}]
-
-    with pytest.raises(ValueError, match="LLM returned empty content"):
-        create_structured_completion(
-            client=mock_client,
-            model="gpt-4",
-            messages=messages,
-            response_model=TestResponse,
-        )
-
-
-@patch("toolsgen.core.client.OpenAI")
-def test_create_structured_completion_with_schema(mock_openai_class: MagicMock) -> None:
-    """Test that structured completion uses correct JSON schema."""
-
-    class TestResponse(BaseModel):
-        value: str
-
-    mock_client = MagicMock()
-    mock_response = MagicMock()
-    mock_response.choices[0].message.content = '{"value": "test"}'
-    mock_client.chat.completions.create.return_value = mock_response
-
-    messages = [{"role": "user", "content": "Test"}]
-    create_structured_completion(
-        client=mock_client,
-        model="gpt-4",
-        messages=messages,
-        response_model=TestResponse,
-    )
-
-    # Verify schema was passed correctly
-    call_kwargs = mock_client.chat.completions.create.call_args.kwargs
-    assert "response_format" in call_kwargs
-    response_format = call_kwargs["response_format"]
-    assert response_format["type"] == "json_schema"
-    assert response_format["json_schema"]["name"] == "TestResponse"
-    assert response_format["json_schema"]["strict"] is True
-    assert "schema" in response_format["json_schema"]