Skip to content

Commit 59f7c46

Browse files
authored
Merge pull request #24 from nuhatech/dev
Release v0.1.23: Introduced `omit_temperature` parameter in `OpenAILLM`
2 parents 3086b24 + ebb30a0 commit 59f7c46

File tree

5 files changed

+33
-6
lines changed

5 files changed

+33
-6
lines changed

CHANGELOG.md

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
77

88
## [Unreleased]
99

10+
## [0.1.23] - 2026-02-11
11+
12+
### Added
13+
- **`omit_temperature` parameter for `OpenAILLM`**: Reasoning models (o1, o3, gpt-5-nano, etc.) only support the default temperature value. Set `omit_temperature=True` to omit the `temperature` parameter from API calls entirely.
14+
- **`omit_temperature` parameter for `AgenticQueryPipeline`**: Propagated to the internally created `OpenAILLM` when no custom LLM is provided.
15+
1016
## [0.1.22] - 2026-02-11
1117

1218
### Added
@@ -270,7 +276,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
270276
- Example scripts for common use cases
271277
- API reference documentation
272278

273-
[Unreleased]: https://github.com/nuhatech/maktaba/compare/v0.1.22...HEAD
279+
[Unreleased]: https://github.com/nuhatech/maktaba/compare/v0.1.23...HEAD
280+
[0.1.23]: https://github.com/nuhatech/maktaba/compare/v0.1.22...v0.1.23
274281
[0.1.22]: https://github.com/nuhatech/maktaba/compare/v0.1.21...v0.1.22
275282
[0.1.21]: https://github.com/nuhatech/maktaba/compare/v0.1.20...v0.1.21
276283
[0.1.20]: https://github.com/nuhatech/maktaba/compare/v0.1.19...v0.1.20

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "maktaba"
3-
version = "0.1.22"
3+
version = "0.1.23"
44
description = "Production-ready RAG infrastructure for multilingual applications"
55
authors = [
66
{name = "NuhaTech", email = "contact@nuhatech.com"}

src/maktaba/llm/openai.py

Lines changed: 18 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@ def __init__(
3737
timeout_s: float = 30.0,
3838
prompts: Optional[AgenticPrompts] = None,
3939
use_max_completion_tokens: bool = False,
40+
omit_temperature: bool = False,
4041
) -> None:
4142
"""
4243
Initialize OpenAI LLM.
@@ -50,13 +51,17 @@ def __init__(
5051
use_max_completion_tokens: Use ``max_completion_tokens`` instead of
5152
``max_tokens`` in API calls. Required for newer OpenAI models
5253
(o1, o3, gpt-5-nano, etc.) that no longer accept ``max_tokens``.
54+
omit_temperature: Omit the ``temperature`` parameter from API calls.
55+
Required for reasoning models (o1, o3, gpt-5-nano, etc.) that
56+
only support the default temperature value.
5357
"""
5458
self.api_key = api_key
5559
self.model = model
5660
self.temperature = temperature
5761
self.timeout_s = timeout_s
5862
self.prompts = prompts or default_prompts()
5963
self.use_max_completion_tokens = use_max_completion_tokens
64+
self.omit_temperature = omit_temperature
6065
self._logger = get_logger("maktaba.llm.openai")
6166

6267
# Lazy client initialization
@@ -75,6 +80,16 @@ def _get_client(self) -> Optional[Any]:
7580
self._client = self._OpenAI(api_key=self.api_key, timeout=self.timeout_s)
7681
return self._client
7782

83+
def _temperature_kwargs(self, temperature: float) -> Dict[str, Any]:
84+
"""Build the temperature keyword argument for the OpenAI API.
85+
86+
Returns an empty dict when :attr:`omit_temperature` is ``True`` so the
87+
parameter is omitted entirely (reasoning models reject custom values).
88+
"""
89+
if self.omit_temperature:
90+
return {}
91+
return {"temperature": temperature}
92+
7893
def _token_limit_kwargs(self, max_tokens: int | None) -> Dict[str, Any]:
7994
"""Build the token-limit keyword argument for the OpenAI API.
8095
@@ -118,7 +133,7 @@ async def complete_text(
118133
{"role": "system", "content": system},
119134
{"role": "user", "content": prompt},
120135
],
121-
temperature=temperature,
136+
**self._temperature_kwargs(temperature),
122137
**self._token_limit_kwargs(max_tokens),
123138
)
124139

@@ -157,7 +172,7 @@ async def complete_json(
157172
{"role": "system", "content": system},
158173
{"role": "user", "content": prompt},
159174
],
160-
temperature=temperature,
175+
**self._temperature_kwargs(temperature),
161176
**self._token_limit_kwargs(max_tokens),
162177
response_format={"type": "json_object"},
163178
)
@@ -198,7 +213,7 @@ async def stream_text(
198213
{"role": "system", "content": system},
199214
{"role": "user", "content": prompt},
200215
],
201-
temperature=temperature,
216+
**self._temperature_kwargs(temperature),
202217
**self._token_limit_kwargs(max_tokens),
203218
stream=True,
204219
)

src/maktaba/pipeline/agentic.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,7 @@ def __init__(
4444
prompts: Optional[AgenticPrompts] = None,
4545
namespace: Optional[str] = None,
4646
use_max_completion_tokens: bool = False,
47+
omit_temperature: bool = False,
4748
) -> None:
4849
"""
4950
Initialize agentic pipeline.
@@ -61,6 +62,9 @@ def __init__(
6162
use_max_completion_tokens: Use ``max_completion_tokens`` instead of
6263
``max_tokens`` in OpenAI API calls. Required for newer models
6364
(o1, o3, gpt-5-nano, etc.). Only applies when *llm* is not provided.
65+
omit_temperature: Omit the ``temperature`` parameter from OpenAI API
66+
calls. Required for reasoning models that only support the default
67+
value. Only applies when *llm* is not provided.
6468
6569
Example:
6670
# Use default prompts
@@ -94,6 +98,7 @@ def __init__(
9498
model=llm_model,
9599
prompts=prompts,
96100
use_max_completion_tokens=use_max_completion_tokens,
101+
omit_temperature=omit_temperature,
97102
)
98103

99104
async def _execute_single_query(

uv.lock

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments
 (0)