Skip to content

Commit 8a424fc

Browse files
authored
feat: add new config option: model_supports_reasoning_summaries (#1524)
As noted in the updated docs, this makes it so that you can set: ```toml model_supports_reasoning_summaries = true ``` as a way of overriding the existing heuristic for when to set the `reasoning` field on a sampling request: https://github.com/openai/codex/blob/341c091c5b09dc706ab5c7d629516e6ef5aaf902/codex-rs/core/src/client_common.rs#L152-L166
1 parent 341c091 commit 8a424fc

File tree

7 files changed

+49
-16
lines changed

7 files changed

+49
-16
lines changed

codex-rs/config.md

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -206,6 +206,14 @@ To disable reasoning summaries, set `model_reasoning_summary` to `"none"` in you
206206
model_reasoning_summary = "none" # disable reasoning summaries
207207
```
208208

209+
## model_supports_reasoning_summaries
210+
211+
By default, `reasoning` is only set on requests to OpenAI models that are known to support it. To force `reasoning` to be set on requests to the current model, set the following in `config.toml`:
212+
213+
```toml
214+
model_supports_reasoning_summaries = true
215+
```
216+
209217
## sandbox_mode
210218

211219
Codex executes model-generated shell commands inside an OS-level sandbox.

codex-rs/core/src/client.rs

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@ use crate::client_common::ResponseEvent;
2323
use crate::client_common::ResponseStream;
2424
use crate::client_common::ResponsesApiRequest;
2525
use crate::client_common::create_reasoning_param_for_request;
26+
use crate::config::Config;
2627
use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
2728
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
2829
use crate::error::CodexErr;
@@ -36,9 +37,11 @@ use crate::models::ResponseItem;
3637
use crate::openai_tools::create_tools_json_for_responses_api;
3738
use crate::protocol::TokenUsage;
3839
use crate::util::backoff;
40+
use std::sync::Arc;
3941

4042
#[derive(Clone)]
4143
pub struct ModelClient {
44+
config: Arc<Config>,
4245
model: String,
4346
client: reqwest::Client,
4447
provider: ModelProviderInfo,
@@ -48,12 +51,14 @@ pub struct ModelClient {
4851

4952
impl ModelClient {
5053
pub fn new(
51-
model: impl ToString,
54+
config: Arc<Config>,
5255
provider: ModelProviderInfo,
5356
effort: ReasoningEffortConfig,
5457
summary: ReasoningSummaryConfig,
5558
) -> Self {
59+
let model = config.model.clone();
5660
Self {
61+
config,
5762
model: model.to_string(),
5863
client: reqwest::Client::new(),
5964
provider,
@@ -108,7 +113,7 @@ impl ModelClient {
108113

109114
let full_instructions = prompt.get_full_instructions(&self.model);
110115
let tools_json = create_tools_json_for_responses_api(prompt, &self.model)?;
111-
let reasoning = create_reasoning_param_for_request(&self.model, self.effort, self.summary);
116+
let reasoning = create_reasoning_param_for_request(&self.config, self.effort, self.summary);
112117
let payload = ResponsesApiRequest {
113118
model: &self.model,
114119
instructions: &full_instructions,

codex-rs/core/src/client_common.rs

Lines changed: 17 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -131,15 +131,16 @@ pub(crate) struct ResponsesApiRequest<'a> {
131131
pub(crate) stream: bool,
132132
}
133133

134+
use crate::config::Config;
135+
134136
pub(crate) fn create_reasoning_param_for_request(
135-
model: &str,
137+
config: &Config,
136138
effort: ReasoningEffortConfig,
137139
summary: ReasoningSummaryConfig,
138140
) -> Option<Reasoning> {
139-
let effort: Option<OpenAiReasoningEffort> = effort.into();
140-
let effort = effort?;
141-
142-
if model_supports_reasoning_summaries(model) {
141+
if model_supports_reasoning_summaries(config) {
142+
let effort: Option<OpenAiReasoningEffort> = effort.into();
143+
let effort = effort?;
143144
Some(Reasoning {
144145
effort,
145146
summary: summary.into(),
@@ -149,19 +150,24 @@ pub(crate) fn create_reasoning_param_for_request(
149150
}
150151
}
151152

152-
pub fn model_supports_reasoning_summaries(model: &str) -> bool {
153-
// Currently, we hardcode this rule to decide whether enable reasoning.
153+
pub fn model_supports_reasoning_summaries(config: &Config) -> bool {
154+
// Currently, we hardcode this rule to decide whether to enable reasoning.
154155
// We expect reasoning to apply only to OpenAI models, but we do not want
155156
// users to have to mess with their config to disable reasoning for models
156157
// that do not support it, such as `gpt-4.1`.
157158
//
158159
// Though if a user is using Codex with non-OpenAI models that, say, happen
159-
// to start with "o", then they can set `model_reasoning_effort = "none` in
160+
// to start with "o", then they can set `model_reasoning_effort = "none"` in
160161
// config.toml to disable reasoning.
161162
//
162-
// Ultimately, this should also be configurable in config.toml, but we
163-
// need to have defaults that "just work." Perhaps we could have a
164-
// "reasoning models pattern" as part of ModelProviderInfo?
163+
// Conversely, if a user has a non-OpenAI provider that supports reasoning,
164+
// they can set the top-level `model_supports_reasoning_summaries = true`
165+
// config option to enable reasoning.
166+
if config.model_supports_reasoning_summaries {
167+
return true;
168+
}
169+
170+
let model = &config.model;
165171
model.starts_with("o") || model.starts_with("codex")
166172
}
167173

codex-rs/core/src/codex.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -586,7 +586,7 @@ async fn submission_loop(
586586
}
587587

588588
let client = ModelClient::new(
589-
model.clone(),
589+
config.clone(),
590590
provider.clone(),
591591
model_reasoning_effort,
592592
model_reasoning_summary,

codex-rs/core/src/config.rs

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -130,6 +130,10 @@ pub struct Config {
130130
/// If not "none", the value to use for `reasoning.summary` when making a
131131
/// request using the Responses API.
132132
pub model_reasoning_summary: ReasoningSummary,
133+
134+
/// When set to `true`, overrides the default heuristic and forces
135+
/// `model_supports_reasoning_summaries()` to return `true`.
136+
pub model_supports_reasoning_summaries: bool,
133137
}
134138

135139
impl Config {
@@ -308,6 +312,9 @@ pub struct ConfigToml {
308312

309313
pub model_reasoning_effort: Option<ReasoningEffort>,
310314
pub model_reasoning_summary: Option<ReasoningSummary>,
315+
316+
/// Override to force-enable reasoning summaries for the configured model.
317+
pub model_supports_reasoning_summaries: Option<bool>,
311318
}
312319

313320
impl ConfigToml {
@@ -472,6 +479,10 @@ impl Config {
472479
.model_reasoning_summary
473480
.or(cfg.model_reasoning_summary)
474481
.unwrap_or_default(),
482+
483+
model_supports_reasoning_summaries: cfg
484+
.model_supports_reasoning_summaries
485+
.unwrap_or(false),
475486
};
476487
Ok(config)
477488
}
@@ -776,6 +787,7 @@ disable_response_storage = true
776787
hide_agent_reasoning: false,
777788
model_reasoning_effort: ReasoningEffort::High,
778789
model_reasoning_summary: ReasoningSummary::Detailed,
790+
model_supports_reasoning_summaries: false,
779791
},
780792
o3_profile_config
781793
);
@@ -820,6 +832,7 @@ disable_response_storage = true
820832
hide_agent_reasoning: false,
821833
model_reasoning_effort: ReasoningEffort::default(),
822834
model_reasoning_summary: ReasoningSummary::default(),
835+
model_supports_reasoning_summaries: false,
823836
};
824837

825838
assert_eq!(expected_gpt3_profile_config, gpt3_profile_config);
@@ -879,6 +892,7 @@ disable_response_storage = true
879892
hide_agent_reasoning: false,
880893
model_reasoning_effort: ReasoningEffort::default(),
881894
model_reasoning_summary: ReasoningSummary::default(),
895+
model_supports_reasoning_summaries: false,
882896
};
883897

884898
assert_eq!(expected_zdr_profile_config, zdr_profile_config);

codex-rs/exec/src/event_processor.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -139,7 +139,7 @@ impl EventProcessor {
139139
("sandbox", summarize_sandbox_policy(&config.sandbox_policy)),
140140
];
141141
if config.model_provider.wire_api == WireApi::Responses
142-
&& model_supports_reasoning_summaries(&config.model)
142+
&& model_supports_reasoning_summaries(config)
143143
{
144144
entries.push((
145145
"reasoning effort",

codex-rs/tui/src/history_cell.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -159,7 +159,7 @@ impl HistoryCell {
159159
("sandbox", summarize_sandbox_policy(&config.sandbox_policy)),
160160
];
161161
if config.model_provider.wire_api == WireApi::Responses
162-
&& model_supports_reasoning_summaries(&config.model)
162+
&& model_supports_reasoning_summaries(config)
163163
{
164164
entries.push((
165165
"reasoning effort",

0 commit comments

Comments
 (0)