Skip to content

Commit 40de81e

Browse files
authored
Remove reasoning format (#8484)
This isn't a very useful parameter. Current logic: ``` if the model puts `**` in its reasoning, trim it and visualize the header; if it couldn't be trimmed, don't render; if the model doesn't support it, don't render ``` We can simplify this to: ``` if it could be trimmed, visualize the header; if not, don't render ```
1 parent 972b585 commit 40de81e

File tree

15 files changed

+54
-188
lines changed

15 files changed

+54
-188
lines changed

codex-rs/app-server/tests/common/models_cache.rs

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,6 @@ use codex_protocol::openai_models::ConfigShellToolType;
55
use codex_protocol::openai_models::ModelInfo;
66
use codex_protocol::openai_models::ModelPreset;
77
use codex_protocol::openai_models::ModelVisibility;
8-
use codex_protocol::openai_models::ReasoningSummaryFormat;
98
use codex_protocol::openai_models::TruncationPolicyConfig;
109
use serde_json::json;
1110
use std::path::Path;
@@ -35,7 +34,6 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
3534
truncation_policy: TruncationPolicyConfig::bytes(10_000),
3635
supports_parallel_tool_calls: false,
3736
context_window: None,
38-
reasoning_summary_format: ReasoningSummaryFormat::None,
3937
experimental_supported_tools: Vec::new(),
4038
}
4139
}

codex-rs/codex-api/src/endpoint/models.rs

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -227,7 +227,6 @@ mod tests {
227227
"truncation_policy": {"mode": "bytes", "limit": 10_000},
228228
"supports_parallel_tool_calls": false,
229229
"context_window": null,
230-
"reasoning_summary_format": "none",
231230
"experimental_supported_tools": [],
232231
}))
233232
.unwrap(),

codex-rs/codex-api/tests/models_integration.rs

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,6 @@ use codex_protocol::openai_models::ModelVisibility;
1010
use codex_protocol::openai_models::ModelsResponse;
1111
use codex_protocol::openai_models::ReasoningEffort;
1212
use codex_protocol::openai_models::ReasoningEffortPreset;
13-
use codex_protocol::openai_models::ReasoningSummaryFormat;
1413
use codex_protocol::openai_models::TruncationPolicyConfig;
1514
use http::HeaderMap;
1615
use http::Method;
@@ -85,7 +84,6 @@ async fn models_client_hits_models_endpoint() {
8584
truncation_policy: TruncationPolicyConfig::bytes(10_000),
8685
supports_parallel_tool_calls: false,
8786
context_window: None,
88-
reasoning_summary_format: ReasoningSummaryFormat::None,
8987
experimental_supported_tools: Vec::new(),
9088
}],
9189
etag: String::new(),

codex-rs/core/src/config/mod.rs

Lines changed: 0 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,6 @@ use codex_protocol::config_types::SandboxMode;
3838
use codex_protocol::config_types::TrustLevel;
3939
use codex_protocol::config_types::Verbosity;
4040
use codex_protocol::openai_models::ReasoningEffort;
41-
use codex_protocol::openai_models::ReasoningSummaryFormat;
4241
use codex_rmcp_client::OAuthCredentialsStoreMode;
4342
use codex_utils_absolute_path::AbsolutePathBuf;
4443
use codex_utils_absolute_path::AbsolutePathBufGuard;
@@ -303,9 +302,6 @@ pub struct Config {
303302
/// Optional override to force-enable reasoning summaries for the configured model.
304303
pub model_supports_reasoning_summaries: Option<bool>,
305304

306-
/// Optional override to force reasoning summary format for the configured model.
307-
pub model_reasoning_summary_format: Option<ReasoningSummaryFormat>,
308-
309305
/// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`).
310306
pub model_verbosity: Option<Verbosity>,
311307

@@ -786,9 +782,6 @@ pub struct ConfigToml {
786782
/// Override to force-enable reasoning summaries for the configured model.
787783
pub model_supports_reasoning_summaries: Option<bool>,
788784

789-
/// Override to force reasoning summary format for the configured model.
790-
pub model_reasoning_summary_format: Option<ReasoningSummaryFormat>,
791-
792785
/// Base URL for requests to ChatGPT (as opposed to the OpenAI API).
793786
pub chatgpt_base_url: Option<String>,
794787

@@ -1379,7 +1372,6 @@ impl Config {
13791372
.or(cfg.model_reasoning_summary)
13801373
.unwrap_or_default(),
13811374
model_supports_reasoning_summaries: cfg.model_supports_reasoning_summaries,
1382-
model_reasoning_summary_format: cfg.model_reasoning_summary_format.clone(),
13831375
model_verbosity: config_profile.model_verbosity.or(cfg.model_verbosity),
13841376
chatgpt_base_url: config_profile
13851377
.chatgpt_base_url
@@ -3189,7 +3181,6 @@ model_verbosity = "high"
31893181
model_reasoning_effort: Some(ReasoningEffort::High),
31903182
model_reasoning_summary: ReasoningSummary::Detailed,
31913183
model_supports_reasoning_summaries: None,
3192-
model_reasoning_summary_format: None,
31933184
model_verbosity: None,
31943185
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
31953186
base_instructions: None,
@@ -3273,7 +3264,6 @@ model_verbosity = "high"
32733264
model_reasoning_effort: None,
32743265
model_reasoning_summary: ReasoningSummary::default(),
32753266
model_supports_reasoning_summaries: None,
3276-
model_reasoning_summary_format: None,
32773267
model_verbosity: None,
32783268
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
32793269
base_instructions: None,
@@ -3372,7 +3362,6 @@ model_verbosity = "high"
33723362
model_reasoning_effort: None,
33733363
model_reasoning_summary: ReasoningSummary::default(),
33743364
model_supports_reasoning_summaries: None,
3375-
model_reasoning_summary_format: None,
33763365
model_verbosity: None,
33773366
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
33783367
base_instructions: None,
@@ -3457,7 +3446,6 @@ model_verbosity = "high"
34573446
model_reasoning_effort: Some(ReasoningEffort::High),
34583447
model_reasoning_summary: ReasoningSummary::Detailed,
34593448
model_supports_reasoning_summaries: None,
3460-
model_reasoning_summary_format: None,
34613449
model_verbosity: Some(Verbosity::High),
34623450
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
34633451
base_instructions: None,

codex-rs/core/src/models_manager/manager.rs

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -354,7 +354,6 @@ mod tests {
354354
"truncation_policy": {"mode": "bytes", "limit": 10_000},
355355
"supports_parallel_tool_calls": false,
356356
"context_window": null,
357-
"reasoning_summary_format": "none",
358357
"experimental_supported_tools": [],
359358
}))
360359
.expect("valid model")

codex-rs/core/src/models_manager/model_family.rs

Lines changed: 0 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@ use codex_protocol::openai_models::ApplyPatchToolType;
33
use codex_protocol::openai_models::ConfigShellToolType;
44
use codex_protocol::openai_models::ModelInfo;
55
use codex_protocol::openai_models::ReasoningEffort;
6-
use codex_protocol::openai_models::ReasoningSummaryFormat;
76

87
use crate::config::Config;
98
use crate::truncate::TruncationPolicy;
@@ -48,9 +47,6 @@ pub struct ModelFamily {
4847
// The reasoning effort to use for this model family when none is explicitly chosen.
4948
pub default_reasoning_effort: Option<ReasoningEffort>,
5049

51-
// Define if we need a special handling of reasoning summary
52-
pub reasoning_summary_format: ReasoningSummaryFormat,
53-
5450
/// Whether this model supports parallel tool calls when using the
5551
/// Responses API.
5652
pub supports_parallel_tool_calls: bool,
@@ -88,9 +84,6 @@ impl ModelFamily {
8884
if let Some(supports_reasoning_summaries) = config.model_supports_reasoning_summaries {
8985
self.supports_reasoning_summaries = supports_reasoning_summaries;
9086
}
91-
if let Some(reasoning_summary_format) = config.model_reasoning_summary_format.as_ref() {
92-
self.reasoning_summary_format = reasoning_summary_format.clone();
93-
}
9487
if let Some(context_window) = config.model_context_window {
9588
self.context_window = Some(context_window);
9689
}
@@ -128,7 +121,6 @@ impl ModelFamily {
128121
truncation_policy,
129122
supports_parallel_tool_calls,
130123
context_window,
131-
reasoning_summary_format,
132124
experimental_supported_tools,
133125
} = model;
134126

@@ -144,7 +136,6 @@ impl ModelFamily {
144136
self.truncation_policy = truncation_policy.into();
145137
self.supports_parallel_tool_calls = supports_parallel_tool_calls;
146138
self.context_window = context_window;
147-
self.reasoning_summary_format = reasoning_summary_format;
148139
self.experimental_supported_tools = experimental_supported_tools;
149140
}
150141

@@ -175,7 +166,6 @@ macro_rules! model_family {
175166
context_window: Some(CONTEXT_WINDOW_272K),
176167
auto_compact_token_limit: None,
177168
supports_reasoning_summaries: false,
178-
reasoning_summary_format: ReasoningSummaryFormat::None,
179169
supports_parallel_tool_calls: false,
180170
apply_patch_tool_type: None,
181171
base_instructions: BASE_INSTRUCTIONS.to_string(),
@@ -250,7 +240,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
250240
model_family!(
251241
slug, slug,
252242
supports_reasoning_summaries: true,
253-
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
254243
base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(),
255244
experimental_supported_tools: vec![
256245
"grep_files".to_string(),
@@ -270,7 +259,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
270259
model_family!(
271260
slug, slug,
272261
supports_reasoning_summaries: true,
273-
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
274262
base_instructions: GPT_5_2_CODEX_INSTRUCTIONS.to_string(),
275263
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
276264
shell_type: ConfigShellToolType::ShellCommand,
@@ -299,7 +287,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
299287
model_family!(
300288
slug, slug,
301289
supports_reasoning_summaries: true,
302-
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
303290
base_instructions: GPT_5_2_CODEX_INSTRUCTIONS.to_string(),
304291
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
305292
shell_type: ConfigShellToolType::ShellCommand,
@@ -312,7 +299,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
312299
model_family!(
313300
slug, slug,
314301
supports_reasoning_summaries: true,
315-
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
316302
base_instructions: GPT_5_2_CODEX_INSTRUCTIONS.to_string(),
317303
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
318304
shell_type: ConfigShellToolType::ShellCommand,
@@ -325,7 +311,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
325311
model_family!(
326312
slug, slug,
327313
supports_reasoning_summaries: true,
328-
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
329314
base_instructions: GPT_5_1_CODEX_MAX_INSTRUCTIONS.to_string(),
330315
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
331316
shell_type: ConfigShellToolType::ShellCommand,
@@ -341,7 +326,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
341326
model_family!(
342327
slug, slug,
343328
supports_reasoning_summaries: true,
344-
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
345329
base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(),
346330
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
347331
shell_type: ConfigShellToolType::ShellCommand,
@@ -416,7 +400,6 @@ fn derive_default_model_family(model: &str) -> ModelFamily {
416400
context_window: None,
417401
auto_compact_token_limit: None,
418402
supports_reasoning_summaries: false,
419-
reasoning_summary_format: ReasoningSummaryFormat::None,
420403
supports_parallel_tool_calls: false,
421404
apply_patch_tool_type: None,
422405
base_instructions: BASE_INSTRUCTIONS.to_string(),
@@ -460,7 +443,6 @@ mod tests {
460443
truncation_policy: TruncationPolicyConfig::bytes(10_000),
461444
supports_parallel_tool_calls: false,
462445
context_window: None,
463-
reasoning_summary_format: ReasoningSummaryFormat::None,
464446
experimental_supported_tools: Vec::new(),
465447
}
466448
}
@@ -524,7 +506,6 @@ mod tests {
524506
experimental_supported_tools: vec!["local".to_string()],
525507
truncation_policy: TruncationPolicy::Bytes(10_000),
526508
context_window: Some(100),
527-
reasoning_summary_format: ReasoningSummaryFormat::None,
528509
);
529510

530511
let updated = family.with_remote_overrides(vec![ModelInfo {
@@ -549,7 +530,6 @@ mod tests {
549530
truncation_policy: TruncationPolicyConfig::tokens(2_000),
550531
supports_parallel_tool_calls: true,
551532
context_window: Some(400_000),
552-
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
553533
experimental_supported_tools: vec!["alpha".to_string(), "beta".to_string()],
554534
}]);
555535

@@ -568,10 +548,6 @@ mod tests {
568548
assert_eq!(updated.truncation_policy, TruncationPolicy::Tokens(2_000));
569549
assert!(updated.supports_parallel_tool_calls);
570550
assert_eq!(updated.context_window, Some(400_000));
571-
assert_eq!(
572-
updated.reasoning_summary_format,
573-
ReasoningSummaryFormat::Experimental
574-
);
575551
assert_eq!(
576552
updated.experimental_supported_tools,
577553
vec!["alpha".to_string(), "beta".to_string()]

codex-rs/core/tests/responses_headers.rs

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,6 @@ use codex_core::models_manager::manager::ModelsManager;
1414
use codex_otel::otel_manager::OtelManager;
1515
use codex_protocol::ConversationId;
1616
use codex_protocol::config_types::ReasoningSummary;
17-
use codex_protocol::openai_models::ReasoningSummaryFormat;
1817
use codex_protocol::protocol::SessionSource;
1918
use codex_protocol::protocol::SubAgentSource;
2019
use core_test_support::load_default_config_for_test;
@@ -246,7 +245,6 @@ async fn responses_respects_model_family_overrides_from_config() {
246245
config.model_provider_id = provider.name.clone();
247246
config.model_provider = provider.clone();
248247
config.model_supports_reasoning_summaries = Some(true);
249-
config.model_reasoning_summary_format = Some(ReasoningSummaryFormat::Experimental);
250248
config.model_reasoning_summary = ReasoningSummary::Detailed;
251249
let effort = config.model_reasoning_effort;
252250
let summary = config.model_reasoning_summary;

codex-rs/core/tests/suite/remote_models.rs

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,6 @@ use codex_protocol::openai_models::ModelVisibility;
2424
use codex_protocol::openai_models::ModelsResponse;
2525
use codex_protocol::openai_models::ReasoningEffort;
2626
use codex_protocol::openai_models::ReasoningEffortPreset;
27-
use codex_protocol::openai_models::ReasoningSummaryFormat;
2827
use codex_protocol::openai_models::TruncationPolicyConfig;
2928
use codex_protocol::user_input::UserInput;
3029
use core_test_support::load_default_config_for_test;
@@ -83,7 +82,6 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> {
8382
truncation_policy: TruncationPolicyConfig::bytes(10_000),
8483
supports_parallel_tool_calls: false,
8584
context_window: None,
86-
reasoning_summary_format: ReasoningSummaryFormat::None,
8785
experimental_supported_tools: Vec::new(),
8886
};
8987

@@ -222,7 +220,6 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> {
222220
truncation_policy: TruncationPolicyConfig::bytes(10_000),
223221
supports_parallel_tool_calls: false,
224222
context_window: None,
225-
reasoning_summary_format: ReasoningSummaryFormat::None,
226223
experimental_supported_tools: Vec::new(),
227224
};
228225
mount_models_once(
@@ -486,7 +483,6 @@ fn test_remote_model(slug: &str, visibility: ModelVisibility, priority: i32) ->
486483
truncation_policy: TruncationPolicyConfig::bytes(10_000),
487484
supports_parallel_tool_calls: false,
488485
context_window: None,
489-
reasoning_summary_format: ReasoningSummaryFormat::None,
490486
experimental_supported_tools: Vec::new(),
491487
}
492488
}

codex-rs/protocol/src/openai_models.rs

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -125,14 +125,6 @@ pub enum ApplyPatchToolType {
125125
Function,
126126
}
127127

128-
#[derive(Deserialize, Debug, Clone, PartialEq, Eq, Default, Hash, TS, JsonSchema, Serialize)]
129-
#[serde(rename_all = "snake_case")]
130-
pub enum ReasoningSummaryFormat {
131-
#[default]
132-
None,
133-
Experimental,
134-
}
135-
136128
/// Server-provided truncation policy metadata for a model.
137129
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, TS, JsonSchema)]
138130
#[serde(rename_all = "snake_case")]
@@ -188,7 +180,6 @@ pub struct ModelInfo {
188180
pub truncation_policy: TruncationPolicyConfig,
189181
pub supports_parallel_tool_calls: bool,
190182
pub context_window: Option<i64>,
191-
pub reasoning_summary_format: ReasoningSummaryFormat,
192183
pub experimental_supported_tools: Vec<String>,
193184
}
194185

codex-rs/tui/src/chatwidget.rs

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -537,14 +537,11 @@ impl ChatWidget {
537537
}
538538

539539
fn on_agent_reasoning_final(&mut self) {
540-
let reasoning_summary_format = self.get_model_family().reasoning_summary_format;
541540
// At the end of a reasoning block, record transcript-only content.
542541
self.full_reasoning_buffer.push_str(&self.reasoning_buffer);
543542
if !self.full_reasoning_buffer.is_empty() {
544-
let cell = history_cell::new_reasoning_summary_block(
545-
self.full_reasoning_buffer.clone(),
546-
reasoning_summary_format,
547-
);
543+
let cell =
544+
history_cell::new_reasoning_summary_block(self.full_reasoning_buffer.clone());
548545
self.add_boxed_history(cell);
549546
}
550547
self.reasoning_buffer.clear();

0 commit comments

Comments
 (0)