Skip to content

Commit a35c8d6

Browse files
committed
clippy
1 parent fbe6ac2 commit a35c8d6

File tree

8 files changed

+80
-21
lines changed

8 files changed

+80
-21
lines changed

codex-rs/app-server/tests/common/models_cache.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
1515
slug: preset.id.clone(),
1616
display_name: preset.display_name.clone(),
1717
description: Some(preset.description.clone()),
18-
default_reasoning_level: preset.default_reasoning_effort,
18+
default_reasoning_level: Some(preset.default_reasoning_effort),
1919
supported_reasoning_levels: preset.supported_reasoning_efforts.clone(),
2020
shell_type: ConfigShellToolType::ShellCommand,
2121
visibility: if preset.show_in_picker {

codex-rs/codex-api/tests/models_integration.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ async fn models_client_hits_models_endpoint() {
5656
slug: "gpt-test".to_string(),
5757
display_name: "gpt-test".to_string(),
5858
description: Some("desc".to_string()),
59-
default_reasoning_level: ReasoningEffort::Medium,
59+
default_reasoning_level: Some(ReasoningEffort::Medium),
6060
supported_reasoning_levels: vec![
6161
ReasoningEffortPreset {
6262
effort: ReasoningEffort::Low,

codex-rs/core/src/client.rs

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -205,12 +205,9 @@ impl ModelClient {
205205
let instructions = prompt.get_full_instructions(&model_info).into_owned();
206206
let tools_json: Vec<Value> = create_tools_json_for_responses_api(&prompt.tools)?;
207207

208-
let default_reasoning_effort = (!model_info.supported_reasoning_levels.is_empty()
209-
&& !model_info.slug.contains("codex"))
210-
.then_some(model_info.default_reasoning_level);
211208
let reasoning = if model_info.supports_reasoning_summaries {
212209
Some(Reasoning {
213-
effort: self.effort.or(default_reasoning_effort),
210+
effort: self.effort.or(model_info.default_reasoning_level),
214211
summary: if self.summary == ReasoningSummaryConfig::None {
215212
None
216213
} else {

codex-rs/core/src/codex.rs

Lines changed: 13 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1445,8 +1445,10 @@ impl Session {
14451445

14461446
pub(crate) async fn set_total_tokens_full(&self, turn_context: &TurnContext) {
14471447
let context_window = turn_context.client.get_model_context_window();
1448-
let mut state = self.state.lock().await;
1449-
state.set_token_usage_full(context_window);
1448+
{
1449+
let mut state = self.state.lock().await;
1450+
state.set_token_usage_full(context_window);
1451+
}
14501452
self.send_token_count_event(turn_context).await;
14511453
}
14521454

@@ -2331,9 +2333,15 @@ pub(crate) async fn run_task(
23312333
}
23322334

23332335
let model_info = turn_context.client.get_model_info();
2334-
let auto_compact_limit = model_info
2335-
.auto_compact_token_limit
2336-
.unwrap_or((model_info.context_window * 9) / 10);
2336+
let auto_compact_limit = model_info.auto_compact_token_limit.unwrap_or_else(|| {
2337+
// For unknown models (context_window=0), disable auto-compaction by using i64::MAX
2338+
// This matches the old ModelFamily behavior.
2339+
if model_info.context_window == 0 {
2340+
i64::MAX
2341+
} else {
2342+
(model_info.context_window * 9) / 10
2343+
}
2344+
});
23372345
let total_usage_tokens = sess.get_total_token_usage().await;
23382346
if total_usage_tokens >= auto_compact_limit {
23392347
run_auto_compact(&sess, &turn_context).await;

codex-rs/core/src/models_manager/manager.rs

Lines changed: 28 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@ use std::time::Duration;
1313
use tokio::sync::RwLock;
1414
use tokio::sync::TryLockError;
1515
use tracing::error;
16+
use tracing::warn;
1617

1718
use super::cache;
1819
use super::cache::ModelsCache;
@@ -134,8 +135,21 @@ impl ModelsManager {
134135
.await
135136
.into_iter()
136137
.find(|m| m.slug == model);
137-
let model = model_info::find_model_info_for_slug(model);
138-
model_info::with_config_overrides(model_info::merge_remote_overrides(model, remote), config)
138+
let local_model = model_info::find_model_info_for_slug(model);
139+
let is_unknown = local_model.context_window == 0 && remote.is_none();
140+
let final_model = model_info::with_config_overrides(
141+
model_info::merge_remote_overrides(local_model, remote),
142+
config,
143+
);
144+
// Warn if model is unknown (context_window=0) and wasn't overridden by config or remote
145+
if is_unknown && final_model.context_window == 0 && config.model_context_window.is_none() {
146+
warn!(
147+
model = model,
148+
"Unknown model: context window and reasoning presets not configured. \
149+
Set model_context_window in config.toml to enable auto-compaction."
150+
);
151+
}
152+
final_model
139153
}
140154

141155
pub async fn get_model(&self, model: &Option<String>, config: &Config) -> String {
@@ -181,7 +195,18 @@ impl ModelsManager {
181195
#[cfg(any(test, feature = "test-support"))]
182196
/// Offline helper that builds a `ModelInfo` without consulting remote state.
183197
pub fn construct_model_info_offline(model: &str, config: &Config) -> ModelInfo {
184-
model_info::with_config_overrides(model_info::find_model_info_for_slug(model), config)
198+
let local_model = model_info::find_model_info_for_slug(model);
199+
let is_unknown = local_model.context_window == 0;
200+
let final_model = model_info::with_config_overrides(local_model, config);
201+
// Warn if model is unknown (context_window=0) and wasn't overridden by config
202+
if is_unknown && final_model.context_window == 0 && config.model_context_window.is_none() {
203+
warn!(
204+
model = model,
205+
"Unknown model: context window and reasoning presets not configured. \
206+
Set model_context_window in config.toml to enable auto-compaction."
207+
);
208+
}
209+
final_model
185210
}
186211

187212
async fn get_etag(&self) -> Option<String> {

codex-rs/core/src/models_manager/model_info.rs

Lines changed: 29 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ macro_rules! model_info {
3333
// This is primarily used when remote metadata is available. When running
3434
// offline, core generally omits the effort field unless explicitly
3535
// configured by the user.
36-
default_reasoning_level: ReasoningEffort::Medium,
36+
default_reasoning_level: Some(ReasoningEffort::Medium),
3737
supported_reasoning_levels: supported_reasoning_level_low_medium_high(),
3838
shell_type: ConfigShellToolType::Default,
3939
visibility: ModelVisibility::None,
@@ -169,6 +169,7 @@ pub(crate) fn find_model_info_for_slug(slug: &str) -> ModelInfo {
169169
support_verbosity: true,
170170
default_verbosity: Some(Verbosity::Low),
171171
base_instructions: BASE_INSTRUCTIONS.to_string(),
172+
default_reasoning_level: Some(ReasoningEffort::Medium),
172173
truncation_policy: TruncationPolicyConfig::bytes(10_000),
173174
shell_type: ConfigShellToolType::UnifiedExec,
174175
supports_parallel_tool_calls: true,
@@ -247,6 +248,7 @@ pub(crate) fn find_model_info_for_slug(slug: &str) -> ModelInfo {
247248
supports_parallel_tool_calls: true,
248249
context_window: CONTEXT_WINDOW_272K,
249250
supported_reasoning_levels: supported_reasoning_level_low_medium_high_xhigh_non_codex(),
251+
default_reasoning_level: Some(ReasoningEffort::Medium),
250252
)
251253
} else if slug.starts_with("gpt-5.1") && !slug.contains("codex") {
252254
model_info!(
@@ -260,6 +262,7 @@ pub(crate) fn find_model_info_for_slug(slug: &str) -> ModelInfo {
260262
shell_type: ConfigShellToolType::ShellCommand,
261263
supports_parallel_tool_calls: true,
262264
context_window: CONTEXT_WINDOW_272K,
265+
default_reasoning_level: Some(ReasoningEffort::Medium),
263266
supported_reasoning_levels: supported_reasoning_level_low_medium_high_non_codex(),
264267
)
265268
} else if slug.starts_with("gpt-5") {
@@ -273,7 +276,31 @@ pub(crate) fn find_model_info_for_slug(slug: &str) -> ModelInfo {
273276
context_window: CONTEXT_WINDOW_272K,
274277
)
275278
} else {
276-
model_info!(slug)
279+
// Unknown model: return defaults with context_window=0 (sentinel for "unknown")
280+
// and empty base_instructions. This matches the old ModelFamily behavior.
281+
ModelInfo {
282+
slug: slug.to_string(),
283+
display_name: slug.to_string(),
284+
description: None,
285+
default_reasoning_level: Some(ReasoningEffort::Medium),
286+
supported_reasoning_levels: Vec::new(),
287+
shell_type: ConfigShellToolType::Default,
288+
visibility: ModelVisibility::None,
289+
supported_in_api: true,
290+
priority: 99,
291+
upgrade: None,
292+
base_instructions: String::new(),
293+
supports_reasoning_summaries: false,
294+
support_verbosity: false,
295+
default_verbosity: None,
296+
apply_patch_tool_type: None,
297+
truncation_policy: TruncationPolicyConfig::bytes(10_000),
298+
supports_parallel_tool_calls: false,
299+
context_window: 0,
300+
auto_compact_token_limit: None,
301+
effective_context_window_percent: 95,
302+
experimental_supported_tools: Vec::new(),
303+
}
277304
}
278305
}
279306

codex-rs/core/tests/suite/remote_models.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> {
6464
slug: REMOTE_MODEL_SLUG.to_string(),
6565
display_name: "Remote Test".to_string(),
6666
description: Some("A remote model that requires the test shell".to_string()),
67-
default_reasoning_level: ReasoningEffort::Medium,
67+
default_reasoning_level: Some(ReasoningEffort::Medium),
6868
supported_reasoning_levels: vec![ReasoningEffortPreset {
6969
effort: ReasoningEffort::Medium,
7070
description: ReasoningEffort::Medium.to_string(),
@@ -203,7 +203,7 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> {
203203
slug: model.to_string(),
204204
display_name: "Parallel Remote".to_string(),
205205
description: Some("A remote model with custom instructions".to_string()),
206-
default_reasoning_level: ReasoningEffort::Medium,
206+
default_reasoning_level: Some(ReasoningEffort::Medium),
207207
supported_reasoning_levels: vec![ReasoningEffortPreset {
208208
effort: ReasoningEffort::Medium,
209209
description: ReasoningEffort::Medium.to_string(),
@@ -465,7 +465,7 @@ fn test_remote_model(slug: &str, visibility: ModelVisibility, priority: i32) ->
465465
slug: slug.to_string(),
466466
display_name: format!("{slug} display"),
467467
description: Some(format!("{slug} description")),
468-
default_reasoning_level: ReasoningEffort::Medium,
468+
default_reasoning_level: Some(ReasoningEffort::Medium),
469469
supported_reasoning_levels: vec![ReasoningEffortPreset {
470470
effort: ReasoningEffort::Medium,
471471
description: ReasoningEffort::Medium.to_string(),

codex-rs/protocol/src/openai_models.rs

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -169,7 +169,7 @@ pub struct ModelInfo {
169169
pub slug: String,
170170
pub display_name: String,
171171
pub description: Option<String>,
172-
pub default_reasoning_level: ReasoningEffort,
172+
pub default_reasoning_level: Option<ReasoningEffort>,
173173
pub supported_reasoning_levels: Vec<ReasoningEffortPreset>,
174174
pub shell_type: ConfigShellToolType,
175175
pub visibility: ModelVisibility,
@@ -209,7 +209,9 @@ impl From<ModelInfo> for ModelPreset {
209209
model: info.slug.clone(),
210210
display_name: info.display_name,
211211
description: info.description.unwrap_or_default(),
212-
default_reasoning_effort: info.default_reasoning_level,
212+
default_reasoning_effort: info
213+
.default_reasoning_level
214+
.unwrap_or(ReasoningEffort::Medium),
213215
supported_reasoning_efforts: info.supported_reasoning_levels.clone(),
214216
is_default: false, // default is the highest priority available model
215217
upgrade: info.upgrade.as_ref().map(|upgrade_slug| ModelUpgrade {

0 commit comments

Comments (0)