Skip to content

Commit d5dfba2

Browse files
feat: arcticfox in the wild (#6906)
<img width="485" height="600" alt="image" src="https://github.com/user-attachments/assets/4341740d-dd58-4a3e-b69a-33a3be0606c5" /> --------- Co-authored-by: jif-oai <[email protected]>
1 parent 1924500 commit d5dfba2

31 files changed

+375
-211
lines changed

codex-rs/app-server-protocol/src/protocol/common.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -531,7 +531,7 @@ mod tests {
531531
let request = ClientRequest::NewConversation {
532532
request_id: RequestId::Integer(42),
533533
params: v1::NewConversationParams {
534-
model: Some("arcticfox".to_string()),
534+
model: Some("gpt-5.1-codex-max".to_string()),
535535
model_provider: None,
536536
profile: None,
537537
cwd: None,
@@ -549,7 +549,7 @@ mod tests {
549549
"method": "newConversation",
550550
"id": 42,
551551
"params": {
552-
"model": "arcticfox",
552+
"model": "gpt-5.1-codex-max",
553553
"modelProvider": null,
554554
"profile": null,
555555
"cwd": null,

codex-rs/app-server/tests/suite/config.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
2727
std::fs::write(
2828
config_toml,
2929
r#"
30-
model = "arcticfox"
30+
model = "gpt-5.1-codex-max"
3131
approval_policy = "on-request"
3232
sandbox_mode = "workspace-write"
3333
model_reasoning_summary = "detailed"
@@ -87,7 +87,7 @@ async fn get_config_toml_parses_all_fields() -> Result<()> {
8787
}),
8888
forced_chatgpt_workspace_id: Some("12345678-0000-0000-0000-000000000000".into()),
8989
forced_login_method: Some(ForcedLoginMethod::Chatgpt),
90-
model: Some("arcticfox".into()),
90+
model: Some("gpt-5.1-codex-max".into()),
9191
model_reasoning_effort: Some(ReasoningEffort::High),
9292
model_reasoning_summary: Some(ReasoningSummary::Detailed),
9393
model_verbosity: Some(Verbosity::Medium),

codex-rs/app-server/tests/suite/set_default_model.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@ fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
5757
std::fs::write(
5858
config_toml,
5959
r#"
60-
model = "arcticfox"
60+
model = "gpt-5.1-codex-max"
6161
model_reasoning_effort = "medium"
6262
"#,
6363
)

codex-rs/app-server/tests/suite/v2/model_list.rs

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -46,9 +46,9 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
4646

4747
let expected_models = vec![
4848
Model {
49-
id: "arcticfox".to_string(),
50-
model: "arcticfox".to_string(),
51-
display_name: "arcticfox".to_string(),
49+
id: "gpt-5.1-codex-max".to_string(),
50+
model: "gpt-5.1-codex-max".to_string(),
51+
display_name: "gpt-5.1-codex-max".to_string(),
5252
description: "Latest Codex-optimized flagship for deep and fast reasoning.".to_string(),
5353
supported_reasoning_efforts: vec![
5454
ReasoningEffortOption {
@@ -174,7 +174,7 @@ async fn list_models_pagination_works() -> Result<()> {
174174
} = to_response::<ModelListResponse>(first_response)?;
175175

176176
assert_eq!(first_items.len(), 1);
177-
assert_eq!(first_items[0].id, "arcticfox");
177+
assert_eq!(first_items[0].id, "gpt-5.1-codex-max");
178178
let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?;
179179

180180
let second_request = mcp

codex-rs/app-server/tests/suite/v2/thread_resume.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ async fn thread_resume_returns_original_thread() -> Result<()> {
3131
// Start a thread.
3232
let start_id = mcp
3333
.send_thread_start_request(ThreadStartParams {
34-
model: Some("arcticfox".to_string()),
34+
model: Some("gpt-5.1-codex-max".to_string()),
3535
..Default::default()
3636
})
3737
.await?;
@@ -132,7 +132,7 @@ async fn thread_resume_prefers_path_over_thread_id() -> Result<()> {
132132

133133
let start_id = mcp
134134
.send_thread_start_request(ThreadStartParams {
135-
model: Some("arcticfox".to_string()),
135+
model: Some("gpt-5.1-codex-max".to_string()),
136136
..Default::default()
137137
})
138138
.await?;
@@ -177,7 +177,7 @@ async fn thread_resume_supports_history_and_overrides() -> Result<()> {
177177
// Start a thread.
178178
let start_id = mcp
179179
.send_thread_start_request(ThreadStartParams {
180-
model: Some("arcticfox".to_string()),
180+
model: Some("gpt-5.1-codex-max".to_string()),
181181
..Default::default()
182182
})
183183
.await?;

codex-rs/common/src/model_presets.rs

Lines changed: 29 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,8 @@ use codex_core::protocol_config_types::ReasoningEffort;
55
use once_cell::sync::Lazy;
66

77
pub const HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG: &str = "hide_gpt5_1_migration_prompt";
8-
pub const HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG: &str = "hide_arcticfox_migration_prompt";
8+
pub const HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG: &str =
9+
"hide_gpt-5.1-codex-max_migration_prompt";
910

1011
/// A reasoning effort option that can be surfaced for a model.
1112
#[derive(Debug, Clone, Copy)]
@@ -49,9 +50,9 @@ pub struct ModelPreset {
4950
static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
5051
vec![
5152
ModelPreset {
52-
id: "arcticfox",
53-
model: "arcticfox",
54-
display_name: "arcticfox",
53+
id: "gpt-5.1-codex-max",
54+
model: "gpt-5.1-codex-max",
55+
display_name: "gpt-5.1-codex-max",
5556
description: "Latest Codex-optimized flagship for deep and fast reasoning.",
5657
default_reasoning_effort: ReasoningEffort::Medium,
5758
supported_reasoning_efforts: &[
@@ -98,9 +99,9 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
9899
],
99100
is_default: false,
100101
upgrade: Some(ModelUpgrade {
101-
id: "arcticfox",
102+
id: "gpt-5.1-codex-max",
102103
reasoning_effort_mapping: None,
103-
migration_config_key: HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG,
104+
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
104105
}),
105106
show_in_picker: true,
106107
},
@@ -121,7 +122,11 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
121122
},
122123
],
123124
is_default: false,
124-
upgrade: None,
125+
upgrade: Some(ModelUpgrade {
126+
id: "gpt-5.1-codex-max",
127+
reasoning_effort_mapping: None,
128+
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
129+
}),
125130
show_in_picker: true,
126131
},
127132
ModelPreset {
@@ -145,7 +150,11 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
145150
},
146151
],
147152
is_default: false,
148-
upgrade: None,
153+
upgrade: Some(ModelUpgrade {
154+
id: "gpt-5.1-codex-max",
155+
reasoning_effort_mapping: None,
156+
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
157+
}),
149158
show_in_picker: true,
150159
},
151160
// Deprecated models.
@@ -171,9 +180,9 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
171180
],
172181
is_default: false,
173182
upgrade: Some(ModelUpgrade {
174-
id: "arcticfox",
183+
id: "gpt-5.1-codex-max",
175184
reasoning_effort_mapping: None,
176-
migration_config_key: HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG,
185+
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
177186
}),
178187
show_in_picker: false,
179188
},
@@ -227,12 +236,9 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
227236
],
228237
is_default: false,
229238
upgrade: Some(ModelUpgrade {
230-
id: "gpt-5.1",
231-
reasoning_effort_mapping: Some(HashMap::from([(
232-
ReasoningEffort::Minimal,
233-
ReasoningEffort::Low,
234-
)])),
235-
migration_config_key: HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG,
239+
id: "gpt-5.1-codex-max",
240+
reasoning_effort_mapping: None,
241+
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
236242
}),
237243
show_in_picker: false,
238244
},
@@ -243,7 +249,7 @@ pub fn builtin_model_presets(auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
243249
PRESETS
244250
.iter()
245251
.filter(|preset| match auth_mode {
246-
Some(AuthMode::ApiKey) => preset.show_in_picker && preset.id != "arcticfox",
252+
Some(AuthMode::ApiKey) => preset.show_in_picker && preset.id != "gpt-5.1-codex-max",
247253
_ => preset.show_in_picker,
248254
})
249255
.cloned()
@@ -266,8 +272,12 @@ mod tests {
266272
}
267273

268274
#[test]
269-
fn arcticfox_hidden_for_api_key_auth() {
275+
fn gpt_5_1_codex_max_hidden_for_api_key_auth() {
270276
let presets = builtin_model_presets(Some(AuthMode::ApiKey));
271-
assert!(presets.iter().all(|preset| preset.id != "arcticfox"));
277+
assert!(
278+
presets
279+
.iter()
280+
.all(|preset| preset.id != "gpt-5.1-codex-max")
281+
);
272282
}
273283
}
File renamed without changes.

codex-rs/core/src/client_common.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -422,7 +422,7 @@ mod tests {
422422
expects_apply_patch_instructions: false,
423423
},
424424
InstructionsTestCase {
425-
slug: "arcticfox",
425+
slug: "gpt-5.1-codex-max",
426426
expects_apply_patch_instructions: false,
427427
},
428428
];

codex-rs/core/src/config/edit.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -846,7 +846,7 @@ hide_gpt5_1_migration_prompt = true
846846
}
847847

848848
#[test]
849-
fn blocking_set_hide_arcticfox_migration_prompt_preserves_table() {
849+
fn blocking_set_hide_gpt_5_1_codex_max_migration_prompt_preserves_table() {
850850
let tmp = tempdir().expect("tmpdir");
851851
let codex_home = tmp.path();
852852
std::fs::write(
@@ -860,7 +860,7 @@ existing = "value"
860860
codex_home,
861861
None,
862862
&[ConfigEdit::SetNoticeHideModelMigrationPrompt(
863-
"hide_arcticfox_migration_prompt".to_string(),
863+
"hide_gpt-5.1-codex-max_migration_prompt".to_string(),
864864
true,
865865
)],
866866
)
@@ -870,7 +870,7 @@ existing = "value"
870870
std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
871871
let expected = r#"[notice]
872872
existing = "value"
873-
hide_arcticfox_migration_prompt = true
873+
"hide_gpt-5.1-codex-max_migration_prompt" = true
874874
"#;
875875
assert_eq!(contents, expected);
876876
}

codex-rs/core/src/config/mod.rs

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -62,11 +62,11 @@ pub mod profile;
6262
pub mod types;
6363

6464
#[cfg(target_os = "windows")]
65-
pub const OPENAI_DEFAULT_MODEL: &str = "arcticfox";
65+
pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5.1-codex-max";
6666
#[cfg(not(target_os = "windows"))]
67-
pub const OPENAI_DEFAULT_MODEL: &str = "arcticfox";
68-
const OPENAI_DEFAULT_REVIEW_MODEL: &str = "arcticfox";
69-
pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "arcticfox";
67+
pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5.1-codex-max";
68+
const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5.1-codex-max";
69+
pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "gpt-5.1-codex-max";
7070

7171
/// Maximum number of bytes of the documentation that will be embedded. Larger
7272
/// files are *silently truncated* to this size so we do not take up too much of
@@ -81,7 +81,7 @@ pub struct Config {
8181
/// Optional override of model selection.
8282
pub model: String,
8383

84-
/// Model used specifically for review sessions. Defaults to "arcticfox".
84+
/// Model used specifically for review sessions. Defaults to "gpt-5.1-codex-max".
8585
pub review_model: String,
8686

8787
pub model_family: ModelFamily,

0 commit comments

Comments (0)