Skip to content

Commit 45fcbe3

Browse files
committed
Use prior-turn personality for model-switch compaction base instructions
1 parent 013b23f commit 45fcbe3

File tree

4 files changed

+167
-1
lines changed

4 files changed

+167
-1
lines changed

codex-rs/core/src/codex.rs

Lines changed: 31 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1432,11 +1432,12 @@ impl Session {
14321432
&self,
14331433
previous_turn_context: &TurnContext,
14341434
turn_context: &TurnContext,
1435+
previous_turn_personality: Option<Personality>,
14351436
) {
14361437
let mut state = self.state.lock().await;
14371438
let previous_model_default = previous_turn_context
14381439
.model_info
1439-
.get_model_instructions(previous_turn_context.personality);
1440+
.get_model_instructions(previous_turn_personality);
14401441
if state.session_configuration.base_instructions == previous_model_default {
14411442
state.session_configuration.base_instructions = turn_context
14421443
.model_info
@@ -1492,18 +1493,21 @@ impl Session {
14921493
state.initial_context_seeded = true;
14931494
}
14941495
self.set_previous_model(None).await;
1496+
self.set_previous_personality(None).await;
14951497
// Ensure initial items are visible to immediate readers (e.g., tests, forks).
14961498
self.flush_rollout().await;
14971499
}
14981500
InitialHistory::Resumed(resumed_history) => {
14991501
let rollout_items = resumed_history.history;
15001502
let previous_model = Self::last_rollout_model_name(&rollout_items)
15011503
.map(std::string::ToString::to_string);
1504+
let previous_personality = Self::last_rollout_personality(&rollout_items);
15021505
{
15031506
let mut state = self.state.lock().await;
15041507
state.initial_context_seeded = false;
15051508
}
15061509
self.set_previous_model(previous_model).await;
1510+
self.set_previous_personality(previous_personality).await;
15071511

15081512
// If resuming, warn when the last recorded model differs from the current one.
15091513
let curr = turn_context.model_info.slug.as_str();
@@ -1546,7 +1550,9 @@ impl Session {
15461550
InitialHistory::Forked(rollout_items) => {
15471551
let previous_model = Self::last_rollout_model_name(&rollout_items)
15481552
.map(std::string::ToString::to_string);
1553+
let previous_personality = Self::last_rollout_personality(&rollout_items);
15491554
self.set_previous_model(previous_model).await;
1555+
self.set_previous_personality(previous_personality).await;
15501556

15511557
// Always add response items to conversation history
15521558
let reconstructed_history = self
@@ -1597,6 +1603,16 @@ impl Session {
15971603
})
15981604
}
15991605

1606+
fn last_rollout_personality(rollout_items: &[RolloutItem]) -> Option<Personality> {
1607+
rollout_items.iter().rev().find_map(|it| {
1608+
if let RolloutItem::TurnContext(ctx) = it {
1609+
ctx.personality
1610+
} else {
1611+
None
1612+
}
1613+
})
1614+
}
1615+
16001616
fn last_token_info_from_rollout(rollout_items: &[RolloutItem]) -> Option<TokenUsageInfo> {
16011617
rollout_items.iter().rev().find_map(|item| match item {
16021618
RolloutItem::EventMsg(EventMsg::TokenCount(ev)) => ev.info.clone(),
@@ -1609,11 +1625,21 @@ impl Session {
16091625
state.previous_model()
16101626
}
16111627

1628+
async fn previous_personality(&self) -> Option<Personality> {
1629+
let state = self.state.lock().await;
1630+
state.previous_personality()
1631+
}
1632+
16121633
pub(crate) async fn set_previous_model(&self, previous_model: Option<String>) {
16131634
let mut state = self.state.lock().await;
16141635
state.set_previous_model(previous_model);
16151636
}
16161637

1638+
pub(crate) async fn set_previous_personality(&self, previous_personality: Option<Personality>) {
1639+
let mut state = self.state.lock().await;
1640+
state.set_previous_personality(previous_personality);
1641+
}
1642+
16171643
fn maybe_refresh_shell_snapshot_for_cwd(
16181644
&self,
16191645
previous_cwd: &Path,
@@ -3764,6 +3790,8 @@ mod handlers {
37643790
let turn_context = sess.new_default_turn_with_sub_id(sub_id).await;
37653791
sess.set_previous_model(Some(turn_context.model_info.slug.clone()))
37663792
.await;
3793+
sess.set_previous_personality(turn_context.personality)
3794+
.await;
37673795

37683796
let mut history = sess.clone_history().await;
37693797
history.drop_last_n_user_turns(num_turns);
@@ -4388,6 +4416,7 @@ async fn run_pre_sampling_compact(
43884416
) -> CodexResult<()> {
43894417
let total_usage_tokens_before_compaction = sess.get_total_token_usage().await;
43904418
let previous_model = sess.previous_model().await;
4419+
let previous_personality = sess.previous_personality().await;
43914420
let previous_turn_context = match maybe_run_previous_model_inline_compact(
43924421
sess,
43934422
turn_context,
@@ -4417,6 +4446,7 @@ async fn run_pre_sampling_compact(
44174446
sess.set_base_instructions_for_turn_model(
44184447
previous_turn_context.as_ref(),
44194448
turn_context.as_ref(),
4449+
previous_personality,
44204450
)
44214451
.await;
44224452
}

codex-rs/core/src/state/session.rs

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
//! Session-wide mutable state.
22
3+
use codex_protocol::config_types::Personality;
34
use codex_protocol::models::ResponseItem;
45
use std::collections::HashMap;
56
use std::collections::HashSet;
@@ -27,6 +28,8 @@ pub(crate) struct SessionState {
2728
pub(crate) initial_context_seeded: bool,
2829
/// Previous model seen by the session, used for model-switch handling on task start.
2930
previous_model: Option<String>,
31+
/// Previous personality seen by the session, used for model-switch compaction handling.
32+
previous_personality: Option<Personality>,
3033
/// Startup regular task pre-created during session initialization.
3134
pub(crate) startup_regular_task: Option<RegularTask>,
3235
pub(crate) active_mcp_tool_selection: Option<Vec<String>>,
@@ -46,6 +49,7 @@ impl SessionState {
4649
mcp_dependency_prompted: HashSet::new(),
4750
initial_context_seeded: false,
4851
previous_model: None,
52+
previous_personality: None,
4953
startup_regular_task: None,
5054
active_mcp_tool_selection: None,
5155
active_connector_selection: HashSet::new(),
@@ -67,6 +71,12 @@ impl SessionState {
6771
pub(crate) fn set_previous_model(&mut self, previous_model: Option<String>) {
6872
self.previous_model = previous_model;
6973
}
74+
pub(crate) fn previous_personality(&self) -> Option<Personality> {
75+
self.previous_personality
76+
}
77+
pub(crate) fn set_previous_personality(&mut self, previous_personality: Option<Personality>) {
78+
self.previous_personality = previous_personality;
79+
}
7080

7181
pub(crate) fn clone_history(&self) -> ContextManager {
7282
self.history.clone()

codex-rs/core/src/tasks/mod.rs

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -155,6 +155,8 @@ impl Session {
155155
// Update previous model before TurnComplete is emitted so
156156
// immediately following turns observe the correct switch state.
157157
sess.set_previous_model(Some(model_slug)).await;
158+
sess.set_previous_personality(ctx_for_finish.personality)
159+
.await;
158160
if !task_cancellation_token.is_cancelled() {
159161
// Emit completion uniformly from spawn site so all tasks share the same lifecycle.
160162
sess.on_task_finished(Arc::clone(&ctx_for_finish), last_agent_message)
@@ -275,6 +277,8 @@ impl Session {
275277
// Set previous model even when interrupted so model-switch handling stays correct.
276278
self.set_previous_model(Some(task.turn_context.model_info.slug.clone()))
277279
.await;
280+
self.set_previous_personality(task.turn_context.personality)
281+
.await;
278282

279283
let session_ctx = Arc::new(SessionTaskContext::new(Arc::clone(self)));
280284
session_task

codex-rs/core/tests/suite/compact.rs

Lines changed: 122 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ use codex_core::built_in_model_providers;
55
use codex_core::compact::SUMMARIZATION_PROMPT;
66
use codex_core::compact::SUMMARY_PREFIX;
77
use codex_core::config::Config;
8+
use codex_core::config::types::Personality;
89
use codex_core::features::Feature;
910
use codex_core::protocol::AskForApproval;
1011
use codex_core::protocol::EventMsg;
@@ -2155,6 +2156,127 @@ async fn pre_sampling_compact_model_switch_preserves_base_instructions_override(
21552156
assert_eq!(requests[2].instructions_text(), base_instructions_override);
21562157
}
21572158

2159+
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
2160+
async fn pre_sampling_compact_model_switch_with_personality_updates_base_instructions() {
2161+
skip_if_no_network!();
2162+
2163+
let server = MockServer::start().await;
2164+
let previous_model = "gpt-5.2-codex";
2165+
let next_model = "gpt-5.1-codex-max";
2166+
2167+
let previous_model_info = model_info_with_context_window(previous_model, 273_000);
2168+
let next_model_info = model_info_with_context_window(next_model, 125_000);
2169+
let previous_default_instructions = previous_model_info.get_model_instructions(None);
2170+
let next_pragmatic_instructions =
2171+
next_model_info.get_model_instructions(Some(Personality::Pragmatic));
2172+
assert_ne!(
2173+
previous_default_instructions, next_pragmatic_instructions,
2174+
"test requires personality-specific instructions to differ from previous model default"
2175+
);
2176+
2177+
let models_mock = mount_models_once(
2178+
&server,
2179+
ModelsResponse {
2180+
models: vec![previous_model_info, next_model_info],
2181+
},
2182+
)
2183+
.await;
2184+
2185+
let request_log = mount_sse_sequence(
2186+
&server,
2187+
vec![
2188+
sse(vec![
2189+
ev_assistant_message("m1", "before switch"),
2190+
ev_completed_with_tokens("r1", 120_000),
2191+
]),
2192+
sse(vec![
2193+
ev_assistant_message("m2", "PRE_SAMPLING_SUMMARY"),
2194+
ev_completed_with_tokens("r2", 10),
2195+
]),
2196+
sse(vec![
2197+
ev_assistant_message("m3", "after switch with personality"),
2198+
ev_completed_with_tokens("r3", 100),
2199+
]),
2200+
],
2201+
)
2202+
.await;
2203+
2204+
let model_provider = non_openai_model_provider(&server);
2205+
let mut builder = test_codex()
2206+
.with_auth(CodexAuth::create_dummy_chatgpt_auth_for_testing())
2207+
.with_model(previous_model)
2208+
.with_config(move |config| {
2209+
config.model_provider = model_provider;
2210+
set_test_compact_prompt(config);
2211+
config.features.enable(Feature::RemoteModels);
2212+
config.features.enable(Feature::Personality);
2213+
});
2214+
let test = builder.build(&server).await.expect("build test codex");
2215+
2216+
test.codex
2217+
.submit(Op::UserTurn {
2218+
items: vec![UserInput::Text {
2219+
text: "before switch".into(),
2220+
text_elements: Vec::new(),
2221+
}],
2222+
final_output_json_schema: None,
2223+
cwd: test.cwd.path().to_path_buf(),
2224+
approval_policy: AskForApproval::Never,
2225+
sandbox_policy: SandboxPolicy::DangerFullAccess,
2226+
model: previous_model.to_string(),
2227+
effort: None,
2228+
summary: ReasoningSummary::Auto,
2229+
collaboration_mode: None,
2230+
personality: None,
2231+
})
2232+
.await
2233+
.expect("submit first user turn");
2234+
wait_for_event(&test.codex, |event| {
2235+
matches!(event, EventMsg::TurnComplete(_))
2236+
})
2237+
.await;
2238+
2239+
test.codex
2240+
.submit(Op::UserTurn {
2241+
items: vec![UserInput::Text {
2242+
text: "after switch".into(),
2243+
text_elements: Vec::new(),
2244+
}],
2245+
final_output_json_schema: None,
2246+
cwd: test.cwd.path().to_path_buf(),
2247+
approval_policy: AskForApproval::Never,
2248+
sandbox_policy: SandboxPolicy::DangerFullAccess,
2249+
model: next_model.to_string(),
2250+
effort: None,
2251+
summary: ReasoningSummary::Auto,
2252+
collaboration_mode: None,
2253+
personality: Some(Personality::Pragmatic),
2254+
})
2255+
.await
2256+
.expect("submit second user turn");
2257+
assert_compaction_uses_turn_lifecycle_id(&test.codex).await;
2258+
2259+
let requests = request_log.requests();
2260+
assert_eq!(models_mock.requests().len(), 1);
2261+
assert_eq!(
2262+
requests.len(),
2263+
3,
2264+
"expected user, compact, and follow-up requests"
2265+
);
2266+
assert_pre_sampling_switch_compaction_requests(
2267+
&requests[0],
2268+
&requests[1],
2269+
&requests[2],
2270+
previous_model,
2271+
next_model,
2272+
);
2273+
assert_eq!(
2274+
requests[2].instructions_text(),
2275+
next_pragmatic_instructions,
2276+
"follow-up request should use new model base instructions for prior-turn personality"
2277+
);
2278+
}
2279+
21582280
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
21592281
async fn auto_compact_persists_rollout_entries() {
21602282
skip_if_no_network!();

0 commit comments

Comments (0)