diff --git a/codex-rs/Cargo.lock b/codex-rs/Cargo.lock
index 29be64f9a9b..70e903fbefd 100644
--- a/codex-rs/Cargo.lock
+++ b/codex-rs/Cargo.lock
@@ -1596,7 +1596,9 @@ dependencies = [
  "bytes",
  "codex-core",
  "futures",
+ "pretty_assertions",
  "reqwest",
+ "semver",
  "serde_json",
  "tokio",
  "tracing",
diff --git a/codex-rs/Cargo.toml b/codex-rs/Cargo.toml
index 8a996955e58..4bd9d63d0a2 100644
--- a/codex-rs/Cargo.toml
+++ b/codex-rs/Cargo.toml
@@ -192,6 +192,7 @@ serde_yaml = "0.9"
 serial_test = "3.2.0"
 sha1 = "0.10.6"
 sha2 = "0.10"
+semver = "1.0"
 shlex = "1.3.0"
 similar = "2.7.0"
 socket2 = "0.6.1"
diff --git a/codex-rs/common/src/oss.rs b/codex-rs/common/src/oss.rs
index b2f511e4780..f686bb60164 100644
--- a/codex-rs/common/src/oss.rs
+++ b/codex-rs/common/src/oss.rs
@@ -1,18 +1,52 @@
 //! OSS provider utilities shared between TUI and exec.
 use codex_core::LMSTUDIO_OSS_PROVIDER_ID;
+use codex_core::OLLAMA_CHAT_PROVIDER_ID;
 use codex_core::OLLAMA_OSS_PROVIDER_ID;
+use codex_core::WireApi;
 use codex_core::config::Config;
+use codex_core::protocol::DeprecationNoticeEvent;
+use std::io;
 
 /// Returns the default model for a given OSS provider.
 pub fn get_default_model_for_oss_provider(provider_id: &str) -> Option<&'static str> {
     match provider_id {
         LMSTUDIO_OSS_PROVIDER_ID => Some(codex_lmstudio::DEFAULT_OSS_MODEL),
-        OLLAMA_OSS_PROVIDER_ID => Some(codex_ollama::DEFAULT_OSS_MODEL),
+        OLLAMA_OSS_PROVIDER_ID | OLLAMA_CHAT_PROVIDER_ID => Some(codex_ollama::DEFAULT_OSS_MODEL),
         _ => None,
     }
 }
 
+/// Returns a deprecation notice if Ollama doesn't support the responses wire API.
+pub async fn ollama_chat_deprecation_notice(
+    config: &Config,
+) -> io::Result<Option<DeprecationNoticeEvent>> {
+    if config.model_provider_id != OLLAMA_OSS_PROVIDER_ID
+        || config.model_provider.wire_api != WireApi::Responses
+    {
+        return Ok(None);
+    }
+
+    if let Some(detection) = codex_ollama::detect_wire_api(&config.model_provider).await?
+        && detection.wire_api == WireApi::Chat
+    {
+        let version_suffix = detection
+            .version
+            .as_ref()
+            .map(|version| format!(" (version {version})"))
+            .unwrap_or_default();
+        let summary = format!(
+            "Your Ollama server{version_suffix} doesn't support the Responses API. Either update Ollama or set `oss_provider = \"{OLLAMA_CHAT_PROVIDER_ID}\"` (or `model_provider = \"{OLLAMA_CHAT_PROVIDER_ID}\"`) in your config.toml to use the \"chat\" wire API. Support for the \"chat\" wire API is deprecated and will soon be removed."
+        );
+        return Ok(Some(DeprecationNoticeEvent {
+            summary,
+            details: None,
+        }));
+    }
+
+    Ok(None)
+}
+
 /// Ensures the specified OSS provider is ready (models downloaded, service reachable).
 pub async fn ensure_oss_provider_ready(
     provider_id: &str,
@@ -24,7 +58,7 @@ pub async fn ensure_oss_provider_ready(
             .await
             .map_err(|e| std::io::Error::other(format!("OSS setup failed: {e}")))?;
         }
-        OLLAMA_OSS_PROVIDER_ID => {
+        OLLAMA_OSS_PROVIDER_ID | OLLAMA_CHAT_PROVIDER_ID => {
            codex_ollama::ensure_oss_ready(config)
                .await
                .map_err(|e| std::io::Error::other(format!("OSS setup failed: {e}")))?;
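The notice above points users at a one-line opt-in. For reference, a minimal config.toml sketch of what the summary suggests (the `model_provider` key works the same way):

```toml
# Stay on the deprecated "chat" wire API against an older Ollama server.
oss_provider = "ollama-chat"
```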
diff --git a/codex-rs/core/src/config/mod.rs b/codex-rs/core/src/config/mod.rs
index 22fcbfd6472..0f000fcffb4 100644
--- a/codex-rs/core/src/config/mod.rs
+++ b/codex-rs/core/src/config/mod.rs
@@ -24,6 +24,7 @@ use crate::features::FeaturesToml;
 use crate::git_info::resolve_root_git_project_for_trust;
 use crate::model_provider_info::LMSTUDIO_OSS_PROVIDER_ID;
 use crate::model_provider_info::ModelProviderInfo;
+use crate::model_provider_info::OLLAMA_CHAT_PROVIDER_ID;
 use crate::model_provider_info::OLLAMA_OSS_PROVIDER_ID;
 use crate::model_provider_info::built_in_model_providers;
 use crate::project_doc::DEFAULT_PROJECT_DOC_FILENAME;
@@ -634,14 +635,14 @@ pub fn set_project_trust_level(
 pub fn set_default_oss_provider(codex_home: &Path, provider: &str) -> std::io::Result<()> {
     // Validate that the provider is one of the known OSS providers
     match provider {
-        LMSTUDIO_OSS_PROVIDER_ID | OLLAMA_OSS_PROVIDER_ID => {
+        LMSTUDIO_OSS_PROVIDER_ID | OLLAMA_OSS_PROVIDER_ID | OLLAMA_CHAT_PROVIDER_ID => {
             // Valid provider, continue
         }
         _ => {
             return Err(std::io::Error::new(
                 std::io::ErrorKind::InvalidInput,
                 format!(
-                    "Invalid OSS provider '{provider}'. Must be one of: {LMSTUDIO_OSS_PROVIDER_ID}, {OLLAMA_OSS_PROVIDER_ID}"
+                    "Invalid OSS provider '{provider}'. Must be one of: {LMSTUDIO_OSS_PROVIDER_ID}, {OLLAMA_OSS_PROVIDER_ID}, {OLLAMA_CHAT_PROVIDER_ID}"
                 ),
             ));
         }
@@ -843,7 +844,7 @@ pub struct ConfigToml {
     pub experimental_compact_prompt_file: Option<PathBuf>,
     pub experimental_use_unified_exec_tool: Option<bool>,
     pub experimental_use_freeform_apply_patch: Option<bool>,
-    /// Preferred OSS provider for local models, e.g. "lmstudio" or "ollama".
+    /// Preferred OSS provider for local models, e.g. "lmstudio", "ollama", or "ollama-chat".
     pub oss_provider: Option<String>,
 }
 
diff --git a/codex-rs/core/src/lib.rs b/codex-rs/core/src/lib.rs
index 1fb25ebc138..085ca5a42c4 100644
--- a/codex-rs/core/src/lib.rs
+++ b/codex-rs/core/src/lib.rs
@@ -57,6 +57,7 @@ pub use model_provider_info::DEFAULT_LMSTUDIO_PORT;
 pub use model_provider_info::DEFAULT_OLLAMA_PORT;
 pub use model_provider_info::LMSTUDIO_OSS_PROVIDER_ID;
 pub use model_provider_info::ModelProviderInfo;
+pub use model_provider_info::OLLAMA_CHAT_PROVIDER_ID;
 pub use model_provider_info::OLLAMA_OSS_PROVIDER_ID;
 pub use model_provider_info::WireApi;
 pub use model_provider_info::built_in_model_providers;
diff --git a/codex-rs/core/src/model_provider_info.rs b/codex-rs/core/src/model_provider_info.rs
index 96173922372..60f20128d52 100644
--- a/codex-rs/core/src/model_provider_info.rs
+++ b/codex-rs/core/src/model_provider_info.rs
@@ -260,6 +260,7 @@ pub const DEFAULT_OLLAMA_PORT: u16 = 11434;
 
 pub const LMSTUDIO_OSS_PROVIDER_ID: &str = "lmstudio";
 pub const OLLAMA_OSS_PROVIDER_ID: &str = "ollama";
+pub const OLLAMA_CHAT_PROVIDER_ID: &str = "ollama-chat";
 
 /// Built-in default provider list.
 pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
@@ -273,6 +274,10 @@ pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
         ("openai", P::create_openai_provider()),
         (
             OLLAMA_OSS_PROVIDER_ID,
+            create_oss_provider(DEFAULT_OLLAMA_PORT, WireApi::Responses),
+        ),
+        (
+            OLLAMA_CHAT_PROVIDER_ID,
             create_oss_provider(DEFAULT_OLLAMA_PORT, WireApi::Chat),
         ),
         (
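After this hunk the built-in provider map carries two Ollama entries on the same default port, differing only in wire API. A quick consumer-side sketch (a hypothetical check, relying only on items this diff exports):

```rust
use codex_core::OLLAMA_CHAT_PROVIDER_ID;
use codex_core::OLLAMA_OSS_PROVIDER_ID;
use codex_core::WireApi;
use codex_core::built_in_model_providers;

fn main() {
    let providers = built_in_model_providers();
    // Same local server and default port; only the wire protocol differs.
    assert!(providers[OLLAMA_OSS_PROVIDER_ID].wire_api == WireApi::Responses);
    assert!(providers[OLLAMA_CHAT_PROVIDER_ID].wire_api == WireApi::Chat);
}
```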
diff --git a/codex-rs/exec/src/cli.rs b/codex-rs/exec/src/cli.rs
index 8cff14f929a..e12e8693942 100644
--- a/codex-rs/exec/src/cli.rs
+++ b/codex-rs/exec/src/cli.rs
@@ -28,7 +28,7 @@ pub struct Cli {
     #[arg(long = "oss", default_value_t = false)]
     pub oss: bool,
 
-    /// Specify which local provider to use (lmstudio or ollama).
+    /// Specify which local provider to use (lmstudio, ollama, or ollama-chat).
     /// If not specified with --oss, will use config default or show selection.
     #[arg(long = "local-provider")]
     pub oss_provider: Option<String>,
diff --git a/codex-rs/exec/src/lib.rs b/codex-rs/exec/src/lib.rs
index da3389c46ac..09efd1f4dca 100644
--- a/codex-rs/exec/src/lib.rs
+++ b/codex-rs/exec/src/lib.rs
@@ -15,9 +15,11 @@ pub use cli::Command;
 pub use cli::ReviewArgs;
 use codex_common::oss::ensure_oss_provider_ready;
 use codex_common::oss::get_default_model_for_oss_provider;
+use codex_common::oss::ollama_chat_deprecation_notice;
 use codex_core::AuthManager;
 use codex_core::LMSTUDIO_OSS_PROVIDER_ID;
 use codex_core::NewThread;
+use codex_core::OLLAMA_CHAT_PROVIDER_ID;
 use codex_core::OLLAMA_OSS_PROVIDER_ID;
 use codex_core::ThreadManager;
 use codex_core::auth::enforce_login_restrictions;
@@ -176,7 +178,7 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
             Some(provider)
         } else {
             return Err(anyhow::anyhow!(
-                "No default OSS provider configured. Use --local-provider=provider or set oss_provider to either {LMSTUDIO_OSS_PROVIDER_ID} or {OLLAMA_OSS_PROVIDER_ID} in config.toml"
+                "No default OSS provider configured. Use --local-provider=provider or set oss_provider to one of: {LMSTUDIO_OSS_PROVIDER_ID}, {OLLAMA_OSS_PROVIDER_ID}, {OLLAMA_CHAT_PROVIDER_ID} in config.toml"
             ));
         }
     } else {
@@ -223,6 +225,14 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
         std::process::exit(1);
     }
 
+    let ollama_chat_support_notice = match ollama_chat_deprecation_notice(&config).await {
+        Ok(notice) => notice,
+        Err(err) => {
+            tracing::warn!(?err, "Failed to detect Ollama wire API");
+            None
+        }
+    };
+
     let otel = codex_core::otel_init::build_provider(&config, env!("CARGO_PKG_VERSION"), false);
 
     #[allow(clippy::print_stderr)]
@@ -252,6 +262,12 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
             last_message_file.clone(),
         )),
     };
+    if let Some(notice) = ollama_chat_support_notice {
+        event_processor.process_event(Event {
+            id: String::new(),
+            msg: EventMsg::DeprecationNotice(notice),
+        });
+    }
 
     if oss {
         // We're in the oss section, so provider_id should be Some
diff --git a/codex-rs/ollama/Cargo.toml b/codex-rs/ollama/Cargo.toml
index ee16bd5e057..56e8d6e5d8b 100644
--- a/codex-rs/ollama/Cargo.toml
+++ b/codex-rs/ollama/Cargo.toml
@@ -17,6 +17,7 @@ bytes = { workspace = true }
 codex-core = { workspace = true }
 futures = { workspace = true }
 reqwest = { workspace = true, features = ["json", "stream"] }
+semver = { workspace = true }
 serde_json = { workspace = true }
 tokio = { workspace = true, features = [
     "io-std",
@@ -30,3 +31,4 @@ wiremock = { workspace = true }
 
 [dev-dependencies]
 assert_matches = { workspace = true }
+pretty_assertions = { workspace = true }
diff --git a/codex-rs/ollama/src/client.rs b/codex-rs/ollama/src/client.rs
index 93244cc2e5d..4f603c68b30 100644
--- a/codex-rs/ollama/src/client.rs
+++ b/codex-rs/ollama/src/client.rs
@@ -1,6 +1,7 @@
 use bytes::BytesMut;
 use futures::StreamExt;
 use futures::stream::BoxStream;
+use semver::Version;
 use serde_json::Value as JsonValue;
 use std::collections::VecDeque;
 use std::io;
@@ -53,7 +54,7 @@ impl OllamaClient {
     }
 
     /// Build a client from a provider definition and verify the server is reachable.
-    async fn try_from_provider(provider: &ModelProviderInfo) -> io::Result<Self> {
+    pub(crate) async fn try_from_provider(provider: &ModelProviderInfo) -> io::Result<Self> {
         #![expect(clippy::expect_used)]
         let base_url = provider
             .base_url
@@ -125,6 +126,32 @@ impl OllamaClient {
         Ok(names)
     }
 
+    /// Query the server for its version string, returning `None` when unavailable.
+    pub async fn fetch_version(&self) -> io::Result<Option<Version>> {
+        let version_url = format!("{}/api/version", self.host_root.trim_end_matches('/'));
+        let resp = self
+            .client
+            .get(version_url)
+            .send()
+            .await
+            .map_err(io::Error::other)?;
+        if !resp.status().is_success() {
+            return Ok(None);
+        }
+        let val = resp.json::<JsonValue>().await.map_err(io::Error::other)?;
+        let Some(version_str) = val.get("version").and_then(|v| v.as_str()).map(str::trim) else {
+            return Ok(None);
+        };
+        let normalized = version_str.trim_start_matches('v');
+        match Version::parse(normalized) {
+            Ok(version) => Ok(Some(version)),
+            Err(err) => {
+                tracing::warn!("Failed to parse Ollama version `{version_str}`: {err}");
+                Ok(None)
+            }
+        }
+    }
+
     /// Start a model pull and emit streaming events. The returned stream ends when
     /// a Success event is observed or the server closes the connection.
     pub async fn pull_model_stream(
@@ -236,6 +263,7 @@ impl OllamaClient {
 #[cfg(test)]
 mod tests {
     use super::*;
+    use pretty_assertions::assert_eq;
 
     // Happy-path tests using a mock HTTP server; skip if sandbox network is disabled.
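`fetch_version` trims whitespace and a leading `v` before handing the string to semver. A standalone sketch of just that normalization (`parse_ollama_version` is a hypothetical helper, not part of this diff):

```rust
use semver::Version;

// Mirrors the parsing in `fetch_version`: /api/version returns JSON like
// {"version":"0.14.1"}; a leading `v` is stripped defensively.
fn parse_ollama_version(raw: &str) -> Option<Version> {
    let normalized = raw.trim().trim_start_matches('v');
    Version::parse(normalized).ok()
}

fn main() {
    assert_eq!(parse_ollama_version("0.14.1"), Some(Version::new(0, 14, 1)));
    assert_eq!(parse_ollama_version(" v0.13.4 "), Some(Version::new(0, 13, 4)));
    assert_eq!(parse_ollama_version("dev"), None);
}
```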
     #[tokio::test]
@@ -269,6 +297,42 @@ mod tests {
         assert!(models.contains(&"mistral".to_string()));
     }
 
+    #[tokio::test]
+    async fn test_fetch_version() {
+        if std::env::var(codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
+            tracing::info!(
+                "{} is set; skipping test_fetch_version",
+                codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR
+            );
+            return;
+        }
+
+        let server = wiremock::MockServer::start().await;
+        wiremock::Mock::given(wiremock::matchers::method("GET"))
+            .and(wiremock::matchers::path("/api/tags"))
+            .respond_with(wiremock::ResponseTemplate::new(200).set_body_raw(
+                serde_json::json!({ "models": [] }).to_string(),
+                "application/json",
+            ))
+            .mount(&server)
+            .await;
+        wiremock::Mock::given(wiremock::matchers::method("GET"))
+            .and(wiremock::matchers::path("/api/version"))
+            .respond_with(wiremock::ResponseTemplate::new(200).set_body_raw(
+                serde_json::json!({ "version": "0.14.1" }).to_string(),
+                "application/json",
+            ))
+            .mount(&server)
+            .await;
+
+        let client = OllamaClient::try_from_provider_with_base_url(server.uri().as_str())
+            .await
+            .expect("client");
+
+        let version = client.fetch_version().await.expect("version fetch");
+        assert_eq!(version, Some(Version::new(0, 14, 1)));
+    }
+
     #[tokio::test]
     async fn test_probe_server_happy_path_openai_compat_and_native() {
         if std::env::var(codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
diff --git a/codex-rs/ollama/src/lib.rs b/codex-rs/ollama/src/lib.rs
index 4ced3b62760..b049f0a482f 100644
--- a/codex-rs/ollama/src/lib.rs
+++ b/codex-rs/ollama/src/lib.rs
@@ -4,15 +4,23 @@ mod pull;
 mod url;
 
 pub use client::OllamaClient;
+use codex_core::ModelProviderInfo;
+use codex_core::WireApi;
 use codex_core::config::Config;
 pub use pull::CliProgressReporter;
 pub use pull::PullEvent;
 pub use pull::PullProgressReporter;
 pub use pull::TuiProgressReporter;
+use semver::Version;
 
 /// Default OSS model to use when `--oss` is passed without an explicit `-m`.
 pub const DEFAULT_OSS_MODEL: &str = "gpt-oss:20b";
 
+pub struct WireApiDetection {
+    pub wire_api: WireApi,
+    pub version: Option<Version>,
+}
+
 /// Prepare the local OSS environment when `--oss` is selected.
 ///
 /// - Ensures a local Ollama server is reachable.
@@ -45,3 +53,65 @@ pub async fn ensure_oss_ready(config: &Config) -> std::io::Result<()> {
 
     Ok(())
 }
+
+fn min_responses_version() -> Version {
+    Version::new(0, 13, 4)
+}
+
+fn wire_api_for_version(version: &Version) -> WireApi {
+    if *version == Version::new(0, 0, 0) || *version >= min_responses_version() {
+        WireApi::Responses
+    } else {
+        WireApi::Chat
+    }
+}
+
+/// Detect which wire API the running Ollama server supports based on its version.
+/// Returns `Ok(None)` when the version endpoint is missing or unparsable; callers
+/// should keep the configured default in that case.
+pub async fn detect_wire_api(
+    provider: &ModelProviderInfo,
+) -> std::io::Result<Option<WireApiDetection>> {
+    let client = crate::OllamaClient::try_from_provider(provider).await?;
+    let Some(version) = client.fetch_version().await? else {
+        return Ok(None);
+    };
+
+    let wire_api = wire_api_for_version(&version);
+
+    Ok(Some(WireApiDetection {
+        wire_api,
+        version: Some(version),
+    }))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use pretty_assertions::assert_eq;
+
+    #[test]
+    fn test_wire_api_for_version_dev_zero_keeps_responses() {
+        assert_eq!(
+            wire_api_for_version(&Version::new(0, 0, 0)),
+            WireApi::Responses
+        );
+    }
+
+    #[test]
+    fn test_wire_api_for_version_before_cutoff_is_chat() {
+        assert_eq!(wire_api_for_version(&Version::new(0, 13, 3)), WireApi::Chat);
+    }
+
+    #[test]
+    fn test_wire_api_for_version_at_or_after_cutoff_is_responses() {
+        assert_eq!(
+            wire_api_for_version(&Version::new(0, 13, 4)),
+            WireApi::Responses
+        );
+        assert_eq!(
+            wire_api_for_version(&Version::new(0, 14, 0)),
+            WireApi::Responses
+        );
+    }
+}
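Callers are expected to fold the detection result back onto the configured default, as `ollama_chat_deprecation_notice` does in common/src/oss.rs above. A minimal caller sketch (`effective_wire_api` is hypothetical; assumes `WireApi` is `Copy`):

```rust
use codex_core::ModelProviderInfo;
use codex_core::WireApi;

// Prefer what the server actually supports; keep the configured wire API
// when the version endpoint is missing or unparsable (detect_wire_api -> None).
async fn effective_wire_api(provider: &ModelProviderInfo) -> std::io::Result<WireApi> {
    Ok(match codex_ollama::detect_wire_api(provider).await? {
        Some(detection) => detection.wire_api, // Chat for servers below 0.13.4
        None => provider.wire_api,
    })
}
```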
diff --git a/codex-rs/tui/src/app.rs b/codex-rs/tui/src/app.rs
index 9e5ac2d95e4..a47e407b50e 100644
--- a/codex-rs/tui/src/app.rs
+++ b/codex-rs/tui/src/app.rs
@@ -35,6 +35,7 @@ use codex_core::features::Feature;
 use codex_core::models_manager::manager::ModelsManager;
 use codex_core::models_manager::model_presets::HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG;
 use codex_core::models_manager::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG;
+use codex_core::protocol::DeprecationNoticeEvent;
 use codex_core::protocol::EventMsg;
 use codex_core::protocol::FinalOutput;
 use codex_core::protocol::ListSkillsResponseEvent;
@@ -121,6 +122,15 @@ fn emit_skill_load_warnings(app_event_tx: &AppEventSender, errors: &[SkillErrorI
     }
 }
 
+fn emit_deprecation_notice(app_event_tx: &AppEventSender, notice: Option<DeprecationNoticeEvent>) {
+    let Some(DeprecationNoticeEvent { summary, details }) = notice else {
+        return;
+    };
+    app_event_tx.send(AppEvent::InsertHistoryCell(Box::new(
+        crate::history_cell::new_deprecation_notice(summary, details),
+    )));
+}
+
 #[derive(Debug, Clone, PartialEq, Eq)]
 struct SessionSummary {
     usage_line: String,
@@ -343,10 +353,12 @@ impl App {
         resume_selection: ResumeSelection,
         feedback: codex_feedback::CodexFeedback,
         is_first_run: bool,
+        ollama_chat_support_notice: Option<DeprecationNoticeEvent>,
     ) -> Result<AppExitInfo> {
         use tokio_stream::StreamExt;
         let (app_event_tx, mut app_event_rx) = unbounded_channel();
         let app_event_tx = AppEventSender::new(app_event_tx);
+        emit_deprecation_notice(&app_event_tx, ollama_chat_support_notice);
 
         let thread_manager = Arc::new(ThreadManager::new(
             config.codex_home.clone(),
diff --git a/codex-rs/tui/src/cli.rs b/codex-rs/tui/src/cli.rs
index 2b19b4c0649..617e3480524 100644
--- a/codex-rs/tui/src/cli.rs
+++ b/codex-rs/tui/src/cli.rs
@@ -41,7 +41,7 @@ pub struct Cli {
     #[arg(long = "oss", default_value_t = false)]
     pub oss: bool,
 
-    /// Specify which local provider to use (lmstudio or ollama).
+    /// Specify which local provider to use (lmstudio, ollama, or ollama-chat).
     /// If not specified with --oss, will use config default or show selection.
     #[arg(long = "local-provider")]
     pub oss_provider: Option<String>,
#[arg(long = "local-provider")] pub oss_provider: Option, diff --git a/codex-rs/tui/src/lib.rs b/codex-rs/tui/src/lib.rs index 5855cfb71d8..99b6a8e2be4 100644 --- a/codex-rs/tui/src/lib.rs +++ b/codex-rs/tui/src/lib.rs @@ -9,6 +9,7 @@ pub use app::AppExitInfo; use codex_app_server_protocol::AuthMode; use codex_common::oss::ensure_oss_provider_ready; use codex_common::oss::get_default_model_for_oss_provider; +use codex_common::oss::ollama_chat_deprecation_notice; use codex_core::AuthManager; use codex_core::CodexAuth; use codex_core::INTERACTIVE_SESSION_SOURCES; @@ -428,6 +429,14 @@ async fn run_ratatui_app( initial_config }; + let ollama_chat_support_notice = match ollama_chat_deprecation_notice(&config).await { + Ok(notice) => notice, + Err(err) => { + tracing::warn!(?err, "Failed to detect Ollama wire API"); + None + } + }; + // Determine resume behavior: explicit id, then resume last, then picker. let resume_selection = if let Some(id_str) = cli.resume_session_id.as_deref() { match find_thread_path_by_id_str(&config.codex_home, id_str).await? { @@ -505,6 +514,7 @@ async fn run_ratatui_app( resume_selection, feedback, should_show_trust_screen, // Proxy to: is it a first run in this directory? + ollama_chat_support_notice, ) .await; diff --git a/codex-rs/tui/src/oss_selection.rs b/codex-rs/tui/src/oss_selection.rs index eb1ca182314..72679191092 100644 --- a/codex-rs/tui/src/oss_selection.rs +++ b/codex-rs/tui/src/oss_selection.rs @@ -4,6 +4,7 @@ use std::sync::LazyLock; use codex_core::DEFAULT_LMSTUDIO_PORT; use codex_core::DEFAULT_OLLAMA_PORT; use codex_core::LMSTUDIO_OSS_PROVIDER_ID; +use codex_core::OLLAMA_CHAT_PROVIDER_ID; use codex_core::OLLAMA_OSS_PROVIDER_ID; use codex_core::config::set_default_oss_provider; use crossterm::event::Event; @@ -70,10 +71,16 @@ static OSS_SELECT_OPTIONS: LazyLock> = LazyLock::new(|| { }, SelectOption { label: Line::from(vec!["O".underlined(), "llama".into()]), - description: "Local Ollama server (default port 11434)", + description: "Local Ollama server (Responses API, default port 11434)", key: KeyCode::Char('o'), provider_id: OLLAMA_OSS_PROVIDER_ID, }, + SelectOption { + label: Line::from(vec!["Ollama (".into(), "c".underlined(), "hat)".into()]), + description: "Local Ollama server (chat wire API, default port 11434)", + key: KeyCode::Char('c'), + provider_id: OLLAMA_CHAT_PROVIDER_ID, + }, ] }); @@ -99,7 +106,11 @@ impl OssSelectionWidget<'_> { status: lmstudio_status, }, ProviderOption { - name: "Ollama".to_string(), + name: "Ollama (Responses)".to_string(), + status: ollama_status.clone(), + }, + ProviderOption { + name: "Ollama (Chat)".to_string(), status: ollama_status, }, ]; diff --git a/codex-rs/tui2/src/app.rs b/codex-rs/tui2/src/app.rs index 292ccb5ac6f..51cd55a7f3e 100644 --- a/codex-rs/tui2/src/app.rs +++ b/codex-rs/tui2/src/app.rs @@ -52,6 +52,7 @@ use codex_core::features::Feature; use codex_core::models_manager::manager::ModelsManager; use codex_core::models_manager::model_presets::HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG; use codex_core::models_manager::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG; +use codex_core::protocol::DeprecationNoticeEvent; use codex_core::protocol::EventMsg; use codex_core::protocol::FinalOutput; use codex_core::protocol::ListSkillsResponseEvent; @@ -162,6 +163,15 @@ fn emit_skill_load_warnings(app_event_tx: &AppEventSender, errors: &[SkillErrorI } } +fn emit_deprecation_notice(app_event_tx: &AppEventSender, notice: Option) { + let Some(DeprecationNoticeEvent { summary, details }) = notice else { + 
diff --git a/codex-rs/tui2/src/app.rs b/codex-rs/tui2/src/app.rs
index 292ccb5ac6f..51cd55a7f3e 100644
--- a/codex-rs/tui2/src/app.rs
+++ b/codex-rs/tui2/src/app.rs
@@ -52,6 +52,7 @@ use codex_core::features::Feature;
 use codex_core::models_manager::manager::ModelsManager;
 use codex_core::models_manager::model_presets::HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG;
 use codex_core::models_manager::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG;
+use codex_core::protocol::DeprecationNoticeEvent;
 use codex_core::protocol::EventMsg;
 use codex_core::protocol::FinalOutput;
 use codex_core::protocol::ListSkillsResponseEvent;
@@ -162,6 +163,15 @@ fn emit_skill_load_warnings(app_event_tx: &AppEventSender, errors: &[SkillErrorI
     }
 }
 
+fn emit_deprecation_notice(app_event_tx: &AppEventSender, notice: Option<DeprecationNoticeEvent>) {
+    let Some(DeprecationNoticeEvent { summary, details }) = notice else {
+        return;
+    };
+    app_event_tx.send(AppEvent::InsertHistoryCell(Box::new(
+        crate::history_cell::new_deprecation_notice(summary, details),
+    )));
+}
+
 #[derive(Debug, Clone, PartialEq, Eq)]
 struct SessionSummary {
     usage_line: String,
@@ -406,10 +416,12 @@ impl App {
         resume_selection: ResumeSelection,
         feedback: codex_feedback::CodexFeedback,
         is_first_run: bool,
+        ollama_chat_support_notice: Option<DeprecationNoticeEvent>,
     ) -> Result<AppExitInfo> {
         use tokio_stream::StreamExt;
         let (app_event_tx, mut app_event_rx) = unbounded_channel();
         let app_event_tx = AppEventSender::new(app_event_tx);
+        emit_deprecation_notice(&app_event_tx, ollama_chat_support_notice);
 
         let thread_manager = Arc::new(ThreadManager::new(
             config.codex_home.clone(),
diff --git a/codex-rs/tui2/src/cli.rs b/codex-rs/tui2/src/cli.rs
index b0daa447701..89949423461 100644
--- a/codex-rs/tui2/src/cli.rs
+++ b/codex-rs/tui2/src/cli.rs
@@ -41,7 +41,7 @@ pub struct Cli {
     #[arg(long = "oss", default_value_t = false)]
     pub oss: bool,
 
-    /// Specify which local provider to use (lmstudio or ollama).
+    /// Specify which local provider to use (lmstudio, ollama, or ollama-chat).
     /// If not specified with --oss, will use config default or show selection.
     #[arg(long = "local-provider")]
     pub oss_provider: Option<String>,
diff --git a/codex-rs/tui2/src/lib.rs b/codex-rs/tui2/src/lib.rs
index ee4d4d5d5e6..483450c326b 100644
--- a/codex-rs/tui2/src/lib.rs
+++ b/codex-rs/tui2/src/lib.rs
@@ -9,6 +9,7 @@ pub use app::AppExitInfo;
 use codex_app_server_protocol::AuthMode;
 use codex_common::oss::ensure_oss_provider_ready;
 use codex_common::oss::get_default_model_for_oss_provider;
+use codex_common::oss::ollama_chat_deprecation_notice;
 use codex_core::AuthManager;
 use codex_core::CodexAuth;
 use codex_core::INTERACTIVE_SESSION_SOURCES;
@@ -448,6 +449,14 @@ async fn run_ratatui_app(
         initial_config
     };
 
+    let ollama_chat_support_notice = match ollama_chat_deprecation_notice(&config).await {
+        Ok(notice) => notice,
+        Err(err) => {
+            tracing::warn!(?err, "Failed to detect Ollama wire API");
+            None
+        }
+    };
+
     // Determine resume behavior: explicit id, then resume last, then picker.
     let resume_selection = if let Some(id_str) = cli.resume_session_id.as_deref() {
         match find_thread_path_by_id_str(&config.codex_home, id_str).await? {
@@ -533,6 +542,7 @@ async fn run_ratatui_app(
         resume_selection,
         feedback,
         should_show_trust_screen, // Proxy to: is it a first run in this directory?
+        ollama_chat_support_notice,
     )
     .await;
 
diff --git a/codex-rs/tui2/src/oss_selection.rs b/codex-rs/tui2/src/oss_selection.rs
index eb1ca182314..72679191092 100644
--- a/codex-rs/tui2/src/oss_selection.rs
+++ b/codex-rs/tui2/src/oss_selection.rs
@@ -4,6 +4,7 @@ use std::sync::LazyLock;
 use codex_core::DEFAULT_LMSTUDIO_PORT;
 use codex_core::DEFAULT_OLLAMA_PORT;
 use codex_core::LMSTUDIO_OSS_PROVIDER_ID;
+use codex_core::OLLAMA_CHAT_PROVIDER_ID;
 use codex_core::OLLAMA_OSS_PROVIDER_ID;
 use codex_core::config::set_default_oss_provider;
 use crossterm::event::Event;
@@ -70,10 +71,16 @@ static OSS_SELECT_OPTIONS: LazyLock<Vec<SelectOption>> = LazyLock::new(|| {
         },
         SelectOption {
             label: Line::from(vec!["O".underlined(), "llama".into()]),
-            description: "Local Ollama server (default port 11434)",
+            description: "Local Ollama server (Responses API, default port 11434)",
             key: KeyCode::Char('o'),
             provider_id: OLLAMA_OSS_PROVIDER_ID,
         },
+        SelectOption {
+            label: Line::from(vec!["Ollama (".into(), "c".underlined(), "hat)".into()]),
+            description: "Local Ollama server (chat wire API, default port 11434)",
+            key: KeyCode::Char('c'),
+            provider_id: OLLAMA_CHAT_PROVIDER_ID,
+        },
     ]
 });
 
@@ -99,7 +106,11 @@ impl OssSelectionWidget<'_> {
                 status: lmstudio_status,
             },
             ProviderOption {
-                name: "Ollama".to_string(),
+                name: "Ollama (Responses)".to_string(),
+                status: ollama_status.clone(),
+            },
+            ProviderOption {
+                name: "Ollama (Chat)".to_string(),
                 status: ollama_status,
             },
         ];