diff --git a/codex-rs/app-server-protocol/src/protocol/common.rs b/codex-rs/app-server-protocol/src/protocol/common.rs
index 23dcd29e35..1062c62f26 100644
--- a/codex-rs/app-server-protocol/src/protocol/common.rs
+++ b/codex-rs/app-server-protocol/src/protocol/common.rs
@@ -528,7 +528,7 @@ mod tests {
         let request = ClientRequest::NewConversation {
             request_id: RequestId::Integer(42),
             params: v1::NewConversationParams {
-                model: Some("arcticfox".to_string()),
+                model: Some("gpt-5.1-codex-max".to_string()),
                 model_provider: None,
                 profile: None,
                 cwd: None,
@@ -546,7 +546,7 @@ mod tests {
             "method": "newConversation",
             "id": 42,
             "params": {
-                "model": "arcticfox",
+                "model": "gpt-5.1-codex-max",
                 "modelProvider": null,
                 "profile": null,
                 "cwd": null,
diff --git a/codex-rs/app-server/tests/suite/config.rs b/codex-rs/app-server/tests/suite/config.rs
index 5d1c7f9d11..75dba57229 100644
--- a/codex-rs/app-server/tests/suite/config.rs
+++ b/codex-rs/app-server/tests/suite/config.rs
@@ -27,7 +27,7 @@ fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
     std::fs::write(
         config_toml,
         r#"
-model = "arcticfox"
+model = "gpt-5.1-codex-max"
 approval_policy = "on-request"
 sandbox_mode = "workspace-write"
 model_reasoning_summary = "detailed"
@@ -87,7 +87,7 @@ async fn get_config_toml_parses_all_fields() -> Result<()> {
         }),
         forced_chatgpt_workspace_id: Some("12345678-0000-0000-0000-000000000000".into()),
         forced_login_method: Some(ForcedLoginMethod::Chatgpt),
-        model: Some("arcticfox".into()),
+        model: Some("gpt-5.1-codex-max".into()),
         model_reasoning_effort: Some(ReasoningEffort::High),
         model_reasoning_summary: Some(ReasoningSummary::Detailed),
         model_verbosity: Some(Verbosity::Medium),
diff --git a/codex-rs/app-server/tests/suite/set_default_model.rs b/codex-rs/app-server/tests/suite/set_default_model.rs
index 4d742a8434..b56c54dbd9 100644
--- a/codex-rs/app-server/tests/suite/set_default_model.rs
+++ b/codex-rs/app-server/tests/suite/set_default_model.rs
@@ -57,7 +57,7 @@ fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
     std::fs::write(
         config_toml,
         r#"
-model = "arcticfox"
+model = "gpt-5.1-codex-max"
 model_reasoning_effort = "medium"
 "#,
     )
diff --git a/codex-rs/app-server/tests/suite/v2/model_list.rs b/codex-rs/app-server/tests/suite/v2/model_list.rs
index 22c0bf6379..3c4844fed9 100644
--- a/codex-rs/app-server/tests/suite/v2/model_list.rs
+++ b/codex-rs/app-server/tests/suite/v2/model_list.rs
@@ -46,9 +46,9 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
     let expected_models = vec![
         Model {
-            id: "arcticfox".to_string(),
-            model: "arcticfox".to_string(),
-            display_name: "arcticfox".to_string(),
+            id: "gpt-5.1-codex-max".to_string(),
+            model: "gpt-5.1-codex-max".to_string(),
+            display_name: "gpt-5.1-codex-max".to_string(),
             description: "Latest Codex-optimized flagship for deep and fast reasoning.".to_string(),
             supported_reasoning_efforts: vec![
                 ReasoningEffortOption {
@@ -174,7 +174,7 @@ async fn list_models_pagination_works() -> Result<()> {
     } = to_response::(first_response)?;
     assert_eq!(first_items.len(), 1);
-    assert_eq!(first_items[0].id, "arcticfox");
+    assert_eq!(first_items[0].id, "gpt-5.1-codex-max");
     let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?;
     let second_request = mcp
diff --git a/codex-rs/app-server/tests/suite/v2/thread_resume.rs b/codex-rs/app-server/tests/suite/v2/thread_resume.rs
index 1a9c76979a..806ba08fee 100644
--- a/codex-rs/app-server/tests/suite/v2/thread_resume.rs
+++ b/codex-rs/app-server/tests/suite/v2/thread_resume.rs
@@ -27,7 +27,7 @@ async fn thread_resume_returns_original_thread() -> Result<()> {
     // Start a thread.
     let start_id = mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("arcticfox".to_string()),
+            model: Some("gpt-5.1-codex-max".to_string()),
             ..Default::default()
         })
         .await?;
@@ -69,7 +69,7 @@ async fn thread_resume_prefers_path_over_thread_id() -> Result<()> {
     let start_id = mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("arcticfox".to_string()),
+            model: Some("gpt-5.1-codex-max".to_string()),
             ..Default::default()
         })
         .await?;
@@ -114,7 +114,7 @@ async fn thread_resume_supports_history_and_overrides() -> Result<()> {
     // Start a thread.
     let start_id = mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("arcticfox".to_string()),
+            model: Some("gpt-5.1-codex-max".to_string()),
             ..Default::default()
         })
         .await?;
diff --git a/codex-rs/common/src/model_presets.rs b/codex-rs/common/src/model_presets.rs
index dbbe1aa4e6..a031f23b1d 100644
--- a/codex-rs/common/src/model_presets.rs
+++ b/codex-rs/common/src/model_presets.rs
@@ -5,7 +5,8 @@ use codex_core::protocol_config_types::ReasoningEffort;
 use once_cell::sync::Lazy;
 
 pub const HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG: &str = "hide_gpt5_1_migration_prompt";
-pub const HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG: &str = "hide_arcticfox_migration_prompt";
+pub const HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG: &str =
+    "hide_gpt-5.1-codex-max_migration_prompt";
 
 /// A reasoning effort option that can be surfaced for a model.
 #[derive(Debug, Clone, Copy)]
@@ -49,9 +50,9 @@ pub struct ModelPreset {
 static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
     vec![
         ModelPreset {
-            id: "arcticfox",
-            model: "arcticfox",
-            display_name: "arcticfox",
+            id: "gpt-5.1-codex-max",
+            model: "gpt-5.1-codex-max",
+            display_name: "gpt-5.1-codex-max",
             description: "Latest Codex-optimized flagship for deep and fast reasoning.",
             default_reasoning_effort: ReasoningEffort::Medium,
             supported_reasoning_efforts: &[
@@ -98,9 +99,9 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
             ],
             is_default: false,
             upgrade: Some(ModelUpgrade {
-                id: "arcticfox",
+                id: "gpt-5.1-codex-max",
                 reasoning_effort_mapping: None,
-                migration_config_key: HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG,
+                migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
             }),
             show_in_picker: true,
         },
@@ -121,7 +122,11 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
                 },
             ],
            is_default: false,
-            upgrade: None,
+            upgrade: Some(ModelUpgrade {
+                id: "gpt-5.1-codex-max",
+                reasoning_effort_mapping: None,
+                migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
+            }),
             show_in_picker: true,
         },
         ModelPreset {
@@ -145,7 +150,11 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
                 },
             ],
             is_default: false,
-            upgrade: None,
+            upgrade: Some(ModelUpgrade {
+                id: "gpt-5.1-codex-max",
+                reasoning_effort_mapping: None,
+                migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
+            }),
             show_in_picker: true,
         },
         // Deprecated models.
@@ -171,9 +180,9 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
             ],
             is_default: false,
             upgrade: Some(ModelUpgrade {
-                id: "arcticfox",
+                id: "gpt-5.1-codex-max",
                 reasoning_effort_mapping: None,
-                migration_config_key: HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG,
+                migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
             }),
             show_in_picker: false,
         },
@@ -227,12 +236,9 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
             ],
             is_default: false,
             upgrade: Some(ModelUpgrade {
-                id: "gpt-5.1",
-                reasoning_effort_mapping: Some(HashMap::from([(
-                    ReasoningEffort::Minimal,
-                    ReasoningEffort::Low,
-                )])),
-                migration_config_key: HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG,
+                id: "gpt-5.1-codex-max",
+                reasoning_effort_mapping: None,
+                migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
             }),
             show_in_picker: false,
         },
@@ -243,7 +249,7 @@ pub fn builtin_model_presets(auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
     PRESETS
         .iter()
         .filter(|preset| match auth_mode {
-            Some(AuthMode::ApiKey) => preset.show_in_picker && preset.id != "arcticfox",
+            Some(AuthMode::ApiKey) => preset.show_in_picker && preset.id != "gpt-5.1-codex-max",
             _ => preset.show_in_picker,
         })
         .cloned()
@@ -266,8 +272,12 @@ mod tests {
     }
 
     #[test]
-    fn arcticfox_hidden_for_api_key_auth() {
+    fn gpt_5_1_codex_max_hidden_for_api_key_auth() {
         let presets = builtin_model_presets(Some(AuthMode::ApiKey));
-        assert!(presets.iter().all(|preset| preset.id != "arcticfox"));
+        assert!(
+            presets
+                .iter()
+                .all(|preset| preset.id != "gpt-5.1-codex-max")
+        );
     }
 }
diff --git a/codex-rs/core/arcticfox_prompt.md b/codex-rs/core/gpt-5.1-codex-max_prompt.md
similarity index 100%
rename from codex-rs/core/arcticfox_prompt.md
rename to codex-rs/core/gpt-5.1-codex-max_prompt.md
diff --git a/codex-rs/core/src/client_common.rs b/codex-rs/core/src/client_common.rs
index 7e3eb1ac98..6e5e11204d 100644
--- a/codex-rs/core/src/client_common.rs
+++ b/codex-rs/core/src/client_common.rs
@@ -431,7 +431,7 @@ mod tests {
                 expects_apply_patch_instructions: false,
             },
             InstructionsTestCase {
-                slug: "arcticfox",
+                slug: "gpt-5.1-codex-max",
                 expects_apply_patch_instructions: false,
             },
         ];
diff --git a/codex-rs/core/src/config/edit.rs b/codex-rs/core/src/config/edit.rs
index d3bc5cf99b..bd7c16a0ae 100644
--- a/codex-rs/core/src/config/edit.rs
+++ b/codex-rs/core/src/config/edit.rs
@@ -846,7 +846,7 @@ hide_gpt5_1_migration_prompt = true
     }
 
     #[test]
-    fn blocking_set_hide_arcticfox_migration_prompt_preserves_table() {
+    fn blocking_set_hide_gpt_5_1_codex_max_migration_prompt_preserves_table() {
         let tmp = tempdir().expect("tmpdir");
         let codex_home = tmp.path();
         std::fs::write(
@@ -860,7 +860,7 @@ existing = "value"
             codex_home,
             None,
             &[ConfigEdit::SetNoticeHideModelMigrationPrompt(
-                "hide_arcticfox_migration_prompt".to_string(),
+                "hide_gpt-5.1-codex-max_migration_prompt".to_string(),
                 true,
             )],
         )
@@ -870,7 +870,7 @@ existing = "value"
             std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
         let expected = r#"[notice]
 existing = "value"
-hide_arcticfox_migration_prompt = true
+"hide_gpt-5.1-codex-max_migration_prompt" = true
 "#;
         assert_eq!(contents, expected);
     }
diff --git a/codex-rs/core/src/config/mod.rs b/codex-rs/core/src/config/mod.rs
index 36d4ef4dbc..1f53b5c94d 100644
--- a/codex-rs/core/src/config/mod.rs
+++ b/codex-rs/core/src/config/mod.rs
@@ -62,11 +62,11 @@ pub mod profile;
 pub mod types;
 
 #[cfg(target_os = "windows")]
-pub const OPENAI_DEFAULT_MODEL: &str = "arcticfox";
+pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5.1-codex-max";
 #[cfg(not(target_os = "windows"))]
-pub const OPENAI_DEFAULT_MODEL: &str = "arcticfox";
-const OPENAI_DEFAULT_REVIEW_MODEL: &str = "arcticfox";
-pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "arcticfox";
+pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5.1-codex-max";
+const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5.1-codex-max";
+pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "gpt-5.1-codex-max";
 
 /// Maximum number of bytes of the documentation that will be embedded. Larger
 /// files are *silently truncated* to this size so we do not take up too much of
@@ -81,7 +81,7 @@ pub struct Config {
     /// Optional override of model selection.
     pub model: String,
 
-    /// Model used specifically for review sessions. Defaults to "arcticfox".
+    /// Model used specifically for review sessions. Defaults to "gpt-5.1-codex-max".
     pub review_model: String,
 
     pub model_family: ModelFamily,
diff --git a/codex-rs/core/src/config/types.rs b/codex-rs/core/src/config/types.rs
index 45b2c1efbf..b7f16a9028 100644
--- a/codex-rs/core/src/config/types.rs
+++ b/codex-rs/core/src/config/types.rs
@@ -378,8 +378,8 @@ pub struct Notice {
     pub hide_rate_limit_model_nudge: Option<bool>,
     /// Tracks whether the user has seen the model migration prompt
    pub hide_gpt5_1_migration_prompt: Option<bool>,
-    /// Tracks whether the user has seen the arcticfox migration prompt
-    pub hide_arcticfox_migration_prompt: Option<bool>,
+    /// Tracks whether the user has seen the gpt-5.1-codex-max migration prompt
+    pub hide_gpt_5_1_codex_max_migration_prompt: Option<bool>,
 }
 
 impl Notice {
diff --git a/codex-rs/core/src/model_family.rs b/codex-rs/core/src/model_family.rs
index be57c53d9c..725a998bed 100644
--- a/codex-rs/core/src/model_family.rs
+++ b/codex-rs/core/src/model_family.rs
@@ -12,7 +12,7 @@ const BASE_INSTRUCTIONS: &str = include_str!("../prompt.md");
 const GPT_5_CODEX_INSTRUCTIONS: &str = include_str!("../gpt_5_codex_prompt.md");
 const GPT_5_1_INSTRUCTIONS: &str = include_str!("../gpt_5_1_prompt.md");
-const ARCTICFOX_INSTRUCTIONS: &str = include_str!("../arcticfox_prompt.md");
+const GPT_5_1_CODEX_MAX_INSTRUCTIONS: &str = include_str!("../gpt-5.1-codex-max_prompt.md");
 
 /// A model family is a group of models that share certain characteristics.
 #[derive(Debug, Clone, PartialEq, Eq, Hash)]
@@ -173,12 +173,12 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
             support_verbosity: true,
             truncation_policy: TruncationPolicy::Tokens(10_000),
         )
-    } else if slug.starts_with("arcticfox") {
+    } else if slug.starts_with("gpt-5.1-codex-max") {
         model_family!(
             slug, slug,
             supports_reasoning_summaries: true,
             reasoning_summary_format: ReasoningSummaryFormat::Experimental,
-            base_instructions: ARCTICFOX_INSTRUCTIONS.to_string(),
+            base_instructions: GPT_5_1_CODEX_MAX_INSTRUCTIONS.to_string(),
             apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
             shell_type: ConfigShellToolType::ShellCommand,
             supports_parallel_tool_calls: true,
@@ -202,7 +202,7 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
             support_verbosity: false,
             truncation_policy: TruncationPolicy::Tokens(10_000),
         )
-    } else if slug.starts_with("arcticfox") {
+    } else if slug.starts_with("gpt-5.1-codex-max") {
         model_family!(
             slug, slug,
             supports_reasoning_summaries: true,
diff --git a/codex-rs/core/src/openai_model_info.rs b/codex-rs/core/src/openai_model_info.rs
index cd5d12f8a4..bd0ca9d31d 100644
--- a/codex-rs/core/src/openai_model_info.rs
+++ b/codex-rs/core/src/openai_model_info.rs
@@ -72,7 +72,7 @@ pub(crate) fn get_model_info(model_family: &ModelFamily) -> Option<ModelInfo> {
         _ if slug.starts_with("gpt-5-codex")
             || slug.starts_with("gpt-5.1-codex")
-            || slug.starts_with("arcticfox") =>
+            || slug.starts_with("gpt-5.1-codex-max") =>
         {
             Some(ModelInfo::new(CONTEXT_WINDOW_272K, MAX_OUTPUT_TOKENS_128K))
         }
diff --git a/codex-rs/core/tests/suite/client.rs b/codex-rs/core/tests/suite/client.rs
index 1495fbf80b..e15e05a99a 100644
--- a/codex-rs/core/tests/suite/client.rs
+++ b/codex-rs/core/tests/suite/client.rs
@@ -1155,7 +1155,7 @@ async fn token_count_includes_rate_limits_snapshot() {
                 "reasoning_output_tokens": 0,
                 "total_tokens": 123
             },
-            // Default model is arcticfox in tests → 95% usable context window
+            // Default model is gpt-5.1-codex-max in tests → 95% usable context window
             "model_context_window": 258400
         },
         "rate_limits": {
diff --git a/codex-rs/tui/src/app.rs b/codex-rs/tui/src/app.rs
index a60f1f2723..7c86dd3b6e 100644
--- a/codex-rs/tui/src/app.rs
+++ b/codex-rs/tui/src/app.rs
@@ -19,7 +19,7 @@ use crate::tui::TuiEvent;
 use crate::update_action::UpdateAction;
 use codex_ansi_escape::ansi_escape_line;
 use codex_app_server_protocol::AuthMode;
-use codex_common::model_presets::HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG;
+use codex_common::model_presets::HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG;
 use codex_common::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG;
 use codex_common::model_presets::ModelUpgrade;
 use codex_common::model_presets::all_model_presets;
@@ -57,7 +57,7 @@ use tokio::sync::mpsc::unbounded_channel;
 use crate::history_cell::UpdateAvailableHistoryCell;
 
 const GPT_5_1_MIGRATION_AUTH_MODES: [AuthMode; 2] = [AuthMode::ChatGPT, AuthMode::ApiKey];
-const ARCTICFOX_MIGRATION_AUTH_MODES: [AuthMode; 1] = [AuthMode::ChatGPT];
+const GPT_5_1_CODEX_MIGRATION_AUTH_MODES: [AuthMode; 1] = [AuthMode::ChatGPT];
 
 #[derive(Debug, Clone)]
 pub struct AppExitInfo {
@@ -106,7 +106,9 @@ fn should_show_model_migration_prompt(
 
 fn migration_prompt_hidden(config: &Config, migration_config_key: &str) -> Option<bool> {
     match migration_config_key {
-        HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG => config.notices.hide_arcticfox_migration_prompt,
+        HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG => {
+            config.notices.hide_gpt_5_1_codex_max_migration_prompt
+        }
         HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG => config.notices.hide_gpt5_1_migration_prompt,
         _ => None,
     }
 }
@@ -170,6 +172,11 @@ async fn handle_model_migration_prompt_if_needed(
                 effort: mapped_effort,
             });
         }
+        ModelMigrationOutcome::Rejected => {
+            app_event_tx.send(AppEvent::PersistModelMigrationPromptAcknowledged {
+                migration_config: migration_config_key.to_string(),
+            });
+        }
         ModelMigrationOutcome::Exit => {
             return Some(AppExitInfo {
                 token_usage: TokenUsage::default(),
@@ -641,19 +648,17 @@ impl App {
                     .await
                 {
                     Ok(()) => {
-                        let effort_label = effort
-                            .map(|eff| format!(" with {eff} reasoning"))
-                            .unwrap_or_else(|| " with default reasoning".to_string());
+                        let reasoning_label = Self::reasoning_label(effort);
                         if let Some(profile) = profile {
                             self.chat_widget.add_info_message(
                                 format!(
-                                    "Model changed to {model}{effort_label} for {profile} profile"
+                                    "Model changed to {model} {reasoning_label} for {profile} profile"
                                 ),
                                 None,
                             );
                         } else {
                             self.chat_widget.add_info_message(
-                                format!("Model changed to {model}{effort_label}"),
+                                format!("Model changed to {model} {reasoning_label}"),
                                 None,
                             );
                         }
@@ -821,6 +826,17 @@ impl App {
         Ok(true)
     }
 
+    fn reasoning_label(reasoning_effort: Option<ReasoningEffortConfig>) -> &'static str {
+        match reasoning_effort {
+            Some(ReasoningEffortConfig::Minimal) => "minimal",
+            Some(ReasoningEffortConfig::Low) => "low",
+            Some(ReasoningEffortConfig::Medium) => "medium",
+            Some(ReasoningEffortConfig::High) => "high",
+            Some(ReasoningEffortConfig::XHigh) => "xhigh",
+            None | Some(ReasoningEffortConfig::None) => "default",
+        }
+    }
+
     pub(crate) fn token_usage(&self) -> codex_core::protocol::TokenUsage {
         self.chat_widget.token_usage()
     }
@@ -946,7 +962,7 @@ impl App {
 fn migration_prompt_allowed_auth_modes(migration_config_key: &str) -> Option<&'static [AuthMode]> {
     match migration_config_key {
         HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG => Some(&GPT_5_1_MIGRATION_AUTH_MODES),
-        HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG => Some(&ARCTICFOX_MIGRATION_AUTH_MODES),
+        HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG => Some(&GPT_5_1_CODEX_MIGRATION_AUTH_MODES),
         _ => None,
     }
 }
@@ -1034,7 +1050,7 @@ mod tests {
         ));
         assert!(should_show_model_migration_prompt(
             "gpt-5.1-codex",
-            "arcticfox",
+            "gpt-5.1-codex-max",
             None
         ));
         assert!(!should_show_model_migration_prompt(
@@ -1181,14 +1197,14 @@ mod tests {
     }
 
     #[test]
-    fn arcticfox_migration_limits_to_chatgpt() {
+    fn gpt_5_1_codex_max_migration_limits_to_chatgpt() {
         assert!(migration_prompt_allows_auth_mode(
             Some(AuthMode::ChatGPT),
-            HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG,
+            HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
         ));
         assert!(!migration_prompt_allows_auth_mode(
             Some(AuthMode::ApiKey),
-            HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG,
+            HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
         ));
     }
 
diff --git a/codex-rs/tui/src/chatwidget.rs b/codex-rs/tui/src/chatwidget.rs
index 6d4ba5fdf2..9429ce143e 100644
--- a/codex-rs/tui/src/chatwidget.rs
+++ b/codex-rs/tui/src/chatwidget.rs
@@ -2030,8 +2030,8 @@ impl ChatWidget {
             let effort_label = Self::reasoning_effort_label(effort);
             format!("⚠ {effort_label} reasoning effort can quickly consume Plus plan rate limits.")
         });
-        let warn_for_model =
-            preset.model.starts_with("gpt-5.1-codex") || preset.model.starts_with("arcticfox");
+        let warn_for_model = preset.model.starts_with("gpt-5.1-codex")
+            || preset.model.starts_with("gpt-5.1-codex-max");
 
         struct EffortChoice {
             stored: Option<ReasoningEffortConfig>,
diff --git a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_reasoning_selection_popup.snap b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_reasoning_selection_popup.snap
index 227278d05d..b4b89736a9 100644
--- a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_reasoning_selection_popup.snap
+++ b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_reasoning_selection_popup.snap
@@ -2,7 +2,7 @@ source: tui/src/chatwidget/tests.rs
 expression: popup
 ---
-Select Reasoning Level for arcticfox
+Select Reasoning Level for gpt-5.1-codex-max
 
 1. Low               Fast responses with lighter reasoning
 2. Medium (default)  Balances speed and reasoning depth for everyday tasks
diff --git a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_reasoning_selection_popup_extra_high_warning.snap b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_reasoning_selection_popup_extra_high_warning.snap
index f630598322..c5332ff590 100644
--- a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_reasoning_selection_popup_extra_high_warning.snap
+++ b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_reasoning_selection_popup_extra_high_warning.snap
@@ -3,7 +3,7 @@ source: tui/src/chatwidget/tests.rs
 assertion_line: 1548
 expression: popup
 ---
-Select Reasoning Level for arcticfox
+Select Reasoning Level for gpt-5.1-codex-max
 
 1. Low               Fast responses with lighter reasoning
 2. Medium (default)  Balances speed and reasoning depth for everyday
diff --git a/codex-rs/tui/src/chatwidget/tests.rs b/codex-rs/tui/src/chatwidget/tests.rs
index 180f9db037..b4305bac7e 100644
--- a/codex-rs/tui/src/chatwidget/tests.rs
+++ b/codex-rs/tui/src/chatwidget/tests.rs
@@ -1526,13 +1526,13 @@ fn startup_prompts_for_windows_sandbox_when_agent_requested() {
 fn model_reasoning_selection_popup_snapshot() {
     let (mut chat, _rx, _op_rx) = make_chatwidget_manual();
 
-    chat.config.model = "arcticfox".to_string();
+    chat.config.model = "gpt-5.1-codex-max".to_string();
     chat.config.model_reasoning_effort = Some(ReasoningEffortConfig::High);
     let preset = builtin_model_presets(None)
         .into_iter()
-        .find(|preset| preset.model == "arcticfox")
-        .expect("arcticfox preset");
+        .find(|preset| preset.model == "gpt-5.1-codex-max")
+        .expect("gpt-5.1-codex-max preset");
     chat.open_reasoning_popup(preset);
 
     let popup = render_bottom_popup(&chat, 80);
@@ -1543,13 +1543,13 @@ fn model_reasoning_selection_popup_extra_high_warning_snapshot() {
     let (mut chat, _rx, _op_rx) = make_chatwidget_manual();
 
-    chat.config.model = "arcticfox".to_string();
+    chat.config.model = "gpt-5.1-codex-max".to_string();
     chat.config.model_reasoning_effort = Some(ReasoningEffortConfig::XHigh);
     let preset = builtin_model_presets(None)
         .into_iter()
-        .find(|preset| preset.model == "arcticfox")
-        .expect("arcticfox preset");
+        .find(|preset| preset.model == "gpt-5.1-codex-max")
+        .expect("gpt-5.1-codex-max preset");
     chat.open_reasoning_popup(preset);
 
     let popup = render_bottom_popup(&chat, 80);
@@ -1560,12 +1560,12 @@ fn reasoning_popup_shows_extra_high_with_space() {
     let (mut chat, _rx, _op_rx) = make_chatwidget_manual();
 
-    chat.config.model = "arcticfox".to_string();
+    chat.config.model = "gpt-5.1-codex-max".to_string();
     let preset = builtin_model_presets(None)
         .into_iter()
-        .find(|preset| preset.model == "arcticfox")
-        .expect("arcticfox preset");
+        .find(|preset| preset.model == "gpt-5.1-codex-max")
.expect("gpt-5.1-codex-max preset"); chat.open_reasoning_popup(preset); let popup = render_bottom_popup(&chat, 120); diff --git a/codex-rs/tui/src/model_migration.rs b/codex-rs/tui/src/model_migration.rs index 1cb8543570..283007e028 100644 --- a/codex-rs/tui/src/model_migration.rs +++ b/codex-rs/tui/src/model_migration.rs @@ -1,11 +1,13 @@ +use crate::key_hint; use crate::render::Insets; use crate::render::renderable::ColumnRenderable; use crate::render::renderable::Renderable; use crate::render::renderable::RenderableExt as _; +use crate::selection_list::selection_option_row; use crate::tui::FrameRequester; use crate::tui::Tui; use crate::tui::TuiEvent; -use codex_common::model_presets::HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG; +use codex_common::model_presets::HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG; use codex_common::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG; use crossterm::event::KeyCode; use crossterm::event::KeyEvent; @@ -22,8 +24,10 @@ use ratatui::widgets::Wrap; use tokio_stream::StreamExt; /// Outcome of the migration prompt. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] pub(crate) enum ModelMigrationOutcome { Accepted, + Rejected, Exit, } @@ -31,13 +35,33 @@ pub(crate) enum ModelMigrationOutcome { pub(crate) struct ModelMigrationCopy { pub heading: Vec>, pub content: Vec>, + pub can_opt_out: bool, +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +enum MigrationMenuOption { + TryNewModel, + UseExistingModel, +} + +impl MigrationMenuOption { + fn all() -> [Self; 2] { + [Self::TryNewModel, Self::UseExistingModel] + } + + fn label(self) -> &'static str { + match self { + Self::TryNewModel => "Try new model", + Self::UseExistingModel => "Use existing model", + } + } } pub(crate) fn migration_copy_for_config(migration_config_key: &str) -> ModelMigrationCopy { match migration_config_key { HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG => gpt5_migration_copy(), - HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG => arcticfox_migration_copy(), - _ => arcticfox_migration_copy(), + HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG => gpt_5_1_codex_max_migration_copy(), + _ => gpt_5_1_codex_max_migration_copy(), } } @@ -98,7 +122,8 @@ struct ModelMigrationScreen { request_frame: FrameRequester, copy: ModelMigrationCopy, done: bool, - should_exit: bool, + outcome: ModelMigrationOutcome, + highlighted_option: MigrationMenuOption, } impl ModelMigrationScreen { @@ -107,15 +132,47 @@ impl ModelMigrationScreen { request_frame, copy, done: false, - should_exit: false, + outcome: ModelMigrationOutcome::Accepted, + highlighted_option: MigrationMenuOption::TryNewModel, } } - fn accept(&mut self) { + fn finish_with(&mut self, outcome: ModelMigrationOutcome) { + self.outcome = outcome; self.done = true; self.request_frame.schedule_frame(); } + fn accept(&mut self) { + self.finish_with(ModelMigrationOutcome::Accepted); + } + + fn reject(&mut self) { + self.finish_with(ModelMigrationOutcome::Rejected); + } + + fn exit(&mut self) { + self.finish_with(ModelMigrationOutcome::Exit); + } + + fn confirm_selection(&mut self) { + if self.copy.can_opt_out { + match self.highlighted_option { + MigrationMenuOption::TryNewModel => self.accept(), + MigrationMenuOption::UseExistingModel => self.reject(), + } + } else { + self.accept(); + } + } + + fn highlight_option(&mut self, option: MigrationMenuOption) { + if self.highlighted_option != option { + self.highlighted_option = option; + self.request_frame.schedule_frame(); + } + } + fn handle_key(&mut self, key_event: KeyEvent) { if key_event.kind == KeyEventKind::Release { 
             return;
         }
 
@@ -124,14 +181,36 @@ impl ModelMigrationScreen {
         if key_event.modifiers.contains(KeyModifiers::CONTROL)
             && matches!(key_event.code, KeyCode::Char('c') | KeyCode::Char('d'))
         {
-            self.should_exit = true;
-            self.done = true;
-            self.request_frame.schedule_frame();
+            self.exit();
             return;
         }
 
-        if matches!(key_event.code, KeyCode::Esc | KeyCode::Enter) {
-            self.accept();
+        if !self.copy.can_opt_out {
+            if matches!(key_event.code, KeyCode::Esc | KeyCode::Enter) {
+                self.accept();
+            }
+            return;
+        }
+
+        match key_event.code {
+            KeyCode::Up | KeyCode::Char('k') => {
+                self.highlight_option(MigrationMenuOption::TryNewModel);
+            }
+            KeyCode::Down | KeyCode::Char('j') => {
+                self.highlight_option(MigrationMenuOption::UseExistingModel);
+            }
+            KeyCode::Char('1') => {
+                self.highlight_option(MigrationMenuOption::TryNewModel);
+                self.accept();
+            }
+            KeyCode::Char('2') => {
+                self.highlight_option(MigrationMenuOption::UseExistingModel);
+                self.reject();
+            }
+            KeyCode::Enter | KeyCode::Esc => {
+                self.confirm_selection();
+            }
+            _ => {}
         }
     }
 
@@ -140,11 +219,7 @@ impl ModelMigrationScreen {
     }
 
     fn outcome(&self) -> ModelMigrationOutcome {
-        if self.should_exit {
-            ModelMigrationOutcome::Exit
-        } else {
-            ModelMigrationOutcome::Accepted
-        }
+        self.outcome
     }
 }
 
@@ -172,25 +247,56 @@ impl WidgetRef for &ModelMigrationScreen {
             );
         }
 
+        if self.copy.can_opt_out {
+            column.push(Line::from(""));
+            column.push(
+                Paragraph::new("Choose how you'd like Codex to proceed.")
+                    .wrap(Wrap { trim: false })
+                    .inset(Insets::tlbr(0, 2, 0, 0)),
+            );
+            column.push(Line::from(""));
+
+            for (idx, option) in MigrationMenuOption::all().into_iter().enumerate() {
+                column.push(selection_option_row(
+                    idx,
+                    option.label().to_string(),
+                    self.highlighted_option == option,
+                ));
+            }
+
+            column.push(Line::from(""));
+            column.push(
+                Line::from(vec![
+                    "Use ".dim(),
+                    key_hint::plain(KeyCode::Up).into(),
+                    "/".dim(),
+                    key_hint::plain(KeyCode::Down).into(),
+                    " to move, press ".dim(),
+                    key_hint::plain(KeyCode::Enter).into(),
+                    " to confirm".dim(),
+                ])
+                .inset(Insets::tlbr(0, 2, 0, 0)),
+            );
+        }
+
         column.render(area, buf);
     }
 }
 
-fn arcticfox_migration_copy() -> ModelMigrationCopy {
+fn gpt_5_1_codex_max_migration_copy() -> ModelMigrationCopy {
     ModelMigrationCopy {
-        heading: vec!["Introducing arcticfox".bold()],
+        heading: vec!["Codex just got an upgrade. Introducing gpt-5.1-codex-max".bold()],
         content: vec![
-            Line::from("We've upgraded our family of models supported in Codex to arcticfox."),
             Line::from(
-                "You can continue using legacy models by specifying them directly with the -m option or in your config.toml.",
+                "Codex is now powered by gpt-5.1-codex-max, our latest frontier agentic coding model. It is smarter and faster than its predecessors and capable of long-running project-scale work.",
             ),
             Line::from(vec![
                 "Learn more at ".into(),
-                "www.openai.com/index/arcticfox".cyan().underlined(),
+                "www.openai.com/index/gpt-5-1-codex-max".cyan().underlined(),
                 ".".into(),
             ]),
-            Line::from(vec!["Press enter to continue".dim()]),
         ],
+        can_opt_out: true,
     }
 }
 
@@ -211,13 +317,14 @@ fn gpt5_migration_copy() -> ModelMigrationCopy {
             ]),
             Line::from(vec!["Press enter to continue".dim()]),
         ],
+        can_opt_out: false,
     }
 }
 
 #[cfg(test)]
 mod tests {
     use super::ModelMigrationScreen;
-    use super::arcticfox_migration_copy;
+    use super::gpt_5_1_codex_max_migration_copy;
     use super::migration_copy_for_config;
     use crate::custom_terminal::Terminal;
     use crate::test_backend::VT100Backend;
@@ -231,13 +338,15 @@ mod tests {
     #[test]
     fn prompt_snapshot() {
         let width: u16 = 60;
-        let height: u16 = 12;
+        let height: u16 = 20;
         let backend = VT100Backend::new(width, height);
         let mut terminal = Terminal::with_options(backend).expect("terminal");
         terminal.set_viewport_area(Rect::new(0, 0, width, height));
 
-        let screen =
-            ModelMigrationScreen::new(FrameRequester::test_dummy(), arcticfox_migration_copy());
+        let screen = ModelMigrationScreen::new(
+            FrameRequester::test_dummy(),
+            gpt_5_1_codex_max_migration_copy(),
+        );
 
         {
             let mut frame = terminal.get_frame();
@@ -304,8 +413,10 @@ mod tests {
 
     #[test]
     fn escape_key_accepts_prompt() {
-        let mut screen =
-            ModelMigrationScreen::new(FrameRequester::test_dummy(), arcticfox_migration_copy());
+        let mut screen = ModelMigrationScreen::new(
+            FrameRequester::test_dummy(),
+            gpt_5_1_codex_max_migration_copy(),
+        );
 
         // Simulate pressing Escape
         screen.handle_key(KeyEvent::new(
@@ -319,4 +430,27 @@ mod tests {
             super::ModelMigrationOutcome::Accepted
         ));
     }
+
+    #[test]
+    fn selecting_use_existing_model_rejects_upgrade() {
+        let mut screen = ModelMigrationScreen::new(
+            FrameRequester::test_dummy(),
+            gpt_5_1_codex_max_migration_copy(),
+        );
+
+        screen.handle_key(KeyEvent::new(
+            KeyCode::Down,
+            crossterm::event::KeyModifiers::NONE,
+        ));
+        screen.handle_key(KeyEvent::new(
+            KeyCode::Enter,
+            crossterm::event::KeyModifiers::NONE,
+        ));
+
+        assert!(screen.is_done());
+        assert!(matches!(
+            screen.outcome(),
+            super::ModelMigrationOutcome::Rejected
+        ));
+    }
 }
diff --git a/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt.snap b/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt.snap
index 2780fe7b78..5b3136803f 100644
--- a/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt.snap
+++ b/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt.snap
@@ -2,14 +2,18 @@ source: tui/src/model_migration.rs
 expression: terminal.backend()
 ---
-> Introducing arcticfox
+> Codex just got an upgrade. Introducing gpt-5.1-codex-max
 
-  We've upgraded our family of models supported in Codex to
-  arcticfox.
+  Codex is now powered by gpt-5.1-codex-max, our latest
+  frontier agentic coding model. It is smarter and faster
+  than its predecessors and capable of long-running
+  project-scale work.
 
-  You can continue using legacy models by specifying them
-  directly with the -m option or in your config.toml.
+  Learn more at www.openai.com/index/gpt-5-1-codex-max.
 
-  Learn more at www.openai.com/index/arcticfox.
+  Choose how you'd like Codex to proceed.
 
-  Press enter to continue
+› 1. Try new model
+  2. Use existing model
+
+  Use ↑/↓ to move, press enter to confirm
diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap
index e0d752d5e4..3ecc4fa8ed 100644
--- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap
+++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap
@@ -10,7 +10,7 @@ expression: sanitized
 │ Visit https://chatgpt.com/codex/settings/usage for up-to-date          │
 │ information on rate limits and credits                                 │
 │                                                                        │
-│ Model: arcticfox (reasoning none, summaries auto)                      │
+│ Model: gpt-5.1-codex-max (reasoning none, summaries auto)              │
 │ Directory: [[workspace]]                                               │
 │ Approval: on-request                                                   │
 │ Sandbox: read-only                                                     │
diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap
index d4be306410..c22577407e 100644
--- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap
+++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap
@@ -4,20 +4,20 @@ expression: sanitized
 ---
 /status
 
-╭────────────────────────────────────────────────────────────────╮
-│ >_ OpenAI Codex (v0.0.0)                                       │
-│                                                                │
-│ Visit https://chatgpt.com/codex/settings/usage for up-to-date  │
-│ information on rate limits and credits                         │
-│                                                                │
-│ Model: arcticfox (reasoning high, summaries detailed)          │
-│ Directory: [[workspace]]                                       │
-│ Approval: on-request                                           │
-│ Sandbox: workspace-write                                       │
-│ Agents.md:                                                     │
-│                                                                │
-│ Token usage: 1.9K total (1K input + 900 output)                │
-│ Context window: 100% left (2.25K used / 272K)                  │
-│ 5h limit:     [██████░░░░░░░░░░░░░░] 28% left (resets 03:14)   │
-│ Weekly limit: [███████████░░░░░░░░░] 55% left (resets 03:24)   │
-╰────────────────────────────────────────────────────────────────╯
+╭────────────────────────────────────────────────────────────────────────╮
+│ >_ OpenAI Codex (v0.0.0)                                               │
+│                                                                        │
+│ Visit https://chatgpt.com/codex/settings/usage for up-to-date          │
+│ information on rate limits and credits                                 │
+│                                                                        │
+│ Model: gpt-5.1-codex-max (reasoning high, summaries detailed)          │
+│ Directory: [[workspace]]                                               │
+│ Approval: on-request                                                   │
+│ Sandbox: workspace-write                                               │
+│ Agents.md:                                                             │
+│                                                                        │
+│ Token usage: 1.9K total (1K input + 900 output)                        │
+│ Context window: 100% left (2.25K used / 272K)                          │
+│ 5h limit:     [██████░░░░░░░░░░░░░░] 28% left (resets 03:14)           │
+│ Weekly limit: [███████████░░░░░░░░░] 55% left (resets 03:24)           │
+╰────────────────────────────────────────────────────────────────────────╯
diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap
index 36d2a8b4da..f0e6b73445 100644
--- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap
+++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap
@@ -4,19 +4,19 @@ expression: sanitized
 ---
 /status
 
-╭────────────────────────────────────────────────────────────────╮
-│ >_ OpenAI Codex (v0.0.0)                                       │
-│                                                                │
-│ Visit https://chatgpt.com/codex/settings/usage for up-to-date  │
-│ information on rate limits and credits                         │
-│                                                                │
-│ Model: arcticfox (reasoning none, summaries auto)              │
-│ Directory: [[workspace]]                                       │
-│ Approval: on-request                                           │
-│ Sandbox: read-only                                             │
-│ Agents.md:                                                     │
-│                                                                │
-│ Token usage: 750 total (500 input + 250 output)                │
-│ Context window: 100% left (750 used / 272K)                    │
-│ Limits: data not available yet                                 │
-╰────────────────────────────────────────────────────────────────╯
+╭────────────────────────────────────────────────────────────────────────╮
+│ >_ OpenAI Codex (v0.0.0)                                               │
+│                                                                        │
+│ Visit https://chatgpt.com/codex/settings/usage for up-to-date          │
+│ information on rate limits and credits                                 │
+│                                                                        │
+│ Model: gpt-5.1-codex-max (reasoning none, summaries auto)              │
+│ Directory: [[workspace]]                                               │
+│ Approval: on-request                                                   │
+│ Sandbox: read-only                                                     │
+│ Agents.md:                                                             │
+│                                                                        │
+│ Token usage: 750 total (500 input + 250 output)                        │
+│ Context window: 100% left (750 used / 272K)                            │
+│ Limits: data not available yet                                         │
+╰────────────────────────────────────────────────────────────────────────╯
diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap
index 36d2a8b4da..f0e6b73445 100644
--- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap
+++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap
@@ -4,19 +4,19 @@ expression: sanitized
 ---
 /status
 
-╭────────────────────────────────────────────────────────────────╮
-│ >_ OpenAI Codex (v0.0.0)                                       │
-│                                                                │
-│ Visit https://chatgpt.com/codex/settings/usage for up-to-date  │
-│ information on rate limits and credits                         │
-│                                                                │
-│ Model: arcticfox (reasoning none, summaries auto)              │
-│ Directory: [[workspace]]                                       │
-│ Approval: on-request                                           │
-│ Sandbox: read-only                                             │
-│ Agents.md:                                                     │
-│                                                                │
-│ Token usage: 750 total (500 input + 250 output)                │
-│ Context window: 100% left (750 used / 272K)                    │
-│ Limits: data not available yet                                 │
-╰────────────────────────────────────────────────────────────────╯
+╭────────────────────────────────────────────────────────────────────────╮
+│ >_ OpenAI Codex (v0.0.0)                                               │
+│                                                                        │
+│ Visit https://chatgpt.com/codex/settings/usage for up-to-date          │
+│ information on rate limits and credits                                 │
+│                                                                        │
+│ Model: gpt-5.1-codex-max (reasoning none, summaries auto)              │
+│ Directory: [[workspace]]                                               │
+│ Approval: on-request                                                   │
+│ Sandbox: read-only                                                     │
+│ Agents.md:                                                             │
+│                                                                        │
+│ Token usage: 750 total (500 input + 250 output)                        │
+│ Context window: 100% left (750 used / 272K)                            │
+│ Limits: data not available yet                                         │
+╰────────────────────────────────────────────────────────────────────────╯
diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap
index 9163942dfe..a12be950bc 100644
--- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap
+++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap
@@ -4,21 +4,21 @@ expression: sanitized
 ---
 /status
 
-╭────────────────────────────────────────────────────────────────╮
-│ >_ OpenAI Codex (v0.0.0)                                       │
-│                                                                │
-│ Visit https://chatgpt.com/codex/settings/usage for up-to-date  │
-│ information on rate limits and credits                         │
-│                                                                │
-│ Model: arcticfox (reasoning none, summaries auto)              │
-│ Directory: [[workspace]]                                       │
-│ Approval: on-request                                           │
-│ Sandbox: read-only                                             │
-│ Agents.md:                                                     │
-│                                                                │
-│ Token usage: 1.9K total (1K input + 900 output)                │
-│ Context window: 100% left (2.25K used / 272K)                  │
-│ 5h limit:     [██████░░░░░░░░░░░░░░] 28% left (resets 03:14)   │
-│ Weekly limit: [████████████░░░░░░░░] 60% left (resets 03:34)   │
-│ Warning: limits may be stale - start new turn to refresh.      │
-╰────────────────────────────────────────────────────────────────╯
+╭────────────────────────────────────────────────────────────────────────╮
+│ >_ OpenAI Codex (v0.0.0)                                               │
+│                                                                        │
+│ Visit https://chatgpt.com/codex/settings/usage for up-to-date          │
+│ information on rate limits and credits                                 │
+│                                                                        │
+│ Model: gpt-5.1-codex-max (reasoning none, summaries auto)              │
+│ Directory: [[workspace]]                                               │
+│ Approval: on-request                                                   │
+│ Sandbox: read-only                                                     │
+│ Agents.md:                                                             │
+│                                                                        │
+│ Token usage: 1.9K total (1K input + 900 output)                        │
+│ Context window: 100% left (2.25K used / 272K)                          │
+│ 5h limit:     [██████░░░░░░░░░░░░░░] 28% left (resets 03:14)           │
+│ Weekly limit: [████████████░░░░░░░░] 60% left (resets 03:34)           │
+│ Warning: limits may be stale - start new turn to refresh.              │
+╰────────────────────────────────────────────────────────────────────────╯
diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap
index 5f4554bda0..02ba1adec9 100644
--- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap
+++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap
@@ -4,19 +4,19 @@ expression: sanitized
 ---
 /status
 
-╭────────────────────────────────────────────────────────────────╮
-│ >_ OpenAI Codex (v0.0.0)                                       │
-│                                                                │
-│ Visit https://chatgpt.com/codex/settings/usage for up-to-date  │
-│ information on rate limits and credits                         │
-│                                                                │
-│ Model: arcticfox (reasoning high, summaries detailed)          │
-│ Directory: [[workspace]]                                       │
-│ Approval: on-request                                           │
-│ Sandbox: read-only                                             │
-│ Agents.md:                                                     │
-│                                                                │
-│ Token usage: 1.9K total (1K input + 900 output)                │
-│ Context window: 100% left (2.25K used / 272K)                  │
-│ 5h limit:     [██████░░░░░░░░░░░░░░] 28% left (resets 03:14)   │
-╰────────────────────────────────────────────────────────────────╯
+╭─────────────────────────────────────────────────────────────────╮
+│ >_ OpenAI Codex (v0.0.0)                                        │
+│                                                                 │
+│ Visit https://chatgpt.com/codex/settings/usage for up-to-date   │
+│ information on rate limits and credits                          │
+│                                                                 │
+│ Model: gpt-5.1-codex-max (reasoning high, summaries de          │
+│ Directory: [[workspace]]                                        │
+│ Approval: on-request                                            │
+│ Sandbox: read-only                                              │
+│ Agents.md:                                                      │
+│                                                                 │
+│ Token usage: 1.9K total (1K input + 900 output)                 │
+│ Context window: 100% left (2.25K used / 272K)                   │
+│ 5h limit:     [██████░░░░░░░░░░░░░░] 28% left (resets 03:14)    │
+╰─────────────────────────────────────────────────────────────────╯
diff --git a/codex-rs/tui/src/status/tests.rs b/codex-rs/tui/src/status/tests.rs
index 0597a44112..c6029bde7a 100644
--- a/codex-rs/tui/src/status/tests.rs
+++ b/codex-rs/tui/src/status/tests.rs
@@ -81,7 +81,7 @@ fn reset_at_from(captured_at: &chrono::DateTime<Utc>, seconds: i64) ->
 fn status_snapshot_includes_reasoning_details() {
     let temp_home = TempDir::new().expect("temp home");
     let mut config = test_config(&temp_home);
-    config.model = "arcticfox".to_string();
"gpt-5.1-codex-max".to_string(); config.model_provider_id = "openai".to_string(); config.model_reasoning_effort = Some(ReasoningEffort::High); config.model_reasoning_summary = ReasoningSummary::Detailed; @@ -144,7 +144,7 @@ fn status_snapshot_includes_reasoning_details() { fn status_snapshot_includes_monthly_limit() { let temp_home = TempDir::new().expect("temp home"); let mut config = test_config(&temp_home); - config.model = "arcticfox".to_string(); + config.model = "gpt-5.1-codex-max".to_string(); config.model_provider_id = "openai".to_string(); config.cwd = PathBuf::from("/workspace/tests"); @@ -194,7 +194,7 @@ fn status_snapshot_includes_monthly_limit() { fn status_card_token_usage_excludes_cached_tokens() { let temp_home = TempDir::new().expect("temp home"); let mut config = test_config(&temp_home); - config.model = "arcticfox".to_string(); + config.model = "gpt-5.1-codex-max".to_string(); config.cwd = PathBuf::from("/workspace/tests"); let auth_manager = test_auth_manager(&config); @@ -232,7 +232,7 @@ fn status_card_token_usage_excludes_cached_tokens() { fn status_snapshot_truncates_in_narrow_terminal() { let temp_home = TempDir::new().expect("temp home"); let mut config = test_config(&temp_home); - config.model = "arcticfox".to_string(); + config.model = "gpt-5.1-codex-max".to_string(); config.model_provider_id = "openai".to_string(); config.model_reasoning_effort = Some(ReasoningEffort::High); config.model_reasoning_summary = ReasoningSummary::Detailed; @@ -285,7 +285,7 @@ fn status_snapshot_truncates_in_narrow_terminal() { fn status_snapshot_shows_missing_limits_message() { let temp_home = TempDir::new().expect("temp home"); let mut config = test_config(&temp_home); - config.model = "arcticfox".to_string(); + config.model = "gpt-5.1-codex-max".to_string(); config.cwd = PathBuf::from("/workspace/tests"); let auth_manager = test_auth_manager(&config); @@ -325,7 +325,7 @@ fn status_snapshot_shows_missing_limits_message() { fn status_snapshot_shows_empty_limits_message() { let temp_home = TempDir::new().expect("temp home"); let mut config = test_config(&temp_home); - config.model = "arcticfox".to_string(); + config.model = "gpt-5.1-codex-max".to_string(); config.cwd = PathBuf::from("/workspace/tests"); let auth_manager = test_auth_manager(&config); @@ -370,7 +370,7 @@ fn status_snapshot_shows_empty_limits_message() { fn status_snapshot_shows_stale_limits_message() { let temp_home = TempDir::new().expect("temp home"); let mut config = test_config(&temp_home); - config.model = "arcticfox".to_string(); + config.model = "gpt-5.1-codex-max".to_string(); config.cwd = PathBuf::from("/workspace/tests"); let auth_manager = test_auth_manager(&config); diff --git a/docs/config.md b/docs/config.md index fee86db30d..f47bf34643 100644 --- a/docs/config.md +++ b/docs/config.md @@ -64,7 +64,7 @@ Notes: The model that Codex should use. ```toml -model = "gpt-5.1" # overrides the default ("arcticfox" across platforms) +model = "gpt-5.1" # overrides the default ("gpt-5.1-codex-max" across platforms) ``` ### model_providers @@ -191,7 +191,7 @@ model = "mistral" ### model_reasoning_effort -If the selected model is known to support reasoning (for example: `o3`, `o4-mini`, `codex-*`, `arcticfox`, `gpt-5.1`, `gpt-5.1-codex`), reasoning is enabled by default when using the Responses API. 
-If the selected model is known to support reasoning (for example: `o3`, `o4-mini`, `codex-*`, `arcticfox`, `gpt-5.1`, `gpt-5.1-codex`), reasoning is enabled by default when using the Responses API. As explained in the [OpenAI Platform documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning), this can be set to:
+If the selected model is known to support reasoning (for example: `o3`, `o4-mini`, `codex-*`, `gpt-5.1-codex-max`, `gpt-5.1`, `gpt-5.1-codex`), reasoning is enabled by default when using the Responses API. As explained in the [OpenAI Platform documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning), this can be set to:
 
 - `"minimal"`
 - `"low"`
@@ -835,7 +835,7 @@ Users can specify config values at multiple levels. Order of precedence is as fo
 
 1. custom command-line argument, e.g., `--model o3`
 2. as part of a profile, where the `--profile` is specified via a CLI (or in the config file itself)
 3. as an entry in `config.toml`, e.g., `model = "o3"`
-4. the default value that comes with Codex CLI (i.e., Codex CLI defaults to `arcticfox`)
+4. the default value that comes with Codex CLI (i.e., Codex CLI defaults to `gpt-5.1-codex-max`)
 
 ### history
@@ -938,7 +938,7 @@ Valid values:
 
 | Key                    | Type / Values | Notes                                                    |
 | ---------------------- | ------------- | -------------------------------------------------------- |
-| `model`                | string        | Model to use (e.g., `arcticfox`).                        |
+| `model`                | string        | Model to use (e.g., `gpt-5.1-codex-max`).                |
 | `model_provider`       | string        | Provider id from `model_providers` (default: `openai`).  |
 | `model_context_window` | number        | Context window tokens.                                   |
 | `model_max_output_tokens` | number     | Max output tokens.                                       |
diff --git a/docs/example-config.md b/docs/example-config.md
index 26a4b8b503..335a826e26 100644
--- a/docs/example-config.md
+++ b/docs/example-config.md
@@ -18,11 +18,11 @@ Use this example configuration as a starting point. For an explanation of each f
 # Core Model Selection
 ################################################################################
 
-# Primary model used by Codex. Default: "arcticfox" on all platforms.
-model = "arcticfox"
+# Primary model used by Codex. Default: "gpt-5.1-codex-max" on all platforms.
+model = "gpt-5.1-codex-max"
 
-# Model used by the /review feature (code reviews). Default: "arcticfox".
-review_model = "arcticfox"
+# Model used by the /review feature (code reviews). Default: "gpt-5.1-codex-max".
+review_model = "gpt-5.1-codex-max"
 
 # Provider id selected from [model_providers]. Default: "openai".
model_provider = "openai" @@ -32,7 +32,7 @@ model_provider = "openai" # model_context_window = 128000 # tokens; default: auto for model # model_max_output_tokens = 8192 # tokens; default: auto for model # model_auto_compact_token_limit = 0 # disable/override auto; default: model family specific -# tool_output_token_limit = 10000 # tokens stored per tool output; default: 10000 for arcticfox +# tool_output_token_limit = 10000 # tokens stored per tool output; default: 10000 for gpt-5.1-codex-max ################################################################################ # Reasoning & Verbosity (Responses API capable models) @@ -315,7 +315,7 @@ mcp_oauth_credentials_store = "auto" [profiles] # [profiles.default] -# model = "arcticfox" +# model = "gpt-5.1-codex-max" # model_provider = "openai" # approval_policy = "on-request" # sandbox_mode = "read-only" diff --git a/docs/exec.md b/docs/exec.md index 0f5808d0db..5a17155a82 100644 --- a/docs/exec.md +++ b/docs/exec.md @@ -99,7 +99,7 @@ codex exec resume --last "Fix use-after-free issues" Only the conversation context is preserved; you must still provide flags to customize Codex behavior. ```shell -codex exec --model arcticfox --json "Review the change, look for use-after-free issues" +codex exec --model gpt-5.1-codex-max --json "Review the change, look for use-after-free issues" codex exec --model gpt-5.1 --json resume --last "Fix use-after-free issues" ```