From a2879d7e7b2b2f6a24031d2a14a94943a01b8dbd Mon Sep 17 00:00:00 2001 From: Ahmed Ibrahim Date: Wed, 19 Nov 2025 01:10:41 -0800 Subject: [PATCH 1/5] release max --- .../src/protocol/common.rs | 4 +- codex-rs/app-server/tests/suite/config.rs | 4 +- .../tests/suite/set_default_model.rs | 2 +- .../app-server/tests/suite/v2/model_list.rs | 8 +- .../tests/suite/v2/thread_resume.rs | 6 +- codex-rs/common/src/model_presets.rs | 27 ++-- codex-rs/core/arcticfox_prompt.md | 117 ------------------ codex-rs/core/src/client_common.rs | 2 +- codex-rs/core/src/config/edit.rs | 6 +- codex-rs/core/src/config/mod.rs | 10 +- codex-rs/core/src/config/types.rs | 4 +- codex-rs/core/src/model_family.rs | 8 +- codex-rs/core/src/openai_model_info.rs | 2 +- codex-rs/core/tests/suite/client.rs | 2 +- codex-rs/tui/src/app.rs | 18 +-- codex-rs/tui/src/chatwidget.rs | 4 +- ...ests__model_reasoning_selection_popup.snap | 2 +- ...ng_selection_popup_extra_high_warning.snap | 2 +- codex-rs/tui/src/chatwidget/tests.rs | 18 +-- codex-rs/tui/src/model_migration.rs | 30 +++-- ...ration__tests__model_migration_prompt.snap | 6 +- ...tatus_snapshot_includes_monthly_limit.snap | 2 +- ...s_snapshot_includes_reasoning_details.snap | 34 ++--- ...s_snapshot_shows_empty_limits_message.snap | 32 ++--- ...snapshot_shows_missing_limits_message.snap | 32 ++--- ...s_snapshot_shows_stale_limits_message.snap | 36 +++--- ...snapshot_truncates_in_narrow_terminal.snap | 32 ++--- codex-rs/tui/src/status/tests.rs | 14 +-- docs/config.md | 8 +- docs/example-config.md | 12 +- docs/exec.md | 2 +- 31 files changed, 191 insertions(+), 295 deletions(-) delete mode 100644 codex-rs/core/arcticfox_prompt.md diff --git a/codex-rs/app-server-protocol/src/protocol/common.rs b/codex-rs/app-server-protocol/src/protocol/common.rs index 23dcd29e35..1062c62f26 100644 --- a/codex-rs/app-server-protocol/src/protocol/common.rs +++ b/codex-rs/app-server-protocol/src/protocol/common.rs @@ -528,7 +528,7 @@ mod tests { let request = ClientRequest::NewConversation { request_id: RequestId::Integer(42), params: v1::NewConversationParams { - model: Some("arcticfox".to_string()), + model: Some("gpt-5.1-codex-max".to_string()), model_provider: None, profile: None, cwd: None, @@ -546,7 +546,7 @@ mod tests { "method": "newConversation", "id": 42, "params": { - "model": "arcticfox", + "model": "gpt-5.1-codex-max", "modelProvider": null, "profile": null, "cwd": null, diff --git a/codex-rs/app-server/tests/suite/config.rs b/codex-rs/app-server/tests/suite/config.rs index 5d1c7f9d11..75dba57229 100644 --- a/codex-rs/app-server/tests/suite/config.rs +++ b/codex-rs/app-server/tests/suite/config.rs @@ -27,7 +27,7 @@ fn create_config_toml(codex_home: &Path) -> std::io::Result<()> { std::fs::write( config_toml, r#" -model = "arcticfox" +model = "gpt-5.1-codex-max" approval_policy = "on-request" sandbox_mode = "workspace-write" model_reasoning_summary = "detailed" @@ -87,7 +87,7 @@ async fn get_config_toml_parses_all_fields() -> Result<()> { }), forced_chatgpt_workspace_id: Some("12345678-0000-0000-0000-000000000000".into()), forced_login_method: Some(ForcedLoginMethod::Chatgpt), - model: Some("arcticfox".into()), + model: Some("gpt-5.1-codex-max".into()), model_reasoning_effort: Some(ReasoningEffort::High), model_reasoning_summary: Some(ReasoningSummary::Detailed), model_verbosity: Some(Verbosity::Medium), diff --git a/codex-rs/app-server/tests/suite/set_default_model.rs b/codex-rs/app-server/tests/suite/set_default_model.rs index 4d742a8434..b56c54dbd9 100644 --- 
a/codex-rs/app-server/tests/suite/set_default_model.rs +++ b/codex-rs/app-server/tests/suite/set_default_model.rs @@ -57,7 +57,7 @@ fn create_config_toml(codex_home: &Path) -> std::io::Result<()> { std::fs::write( config_toml, r#" -model = "arcticfox" +model = "gpt-5.1-codex-max" model_reasoning_effort = "medium" "#, ) diff --git a/codex-rs/app-server/tests/suite/v2/model_list.rs b/codex-rs/app-server/tests/suite/v2/model_list.rs index 22c0bf6379..3c4844fed9 100644 --- a/codex-rs/app-server/tests/suite/v2/model_list.rs +++ b/codex-rs/app-server/tests/suite/v2/model_list.rs @@ -46,9 +46,9 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> { let expected_models = vec![ Model { - id: "arcticfox".to_string(), - model: "arcticfox".to_string(), - display_name: "arcticfox".to_string(), + id: "gpt-5.1-codex-max".to_string(), + model: "gpt-5.1-codex-max".to_string(), + display_name: "gpt-5.1-codex-max".to_string(), description: "Latest Codex-optimized flagship for deep and fast reasoning.".to_string(), supported_reasoning_efforts: vec![ ReasoningEffortOption { @@ -174,7 +174,7 @@ async fn list_models_pagination_works() -> Result<()> { } = to_response::(first_response)?; assert_eq!(first_items.len(), 1); - assert_eq!(first_items[0].id, "arcticfox"); + assert_eq!(first_items[0].id, "gpt-5.1-codex-max"); let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?; let second_request = mcp diff --git a/codex-rs/app-server/tests/suite/v2/thread_resume.rs b/codex-rs/app-server/tests/suite/v2/thread_resume.rs index 1a9c76979a..806ba08fee 100644 --- a/codex-rs/app-server/tests/suite/v2/thread_resume.rs +++ b/codex-rs/app-server/tests/suite/v2/thread_resume.rs @@ -27,7 +27,7 @@ async fn thread_resume_returns_original_thread() -> Result<()> { // Start a thread. let start_id = mcp .send_thread_start_request(ThreadStartParams { - model: Some("arcticfox".to_string()), + model: Some("gpt-5.1-codex-max".to_string()), ..Default::default() }) .await?; @@ -69,7 +69,7 @@ async fn thread_resume_prefers_path_over_thread_id() -> Result<()> { let start_id = mcp .send_thread_start_request(ThreadStartParams { - model: Some("arcticfox".to_string()), + model: Some("gpt-5.1-codex-max".to_string()), ..Default::default() }) .await?; @@ -114,7 +114,7 @@ async fn thread_resume_supports_history_and_overrides() -> Result<()> { // Start a thread. let start_id = mcp .send_thread_start_request(ThreadStartParams { - model: Some("arcticfox".to_string()), + model: Some("gpt-5.1-codex-max".to_string()), ..Default::default() }) .await?; diff --git a/codex-rs/common/src/model_presets.rs b/codex-rs/common/src/model_presets.rs index dbbe1aa4e6..c7ad7f8aec 100644 --- a/codex-rs/common/src/model_presets.rs +++ b/codex-rs/common/src/model_presets.rs @@ -5,7 +5,8 @@ use codex_core::protocol_config_types::ReasoningEffort; use once_cell::sync::Lazy; pub const HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG: &str = "hide_gpt5_1_migration_prompt"; -pub const HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG: &str = "hide_arcticfox_migration_prompt"; +pub const HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG: &str = + "hide_gpt-5.1-codex-max_migration_prompt"; /// A reasoning effort option that can be surfaced for a model. 
#[derive(Debug, Clone, Copy)] @@ -49,9 +50,9 @@ pub struct ModelPreset { static PRESETS: Lazy> = Lazy::new(|| { vec![ ModelPreset { - id: "arcticfox", - model: "arcticfox", - display_name: "arcticfox", + id: "gpt-5.1-codex-max", + model: "gpt-5.1-codex-max", + display_name: "gpt-5.1-codex-max", description: "Latest Codex-optimized flagship for deep and fast reasoning.", default_reasoning_effort: ReasoningEffort::Medium, supported_reasoning_efforts: &[ @@ -98,9 +99,9 @@ static PRESETS: Lazy> = Lazy::new(|| { ], is_default: false, upgrade: Some(ModelUpgrade { - id: "arcticfox", + id: "gpt-5.1-codex-max", reasoning_effort_mapping: None, - migration_config_key: HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG, + migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG, }), show_in_picker: true, }, @@ -171,9 +172,9 @@ static PRESETS: Lazy> = Lazy::new(|| { ], is_default: false, upgrade: Some(ModelUpgrade { - id: "arcticfox", + id: "gpt-5.1-codex-max", reasoning_effort_mapping: None, - migration_config_key: HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG, + migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG, }), show_in_picker: false, }, @@ -243,7 +244,7 @@ pub fn builtin_model_presets(auth_mode: Option) -> Vec { PRESETS .iter() .filter(|preset| match auth_mode { - Some(AuthMode::ApiKey) => preset.show_in_picker && preset.id != "arcticfox", + Some(AuthMode::ApiKey) => preset.show_in_picker && preset.id != "gpt-5.1-codex-max", _ => preset.show_in_picker, }) .cloned() @@ -266,8 +267,12 @@ mod tests { } #[test] - fn arcticfox_hidden_for_api_key_auth() { + fn gpt_5_1_codex_max_hidden_for_api_key_auth() { let presets = builtin_model_presets(Some(AuthMode::ApiKey)); - assert!(presets.iter().all(|preset| preset.id != "arcticfox")); + assert!( + presets + .iter() + .all(|preset| preset.id != "gpt-5.1-codex-max") + ); } } diff --git a/codex-rs/core/arcticfox_prompt.md b/codex-rs/core/arcticfox_prompt.md deleted file mode 100644 index 292e5d7d0f..0000000000 --- a/codex-rs/core/arcticfox_prompt.md +++ /dev/null @@ -1,117 +0,0 @@ -You are Codex, based on GPT-5. You are running as a coding agent in the Codex CLI on a user's computer. - -## General - -- When searching for text or files, prefer using `rg` or `rg --files` respectively because `rg` is much faster than alternatives like `grep`. (If the `rg` command is not found, then use alternatives.) - -## Editing constraints - -- Default to ASCII when editing or creating files. Only introduce non-ASCII or other Unicode characters when there is a clear justification and the file already uses them. -- Add succinct code comments that explain what is going on if code is not self-explanatory. You should not add comments like "Assigns the value to the variable", but a brief comment might be useful ahead of a complex code block that the user would otherwise have to spend time parsing out. Usage of these comments should be rare. -- Try to use apply_patch for single file edits, but it is fine to explore other options to make the edit if it does not work well. Do not use apply_patch for changes that are auto-generated (i.e. generating package.json or running a lint or format command like gofmt) or when scripting is more efficient (such as search and replacing a string across a codebase). -- You may be in a dirty git worktree. - * NEVER revert existing changes you did not make unless explicitly requested, since these changes were made by the user. 
- * If asked to make a commit or code edits and there are unrelated changes to your work or changes that you didn't make in those files, don't revert those changes. - * If the changes are in files you've touched recently, you should read carefully and understand how you can work with the changes rather than reverting them. - * If the changes are in unrelated files, just ignore them and don't revert them. -- Do not amend a commit unless explicitly requested to do so. -- While you are working, you might notice unexpected changes that you didn't make. If this happens, STOP IMMEDIATELY and ask the user how they would like to proceed. -- **NEVER** use destructive commands like `git reset --hard` or `git checkout --` unless specifically requested or approved by the user. - -## Plan tool - -When using the planning tool: -- Skip using the planning tool for straightforward tasks (roughly the easiest 25%). -- Do not make single-step plans. -- When you made a plan, update it after having performed one of the sub-tasks that you shared on the plan. - -## Codex CLI harness, sandboxing, and approvals - -The Codex CLI harness supports several different configurations for sandboxing and escalation approvals that the user can choose from. - -Filesystem sandboxing defines which files can be read or written. The options for `sandbox_mode` are: -- **read-only**: The sandbox only permits reading files. -- **workspace-write**: The sandbox permits reading files, and editing files in `cwd` and `writable_roots`. Editing files in other directories requires approval. -- **danger-full-access**: No filesystem sandboxing - all commands are permitted. - -Network sandboxing defines whether network can be accessed without approval. Options for `network_access` are: -- **restricted**: Requires approval -- **enabled**: No approval needed - -Approvals are your mechanism to get user consent to run shell commands without the sandbox. Possible configuration options for `approval_policy` are -- **untrusted**: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands. -- **on-failure**: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox. -- **on-request**: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for it in the `shell` command description.) -- **never**: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost best to finish the task and validate your work before yielding. If this mode is paired with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding. - -When you are running with `approval_policy == on-request`, and sandboxing enabled, here are scenarios where you'll need to request approval: -- You need to run a command that writes to a directory that requires it (e.g. running tests that write to /var) -- You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files. 
-- You are running sandboxed and need to run a command that requires network access (e.g. installing packages) -- If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval. ALWAYS proceed to use the `with_escalated_permissions` and `justification` parameters - do not message the user before requesting approval for the command. -- You are about to take a potentially destructive action such as an `rm` or `git reset` that the user did not explicitly ask for -- (for all of these, you should weigh alternative paths that do not require approval) - -When `sandbox_mode` is set to read-only, you'll need to request approval for any command that isn't a read. - -You will be told what filesystem sandboxing, network sandboxing, and approval mode are active in a developer or user message. If you are not told about this, assume that you are running with workspace-write, network sandboxing enabled, and approval on-failure. - -Although they introduce friction to the user because your work is paused until the user responds, you should leverage them when necessary to accomplish important work. If the completing the task requires escalated permissions, Do not let these settings or the sandbox deter you from attempting to accomplish the user's task unless it is set to "never", in which case never ask for approvals. - -When requesting approval to execute a command that will require escalated privileges: - - Provide the `with_escalated_permissions` parameter with the boolean value true - - Include a short, 1 sentence explanation for why you need to enable `with_escalated_permissions` in the justification parameter - -## Special user requests - -- If the user makes a simple request (such as asking for the time) which you can fulfill by running a terminal command (such as `date`), you should do so. -- If the user asks for a "review", default to a code review mindset: prioritise identifying bugs, risks, behavioural regressions, and missing tests. Findings must be the primary focus of the response - keep summaries or overviews brief and only after enumerating the issues. Present findings first (ordered by severity with file/line references), follow with open questions or assumptions, and offer a change-summary only as a secondary detail. If no findings are discovered, state that explicitly and mention any residual risks or testing gaps. - -## Frontend tasks -When doing frontend design tasks, avoid collapsing into "AI slop" or safe, average-looking layouts. -Aim for interfaces that feel intentional, bold, and a bit surprising. -- Typography: Use expressive, purposeful fonts and avoid default stacks (Inter, Roboto, Arial, system). -- Color & Look: Choose a clear visual direction; define CSS variables; avoid purple-on-white defaults. No purple bias or dark mode bias. -- Motion: Use a few meaningful animations (page-load, staggered reveals) instead of generic micro-motions. -- Background: Don't rely on flat, single-color backgrounds; use gradients, shapes, or subtle patterns to build atmosphere. -- Overall: Avoid boilerplate layouts and interchangeable UI patterns. Vary themes, type families, and visual languages across outputs. -- Ensure the page loads properly on both desktop and mobile - -Exception: If working within an existing website or design system, preserve the established patterns, structure, and visual language. - -## Presenting your work and final message - -You are producing plain text that will later be styled by the CLI. 
Follow these rules exactly. Formatting should make results easy to scan, but not feel mechanical. Use judgment to decide how much structure adds value. - -- Default: be very concise; friendly coding teammate tone. -- Ask only when needed; suggest ideas; mirror the user's style. -- For substantial work, summarize clearly; follow final‑answer formatting. -- Skip heavy formatting for simple confirmations. -- Don't dump large files you've written; reference paths only. -- No "save/copy this file" - User is on the same machine. -- Offer logical next steps (tests, commits, build) briefly; add verify steps if you couldn't do something. -- For code changes: - * Lead with a quick explanation of the change, and then give more details on the context covering where and why a change was made. Do not start this explanation with "summary", just jump right in. - * If there are natural next steps the user may want to take, suggest them at the end of your response. Do not make suggestions if there are no natural next steps. - * When suggesting multiple options, use numeric lists for the suggestions so the user can quickly respond with a single number. -- The user does not command execution outputs. When asked to show the output of a command (e.g. `git show`), relay the important details in your answer or summarize the key lines so the user understands the result. - -### Final answer structure and style guidelines - -- Plain text; CLI handles styling. Use structure only when it helps scanability. -- Headers: optional; short Title Case (1-3 words) wrapped in **…**; no blank line before the first bullet; add only if they truly help. -- Bullets: use - ; merge related points; keep to one line when possible; 4–6 per list ordered by importance; keep phrasing consistent. -- Monospace: backticks for commands/paths/env vars/code ids and inline examples; use for literal keyword bullets; never combine with **. -- Code samples or multi-line snippets should be wrapped in fenced code blocks; include an info string as often as possible. -- Structure: group related bullets; order sections general → specific → supporting; for subsections, start with a bolded keyword bullet, then items; match complexity to the task. -- Tone: collaborative, concise, factual; present tense, active voice; self‑contained; no "above/below"; parallel wording. -- Don'ts: no nested bullets/hierarchies; no ANSI codes; don't cram unrelated keywords; keep keyword lists short—wrap/reformat if long; avoid naming formatting styles in answers. -- Adaptation: code explanations → precise, structured with code refs; simple tasks → lead with outcome; big changes → logical walkthrough + rationale + next actions; casual one-offs → plain sentences, no headers/bullets. -- File References: When referencing files in your response follow the below rules: - * Use inline code to make file paths clickable. - * Each reference should have a stand alone path. Even if it's the same file. - * Accepted: absolute, workspace‑relative, a/ or b/ diff prefixes, or bare filename/suffix. - * Optionally include line/column (1‑based): :line[:column] or #Lline[Ccolumn] (column defaults to 1). - * Do not use URIs like file://, vscode://, or https://. 
- * Do not provide range of lines - * Examples: src/app.ts, src/app.ts:42, b/server/index.js#L10, C:\repo\project\main.rs:12:5 diff --git a/codex-rs/core/src/client_common.rs b/codex-rs/core/src/client_common.rs index 7e3eb1ac98..6e5e11204d 100644 --- a/codex-rs/core/src/client_common.rs +++ b/codex-rs/core/src/client_common.rs @@ -431,7 +431,7 @@ mod tests { expects_apply_patch_instructions: false, }, InstructionsTestCase { - slug: "arcticfox", + slug: "gpt-5.1-codex-max", expects_apply_patch_instructions: false, }, ]; diff --git a/codex-rs/core/src/config/edit.rs b/codex-rs/core/src/config/edit.rs index d3bc5cf99b..bd7c16a0ae 100644 --- a/codex-rs/core/src/config/edit.rs +++ b/codex-rs/core/src/config/edit.rs @@ -846,7 +846,7 @@ hide_gpt5_1_migration_prompt = true } #[test] - fn blocking_set_hide_arcticfox_migration_prompt_preserves_table() { + fn blocking_set_hide_gpt_5_1_codex_max_migration_prompt_preserves_table() { let tmp = tempdir().expect("tmpdir"); let codex_home = tmp.path(); std::fs::write( @@ -860,7 +860,7 @@ existing = "value" codex_home, None, &[ConfigEdit::SetNoticeHideModelMigrationPrompt( - "hide_arcticfox_migration_prompt".to_string(), + "hide_gpt-5.1-codex-max_migration_prompt".to_string(), true, )], ) @@ -870,7 +870,7 @@ existing = "value" std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config"); let expected = r#"[notice] existing = "value" -hide_arcticfox_migration_prompt = true +"hide_gpt-5.1-codex-max_migration_prompt" = true "#; assert_eq!(contents, expected); } diff --git a/codex-rs/core/src/config/mod.rs b/codex-rs/core/src/config/mod.rs index 36d4ef4dbc..1f53b5c94d 100644 --- a/codex-rs/core/src/config/mod.rs +++ b/codex-rs/core/src/config/mod.rs @@ -62,11 +62,11 @@ pub mod profile; pub mod types; #[cfg(target_os = "windows")] -pub const OPENAI_DEFAULT_MODEL: &str = "arcticfox"; +pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5.1-codex-max"; #[cfg(not(target_os = "windows"))] -pub const OPENAI_DEFAULT_MODEL: &str = "arcticfox"; -const OPENAI_DEFAULT_REVIEW_MODEL: &str = "arcticfox"; -pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "arcticfox"; +pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5.1-codex-max"; +const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5.1-codex-max"; +pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "gpt-5.1-codex-max"; /// Maximum number of bytes of the documentation that will be embedded. Larger /// files are *silently truncated* to this size so we do not take up too much of @@ -81,7 +81,7 @@ pub struct Config { /// Optional override of model selection. pub model: String, - /// Model used specifically for review sessions. Defaults to "arcticfox". + /// Model used specifically for review sessions. Defaults to "gpt-5.1-codex-max". 
pub review_model: String, pub model_family: ModelFamily, diff --git a/codex-rs/core/src/config/types.rs b/codex-rs/core/src/config/types.rs index 45b2c1efbf..b7f16a9028 100644 --- a/codex-rs/core/src/config/types.rs +++ b/codex-rs/core/src/config/types.rs @@ -378,8 +378,8 @@ pub struct Notice { pub hide_rate_limit_model_nudge: Option, /// Tracks whether the user has seen the model migration prompt pub hide_gpt5_1_migration_prompt: Option, - /// Tracks whether the user has seen the arcticfox migration prompt - pub hide_arcticfox_migration_prompt: Option, + /// Tracks whether the user has seen the gpt-5.1-codex-max migration prompt + pub hide_gpt_5_1_codex_max_migration_prompt: Option, } impl Notice { diff --git a/codex-rs/core/src/model_family.rs b/codex-rs/core/src/model_family.rs index be57c53d9c..725a998bed 100644 --- a/codex-rs/core/src/model_family.rs +++ b/codex-rs/core/src/model_family.rs @@ -12,7 +12,7 @@ const BASE_INSTRUCTIONS: &str = include_str!("../prompt.md"); const GPT_5_CODEX_INSTRUCTIONS: &str = include_str!("../gpt_5_codex_prompt.md"); const GPT_5_1_INSTRUCTIONS: &str = include_str!("../gpt_5_1_prompt.md"); -const ARCTICFOX_INSTRUCTIONS: &str = include_str!("../arcticfox_prompt.md"); +const GPT_5_1_CODEX_MAX_INSTRUCTIONS: &str = include_str!("../gpt-5.1-codex-max_prompt.md"); /// A model family is a group of models that share certain characteristics. #[derive(Debug, Clone, PartialEq, Eq, Hash)] @@ -173,12 +173,12 @@ pub fn find_family_for_model(slug: &str) -> Option { support_verbosity: true, truncation_policy: TruncationPolicy::Tokens(10_000), ) - } else if slug.starts_with("arcticfox") { + } else if slug.starts_with("gpt-5.1-codex-max") { model_family!( slug, slug, supports_reasoning_summaries: true, reasoning_summary_format: ReasoningSummaryFormat::Experimental, - base_instructions: ARCTICFOX_INSTRUCTIONS.to_string(), + base_instructions: GPT_5_1_CODEX_MAX_INSTRUCTIONS.to_string(), apply_patch_tool_type: Some(ApplyPatchToolType::Freeform), shell_type: ConfigShellToolType::ShellCommand, supports_parallel_tool_calls: true, @@ -202,7 +202,7 @@ pub fn find_family_for_model(slug: &str) -> Option { support_verbosity: false, truncation_policy: TruncationPolicy::Tokens(10_000), ) - } else if slug.starts_with("arcticfox") { + } else if slug.starts_with("gpt-5.1-codex-max") { model_family!( slug, slug, supports_reasoning_summaries: true, diff --git a/codex-rs/core/src/openai_model_info.rs b/codex-rs/core/src/openai_model_info.rs index cd5d12f8a4..bd0ca9d31d 100644 --- a/codex-rs/core/src/openai_model_info.rs +++ b/codex-rs/core/src/openai_model_info.rs @@ -72,7 +72,7 @@ pub(crate) fn get_model_info(model_family: &ModelFamily) -> Option { _ if slug.starts_with("gpt-5-codex") || slug.starts_with("gpt-5.1-codex") - || slug.starts_with("arcticfox") => + || slug.starts_with("gpt-5.1-codex-max") => { Some(ModelInfo::new(CONTEXT_WINDOW_272K, MAX_OUTPUT_TOKENS_128K)) } diff --git a/codex-rs/core/tests/suite/client.rs b/codex-rs/core/tests/suite/client.rs index 1495fbf80b..e15e05a99a 100644 --- a/codex-rs/core/tests/suite/client.rs +++ b/codex-rs/core/tests/suite/client.rs @@ -1155,7 +1155,7 @@ async fn token_count_includes_rate_limits_snapshot() { "reasoning_output_tokens": 0, "total_tokens": 123 }, - // Default model is arcticfox in tests → 95% usable context window + // Default model is gpt-5.1-codex-max in tests → 95% usable context window "model_context_window": 258400 }, "rate_limits": { diff --git a/codex-rs/tui/src/app.rs b/codex-rs/tui/src/app.rs index a60f1f2723..1f4aa7eb9a 
100644 --- a/codex-rs/tui/src/app.rs +++ b/codex-rs/tui/src/app.rs @@ -19,7 +19,7 @@ use crate::tui::TuiEvent; use crate::update_action::UpdateAction; use codex_ansi_escape::ansi_escape_line; use codex_app_server_protocol::AuthMode; -use codex_common::model_presets::HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG; +use codex_common::model_presets::HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG; use codex_common::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG; use codex_common::model_presets::ModelUpgrade; use codex_common::model_presets::all_model_presets; @@ -57,7 +57,7 @@ use tokio::sync::mpsc::unbounded_channel; use crate::history_cell::UpdateAvailableHistoryCell; const GPT_5_1_MIGRATION_AUTH_MODES: [AuthMode; 2] = [AuthMode::ChatGPT, AuthMode::ApiKey]; -const ARCTICFOX_MIGRATION_AUTH_MODES: [AuthMode; 1] = [AuthMode::ChatGPT]; +const GPT_5_1_CODEX_MIGRATION_AUTH_MODES: [AuthMode; 1] = [AuthMode::ChatGPT]; #[derive(Debug, Clone)] pub struct AppExitInfo { @@ -106,7 +106,9 @@ fn should_show_model_migration_prompt( fn migration_prompt_hidden(config: &Config, migration_config_key: &str) -> Option { match migration_config_key { - HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG => config.notices.hide_arcticfox_migration_prompt, + HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG => { + config.notices.hide_gpt_5_1_codex_max_migration_prompt + } HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG => config.notices.hide_gpt5_1_migration_prompt, _ => None, } @@ -946,7 +948,7 @@ impl App { fn migration_prompt_allowed_auth_modes(migration_config_key: &str) -> Option<&'static [AuthMode]> { match migration_config_key { HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG => Some(&GPT_5_1_MIGRATION_AUTH_MODES), - HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG => Some(&ARCTICFOX_MIGRATION_AUTH_MODES), + HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG => Some(&GPT_5_1_CODEX_MIGRATION_AUTH_MODES), _ => None, } } @@ -1034,7 +1036,7 @@ mod tests { )); assert!(should_show_model_migration_prompt( "gpt-5.1-codex", - "arcticfox", + "gpt-5.1-codex-max", None )); assert!(!should_show_model_migration_prompt( @@ -1181,14 +1183,14 @@ mod tests { } #[test] - fn arcticfox_migration_limits_to_chatgpt() { + fn gpt_5_1_codex_max_migration_limits_to_chatgpt() { assert!(migration_prompt_allows_auth_mode( Some(AuthMode::ChatGPT), - HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG, + HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG, )); assert!(!migration_prompt_allows_auth_mode( Some(AuthMode::ApiKey), - HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG, + HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG, )); } diff --git a/codex-rs/tui/src/chatwidget.rs b/codex-rs/tui/src/chatwidget.rs index 6d4ba5fdf2..9429ce143e 100644 --- a/codex-rs/tui/src/chatwidget.rs +++ b/codex-rs/tui/src/chatwidget.rs @@ -2030,8 +2030,8 @@ impl ChatWidget { let effort_label = Self::reasoning_effort_label(effort); format!("⚠ {effort_label} reasoning effort can quickly consume Plus plan rate limits.") }); - let warn_for_model = - preset.model.starts_with("gpt-5.1-codex") || preset.model.starts_with("arcticfox"); + let warn_for_model = preset.model.starts_with("gpt-5.1-codex") + || preset.model.starts_with("gpt-5.1-codex-max"); struct EffortChoice { stored: Option, diff --git a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_reasoning_selection_popup.snap b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_reasoning_selection_popup.snap index 227278d05d..b4b89736a9 100644 --- a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_reasoning_selection_popup.snap 
+++ b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_reasoning_selection_popup.snap @@ -2,7 +2,7 @@ source: tui/src/chatwidget/tests.rs expression: popup --- - Select Reasoning Level for arcticfox + Select Reasoning Level for gpt-5.1-codex-max 1. Low Fast responses with lighter reasoning 2. Medium (default) Balances speed and reasoning depth for everyday tasks diff --git a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_reasoning_selection_popup_extra_high_warning.snap b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_reasoning_selection_popup_extra_high_warning.snap index f630598322..c5332ff590 100644 --- a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_reasoning_selection_popup_extra_high_warning.snap +++ b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_reasoning_selection_popup_extra_high_warning.snap @@ -3,7 +3,7 @@ source: tui/src/chatwidget/tests.rs assertion_line: 1548 expression: popup --- - Select Reasoning Level for arcticfox + Select Reasoning Level for gpt-5.1-codex-max 1. Low Fast responses with lighter reasoning 2. Medium (default) Balances speed and reasoning depth for everyday diff --git a/codex-rs/tui/src/chatwidget/tests.rs b/codex-rs/tui/src/chatwidget/tests.rs index 180f9db037..b4305bac7e 100644 --- a/codex-rs/tui/src/chatwidget/tests.rs +++ b/codex-rs/tui/src/chatwidget/tests.rs @@ -1526,13 +1526,13 @@ fn startup_prompts_for_windows_sandbox_when_agent_requested() { fn model_reasoning_selection_popup_snapshot() { let (mut chat, _rx, _op_rx) = make_chatwidget_manual(); - chat.config.model = "arcticfox".to_string(); + chat.config.model = "gpt-5.1-codex-max".to_string(); chat.config.model_reasoning_effort = Some(ReasoningEffortConfig::High); let preset = builtin_model_presets(None) .into_iter() - .find(|preset| preset.model == "arcticfox") - .expect("arcticfox preset"); + .find(|preset| preset.model == "gpt-5.1-codex-max") + .expect("gpt-5.1-codex-max preset"); chat.open_reasoning_popup(preset); let popup = render_bottom_popup(&chat, 80); @@ -1543,13 +1543,13 @@ fn model_reasoning_selection_popup_snapshot() { fn model_reasoning_selection_popup_extra_high_warning_snapshot() { let (mut chat, _rx, _op_rx) = make_chatwidget_manual(); - chat.config.model = "arcticfox".to_string(); + chat.config.model = "gpt-5.1-codex-max".to_string(); chat.config.model_reasoning_effort = Some(ReasoningEffortConfig::XHigh); let preset = builtin_model_presets(None) .into_iter() - .find(|preset| preset.model == "arcticfox") - .expect("arcticfox preset"); + .find(|preset| preset.model == "gpt-5.1-codex-max") + .expect("gpt-5.1-codex-max preset"); chat.open_reasoning_popup(preset); let popup = render_bottom_popup(&chat, 80); @@ -1560,12 +1560,12 @@ fn model_reasoning_selection_popup_extra_high_warning_snapshot() { fn reasoning_popup_shows_extra_high_with_space() { let (mut chat, _rx, _op_rx) = make_chatwidget_manual(); - chat.config.model = "arcticfox".to_string(); + chat.config.model = "gpt-5.1-codex-max".to_string(); let preset = builtin_model_presets(None) .into_iter() - .find(|preset| preset.model == "arcticfox") - .expect("arcticfox preset"); + .find(|preset| preset.model == "gpt-5.1-codex-max") + .expect("gpt-5.1-codex-max preset"); chat.open_reasoning_popup(preset); let popup = render_bottom_popup(&chat, 120); diff --git a/codex-rs/tui/src/model_migration.rs b/codex-rs/tui/src/model_migration.rs index 1cb8543570..492fc43517 100644 --- 
a/codex-rs/tui/src/model_migration.rs +++ b/codex-rs/tui/src/model_migration.rs @@ -5,7 +5,7 @@ use crate::render::renderable::RenderableExt as _; use crate::tui::FrameRequester; use crate::tui::Tui; use crate::tui::TuiEvent; -use codex_common::model_presets::HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG; +use codex_common::model_presets::HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG; use codex_common::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG; use crossterm::event::KeyCode; use crossterm::event::KeyEvent; @@ -36,8 +36,8 @@ pub(crate) struct ModelMigrationCopy { pub(crate) fn migration_copy_for_config(migration_config_key: &str) -> ModelMigrationCopy { match migration_config_key { HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG => gpt5_migration_copy(), - HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG => arcticfox_migration_copy(), - _ => arcticfox_migration_copy(), + HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG => gpt_5_1_codex_max_migration_copy(), + _ => gpt_5_1_codex_max_migration_copy(), } } @@ -176,17 +176,19 @@ impl WidgetRef for &ModelMigrationScreen { } } -fn arcticfox_migration_copy() -> ModelMigrationCopy { +fn gpt_5_1_codex_max_migration_copy() -> ModelMigrationCopy { ModelMigrationCopy { - heading: vec!["Introducing arcticfox".bold()], + heading: vec!["Codex just got an upgrade. Introducing gpt-5.1-codex-max".bold()], content: vec![ - Line::from("We've upgraded our family of models supported in Codex to arcticfox."), + Line::from( + "Codex is now powered by gpt-5.1-codex-max, our new frontier agentic coding model built for long-running, project-scale work. It's faster, more capable, and more token-efficient than gpt-5.1-codex.", + ), Line::from( "You can continue using legacy models by specifying them directly with the -m option or in your config.toml.", ), Line::from(vec![ "Learn more at ".into(), - "www.openai.com/index/arcticfox".cyan().underlined(), + "www.openai.com/index/gpt-5-1-codex-max".cyan().underlined(), ".".into(), ]), Line::from(vec!["Press enter to continue".dim()]), @@ -217,7 +219,7 @@ fn gpt5_migration_copy() -> ModelMigrationCopy { #[cfg(test)] mod tests { use super::ModelMigrationScreen; - use super::arcticfox_migration_copy; + use super::gpt_5_1_codex_max_migration_copy; use super::migration_copy_for_config; use crate::custom_terminal::Terminal; use crate::test_backend::VT100Backend; @@ -236,8 +238,10 @@ mod tests { let mut terminal = Terminal::with_options(backend).expect("terminal"); terminal.set_viewport_area(Rect::new(0, 0, width, height)); - let screen = - ModelMigrationScreen::new(FrameRequester::test_dummy(), arcticfox_migration_copy()); + let screen = ModelMigrationScreen::new( + FrameRequester::test_dummy(), + gpt_5_1_codex_max_migration_copy(), + ); { let mut frame = terminal.get_frame(); @@ -304,8 +308,10 @@ mod tests { #[test] fn escape_key_accepts_prompt() { - let mut screen = - ModelMigrationScreen::new(FrameRequester::test_dummy(), arcticfox_migration_copy()); + let mut screen = ModelMigrationScreen::new( + FrameRequester::test_dummy(), + gpt_5_1_codex_max_migration_copy(), + ); // Simulate pressing Escape screen.handle_key(KeyEvent::new( diff --git a/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt.snap b/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt.snap index 2780fe7b78..11b5b01475 100644 --- a/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt.snap +++ b/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt.snap @@ 
-2,14 +2,14 @@ source: tui/src/model_migration.rs expression: terminal.backend() --- -> Introducing arcticfox +> Introducing gpt-5.1-codex-max We've upgraded our family of models supported in Codex to - arcticfox. + gpt-5.1-codex-max. You can continue using legacy models by specifying them directly with the -m option or in your config.toml. - Learn more at www.openai.com/index/arcticfox. + Learn more at www.openai.com/index/gpt-5.1-codex-max. Press enter to continue diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap index e0d752d5e4..3ecc4fa8ed 100644 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap @@ -10,7 +10,7 @@ expression: sanitized │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ │ │ -│ Model: arcticfox (reasoning none, summaries auto) │ +│ Model: gpt-5.1-codex-max (reasoning none, summaries auto) │ │ Directory: [[workspace]] │ │ Approval: on-request │ │ Sandbox: read-only │ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap index d4be306410..c22577407e 100644 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap @@ -4,20 +4,20 @@ expression: sanitized --- /status -╭───────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ -│ │ -│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ -│ information on rate limits and credits │ -│ │ -│ Model: arcticfox (reasoning high, summaries detailed) │ -│ Directory: [[workspace]] │ -│ Approval: on-request │ -│ Sandbox: workspace-write │ -│ Agents.md: │ -│ │ -│ Token usage: 1.9K total (1K input + 900 output) │ -│ Context window: 100% left (2.25K used / 272K) │ -│ 5h limit: [██████░░░░░░░░░░░░░░] 28% left (resets 03:14) │ -│ Weekly limit: [███████████░░░░░░░░░] 55% left (resets 03:24) │ -╰───────────────────────────────────────────────────────────────────╯ +╭───────────────────────────────────────────────────────────────────────────╮ +│ >_ OpenAI Codex (v0.0.0) │ +│ │ +│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ +│ information on rate limits and credits │ +│ │ +│ Model: gpt-5.1-codex-max (reasoning high, summaries detailed) │ +│ Directory: [[workspace]] │ +│ Approval: on-request │ +│ Sandbox: workspace-write │ +│ Agents.md: │ +│ │ +│ Token usage: 1.9K total (1K input + 900 output) │ +│ Context window: 100% left (2.25K used / 272K) │ +│ 5h limit: [██████░░░░░░░░░░░░░░] 28% left (resets 03:14) │ +│ Weekly limit: [███████████░░░░░░░░░] 55% left (resets 03:24) │ +╰───────────────────────────────────────────────────────────────────────────╯ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap index 36d2a8b4da..f0e6b73445 100644 --- 
a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap @@ -4,19 +4,19 @@ expression: sanitized --- /status -╭───────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ -│ │ -│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ -│ information on rate limits and credits │ -│ │ -│ Model: arcticfox (reasoning none, summaries auto) │ -│ Directory: [[workspace]] │ -│ Approval: on-request │ -│ Sandbox: read-only │ -│ Agents.md: │ -│ │ -│ Token usage: 750 total (500 input + 250 output) │ -│ Context window: 100% left (750 used / 272K) │ -│ Limits: data not available yet │ -╰───────────────────────────────────────────────────────────────╯ +╭───────────────────────────────────────────────────────────────────────╮ +│ >_ OpenAI Codex (v0.0.0) │ +│ │ +│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ +│ information on rate limits and credits │ +│ │ +│ Model: gpt-5.1-codex-max (reasoning none, summaries auto) │ +│ Directory: [[workspace]] │ +│ Approval: on-request │ +│ Sandbox: read-only │ +│ Agents.md: │ +│ │ +│ Token usage: 750 total (500 input + 250 output) │ +│ Context window: 100% left (750 used / 272K) │ +│ Limits: data not available yet │ +╰───────────────────────────────────────────────────────────────────────╯ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap index 36d2a8b4da..f0e6b73445 100644 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap @@ -4,19 +4,19 @@ expression: sanitized --- /status -╭───────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ -│ │ -│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ -│ information on rate limits and credits │ -│ │ -│ Model: arcticfox (reasoning none, summaries auto) │ -│ Directory: [[workspace]] │ -│ Approval: on-request │ -│ Sandbox: read-only │ -│ Agents.md: │ -│ │ -│ Token usage: 750 total (500 input + 250 output) │ -│ Context window: 100% left (750 used / 272K) │ -│ Limits: data not available yet │ -╰───────────────────────────────────────────────────────────────╯ +╭───────────────────────────────────────────────────────────────────────╮ +│ >_ OpenAI Codex (v0.0.0) │ +│ │ +│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ +│ information on rate limits and credits │ +│ │ +│ Model: gpt-5.1-codex-max (reasoning none, summaries auto) │ +│ Directory: [[workspace]] │ +│ Approval: on-request │ +│ Sandbox: read-only │ +│ Agents.md: │ +│ │ +│ Token usage: 750 total (500 input + 250 output) │ +│ Context window: 100% left (750 used / 272K) │ +│ Limits: data not available yet │ +╰───────────────────────────────────────────────────────────────────────╯ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap index 9163942dfe..a12be950bc 100644 --- 
a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap @@ -4,21 +4,21 @@ expression: sanitized --- /status -╭─────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ -│ │ -│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ -│ information on rate limits and credits │ -│ │ -│ Model: arcticfox (reasoning none, summaries auto) │ -│ Directory: [[workspace]] │ -│ Approval: on-request │ -│ Sandbox: read-only │ -│ Agents.md: │ -│ │ -│ Token usage: 1.9K total (1K input + 900 output) │ -│ Context window: 100% left (2.25K used / 272K) │ -│ 5h limit: [██████░░░░░░░░░░░░░░] 28% left (resets 03:14) │ -│ Weekly limit: [████████████░░░░░░░░] 60% left (resets 03:34) │ -│ Warning: limits may be stale - start new turn to refresh. │ -╰─────────────────────────────────────────────────────────────────────╯ +╭───────────────────────────────────────────────────────────────────────╮ +│ >_ OpenAI Codex (v0.0.0) │ +│ │ +│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ +│ information on rate limits and credits │ +│ │ +│ Model: gpt-5.1-codex-max (reasoning none, summaries auto) │ +│ Directory: [[workspace]] │ +│ Approval: on-request │ +│ Sandbox: read-only │ +│ Agents.md: │ +│ │ +│ Token usage: 1.9K total (1K input + 900 output) │ +│ Context window: 100% left (2.25K used / 272K) │ +│ 5h limit: [██████░░░░░░░░░░░░░░] 28% left (resets 03:14) │ +│ Weekly limit: [████████████░░░░░░░░] 60% left (resets 03:34) │ +│ Warning: limits may be stale - start new turn to refresh. │ +╰───────────────────────────────────────────────────────────────────────╯ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap index 5f4554bda0..02ba1adec9 100644 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap @@ -4,19 +4,19 @@ expression: sanitized --- /status -╭───────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ -│ │ -│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ -│ information on rate limits and credits │ -│ │ -│ Model: arcticfox (reasoning high, summaries detailed) │ -│ Directory: [[workspace]] │ -│ Approval: on-request │ -│ Sandbox: read-only │ -│ Agents.md: │ -│ │ -│ Token usage: 1.9K total (1K input + 900 output) │ -│ Context window: 100% left (2.25K used / 272K) │ -│ 5h limit: [██████░░░░░░░░░░░░░░] 28% left (resets 03:14) │ -╰───────────────────────────────────────────────────────────────────╯ +╭────────────────────────────────────────────────────────────────────╮ +│ >_ OpenAI Codex (v0.0.0) │ +│ │ +│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ +│ information on rate limits and credits │ +│ │ +│ Model: gpt-5.1-codex-max (reasoning high, summaries de │ +│ Directory: [[workspace]] │ +│ Approval: on-request │ +│ Sandbox: read-only │ +│ Agents.md: │ +│ │ +│ Token usage: 1.9K total (1K input + 900 output) │ +│ Context window: 100% left (2.25K used / 272K) │ +│ 5h limit: [██████░░░░░░░░░░░░░░] 28% left (resets 03:14) │ 
+╰────────────────────────────────────────────────────────────────────╯ diff --git a/codex-rs/tui/src/status/tests.rs b/codex-rs/tui/src/status/tests.rs index 0597a44112..c6029bde7a 100644 --- a/codex-rs/tui/src/status/tests.rs +++ b/codex-rs/tui/src/status/tests.rs @@ -81,7 +81,7 @@ fn reset_at_from(captured_at: &chrono::DateTime, seconds: i64) -> fn status_snapshot_includes_reasoning_details() { let temp_home = TempDir::new().expect("temp home"); let mut config = test_config(&temp_home); - config.model = "arcticfox".to_string(); + config.model = "gpt-5.1-codex-max".to_string(); config.model_provider_id = "openai".to_string(); config.model_reasoning_effort = Some(ReasoningEffort::High); config.model_reasoning_summary = ReasoningSummary::Detailed; @@ -144,7 +144,7 @@ fn status_snapshot_includes_reasoning_details() { fn status_snapshot_includes_monthly_limit() { let temp_home = TempDir::new().expect("temp home"); let mut config = test_config(&temp_home); - config.model = "arcticfox".to_string(); + config.model = "gpt-5.1-codex-max".to_string(); config.model_provider_id = "openai".to_string(); config.cwd = PathBuf::from("/workspace/tests"); @@ -194,7 +194,7 @@ fn status_snapshot_includes_monthly_limit() { fn status_card_token_usage_excludes_cached_tokens() { let temp_home = TempDir::new().expect("temp home"); let mut config = test_config(&temp_home); - config.model = "arcticfox".to_string(); + config.model = "gpt-5.1-codex-max".to_string(); config.cwd = PathBuf::from("/workspace/tests"); let auth_manager = test_auth_manager(&config); @@ -232,7 +232,7 @@ fn status_card_token_usage_excludes_cached_tokens() { fn status_snapshot_truncates_in_narrow_terminal() { let temp_home = TempDir::new().expect("temp home"); let mut config = test_config(&temp_home); - config.model = "arcticfox".to_string(); + config.model = "gpt-5.1-codex-max".to_string(); config.model_provider_id = "openai".to_string(); config.model_reasoning_effort = Some(ReasoningEffort::High); config.model_reasoning_summary = ReasoningSummary::Detailed; @@ -285,7 +285,7 @@ fn status_snapshot_truncates_in_narrow_terminal() { fn status_snapshot_shows_missing_limits_message() { let temp_home = TempDir::new().expect("temp home"); let mut config = test_config(&temp_home); - config.model = "arcticfox".to_string(); + config.model = "gpt-5.1-codex-max".to_string(); config.cwd = PathBuf::from("/workspace/tests"); let auth_manager = test_auth_manager(&config); @@ -325,7 +325,7 @@ fn status_snapshot_shows_missing_limits_message() { fn status_snapshot_shows_empty_limits_message() { let temp_home = TempDir::new().expect("temp home"); let mut config = test_config(&temp_home); - config.model = "arcticfox".to_string(); + config.model = "gpt-5.1-codex-max".to_string(); config.cwd = PathBuf::from("/workspace/tests"); let auth_manager = test_auth_manager(&config); @@ -370,7 +370,7 @@ fn status_snapshot_shows_empty_limits_message() { fn status_snapshot_shows_stale_limits_message() { let temp_home = TempDir::new().expect("temp home"); let mut config = test_config(&temp_home); - config.model = "arcticfox".to_string(); + config.model = "gpt-5.1-codex-max".to_string(); config.cwd = PathBuf::from("/workspace/tests"); let auth_manager = test_auth_manager(&config); diff --git a/docs/config.md b/docs/config.md index fee86db30d..a1879effa1 100644 --- a/docs/config.md +++ b/docs/config.md @@ -64,7 +64,7 @@ Notes: The model that Codex should use. 
```toml -model = "gpt-5.1" # overrides the default ("arcticfox" across platforms) +model = "gpt-5.1" # overrides the default ("gpt-5.1-codex-max" across platforms) ``` ### model_providers @@ -191,7 +191,7 @@ model = "mistral" ### model_reasoning_effort -If the selected model is known to support reasoning (for example: `o3`, `o4-mini`, `codex-*`, `arcticfox`, `gpt-5.1`, `gpt-5.1-codex`), reasoning is enabled by default when using the Responses API. As explained in the [OpenAI Platform documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning), this can be set to: +If the selected model is known to support reasoning (for example: `o3`, `o4-mini`, `codex-*`, `gpt-5.1-codex-max`, `gpt-5.1`, `gpt-5.1-codex`), reasoning is enabled by default when using the Responses API. As explained in the [OpenAI Platform documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning), this can be set to: - `"minimal"` - `"low"` @@ -835,7 +835,7 @@ Users can specify config values at multiple levels. Order of precedence is as fo 1. custom command-line argument, e.g., `--model o3` 2. as part of a profile, where the `--profile` is specified via a CLI (or in the config file itself) 3. as an entry in `config.toml`, e.g., `model = "o3"` -4. the default value that comes with Codex CLI (i.e., Codex CLI defaults to `arcticfox`) +4. the default value that comes with Codex CLI (i.e., Codex CLI defaults to `gpt-5.1-codex-max`) ### history @@ -938,7 +938,7 @@ Valid values: | Key | Type / Values | Notes | | ------------------------------------------------ | ----------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------- | -| `model` | string | Model to use (e.g., `arcticfox`). | +| `model` | string | Model to use (e.g., `gpt-5.1-codex-max`). | | `model_provider` | string | Provider id from `model_providers` (default: `openai`). | | `model_context_window` | number | Context window tokens. | | `model_max_output_tokens` | number | Max output tokens. | diff --git a/docs/example-config.md b/docs/example-config.md index 26a4b8b503..335a826e26 100644 --- a/docs/example-config.md +++ b/docs/example-config.md @@ -18,11 +18,11 @@ Use this example configuration as a starting point. For an explanation of each f # Core Model Selection ################################################################################ -# Primary model used by Codex. Default: "arcticfox" on all platforms. -model = "arcticfox" +# Primary model used by Codex. Default: "gpt-5.1-codex-max" on all platforms. +model = "gpt-5.1-codex-max" -# Model used by the /review feature (code reviews). Default: "arcticfox". -review_model = "arcticfox" +# Model used by the /review feature (code reviews). Default: "gpt-5.1-codex-max". +review_model = "gpt-5.1-codex-max" # Provider id selected from [model_providers]. Default: "openai". 
model_provider = "openai" @@ -32,7 +32,7 @@ model_provider = "openai" # model_context_window = 128000 # tokens; default: auto for model # model_max_output_tokens = 8192 # tokens; default: auto for model # model_auto_compact_token_limit = 0 # disable/override auto; default: model family specific -# tool_output_token_limit = 10000 # tokens stored per tool output; default: 10000 for arcticfox +# tool_output_token_limit = 10000 # tokens stored per tool output; default: 10000 for gpt-5.1-codex-max ################################################################################ # Reasoning & Verbosity (Responses API capable models) @@ -315,7 +315,7 @@ mcp_oauth_credentials_store = "auto" [profiles] # [profiles.default] -# model = "arcticfox" +# model = "gpt-5.1-codex-max" # model_provider = "openai" # approval_policy = "on-request" # sandbox_mode = "read-only" diff --git a/docs/exec.md b/docs/exec.md index 0f5808d0db..5a17155a82 100644 --- a/docs/exec.md +++ b/docs/exec.md @@ -99,7 +99,7 @@ codex exec resume --last "Fix use-after-free issues" Only the conversation context is preserved; you must still provide flags to customize Codex behavior. ```shell -codex exec --model arcticfox --json "Review the change, look for use-after-free issues" +codex exec --model gpt-5.1-codex-max --json "Review the change, look for use-after-free issues" codex exec --model gpt-5.1 --json resume --last "Fix use-after-free issues" ``` From 9994914b8ce30483b4134f8a4365af9044b226e6 Mon Sep 17 00:00:00 2001 From: Ahmed Ibrahim Date: Wed, 19 Nov 2025 01:11:15 -0800 Subject: [PATCH 2/5] release max friends --- codex-rs/core/gpt-5.1-codex-max_prompt.md | 117 ++++++++++++++++++ ...s_snapshot_includes_monthly_limit.snap.new | 23 ++++ ...apshot_includes_reasoning_details.snap.new | 24 ++++ ...apshot_shows_empty_limits_message.snap.new | 23 ++++ ...shot_shows_missing_limits_message.snap.new | 23 ++++ ...apshot_shows_stale_limits_message.snap.new | 25 ++++ ...shot_truncates_in_narrow_terminal.snap.new | 23 ++++ 7 files changed, 258 insertions(+) create mode 100644 codex-rs/core/gpt-5.1-codex-max_prompt.md create mode 100644 codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap.new create mode 100644 codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap.new create mode 100644 codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap.new create mode 100644 codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap.new create mode 100644 codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap.new create mode 100644 codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap.new diff --git a/codex-rs/core/gpt-5.1-codex-max_prompt.md b/codex-rs/core/gpt-5.1-codex-max_prompt.md new file mode 100644 index 0000000000..292e5d7d0f --- /dev/null +++ b/codex-rs/core/gpt-5.1-codex-max_prompt.md @@ -0,0 +1,117 @@ +You are Codex, based on GPT-5. You are running as a coding agent in the Codex CLI on a user's computer. + +## General + +- When searching for text or files, prefer using `rg` or `rg --files` respectively because `rg` is much faster than alternatives like `grep`. (If the `rg` command is not found, then use alternatives.) + +## Editing constraints + +- Default to ASCII when editing or creating files. 
Only introduce non-ASCII or other Unicode characters when there is a clear justification and the file already uses them.
+- Add succinct code comments that explain what is going on if code is not self-explanatory. You should not add comments like "Assigns the value to the variable", but a brief comment might be useful ahead of a complex code block that the user would otherwise have to spend time parsing out. Usage of these comments should be rare.
+- Try to use apply_patch for single file edits, but it is fine to explore other options to make the edit if it does not work well. Do not use apply_patch for changes that are auto-generated (e.g. generating package.json or running a lint or format command like gofmt) or when scripting is more efficient (such as searching and replacing a string across a codebase).
+- You may be in a dirty git worktree.
+    * NEVER revert existing changes you did not make unless explicitly requested, since these changes were made by the user.
+    * If asked to make a commit or code edits and there are unrelated changes to your work or changes that you didn't make in those files, don't revert those changes.
+    * If the changes are in files you've touched recently, you should read carefully and understand how you can work with the changes rather than reverting them.
+    * If the changes are in unrelated files, just ignore them and don't revert them.
+- Do not amend a commit unless explicitly requested to do so.
+- While you are working, you might notice unexpected changes that you didn't make. If this happens, STOP IMMEDIATELY and ask the user how they would like to proceed.
+- **NEVER** use destructive commands like `git reset --hard` or `git checkout --` unless specifically requested or approved by the user.
+
+## Plan tool
+
+When using the planning tool:
+- Skip using the planning tool for straightforward tasks (roughly the easiest 25%).
+- Do not make single-step plans.
+- When you have made a plan, update it after performing one of the sub-tasks that you shared in the plan.
+
+## Codex CLI harness, sandboxing, and approvals
+
+The Codex CLI harness supports several different configurations for sandboxing and escalation approvals that the user can choose from.
+
+Filesystem sandboxing defines which files can be read or written. The options for `sandbox_mode` are:
+- **read-only**: The sandbox only permits reading files.
+- **workspace-write**: The sandbox permits reading files, and editing files in `cwd` and `writable_roots`. Editing files in other directories requires approval.
+- **danger-full-access**: No filesystem sandboxing - all commands are permitted.
+
+Network sandboxing defines whether the network can be accessed without approval. Options for `network_access` are:
+- **restricted**: Requires approval
+- **enabled**: No approval needed
+
+Approvals are your mechanism to get user consent to run shell commands without the sandbox. Possible configuration options for `approval_policy` are:
+- **untrusted**: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands.
+- **on-failure**: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox.
+- **on-request**: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available.
If it is, you'll see parameters for it in the `shell` command description.)
+- **never**: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost best to finish the task and validate your work before yielding. If this mode is paired with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding.
+
+When you are running with `approval_policy == on-request`, and sandboxing enabled, here are scenarios where you'll need to request approval:
+- You need to run a command that writes to a directory that requires approval (e.g. running tests that write to /var)
+- You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files.
+- You are running sandboxed and need to run a command that requires network access (e.g. installing packages)
+- If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval. ALWAYS proceed to use the `with_escalated_permissions` and `justification` parameters - do not message the user before requesting approval for the command.
+- You are about to take a potentially destructive action such as an `rm` or `git reset` that the user did not explicitly ask for
+- (for all of these, you should weigh alternative paths that do not require approval)
+
+When `sandbox_mode` is set to read-only, you'll need to request approval for any command that isn't a read.
+
+You will be told what filesystem sandboxing, network sandboxing, and approval mode are active in a developer or user message. If you are not told about this, assume that you are running with workspace-write, network sandboxing enabled, and approval on-failure.
+
+Although approvals introduce friction for the user because your work is paused until the user responds, you should leverage them when necessary to accomplish important work. If completing the task requires escalated permissions, do not let these settings or the sandbox deter you from attempting to accomplish the user's task unless it is set to "never", in which case never ask for approvals.
+
+When requesting approval to execute a command that will require escalated privileges:
+  - Provide the `with_escalated_permissions` parameter with the boolean value true
+  - Include a short, one-sentence explanation for why you need to enable `with_escalated_permissions` in the justification parameter
+
+## Special user requests
+
+- If the user makes a simple request (such as asking for the time) which you can fulfill by running a terminal command (such as `date`), you should do so.
+- If the user asks for a "review", default to a code review mindset: prioritise identifying bugs, risks, behavioural regressions, and missing tests. Findings must be the primary focus of the response - keep summaries or overviews brief and place them only after enumerating the issues. Present findings first (ordered by severity with file/line references), follow with open questions or assumptions, and offer a change-summary only as a secondary detail. If no findings are discovered, state that explicitly and mention any residual risks or testing gaps.
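+
+To make the sandboxing and approval modes above concrete, here is a minimal `config.toml` sketch (the key names follow docs/config.md in this series; the chosen values are placeholders, not recommendations):
+
+```toml
+# Illustrative only: pick the policy and sandbox that fit the task.
+approval_policy = "on-request"      # or: untrusted | on-failure | never
+sandbox_mode    = "workspace-write" # or: read-only | danger-full-access
+```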
+
+## Frontend tasks
+When doing frontend design tasks, avoid collapsing into "AI slop" or safe, average-looking layouts.
+Aim for interfaces that feel intentional, bold, and a bit surprising.
+- Typography: Use expressive, purposeful fonts and avoid default stacks (Inter, Roboto, Arial, system).
+- Color & Look: Choose a clear visual direction; define CSS variables; avoid purple-on-white defaults. No purple bias or dark mode bias.
+- Motion: Use a few meaningful animations (page-load, staggered reveals) instead of generic micro-motions.
+- Background: Don't rely on flat, single-color backgrounds; use gradients, shapes, or subtle patterns to build atmosphere.
+- Overall: Avoid boilerplate layouts and interchangeable UI patterns. Vary themes, type families, and visual languages across outputs.
+- Ensure the page loads properly on both desktop and mobile.
+
+Exception: If working within an existing website or design system, preserve the established patterns, structure, and visual language.
+
+## Presenting your work and final message
+
+You are producing plain text that will later be styled by the CLI. Follow these rules exactly. Formatting should make results easy to scan, but not feel mechanical. Use judgment to decide how much structure adds value.
+
+- Default: be very concise; friendly coding teammate tone.
+- Ask only when needed; suggest ideas; mirror the user's style.
+- For substantial work, summarize clearly; follow final‑answer formatting.
+- Skip heavy formatting for simple confirmations.
+- Don't dump large files you've written; reference paths only.
+- No "save/copy this file" - the user is on the same machine.
+- Offer logical next steps (tests, commits, build) briefly; add verify steps if you couldn't do something.
+- For code changes:
+  * Lead with a quick explanation of the change, and then give more details on the context covering where and why a change was made. Do not start this explanation with "summary", just jump right in.
+  * If there are natural next steps the user may want to take, suggest them at the end of your response. Do not make suggestions if there are no natural next steps.
+  * When suggesting multiple options, use numeric lists for the suggestions so the user can quickly respond with a single number.
+- The user does not see command execution outputs. When asked to show the output of a command (e.g. `git show`), relay the important details in your answer or summarize the key lines so the user understands the result.
+
+### Final answer structure and style guidelines
+
+- Plain text; CLI handles styling. Use structure only when it helps scannability.
+- Headers: optional; short Title Case (1-3 words) wrapped in **…**; no blank line before the first bullet; add only if they truly help.
+- Bullets: use - ; merge related points; keep to one line when possible; 4–6 per list ordered by importance; keep phrasing consistent.
+- Monospace: backticks for commands/paths/env vars/code ids and inline examples; use for literal keyword bullets; never combine with **.
+- Code samples or multi-line snippets should be wrapped in fenced code blocks; include an info string as often as possible.
+- Structure: group related bullets; order sections general → specific → supporting; for subsections, start with a bolded keyword bullet, then items; match complexity to the task.
+- Tone: collaborative, concise, factual; present tense, active voice; self‑contained; no "above/below"; parallel wording.
+- Don'ts: no nested bullets/hierarchies; no ANSI codes; don't cram unrelated keywords; keep keyword lists short—wrap/reformat if long; avoid naming formatting styles in answers. +- Adaptation: code explanations → precise, structured with code refs; simple tasks → lead with outcome; big changes → logical walkthrough + rationale + next actions; casual one-offs → plain sentences, no headers/bullets. +- File References: When referencing files in your response follow the below rules: + * Use inline code to make file paths clickable. + * Each reference should have a stand alone path. Even if it's the same file. + * Accepted: absolute, workspace‑relative, a/ or b/ diff prefixes, or bare filename/suffix. + * Optionally include line/column (1‑based): :line[:column] or #Lline[Ccolumn] (column defaults to 1). + * Do not use URIs like file://, vscode://, or https://. + * Do not provide range of lines + * Examples: src/app.ts, src/app.ts:42, b/server/index.js#L10, C:\repo\project\main.rs:12:5 diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap.new b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap.new new file mode 100644 index 0000000000..9296c1f201 --- /dev/null +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap.new @@ -0,0 +1,23 @@ +--- +source: tui/src/status/tests.rs +assertion_line: 190 +expression: sanitized +--- +/status + +╭────────────────────────────────────────────────────────────────────────────╮ +│ >_ OpenAI Codex (v0.0.0) │ +│ │ +│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ +│ information on rate limits and credits │ +│ │ +│ Model: arcticfox (reasoning none, summaries auto) │ +│ Directory: [[workspace]] │ +│ Approval: on-request │ +│ Sandbox: read-only │ +│ Agents.md: │ +│ │ +│ Token usage: 1.2K total (800 input + 400 output) │ +│ Context window: 100% left (1.2K used / 272K) │ +│ Monthly limit: [██████████████████░░] 88% left (resets 07:08 on 7 May) │ +╰────────────────────────────────────────────────────────────────────────────╯ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap.new b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap.new new file mode 100644 index 0000000000..ba706be84d --- /dev/null +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap.new @@ -0,0 +1,24 @@ +--- +source: tui/src/status/tests.rs +assertion_line: 140 +expression: sanitized +--- +/status + +╭───────────────────────────────────────────────────────────────────╮ +│ >_ OpenAI Codex (v0.0.0) │ +│ │ +│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ +│ information on rate limits and credits │ +│ │ +│ Model: arcticfox (reasoning high, summaries detailed) │ +│ Directory: [[workspace]] │ +│ Approval: on-request │ +│ Sandbox: workspace-write │ +│ Agents.md: │ +│ │ +│ Token usage: 1.9K total (1K input + 900 output) │ +│ Context window: 100% left (2.25K used / 272K) │ +│ 5h limit: [██████░░░░░░░░░░░░░░] 28% left (resets 03:14) │ +│ Weekly limit: [███████████░░░░░░░░░] 55% left (resets 03:24) │ +╰───────────────────────────────────────────────────────────────────╯ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap.new 
b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap.new new file mode 100644 index 0000000000..cb92f872c3 --- /dev/null +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap.new @@ -0,0 +1,23 @@ +--- +source: tui/src/status/tests.rs +assertion_line: 366 +expression: sanitized +--- +/status + +╭───────────────────────────────────────────────────────────────╮ +│ >_ OpenAI Codex (v0.0.0) │ +│ │ +│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ +│ information on rate limits and credits │ +│ │ +│ Model: arcticfox (reasoning none, summaries auto) │ +│ Directory: [[workspace]] │ +│ Approval: on-request │ +│ Sandbox: read-only │ +│ Agents.md: │ +│ │ +│ Token usage: 750 total (500 input + 250 output) │ +│ Context window: 100% left (750 used / 272K) │ +│ Limits: data not available yet │ +╰───────────────────────────────────────────────────────────────╯ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap.new b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap.new new file mode 100644 index 0000000000..180a4d8d0c --- /dev/null +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap.new @@ -0,0 +1,23 @@ +--- +source: tui/src/status/tests.rs +assertion_line: 321 +expression: sanitized +--- +/status + +╭───────────────────────────────────────────────────────────────╮ +│ >_ OpenAI Codex (v0.0.0) │ +│ │ +│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ +│ information on rate limits and credits │ +│ │ +│ Model: arcticfox (reasoning none, summaries auto) │ +│ Directory: [[workspace]] │ +│ Approval: on-request │ +│ Sandbox: read-only │ +│ Agents.md: │ +│ │ +│ Token usage: 750 total (500 input + 250 output) │ +│ Context window: 100% left (750 used / 272K) │ +│ Limits: data not available yet │ +╰───────────────────────────────────────────────────────────────╯ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap.new b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap.new new file mode 100644 index 0000000000..b79c7a4191 --- /dev/null +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap.new @@ -0,0 +1,25 @@ +--- +source: tui/src/status/tests.rs +assertion_line: 420 +expression: sanitized +--- +/status + +╭─────────────────────────────────────────────────────────────────────╮ +│ >_ OpenAI Codex (v0.0.0) │ +│ │ +│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ +│ information on rate limits and credits │ +│ │ +│ Model: arcticfox (reasoning none, summaries auto) │ +│ Directory: [[workspace]] │ +│ Approval: on-request │ +│ Sandbox: read-only │ +│ Agents.md: │ +│ │ +│ Token usage: 1.9K total (1K input + 900 output) │ +│ Context window: 100% left (2.25K used / 272K) │ +│ 5h limit: [██████░░░░░░░░░░░░░░] 28% left (resets 03:14) │ +│ Weekly limit: [████████████░░░░░░░░] 60% left (resets 03:34) │ +│ Warning: limits may be stale - start new turn to refresh. 
│ +╰─────────────────────────────────────────────────────────────────────╯ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap.new b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap.new new file mode 100644 index 0000000000..a462f5fb5a --- /dev/null +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap.new @@ -0,0 +1,23 @@ +--- +source: tui/src/status/tests.rs +assertion_line: 281 +expression: sanitized +--- +/status + +╭───────────────────────────────────────────────────────────────────╮ +│ >_ OpenAI Codex (v0.0.0) │ +│ │ +│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ +│ information on rate limits and credits │ +│ │ +│ Model: arcticfox (reasoning high, summaries detailed) │ +│ Directory: [[workspace]] │ +│ Approval: on-request │ +│ Sandbox: read-only │ +│ Agents.md: │ +│ │ +│ Token usage: 1.9K total (1K input + 900 output) │ +│ Context window: 100% left (2.25K used / 272K) │ +│ 5h limit: [██████░░░░░░░░░░░░░░] 28% left (resets 03:14) │ +╰───────────────────────────────────────────────────────────────────╯ From c74dd0f805ec1db469da8b924d1ce59a9c4289fe Mon Sep 17 00:00:00 2001 From: Ahmed Ibrahim Date: Wed, 19 Nov 2025 01:47:08 -0800 Subject: [PATCH 3/5] max is happy --- codex-rs/common/src/model_presets.rs | 12 +- codex-rs/tui/src/app.rs | 24 ++- codex-rs/tui/src/model_migration.rs | 166 ++++++++++++++++-- ...ration__tests__model_migration_prompt.snap | 17 +- ...s_snapshot_includes_monthly_limit.snap.new | 23 --- ...apshot_includes_reasoning_details.snap.new | 24 --- ...apshot_shows_empty_limits_message.snap.new | 23 --- ...shot_shows_missing_limits_message.snap.new | 23 --- ...apshot_shows_stale_limits_message.snap.new | 25 --- ...shot_truncates_in_narrow_terminal.snap.new | 23 --- 10 files changed, 188 insertions(+), 172 deletions(-) delete mode 100644 codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap.new delete mode 100644 codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap.new delete mode 100644 codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap.new delete mode 100644 codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap.new delete mode 100644 codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap.new delete mode 100644 codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap.new diff --git a/codex-rs/common/src/model_presets.rs b/codex-rs/common/src/model_presets.rs index c7ad7f8aec..2c98c00a7e 100644 --- a/codex-rs/common/src/model_presets.rs +++ b/codex-rs/common/src/model_presets.rs @@ -122,7 +122,11 @@ static PRESETS: Lazy> = Lazy::new(|| { }, ], is_default: false, - upgrade: None, + upgrade: Some(ModelUpgrade { + id: "gpt-5.1-codex-max", + reasoning_effort_mapping: None, + migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG, + }), show_in_picker: true, }, ModelPreset { @@ -146,7 +150,11 @@ static PRESETS: Lazy> = Lazy::new(|| { }, ], is_default: false, - upgrade: None, + upgrade: Some(ModelUpgrade { + id: "gpt-5.1-codex-max", + reasoning_effort_mapping: None, + migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG, 
+ }), show_in_picker: true, }, // Deprecated models. diff --git a/codex-rs/tui/src/app.rs b/codex-rs/tui/src/app.rs index 1f4aa7eb9a..7c86dd3b6e 100644 --- a/codex-rs/tui/src/app.rs +++ b/codex-rs/tui/src/app.rs @@ -172,6 +172,11 @@ async fn handle_model_migration_prompt_if_needed( effort: mapped_effort, }); } + ModelMigrationOutcome::Rejected => { + app_event_tx.send(AppEvent::PersistModelMigrationPromptAcknowledged { + migration_config: migration_config_key.to_string(), + }); + } ModelMigrationOutcome::Exit => { return Some(AppExitInfo { token_usage: TokenUsage::default(), @@ -643,19 +648,17 @@ impl App { .await { Ok(()) => { - let effort_label = effort - .map(|eff| format!(" with {eff} reasoning")) - .unwrap_or_else(|| " with default reasoning".to_string()); + let reasoning_label = Self::reasoning_label(effort); if let Some(profile) = profile { self.chat_widget.add_info_message( format!( - "Model changed to {model}{effort_label} for {profile} profile" + "Model changed to {model} {reasoning_label} for {profile} profile" ), None, ); } else { self.chat_widget.add_info_message( - format!("Model changed to {model}{effort_label}"), + format!("Model changed to {model} {reasoning_label}"), None, ); } @@ -823,6 +826,17 @@ impl App { Ok(true) } + fn reasoning_label(reasoning_effort: Option) -> &'static str { + match reasoning_effort { + Some(ReasoningEffortConfig::Minimal) => "minimal", + Some(ReasoningEffortConfig::Low) => "low", + Some(ReasoningEffortConfig::Medium) => "medium", + Some(ReasoningEffortConfig::High) => "high", + Some(ReasoningEffortConfig::XHigh) => "xhigh", + None | Some(ReasoningEffortConfig::None) => "default", + } + } + pub(crate) fn token_usage(&self) -> codex_core::protocol::TokenUsage { self.chat_widget.token_usage() } diff --git a/codex-rs/tui/src/model_migration.rs b/codex-rs/tui/src/model_migration.rs index 492fc43517..283007e028 100644 --- a/codex-rs/tui/src/model_migration.rs +++ b/codex-rs/tui/src/model_migration.rs @@ -1,7 +1,9 @@ +use crate::key_hint; use crate::render::Insets; use crate::render::renderable::ColumnRenderable; use crate::render::renderable::Renderable; use crate::render::renderable::RenderableExt as _; +use crate::selection_list::selection_option_row; use crate::tui::FrameRequester; use crate::tui::Tui; use crate::tui::TuiEvent; @@ -22,8 +24,10 @@ use ratatui::widgets::Wrap; use tokio_stream::StreamExt; /// Outcome of the migration prompt. 
+#[derive(Clone, Copy, Debug, PartialEq, Eq)] pub(crate) enum ModelMigrationOutcome { Accepted, + Rejected, Exit, } @@ -31,6 +35,26 @@ pub(crate) enum ModelMigrationOutcome { pub(crate) struct ModelMigrationCopy { pub heading: Vec>, pub content: Vec>, + pub can_opt_out: bool, +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +enum MigrationMenuOption { + TryNewModel, + UseExistingModel, +} + +impl MigrationMenuOption { + fn all() -> [Self; 2] { + [Self::TryNewModel, Self::UseExistingModel] + } + + fn label(self) -> &'static str { + match self { + Self::TryNewModel => "Try new model", + Self::UseExistingModel => "Use existing model", + } + } } pub(crate) fn migration_copy_for_config(migration_config_key: &str) -> ModelMigrationCopy { @@ -98,7 +122,8 @@ struct ModelMigrationScreen { request_frame: FrameRequester, copy: ModelMigrationCopy, done: bool, - should_exit: bool, + outcome: ModelMigrationOutcome, + highlighted_option: MigrationMenuOption, } impl ModelMigrationScreen { @@ -107,15 +132,47 @@ impl ModelMigrationScreen { request_frame, copy, done: false, - should_exit: false, + outcome: ModelMigrationOutcome::Accepted, + highlighted_option: MigrationMenuOption::TryNewModel, } } - fn accept(&mut self) { + fn finish_with(&mut self, outcome: ModelMigrationOutcome) { + self.outcome = outcome; self.done = true; self.request_frame.schedule_frame(); } + fn accept(&mut self) { + self.finish_with(ModelMigrationOutcome::Accepted); + } + + fn reject(&mut self) { + self.finish_with(ModelMigrationOutcome::Rejected); + } + + fn exit(&mut self) { + self.finish_with(ModelMigrationOutcome::Exit); + } + + fn confirm_selection(&mut self) { + if self.copy.can_opt_out { + match self.highlighted_option { + MigrationMenuOption::TryNewModel => self.accept(), + MigrationMenuOption::UseExistingModel => self.reject(), + } + } else { + self.accept(); + } + } + + fn highlight_option(&mut self, option: MigrationMenuOption) { + if self.highlighted_option != option { + self.highlighted_option = option; + self.request_frame.schedule_frame(); + } + } + fn handle_key(&mut self, key_event: KeyEvent) { if key_event.kind == KeyEventKind::Release { return; @@ -124,14 +181,36 @@ impl ModelMigrationScreen { if key_event.modifiers.contains(KeyModifiers::CONTROL) && matches!(key_event.code, KeyCode::Char('c') | KeyCode::Char('d')) { - self.should_exit = true; - self.done = true; - self.request_frame.schedule_frame(); + self.exit(); return; } - if matches!(key_event.code, KeyCode::Esc | KeyCode::Enter) { - self.accept(); + if !self.copy.can_opt_out { + if matches!(key_event.code, KeyCode::Esc | KeyCode::Enter) { + self.accept(); + } + return; + } + + match key_event.code { + KeyCode::Up | KeyCode::Char('k') => { + self.highlight_option(MigrationMenuOption::TryNewModel); + } + KeyCode::Down | KeyCode::Char('j') => { + self.highlight_option(MigrationMenuOption::UseExistingModel); + } + KeyCode::Char('1') => { + self.highlight_option(MigrationMenuOption::TryNewModel); + self.accept(); + } + KeyCode::Char('2') => { + self.highlight_option(MigrationMenuOption::UseExistingModel); + self.reject(); + } + KeyCode::Enter | KeyCode::Esc => { + self.confirm_selection(); + } + _ => {} } } @@ -140,11 +219,7 @@ impl ModelMigrationScreen { } fn outcome(&self) -> ModelMigrationOutcome { - if self.should_exit { - ModelMigrationOutcome::Exit - } else { - ModelMigrationOutcome::Accepted - } + self.outcome } } @@ -172,6 +247,38 @@ impl WidgetRef for &ModelMigrationScreen { ); } + if self.copy.can_opt_out { + column.push(Line::from("")); + 
column.push( + Paragraph::new("Choose how you'd like Codex to proceed.") + .wrap(Wrap { trim: false }) + .inset(Insets::tlbr(0, 2, 0, 0)), + ); + column.push(Line::from("")); + + for (idx, option) in MigrationMenuOption::all().into_iter().enumerate() { + column.push(selection_option_row( + idx, + option.label().to_string(), + self.highlighted_option == option, + )); + } + + column.push(Line::from("")); + column.push( + Line::from(vec![ + "Use ".dim(), + key_hint::plain(KeyCode::Up).into(), + "/".dim(), + key_hint::plain(KeyCode::Down).into(), + " to move, press ".dim(), + key_hint::plain(KeyCode::Enter).into(), + " to confirm".dim(), + ]) + .inset(Insets::tlbr(0, 2, 0, 0)), + ); + } + column.render(area, buf); } } @@ -181,18 +288,15 @@ fn gpt_5_1_codex_max_migration_copy() -> ModelMigrationCopy { heading: vec!["Codex just got an upgrade. Introducing gpt-5.1-codex-max".bold()], content: vec![ Line::from( - "Codex is now powered by gpt-5.1-codex-max, our new frontier agentic coding model built for long-running, project-scale work. It's faster, more capable, and more token-efficient than gpt-5.1-codex.", - ), - Line::from( - "You can continue using legacy models by specifying them directly with the -m option or in your config.toml.", + "Codex is now powered by gpt-5.1-codex-max, our latest frontier agentic coding model. It is smarter and faster than its predecessors and capable of long-running project-scale work.", ), Line::from(vec![ "Learn more at ".into(), "www.openai.com/index/gpt-5-1-codex-max".cyan().underlined(), ".".into(), ]), - Line::from(vec!["Press enter to continue".dim()]), ], + can_opt_out: true, } } @@ -213,6 +317,7 @@ fn gpt5_migration_copy() -> ModelMigrationCopy { ]), Line::from(vec!["Press enter to continue".dim()]), ], + can_opt_out: false, } } @@ -233,7 +338,7 @@ mod tests { #[test] fn prompt_snapshot() { let width: u16 = 60; - let height: u16 = 12; + let height: u16 = 20; let backend = VT100Backend::new(width, height); let mut terminal = Terminal::with_options(backend).expect("terminal"); terminal.set_viewport_area(Rect::new(0, 0, width, height)); @@ -325,4 +430,27 @@ mod tests { super::ModelMigrationOutcome::Accepted )); } + + #[test] + fn selecting_use_existing_model_rejects_upgrade() { + let mut screen = ModelMigrationScreen::new( + FrameRequester::test_dummy(), + gpt_5_1_codex_max_migration_copy(), + ); + + screen.handle_key(KeyEvent::new( + KeyCode::Down, + crossterm::event::KeyModifiers::NONE, + )); + screen.handle_key(KeyEvent::new( + KeyCode::Enter, + crossterm::event::KeyModifiers::NONE, + )); + + assert!(screen.is_done()); + assert!(matches!( + screen.outcome(), + super::ModelMigrationOutcome::Rejected + )); + } } diff --git a/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt.snap b/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt.snap index 11b5b01475..e13f19ec57 100644 --- a/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt.snap +++ b/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt.snap @@ -2,14 +2,21 @@ source: tui/src/model_migration.rs expression: terminal.backend() --- -> Introducing gpt-5.1-codex-max +> Codex just got an upgrade. Introducing gpt-5.1-codex-max - We've upgraded our family of models supported in Codex to - gpt-5.1-codex-max. + Codex is now powered by gpt-5.1-codex-max, our latest + frontier agentic coding model. 
It is smarter and faster + than its predecessors and capable of long-running + project-scale work. You can continue using legacy models by specifying them directly with the -m option or in your config.toml. - Learn more at www.openai.com/index/gpt-5.1-codex-max. + Learn more at www.openai.com/index/gpt-5-1-codex-max. - Press enter to continue + Choose how you'd like Codex to proceed. + +› 1. Try new model + 2. Use existing model + + Use ↑/↓ to move, press enter to confirm diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap.new b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap.new deleted file mode 100644 index 9296c1f201..0000000000 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap.new +++ /dev/null @@ -1,23 +0,0 @@ ---- -source: tui/src/status/tests.rs -assertion_line: 190 -expression: sanitized ---- -/status - -╭────────────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ -│ │ -│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ -│ information on rate limits and credits │ -│ │ -│ Model: arcticfox (reasoning none, summaries auto) │ -│ Directory: [[workspace]] │ -│ Approval: on-request │ -│ Sandbox: read-only │ -│ Agents.md: │ -│ │ -│ Token usage: 1.2K total (800 input + 400 output) │ -│ Context window: 100% left (1.2K used / 272K) │ -│ Monthly limit: [██████████████████░░] 88% left (resets 07:08 on 7 May) │ -╰────────────────────────────────────────────────────────────────────────────╯ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap.new b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap.new deleted file mode 100644 index ba706be84d..0000000000 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap.new +++ /dev/null @@ -1,24 +0,0 @@ ---- -source: tui/src/status/tests.rs -assertion_line: 140 -expression: sanitized ---- -/status - -╭───────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ -│ │ -│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ -│ information on rate limits and credits │ -│ │ -│ Model: arcticfox (reasoning high, summaries detailed) │ -│ Directory: [[workspace]] │ -│ Approval: on-request │ -│ Sandbox: workspace-write │ -│ Agents.md: │ -│ │ -│ Token usage: 1.9K total (1K input + 900 output) │ -│ Context window: 100% left (2.25K used / 272K) │ -│ 5h limit: [██████░░░░░░░░░░░░░░] 28% left (resets 03:14) │ -│ Weekly limit: [███████████░░░░░░░░░] 55% left (resets 03:24) │ -╰───────────────────────────────────────────────────────────────────╯ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap.new b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap.new deleted file mode 100644 index cb92f872c3..0000000000 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap.new +++ /dev/null @@ -1,23 +0,0 @@ ---- -source: tui/src/status/tests.rs -assertion_line: 366 -expression: sanitized ---- -/status - -╭───────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ -│ │ -│ Visit 
https://chatgpt.com/codex/settings/usage for up-to-date │ -│ information on rate limits and credits │ -│ │ -│ Model: arcticfox (reasoning none, summaries auto) │ -│ Directory: [[workspace]] │ -│ Approval: on-request │ -│ Sandbox: read-only │ -│ Agents.md: │ -│ │ -│ Token usage: 750 total (500 input + 250 output) │ -│ Context window: 100% left (750 used / 272K) │ -│ Limits: data not available yet │ -╰───────────────────────────────────────────────────────────────╯ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap.new b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap.new deleted file mode 100644 index 180a4d8d0c..0000000000 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap.new +++ /dev/null @@ -1,23 +0,0 @@ ---- -source: tui/src/status/tests.rs -assertion_line: 321 -expression: sanitized ---- -/status - -╭───────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ -│ │ -│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ -│ information on rate limits and credits │ -│ │ -│ Model: arcticfox (reasoning none, summaries auto) │ -│ Directory: [[workspace]] │ -│ Approval: on-request │ -│ Sandbox: read-only │ -│ Agents.md: │ -│ │ -│ Token usage: 750 total (500 input + 250 output) │ -│ Context window: 100% left (750 used / 272K) │ -│ Limits: data not available yet │ -╰───────────────────────────────────────────────────────────────╯ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap.new b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap.new deleted file mode 100644 index b79c7a4191..0000000000 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap.new +++ /dev/null @@ -1,25 +0,0 @@ ---- -source: tui/src/status/tests.rs -assertion_line: 420 -expression: sanitized ---- -/status - -╭─────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ -│ │ -│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ -│ information on rate limits and credits │ -│ │ -│ Model: arcticfox (reasoning none, summaries auto) │ -│ Directory: [[workspace]] │ -│ Approval: on-request │ -│ Sandbox: read-only │ -│ Agents.md: │ -│ │ -│ Token usage: 1.9K total (1K input + 900 output) │ -│ Context window: 100% left (2.25K used / 272K) │ -│ 5h limit: [██████░░░░░░░░░░░░░░] 28% left (resets 03:14) │ -│ Weekly limit: [████████████░░░░░░░░] 60% left (resets 03:34) │ -│ Warning: limits may be stale - start new turn to refresh. 
│ -╰─────────────────────────────────────────────────────────────────────╯ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap.new b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap.new deleted file mode 100644 index a462f5fb5a..0000000000 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap.new +++ /dev/null @@ -1,23 +0,0 @@ ---- -source: tui/src/status/tests.rs -assertion_line: 281 -expression: sanitized ---- -/status - -╭───────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ -│ │ -│ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ -│ information on rate limits and credits │ -│ │ -│ Model: arcticfox (reasoning high, summaries detailed) │ -│ Directory: [[workspace]] │ -│ Approval: on-request │ -│ Sandbox: read-only │ -│ Agents.md: │ -│ │ -│ Token usage: 1.9K total (1K input + 900 output) │ -│ Context window: 100% left (2.25K used / 272K) │ -│ 5h limit: [██████░░░░░░░░░░░░░░] 28% left (resets 03:14) │ -╰───────────────────────────────────────────────────────────────────╯ From 8c6e2d6c0fe6115bd08b88757295267984163e14 Mon Sep 17 00:00:00 2001 From: Ahmed Ibrahim Date: Wed, 19 Nov 2025 01:52:10 -0800 Subject: [PATCH 4/5] max progress --- codex-rs/common/src/model_presets.rs | 9 +++------ ...__model_migration__tests__model_migration_prompt.snap | 3 --- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/codex-rs/common/src/model_presets.rs b/codex-rs/common/src/model_presets.rs index 2c98c00a7e..a031f23b1d 100644 --- a/codex-rs/common/src/model_presets.rs +++ b/codex-rs/common/src/model_presets.rs @@ -236,12 +236,9 @@ static PRESETS: Lazy> = Lazy::new(|| { ], is_default: false, upgrade: Some(ModelUpgrade { - id: "gpt-5.1", - reasoning_effort_mapping: Some(HashMap::from([( - ReasoningEffort::Minimal, - ReasoningEffort::Low, - )])), - migration_config_key: HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG, + id: "gpt-5.1-codex-max", + reasoning_effort_mapping: None, + migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG, }), show_in_picker: false, }, diff --git a/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt.snap b/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt.snap index e13f19ec57..5b3136803f 100644 --- a/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt.snap +++ b/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt.snap @@ -9,9 +9,6 @@ expression: terminal.backend() than its predecessors and capable of long-running project-scale work. - You can continue using legacy models by specifying them - directly with the -m option or in your config.toml. - Learn more at www.openai.com/index/gpt-5-1-codex-max. Choose how you'd like Codex to proceed. 
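Selecting "Use existing model" in the prompt above resolves to `ModelMigrationOutcome::Rejected`, which only persists the hide-prompt acknowledgement, so a model pinned via `-m` or `config.toml` keeps working. A minimal sketch of that opt-out (the pinned model name is illustrative):

```toml
# Keep a legacy model instead of switching to the gpt-5.1-codex-max default.
model = "gpt-5.1-codex"  # equivalently, pass `-m gpt-5.1-codex` on the CLI
```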
From 99c00150eb06b26f23d4132bb485d3f3a753c48d Mon Sep 17 00:00:00 2001 From: jif-oai Date: Wed, 19 Nov 2025 16:17:26 +0000 Subject: [PATCH 5/5] Prettier --- docs/config.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/config.md b/docs/config.md index a1879effa1..f47bf34643 100644 --- a/docs/config.md +++ b/docs/config.md @@ -938,7 +938,7 @@ Valid values: | Key | Type / Values | Notes | | ------------------------------------------------ | ----------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------- | -| `model` | string | Model to use (e.g., `gpt-5.1-codex-max`). | +| `model` | string | Model to use (e.g., `gpt-5.1-codex-max`). | | `model_provider` | string | Provider id from `model_providers` (default: `openai`). | | `model_context_window` | number | Context window tokens. | | `model_max_output_tokens` | number | Max output tokens. |
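For reference, the keys in the table above combine as in the following sketch (the values are illustrative; context window and max output tokens normally default to auto for the model):

```toml
# Hypothetical excerpt combining keys from the reference table above.
model = "gpt-5.1-codex-max"
model_provider = "openai"        # id from [model_providers]
model_context_window = 272000    # tokens; illustrative override
model_max_output_tokens = 8192   # tokens; illustrative override
```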