4 changes: 2 additions & 2 deletions codex-rs/app-server-protocol/src/protocol/common.rs
@@ -528,7 +528,7 @@ mod tests {
let request = ClientRequest::NewConversation {
request_id: RequestId::Integer(42),
params: v1::NewConversationParams {
model: Some("arcticfox".to_string()),
model: Some("gpt-5.1-codex-max".to_string()),
model_provider: None,
profile: None,
cwd: None,
@@ -546,7 +546,7 @@
"method": "newConversation",
"id": 42,
"params": {
"model": "arcticfox",
"model": "gpt-5.1-codex-max",
"modelProvider": null,
"profile": null,
"cwd": null,
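The expected JSON above uses camelCase keys ("modelProvider") for snake_case Rust fields (model_provider), which matches a serde rename on the params struct. A minimal sketch of that serialization pattern, assuming serde/serde_json and a pared-down field set (this is not the full NewConversationParams):

use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct NewConversationParams {
    model: Option<String>,
    model_provider: Option<String>, // serialized as "modelProvider"
}

fn main() {
    let params = NewConversationParams {
        model: Some("gpt-5.1-codex-max".to_string()),
        model_provider: None,
    };
    // Prints: {"model":"gpt-5.1-codex-max","modelProvider":null}
    println!("{}", serde_json::to_string(&params).unwrap());
}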
4 changes: 2 additions & 2 deletions codex-rs/app-server/tests/suite/config.rs
@@ -27,7 +27,7 @@ fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
std::fs::write(
config_toml,
r#"
model = "arcticfox"
model = "gpt-5.1-codex-max"
approval_policy = "on-request"
sandbox_mode = "workspace-write"
model_reasoning_summary = "detailed"
@@ -87,7 +87,7 @@ async fn get_config_toml_parses_all_fields() -> Result<()> {
}),
forced_chatgpt_workspace_id: Some("12345678-0000-0000-0000-000000000000".into()),
forced_login_method: Some(ForcedLoginMethod::Chatgpt),
model: Some("arcticfox".into()),
model: Some("gpt-5.1-codex-max".into()),
model_reasoning_effort: Some(ReasoningEffort::High),
model_reasoning_summary: Some(ReasoningSummary::Detailed),
model_verbosity: Some(Verbosity::Medium),
2 changes: 1 addition & 1 deletion codex-rs/app-server/tests/suite/set_default_model.rs
@@ -57,7 +57,7 @@ fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
std::fs::write(
config_toml,
r#"
model = "arcticfox"
model = "gpt-5.1-codex-max"
model_reasoning_effort = "medium"
"#,
)
8 changes: 4 additions & 4 deletions codex-rs/app-server/tests/suite/v2/model_list.rs
@@ -46,9 +46,9 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {

let expected_models = vec![
Model {
id: "arcticfox".to_string(),
model: "arcticfox".to_string(),
display_name: "arcticfox".to_string(),
id: "gpt-5.1-codex-max".to_string(),
model: "gpt-5.1-codex-max".to_string(),
display_name: "gpt-5.1-codex-max".to_string(),
description: "Latest Codex-optimized flagship for deep and fast reasoning.".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
@@ -174,7 +174,7 @@ async fn list_models_pagination_works() -> Result<()> {
} = to_response::<ModelListResponse>(first_response)?;

assert_eq!(first_items.len(), 1);
assert_eq!(first_items[0].id, "arcticfox");
assert_eq!(first_items[0].id, "gpt-5.1-codex-max");
let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?;

let second_request = mcp
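The pagination flow above hinges on the cursor round-trip: the first page returns one model plus a cursor, and sending that cursor back fetches the next page. A sketch of the general mechanism, with hypothetical field names (limit, cursor, next_cursor) standing in for the real v2 protocol types:

// Hypothetical shapes; the actual v2 types live in the app-server protocol.
struct ModelListParams {
    limit: Option<usize>,
    cursor: Option<String>,
}

struct ModelListPage {
    items: Vec<String>, // model ids, for brevity
    next_cursor: Option<String>,
}

fn list_models(all: &[String], params: &ModelListParams) -> ModelListPage {
    // Decode the cursor as a plain offset; a real server would use an opaque token.
    let start: usize = params
        .cursor
        .as_deref()
        .and_then(|c| c.parse().ok())
        .unwrap_or(0)
        .min(all.len());
    let end = (start + params.limit.unwrap_or(all.len())).min(all.len());
    ModelListPage {
        items: all[start..end].to_vec(),
        next_cursor: (end < all.len()).then(|| end.to_string()),
    }
}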
6 changes: 3 additions & 3 deletions codex-rs/app-server/tests/suite/v2/thread_resume.rs
@@ -27,7 +27,7 @@ async fn thread_resume_returns_original_thread() -> Result<()> {
// Start a thread.
let start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("arcticfox".to_string()),
model: Some("gpt-5.1-codex-max".to_string()),
..Default::default()
})
.await?;
@@ -69,7 +69,7 @@ async fn thread_resume_prefers_path_over_thread_id() -> Result<()> {

let start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("arcticfox".to_string()),
model: Some("gpt-5.1-codex-max".to_string()),
..Default::default()
})
.await?;
@@ -114,7 +114,7 @@ async fn thread_resume_supports_history_and_overrides() -> Result<()> {
// Start a thread.
let start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("arcticfox".to_string()),
model: Some("gpt-5.1-codex-max".to_string()),
..Default::default()
})
.await?;
48 changes: 29 additions & 19 deletions codex-rs/common/src/model_presets.rs
@@ -5,7 +5,8 @@ use codex_core::protocol_config_types::ReasoningEffort;
use once_cell::sync::Lazy;

pub const HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG: &str = "hide_gpt5_1_migration_prompt";
- pub const HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG: &str = "hide_arcticfox_migration_prompt";
+ pub const HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG: &str =
+     "hide_gpt-5.1-codex-max_migration_prompt";

/// A reasoning effort option that can be surfaced for a model.
#[derive(Debug, Clone, Copy)]
@@ -49,9 +49,9 @@ pub struct ModelPreset {
static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
vec![
ModelPreset {
id: "arcticfox",
model: "arcticfox",
display_name: "arcticfox",
id: "gpt-5.1-codex-max",
model: "gpt-5.1-codex-max",
display_name: "gpt-5.1-codex-max",
description: "Latest Codex-optimized flagship for deep and fast reasoning.",
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: &[
@@ -98,9 +98,9 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "arcticfox",
id: "gpt-5.1-codex-max",
reasoning_effort_mapping: None,
- migration_config_key: HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG,
+ migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
}),
show_in_picker: true,
},
Expand All @@ -121,7 +122,11 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
},
],
is_default: false,
- upgrade: None,
+ upgrade: Some(ModelUpgrade {
+     id: "gpt-5.1-codex-max",
+     reasoning_effort_mapping: None,
+     migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
+ }),
show_in_picker: true,
},
ModelPreset {
Expand All @@ -145,7 +150,11 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
},
],
is_default: false,
- upgrade: None,
+ upgrade: Some(ModelUpgrade {
+     id: "gpt-5.1-codex-max",
+     reasoning_effort_mapping: None,
+     migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
+ }),
show_in_picker: true,
},
// Deprecated models.
Expand All @@ -171,9 +180,9 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "arcticfox",
id: "gpt-5.1-codex-max",
reasoning_effort_mapping: None,
- migration_config_key: HIDE_ARCTICFOX_MIGRATION_PROMPT_CONFIG,
+ migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
}),
show_in_picker: false,
},
@@ -227,12 +236,9 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "gpt-5.1",
reasoning_effort_mapping: Some(HashMap::from([(
ReasoningEffort::Minimal,
ReasoningEffort::Low,
)])),
migration_config_key: HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG,
id: "gpt-5.1-codex-max",
reasoning_effort_mapping: None,
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG,
}),
show_in_picker: false,
},
Expand All @@ -243,7 +249,7 @@ pub fn builtin_model_presets(auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
PRESETS
.iter()
.filter(|preset| match auth_mode {
Some(AuthMode::ApiKey) => preset.show_in_picker && preset.id != "arcticfox",
Some(AuthMode::ApiKey) => preset.show_in_picker && preset.id != "gpt-5.1-codex-max",
_ => preset.show_in_picker,
})
.cloned()
Expand All @@ -266,8 +272,12 @@ mod tests {
}

#[test]
- fn arcticfox_hidden_for_api_key_auth() {
+ fn gpt_5_1_codex_max_hidden_for_api_key_auth() {
let presets = builtin_model_presets(Some(AuthMode::ApiKey));
- assert!(presets.iter().all(|preset| preset.id != "arcticfox"));
+ assert!(
+     presets
+         .iter()
+         .all(|preset| preset.id != "gpt-5.1-codex-max")
+ );
}
}
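With this change, every preset that upgrades now points at the same target and shares one prompt-suppression key. A sketch of how a caller might combine the upgrade metadata with the stored notice flags, using simplified stand-ins for the real types (pending_upgrade is a hypothetical helper, not an existing API):

use std::collections::HashMap;

// Simplified stand-in for the ModelUpgrade in model_presets.rs.
struct ModelUpgrade {
    id: &'static str,
    migration_config_key: &'static str,
}

// Return the upgrade to offer, unless the user already dismissed the
// prompt (recorded under the upgrade's migration config key).
fn pending_upgrade<'a>(
    upgrade: Option<&'a ModelUpgrade>,
    hidden_prompts: &HashMap<String, bool>,
) -> Option<&'a ModelUpgrade> {
    upgrade.filter(|u| {
        !hidden_prompts
            .get(u.migration_config_key)
            .copied()
            .unwrap_or(false)
    })
}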
2 changes: 1 addition & 1 deletion codex-rs/core/src/client_common.rs
@@ -431,7 +431,7 @@ mod tests {
expects_apply_patch_instructions: false,
},
InstructionsTestCase {
slug: "arcticfox",
slug: "gpt-5.1-codex-max",
expects_apply_patch_instructions: false,
},
];
6 changes: 3 additions & 3 deletions codex-rs/core/src/config/edit.rs
@@ -846,7 +846,7 @@ hide_gpt5_1_migration_prompt = true
}

#[test]
- fn blocking_set_hide_arcticfox_migration_prompt_preserves_table() {
+ fn blocking_set_hide_gpt_5_1_codex_max_migration_prompt_preserves_table() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path();
std::fs::write(
Expand All @@ -860,7 +860,7 @@ existing = "value"
codex_home,
None,
&[ConfigEdit::SetNoticeHideModelMigrationPrompt(
"hide_arcticfox_migration_prompt".to_string(),
"hide_gpt-5.1-codex-max_migration_prompt".to_string(),
true,
)],
)
Expand All @@ -870,7 +870,7 @@ existing = "value"
std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
let expected = r#"[notice]
existing = "value"
- hide_arcticfox_migration_prompt = true
+ "hide_gpt-5.1-codex-max_migration_prompt" = true
"#;
assert_eq!(contents, expected);
}
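The expected TOML quotes the new key because bare keys may contain only ASCII letters, digits, underscores, and hyphens; the dots in hide_gpt-5.1-codex-max_migration_prompt would otherwise be parsed as a dotted-key path. A sketch of the quoting rule as a standalone helper, not the TOML library codex actually uses:

// Quote a key unless every character is bare-key safe (A-Za-z0-9_-).
// Note: a full implementation would also escape quotes and backslashes.
fn toml_key(key: &str) -> String {
    let bare = !key.is_empty()
        && key
            .chars()
            .all(|c| c.is_ascii_alphanumeric() || c == '_' || c == '-');
    if bare { key.to_string() } else { format!("\"{key}\"") }
}

fn main() {
    assert_eq!(toml_key("hide_gpt5_1_migration_prompt"), "hide_gpt5_1_migration_prompt");
    assert_eq!(
        toml_key("hide_gpt-5.1-codex-max_migration_prompt"),
        "\"hide_gpt-5.1-codex-max_migration_prompt\""
    );
}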
10 changes: 5 additions & 5 deletions codex-rs/core/src/config/mod.rs
@@ -62,11 +62,11 @@ pub mod profile;
pub mod types;

#[cfg(target_os = "windows")]
- pub const OPENAI_DEFAULT_MODEL: &str = "arcticfox";
+ pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5.1-codex-max";
#[cfg(not(target_os = "windows"))]
- pub const OPENAI_DEFAULT_MODEL: &str = "arcticfox";
- const OPENAI_DEFAULT_REVIEW_MODEL: &str = "arcticfox";
- pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "arcticfox";
+ pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5.1-codex-max";
+ const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5.1-codex-max";
+ pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "gpt-5.1-codex-max";

/// Maximum number of bytes of the documentation that will be embedded. Larger
/// files are *silently truncated* to this size so we do not take up too much of
Expand All @@ -81,7 +81,7 @@ pub struct Config {
/// Optional override of model selection.
pub model: String,

/// Model used specifically for review sessions. Defaults to "arcticfox".
/// Model used specifically for review sessions. Defaults to "gpt-5.1-codex-max".
pub review_model: String,

pub model_family: ModelFamily,
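The doc comment above describes a byte cap on embedded project documentation, with oversized files silently truncated. A minimal sketch of that kind of truncation, assuming a placeholder limit (the real constant name and value are not shown in this diff):

const PROJECT_DOC_MAX_BYTES: usize = 32 * 1024; // placeholder, not the real cap

// Truncate to the byte cap without splitting a UTF-8 code point.
fn truncate_doc(doc: &str) -> &str {
    if doc.len() <= PROJECT_DOC_MAX_BYTES {
        return doc;
    }
    let mut end = PROJECT_DOC_MAX_BYTES;
    while !doc.is_char_boundary(end) {
        end -= 1;
    }
    &doc[..end]
}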
4 changes: 2 additions & 2 deletions codex-rs/core/src/config/types.rs
@@ -378,8 +378,8 @@ pub struct Notice {
pub hide_rate_limit_model_nudge: Option<bool>,
/// Tracks whether the user has seen the model migration prompt
pub hide_gpt5_1_migration_prompt: Option<bool>,
- /// Tracks whether the user has seen the arcticfox migration prompt
- pub hide_arcticfox_migration_prompt: Option<bool>,
+ /// Tracks whether the user has seen the gpt-5.1-codex-max migration prompt
+ pub hide_gpt_5_1_codex_max_migration_prompt: Option<bool>,
}

impl Notice {
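Each notice flag is an Option<bool> so a key that was never written to config.toml stays distinguishable from an explicit false. A sketch of gating on the new flag, with a simplified Notice and a hypothetical method name:

// Simplified stand-in for the Notice in config/types.rs.
struct Notice {
    hide_gpt_5_1_codex_max_migration_prompt: Option<bool>,
}

impl Notice {
    // A missing key counts the same as "not yet dismissed".
    fn should_show_codex_max_migration_prompt(&self) -> bool {
        !self.hide_gpt_5_1_codex_max_migration_prompt.unwrap_or(false)
    }
}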
8 changes: 4 additions & 4 deletions codex-rs/core/src/model_family.rs
@@ -12,7 +12,7 @@ const BASE_INSTRUCTIONS: &str = include_str!("../prompt.md");

const GPT_5_CODEX_INSTRUCTIONS: &str = include_str!("../gpt_5_codex_prompt.md");
const GPT_5_1_INSTRUCTIONS: &str = include_str!("../gpt_5_1_prompt.md");
- const ARCTICFOX_INSTRUCTIONS: &str = include_str!("../arcticfox_prompt.md");
+ const GPT_5_1_CODEX_MAX_INSTRUCTIONS: &str = include_str!("../gpt-5.1-codex-max_prompt.md");

/// A model family is a group of models that share certain characteristics.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
@@ -173,12 +173,12 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
support_verbosity: true,
truncation_policy: TruncationPolicy::Tokens(10_000),
)
} else if slug.starts_with("arcticfox") {
} else if slug.starts_with("gpt-5.1-codex-max") {
model_family!(
slug, slug,
supports_reasoning_summaries: true,
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
- base_instructions: ARCTICFOX_INSTRUCTIONS.to_string(),
+ base_instructions: GPT_5_1_CODEX_MAX_INSTRUCTIONS.to_string(),
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
shell_type: ConfigShellToolType::ShellCommand,
supports_parallel_tool_calls: true,
Expand All @@ -202,7 +202,7 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
support_verbosity: false,
truncation_policy: TruncationPolicy::Tokens(10_000),
)
} else if slug.starts_with("arcticfox") {
} else if slug.starts_with("gpt-5.1-codex-max") {
model_family!(
slug, slug,
supports_reasoning_summaries: true,
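Since find_family_for_model matches by prefix, the "gpt-5.1-codex-max" arm must come before any broader "gpt-5.1-codex" arm; the shorter prefix also matches the new slug and would shadow it. A minimal illustration of that ordering constraint (the family labels here are illustrative):

// Longest prefix first: "gpt-5.1-codex-max" also starts with "gpt-5.1-codex".
fn family_for(slug: &str) -> &'static str {
    if slug.starts_with("gpt-5.1-codex-max") {
        "codex-max"
    } else if slug.starts_with("gpt-5.1-codex") {
        "codex"
    } else {
        "other"
    }
}

fn main() {
    assert_eq!(family_for("gpt-5.1-codex-max"), "codex-max");
    assert_eq!(family_for("gpt-5.1-codex-mini"), "codex");
}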
2 changes: 1 addition & 1 deletion codex-rs/core/src/openai_model_info.rs
@@ -72,7 +72,7 @@ pub(crate) fn get_model_info(model_family: &ModelFamily) -> Option<ModelInfo> {

_ if slug.starts_with("gpt-5-codex")
|| slug.starts_with("gpt-5.1-codex")
|| slug.starts_with("arcticfox") =>
|| slug.starts_with("gpt-5.1-codex-max") =>
{
Some(ModelInfo::new(CONTEXT_WINDOW_272K, MAX_OUTPUT_TOKENS_128K))
}
2 changes: 1 addition & 1 deletion codex-rs/core/tests/suite/client.rs
@@ -1155,7 +1155,7 @@ async fn token_count_includes_rate_limits_snapshot() {
"reasoning_output_tokens": 0,
"total_tokens": 123
},
- // Default model is arcticfox in tests → 95% usable context window
+ // Default model is gpt-5.1-codex-max in tests → 95% usable context window
"model_context_window": 258400
},
"rate_limits": {
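The asserted model_context_window follows from the comment above: the codex-max family gets the 272K window from openai_model_info.rs, and 95% of it is usable, so 272_000 * 95 / 100 = 258_400. A quick check, assuming the constant's value is 272_000 (consistent with its name and with the asserted value):

const CONTEXT_WINDOW_272K: u64 = 272_000;

fn main() {
    // 95% of the raw window is the usable context in this test setup.
    let usable = CONTEXT_WINDOW_272K * 95 / 100;
    assert_eq!(usable, 258_400);
}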