12 changes: 7 additions & 5 deletions codex-rs/app-server/tests/suite/v2/model_list.rs
@@ -117,10 +117,12 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
             is_default: false,
         },
         Model {
-            id: "robin".to_string(),
-            model: "robin".to_string(),
-            display_name: "robin".to_string(),
-            description: "Robin".to_string(),
+            id: "gpt-5.2".to_string(),
+            model: "gpt-5.2".to_string(),
+            display_name: "gpt-5.2".to_string(),
+            description:
+                "Latest frontier model with improvements across knowledge, reasoning and coding"
+                    .to_string(),
             supported_reasoning_efforts: vec![
                 ReasoningEffortOption {
                     reasoning_effort: ReasoningEffort::Low,
@@ -274,7 +276,7 @@ async fn list_models_pagination_works() -> Result<()> {
     } = to_response::<ModelListResponse>(fourth_response)?;
 
     assert_eq!(fourth_items.len(), 1);
-    assert_eq!(fourth_items[0].id, "robin");
+    assert_eq!(fourth_items[0].id, "gpt-5.2");
     let fifth_cursor = fourth_cursor.ok_or_else(|| anyhow!("cursor for fifth page"))?;
 
     let fifth_request = mcp
370 changes: 370 additions & 0 deletions codex-rs/core/gpt_5_2_prompt.md

Large diffs are not rendered by default.

5 changes: 3 additions & 2 deletions codex-rs/core/src/openai_models/model_family.rs
@@ -14,6 +14,7 @@ const BASE_INSTRUCTIONS: &str = include_str!("../../prompt.md");
 
 const GPT_5_CODEX_INSTRUCTIONS: &str = include_str!("../../gpt_5_codex_prompt.md");
 const GPT_5_1_INSTRUCTIONS: &str = include_str!("../../gpt_5_1_prompt.md");
+const GPT_5_2_INSTRUCTIONS: &str = include_str!("../../gpt_5_2_prompt.md");
 const GPT_5_1_CODEX_MAX_INSTRUCTIONS: &str = include_str!("../../gpt-5.1-codex-max_prompt.md");
 pub(crate) const CONTEXT_WINDOW_272K: i64 = 272_000;
 
@@ -284,14 +285,14 @@ pub fn find_family_for_model(slug: &str) -> ModelFamily {
             truncation_policy: TruncationPolicy::Tokens(10_000),
             context_window: Some(CONTEXT_WINDOW_272K),
         )
-    } else if slug.starts_with("robin") {
+    } else if slug.starts_with("gpt-5.2") {
         model_family!(
             slug, slug,
             supports_reasoning_summaries: true,
             apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
             support_verbosity: true,
             default_verbosity: Some(Verbosity::Low),
-            base_instructions: GPT_5_1_INSTRUCTIONS.to_string(),
+            base_instructions: GPT_5_2_INSTRUCTIONS.to_string(),
             default_reasoning_effort: Some(ReasoningEffort::Medium),
             truncation_policy: TruncationPolicy::Bytes(10_000),
             shell_type: ConfigShellToolType::ShellCommand,
8 changes: 4 additions & 4 deletions codex-rs/core/src/openai_models/model_presets.rs
@@ -94,10 +94,10 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
         show_in_picker: true,
     },
     ModelPreset {
-        id: "robin".to_string(),
-        model: "robin".to_string(),
-        display_name: "robin".to_string(),
-        description: "Robin".to_string(),
+        id: "gpt-5.2".to_string(),
+        model: "gpt-5.2".to_string(),
+        display_name: "gpt-5.2".to_string(),
+        description: "Latest frontier model with improvements across knowledge, reasoning and coding".to_string(),
         default_reasoning_effort: ReasoningEffort::Medium,
         supported_reasoning_efforts: vec![
             ReasoningEffortPreset {
16 changes: 9 additions & 7 deletions codex-rs/core/tests/suite/list_models.rs
@@ -46,7 +46,7 @@ fn expected_models_for_api_key() -> Vec<ModelPreset> {
         gpt_5_1_codex_max(),
         gpt_5_1_codex(),
         gpt_5_1_codex_mini(),
-        robin(),
+        gpt_5_2(),
         gpt_5_1(),
     ]
 }
@@ -56,7 +56,7 @@ fn expected_models_for_chatgpt() -> Vec<ModelPreset> {
         gpt_5_1_codex_max(),
         gpt_5_1_codex(),
         gpt_5_1_codex_mini(),
-        robin(),
+        gpt_5_2(),
         gpt_5_1(),
     ]
 }
@@ -142,12 +142,14 @@ fn gpt_5_1_codex_mini() -> ModelPreset {
     }
 }
 
-fn robin() -> ModelPreset {
+fn gpt_5_2() -> ModelPreset {
     ModelPreset {
-        id: "robin".to_string(),
-        model: "robin".to_string(),
-        display_name: "robin".to_string(),
-        description: "Robin".to_string(),
+        id: "gpt-5.2".to_string(),
+        model: "gpt-5.2".to_string(),
+        display_name: "gpt-5.2".to_string(),
+        description:
+            "Latest frontier model with improvements across knowledge, reasoning and coding"
+                .to_string(),
         default_reasoning_effort: ReasoningEffort::Medium,
         supported_reasoning_efforts: vec![
             effort(
(TUI model-picker snapshot; file name not captured in this view)
@@ -10,7 +10,8 @@ expression: popup
 2. gpt-5.1-codex        Optimized for codex.
 3. gpt-5.1-codex-mini   Optimized for codex. Cheaper, faster, but
                         less capable.
-4. robin                Robin
+4. gpt-5.2              Latest frontier model with improvements
+                        across knowledge, reasoning and coding
 5. gpt-5.1              Broad world knowledge with strong general
                         reasoning.
 
(TUI model-picker snapshot at a wider width; file name not captured in this view)
@@ -10,7 +10,8 @@ expression: popup
 2. gpt-5.1-codex        Optimized for codex.
 3. gpt-5.1-codex-mini   Optimized for codex. Cheaper, faster, but less
                         capable.
-4. robin                Robin
+4. gpt-5.2              Latest frontier model with improvements across
+                        knowledge, reasoning and coding
 5. gpt-5.1              Broad world knowledge with strong general reasoning.
 
 Press enter to select reasoning effort, or esc to dismiss.