Merged

Commits (25)
4201b1e  Update defaults to gpt-5.1 (aibrahim-oai, Nov 14, 2025)
b26fb5c  bug (aibrahim-oai, Nov 14, 2025)
94a3753  bug (aibrahim-oai, Nov 14, 2025)
2004353  bug (aibrahim-oai, Nov 14, 2025)
53a2dc8  bug (aibrahim-oai, Nov 14, 2025)
d342090  bug (aibrahim-oai, Nov 14, 2025)
0ff2421  snapshots (aibrahim-oai, Nov 14, 2025)
ac878a0  Adjust model families and tests for gpt-5 vs gpt-5.1 (aibrahim-oai, Nov 14, 2025)
894495b  Add parallel approvals and unified-exec tests for gpt-5.1 (aibrahim-oai, Nov 14, 2025)
367590d  Widen narrow status snapshot width and regenerate (aibrahim-oai, Nov 14, 2025)
b9022dd  Further widen narrow status snapshot width (aibrahim-oai, Nov 14, 2025)
34815c9  Update narrow status snapshot content for width 70 (aibrahim-oai, Nov 14, 2025)
c43e612  Align approvals scenarios for gpt-5 vs gpt-5.1 (aibrahim-oai, Nov 14, 2025)
0b66d1e  fix (aibrahim-oai, Nov 14, 2025)
571bc5d  tests (aibrahim-oai, Nov 14, 2025)
9cbbdd5  Merge branch 'main' into codex/update-models-to-gpt-5.1-and-gpt-5.1-c… (aibrahim-oai, Nov 14, 2025)
c474991  codex/update-models-to-gpt-5.1-and-gpt-5.1-codex (aibrahim-oai, Nov 15, 2025)
e550fbe  Merge branch 'codex/update-models-to-gpt-5.1-and-gpt-5.1-codex' of ht… (aibrahim-oai, Nov 15, 2025)
d82ca66  lint (aibrahim-oai, Nov 15, 2025)
25a9909  windows (aibrahim-oai, Nov 15, 2025)
465ae99  Merge branch 'main' into codex/update-models-to-gpt-5.1-and-gpt-5.1-c… (aibrahim-oai, Nov 16, 2025)
27e7040  Merge branch 'main' into codex/update-models-to-gpt-5.1-and-gpt-5.1-c… (aibrahim-oai, Nov 17, 2025)
e20664e  tests (aibrahim-oai, Nov 17, 2025)
5bfbec4  Merge branch 'codex/update-models-to-gpt-5.1-and-gpt-5.1-codex' of ht… (aibrahim-oai, Nov 17, 2025)
0fb3371  lint (aibrahim-oai, Nov 17, 2025)
.github/codex/home/config.toml (1 addition, 1 deletion)
@@ -1,3 +1,3 @@
-model = "gpt-5"
+model = "gpt-5.1"
 
 # Consider setting [mcp_servers] here!
.github/workflows/issue-deduplicator.yml (1 addition, 1 deletion)
@@ -46,7 +46,7 @@ jobs:
         with:
           openai-api-key: ${{ secrets.CODEX_OPENAI_API_KEY }}
           allow-users: "*"
-          model: gpt-5
+          model: gpt-5.1
           prompt: |
             You are an assistant that triages new GitHub issues by identifying potential duplicates.
codex-rs/app-server-protocol/src/protocol/common.rs (2 additions, 2 deletions)
@@ -543,7 +543,7 @@ mod tests {
         let request = ClientRequest::NewConversation {
             request_id: RequestId::Integer(42),
             params: v1::NewConversationParams {
-                model: Some("gpt-5-codex".to_string()),
+                model: Some("gpt-5.1-codex".to_string()),
                 model_provider: None,
                 profile: None,
                 cwd: None,
@@ -561,7 +561,7 @@
             "method": "newConversation",
             "id": 42,
             "params": {
-                "model": "gpt-5-codex",
+                "model": "gpt-5.1-codex",
                 "modelProvider": null,
                 "profile": null,
                 "cwd": null,
codex-rs/app-server/README.md (2 additions, 2 deletions)
@@ -56,7 +56,7 @@ Start a fresh thread when you need a new Codex conversation.
 { "method": "thread/start", "id": 10, "params": {
   // Optionally set config settings. If not specified, will use the user's
   // current config settings.
-  "model": "gpt-5-codex",
+  "model": "gpt-5.1-codex",
   "cwd": "/Users/me/project",
   "approvalPolicy": "never",
   "sandbox": "workspaceWrite",
@@ -137,7 +137,7 @@ You can optionally specify config overrides on the new turn. If specified, these
     "writableRoots": ["/Users/me/project"],
     "networkAccess": true
   },
-  "model": "gpt-5-codex",
+  "model": "gpt-5.1-codex",
   "effort": "medium",
   "summary": "concise"
 } }
codex-rs/app-server/tests/suite/config.rs (2 additions, 2 deletions)
@@ -27,7 +27,7 @@ fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
     std::fs::write(
         config_toml,
         r#"
-model = "gpt-5-codex"
+model = "gpt-5.1-codex"
 approval_policy = "on-request"
 sandbox_mode = "workspace-write"
 model_reasoning_summary = "detailed"
@@ -87,7 +87,7 @@ async fn get_config_toml_parses_all_fields() -> Result<()> {
         }),
         forced_chatgpt_workspace_id: Some("12345678-0000-0000-0000-000000000000".into()),
         forced_login_method: Some(ForcedLoginMethod::Chatgpt),
-        model: Some("gpt-5-codex".into()),
+        model: Some("gpt-5.1-codex".into()),
         model_reasoning_effort: Some(ReasoningEffort::High),
         model_reasoning_summary: Some(ReasoningSummary::Detailed),
         model_verbosity: Some(Verbosity::Medium),
codex-rs/app-server/tests/suite/set_default_model.rs (1 addition, 1 deletion)
@@ -57,7 +57,7 @@ fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
     std::fs::write(
         config_toml,
         r#"
-model = "gpt-5-codex"
+model = "gpt-5.1-codex"
 model_reasoning_effort = "medium"
 "#,
     )
codex-rs/app-server/tests/suite/v2/thread_resume.rs (3 additions, 3 deletions)
@@ -27,7 +27,7 @@ async fn thread_resume_returns_original_thread() -> Result<()> {
     // Start a thread.
     let start_id = mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5-codex".to_string()),
+            model: Some("gpt-5.1-codex".to_string()),
             ..Default::default()
         })
         .await?;
@@ -68,7 +68,7 @@ async fn thread_resume_prefers_path_over_thread_id() -> Result<()> {
 
     let start_id = mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5-codex".to_string()),
+            model: Some("gpt-5.1-codex".to_string()),
             ..Default::default()
         })
         .await?;
@@ -112,7 +112,7 @@ async fn thread_resume_supports_history_and_overrides() -> Result<()> {
     // Start a thread.
     let start_id = mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5-codex".to_string()),
+            model: Some("gpt-5.1-codex".to_string()),
             ..Default::default()
         })
         .await?;
codex-rs/app-server/tests/suite/v2/thread_start.rs (1 addition, 1 deletion)
@@ -29,7 +29,7 @@ async fn thread_start_creates_thread_and_emits_started() -> Result<()> {
     // Start a v2 thread with an explicit model override.
     let req_id = mcp
         .send_thread_start_request(ThreadStartParams {
-            model: Some("gpt-5".to_string()),
+            model: Some("gpt-5.1".to_string()),
             ..Default::default()
         })
         .await?;
codex-rs/cli/src/main.rs (4 additions, 4 deletions)
@@ -761,9 +761,9 @@ mod tests {
 
     #[test]
     fn resume_model_flag_applies_when_no_root_flags() {
-        let interactive = finalize_from_args(["codex", "resume", "-m", "gpt-5-test"].as_ref());
+        let interactive = finalize_from_args(["codex", "resume", "-m", "gpt-5.1-test"].as_ref());
 
-        assert_eq!(interactive.model.as_deref(), Some("gpt-5-test"));
+        assert_eq!(interactive.model.as_deref(), Some("gpt-5.1-test"));
         assert!(interactive.resume_picker);
         assert!(!interactive.resume_last);
         assert_eq!(interactive.resume_session_id, None);
@@ -808,7 +808,7 @@ mod tests {
             "--ask-for-approval",
             "on-request",
             "-m",
-            "gpt-5-test",
+            "gpt-5.1-test",
            "-p",
            "my-profile",
            "-C",
@@ -819,7 +819,7 @@
         .as_ref(),
     );
 
-    assert_eq!(interactive.model.as_deref(), Some("gpt-5-test"));
+    assert_eq!(interactive.model.as_deref(), Some("gpt-5.1-test"));
     assert!(interactive.oss);
     assert_eq!(interactive.config_profile.as_deref(), Some("my-profile"));
     assert_matches!(
codex-rs/core/src/client.rs (4 additions, 4 deletions)
@@ -1225,7 +1225,7 @@ mod tests {
 
     #[tokio::test]
     async fn error_when_error_event() {
-        let raw_error = r#"{"type":"response.failed","sequence_number":3,"response":{"id":"resp_689bcf18d7f08194bf3440ba62fe05d803fee0cdac429894","object":"response","created_at":1755041560,"status":"failed","background":false,"error":{"code":"rate_limit_exceeded","message":"Rate limit reached for gpt-5 in organization org-AAA on tokens per min (TPM): Limit 30000, Used 22999, Requested 12528. Please try again in 11.054s. Visit https://platform.openai.com/account/rate-limits to learn more."}, "usage":null,"user":null,"metadata":{}}}"#;
+        let raw_error = r#"{"type":"response.failed","sequence_number":3,"response":{"id":"resp_689bcf18d7f08194bf3440ba62fe05d803fee0cdac429894","object":"response","created_at":1755041560,"status":"failed","background":false,"error":{"code":"rate_limit_exceeded","message":"Rate limit reached for gpt-5.1 in organization org-AAA on tokens per min (TPM): Limit 30000, Used 22999, Requested 12528. Please try again in 11.054s. Visit https://platform.openai.com/account/rate-limits to learn more."}, "usage":null,"user":null,"metadata":{}}}"#;
 
         let sse1 = format!("event: response.failed\ndata: {raw_error}\n\n");
         let provider = ModelProviderInfo {
@@ -1254,7 +1254,7 @@
         Err(CodexErr::Stream(msg, delay)) => {
             assert_eq!(
                 msg,
-                "Rate limit reached for gpt-5 in organization org-AAA on tokens per min (TPM): Limit 30000, Used 22999, Requested 12528. Please try again in 11.054s. Visit https://platform.openai.com/account/rate-limits to learn more."
+                "Rate limit reached for gpt-5.1 in organization org-AAA on tokens per min (TPM): Limit 30000, Used 22999, Requested 12528. Please try again in 11.054s. Visit https://platform.openai.com/account/rate-limits to learn more."
             );
             assert_eq!(*delay, Some(Duration::from_secs_f64(11.054)));
         }
@@ -1473,7 +1473,7 @@
     fn test_try_parse_retry_after() {
         let err = Error {
             r#type: None,
-            message: Some("Rate limit reached for gpt-5 in organization org- on tokens per min (TPM): Limit 1, Used 1, Requested 19304. Please try again in 28ms. Visit https://platform.openai.com/account/rate-limits to learn more.".to_string()),
+            message: Some("Rate limit reached for gpt-5.1 in organization org- on tokens per min (TPM): Limit 1, Used 1, Requested 19304. Please try again in 28ms. Visit https://platform.openai.com/account/rate-limits to learn more.".to_string()),
             code: Some("rate_limit_exceeded".to_string()),
             plan_type: None,
             resets_at: None
@@ -1487,7 +1487,7 @@
     fn test_try_parse_retry_after_no_delay() {
         let err = Error {
             r#type: None,
-            message: Some("Rate limit reached for gpt-5 in organization <ORG> on tokens per min (TPM): Limit 30000, Used 6899, Requested 24050. Please try again in 1.898s. Visit https://platform.openai.com/account/rate-limits to learn more.".to_string()),
+            message: Some("Rate limit reached for gpt-5.1 in organization <ORG> on tokens per min (TPM): Limit 30000, Used 6899, Requested 24050. Please try again in 1.898s. Visit https://platform.openai.com/account/rate-limits to learn more.".to_string()),
             code: Some("rate_limit_exceeded".to_string()),
             plan_type: None,
             resets_at: None
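
For reference, a hedged sketch of the retry-delay parsing that the tests above exercise; the real helper lives elsewhere in codex-rs and may differ, and the parsing rule here (a "try again in <value><unit>." fragment with unit "s" or "ms") is an assumption drawn only from the test strings:

use std::time::Duration;

// Sketch only: extracts the delay from messages like
// "... Please try again in 11.054s. Visit ..." or "... try again in 28ms. ...".
fn parse_retry_after(message: &str) -> Option<Duration> {
    let rest = message.split("try again in ").nth(1)?;
    let token = rest.split_whitespace().next()?.trim_end_matches('.');
    if let Some(ms) = token.strip_suffix("ms") {
        // Check "ms" before "s", since "28ms" also ends in 's'.
        return ms.parse::<f64>().ok().map(|v| Duration::from_secs_f64(v / 1_000.0));
    }
    if let Some(secs) = token.strip_suffix('s') {
        return secs.parse::<f64>().ok().map(Duration::from_secs_f64);
    }
    None
}

fn main() {
    let msg = "Please try again in 11.054s. Visit https://platform.openai.com/account/rate-limits to learn more.";
    // Matches the expectation asserted in the test: 11.054 seconds.
    assert_eq!(parse_retry_after(msg), Some(Duration::from_secs_f64(11.054)));
}
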
codex-rs/core/src/client_common.rs (4 additions, 4 deletions)
@@ -427,7 +427,7 @@ mod tests {
             expects_apply_patch_instructions: false,
         },
         InstructionsTestCase {
-            slug: "gpt-5-codex",
+            slug: "gpt-5.1-codex",
             expects_apply_patch_instructions: false,
         },
         InstructionsTestCase {
@@ -457,7 +457,7 @@
     let input: Vec<ResponseItem> = vec![];
     let tools: Vec<serde_json::Value> = vec![];
     let req = ResponsesApiRequest {
-        model: "gpt-5",
+        model: "gpt-5.1",
         instructions: "i",
         input: &input,
         tools: &tools,
@@ -498,7 +498,7 @@
         create_text_param_for_request(None, &Some(schema.clone())).expect("text controls");
 
     let req = ResponsesApiRequest {
-        model: "gpt-5",
+        model: "gpt-5.1",
         instructions: "i",
         input: &input,
         tools: &tools,
@@ -534,7 +534,7 @@
     let input: Vec<ResponseItem> = vec![];
     let tools: Vec<serde_json::Value> = vec![];
    let req = ResponsesApiRequest {
-        model: "gpt-5",
+        model: "gpt-5.1",
         instructions: "i",
         input: &input,
         tools: &tools,
codex-rs/core/src/config/edit.rs (7 additions, 7 deletions)
@@ -584,15 +584,15 @@ mod tests {
         codex_home,
         None,
         &[ConfigEdit::SetModel {
-            model: Some("gpt-5-codex".to_string()),
+            model: Some("gpt-5.1-codex".to_string()),
             effort: Some(ReasoningEffort::High),
         }],
     )
     .expect("persist");
 
     let contents =
         std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
-    let expected = r#"model = "gpt-5-codex"
+    let expected = r#"model = "gpt-5.1-codex"
 model_reasoning_effort = "high"
 "#;
     assert_eq!(contents, expected);
@@ -722,7 +722,7 @@ model = "o5-preview"
     std::fs::write(
         codex_home.join(CONFIG_TOML_FILE),
         r#"[profiles."team a"]
-model = "gpt-5-codex"
+model = "gpt-5.1-codex"
 "#,
     )
     .expect("seed");
@@ -972,14 +972,14 @@ B = \"2\"
     let codex_home = tmp.path().to_path_buf();
 
     ConfigEditsBuilder::new(&codex_home)
-        .set_model(Some("gpt-5-codex"), Some(ReasoningEffort::High))
+        .set_model(Some("gpt-5.1-codex"), Some(ReasoningEffort::High))
         .apply()
         .await
         .expect("persist");
 
     let contents =
         std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
-    let expected = r#"model = "gpt-5-codex"
+    let expected = r#"model = "gpt-5.1-codex"
 model_reasoning_effort = "high"
 "#;
     assert_eq!(contents, expected);
@@ -1001,11 +1001,11 @@ model_reasoning_effort = "low"
         std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
     assert_eq!(contents, initial_expected);
 
-    let updated_expected = r#"model = "gpt-5-codex"
+    let updated_expected = r#"model = "gpt-5.1-codex"
 model_reasoning_effort = "high"
 "#;
     ConfigEditsBuilder::new(codex_home)
-        .set_model(Some("gpt-5-codex"), Some(ReasoningEffort::High))
+        .set_model(Some("gpt-5.1-codex"), Some(ReasoningEffort::High))
         .apply_blocking()
         .expect("persist update");
     contents = std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
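
To make the builder flow in these tests easier to follow, here is a condensed sketch; ConfigEditsBuilder, set_model, and apply_blocking are taken from the hunks above, while the wrapper function and the import paths are assumptions:

use codex_core::config::edit::ConfigEditsBuilder; // path assumed from the diff's file location
use codex_core::config::types::ReasoningEffort;   // path assumed

// Hypothetical wrapper, condensed from the tests above. Writes
// `model = "gpt-5.1-codex"` and `model_reasoning_effort = "high"`
// into <codex_home>/config.toml, matching the `expected` strings asserted there.
fn bump_default_model(codex_home: &std::path::Path) {
    ConfigEditsBuilder::new(codex_home)
        .set_model(Some("gpt-5.1-codex"), Some(ReasoningEffort::High))
        .apply_blocking()
        .expect("persist config edit");
}

The tests also show an async variant of the same flow (.apply().await), so the builder appears usable from both sync and async call sites.
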
codex-rs/core/src/config/mod.rs (15 additions, 15 deletions)
@@ -60,11 +60,11 @@ pub mod profile;
 pub mod types;
 
 #[cfg(target_os = "windows")]
-pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5";
+pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5.1";

Review thread on this line:
  Collaborator: let's double check with @dylan-hurd-oai
  Collaborator: :shipit:

 #[cfg(not(target_os = "windows"))]
-pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5-codex";
-const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5-codex";
-pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "gpt-5-codex";
+pub const OPENAI_DEFAULT_MODEL: &str = "gpt-5.1-codex";
+const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5.1-codex";
+pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "gpt-5.1-codex";
 
 /// Maximum number of bytes of the documentation that will be embedded. Larger
 /// files are *silently truncated* to this size so we do not take up too much of
@@ -79,7 +79,7 @@ pub struct Config {
     /// Optional override of model selection.
     pub model: String,
 
-    /// Model used specifically for review sessions. Defaults to "gpt-5-codex".
+    /// Model used specifically for review sessions. Defaults to "gpt-5.1-codex".
     pub review_model: String,
 
     pub model_family: ModelFamily,
@@ -2542,15 +2542,15 @@ url = "https://example.com/mcp"
     let codex_home = TempDir::new()?;
 
     ConfigEditsBuilder::new(codex_home.path())
-        .set_model(Some("gpt-5-codex"), Some(ReasoningEffort::High))
+        .set_model(Some("gpt-5.1-codex"), Some(ReasoningEffort::High))
         .apply()
         .await?;
 
     let serialized =
         tokio::fs::read_to_string(codex_home.path().join(CONFIG_TOML_FILE)).await?;
     let parsed: ConfigToml = toml::from_str(&serialized)?;
 
-    assert_eq!(parsed.model.as_deref(), Some("gpt-5-codex"));
+    assert_eq!(parsed.model.as_deref(), Some("gpt-5.1-codex"));
     assert_eq!(parsed.model_reasoning_effort, Some(ReasoningEffort::High));
 
     Ok(())
@@ -2564,7 +2564,7 @@
     tokio::fs::write(
         &config_path,
         r#"
-model = "gpt-5-codex"
+model = "gpt-5.1-codex"
 model_reasoning_effort = "medium"
 
 [profiles.dev]
@@ -2600,7 +2600,7 @@ model = "gpt-4.1"
 
     ConfigEditsBuilder::new(codex_home.path())
         .with_profile(Some("dev"))
-        .set_model(Some("gpt-5-codex"), Some(ReasoningEffort::Medium))
+        .set_model(Some("gpt-5.1-codex"), Some(ReasoningEffort::Medium))
         .apply()
         .await?;
@@ -2612,7 +2612,7 @@
         .get("dev")
         .expect("profile should be created");
 
-    assert_eq!(profile.model.as_deref(), Some("gpt-5-codex"));
+    assert_eq!(profile.model.as_deref(), Some("gpt-5.1-codex"));
     assert_eq!(
         profile.model_reasoning_effort,
         Some(ReasoningEffort::Medium)
@@ -2634,7 +2634,7 @@
 model = "gpt-4"
 model_reasoning_effort = "medium"
 
 [profiles.prod]
-model = "gpt-5-codex"
+model = "gpt-5.1-codex"
 "#,
 )
 .await?;
@@ -2663,7 +2663,7 @@ model = "gpt-5-codex"
         .profiles
         .get("prod")
         .and_then(|profile| profile.model.as_deref()),
-    Some("gpt-5-codex"),
+    Some("gpt-5.1-codex"),
 );
 
 Ok(())
@@ -2778,7 +2778,7 @@ model_provider = "openai"
 approval_policy = "on-failure"
 
 [profiles.gpt5]
-model = "gpt-5"
+model = "gpt-5.1"
 model_provider = "openai"
 approval_policy = "on-failure"
 model_reasoning_effort = "high"
@@ -3094,9 +3094,9 @@ model_verbosity = "high"
     fixture.codex_home(),
 )?;
 let expected_gpt5_profile_config = Config {
-    model: "gpt-5".to_string(),
+    model: "gpt-5.1".to_string(),
     review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(),
-    model_family: find_family_for_model("gpt-5").expect("known model slug"),
+    model_family: find_family_for_model("gpt-5.1").expect("known model slug"),
     model_context_window: Some(272_000),
     model_max_output_tokens: Some(128_000),
     model_auto_compact_token_limit: Some(244_800),
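
One detail worth noting from the first hunk above: the default model is platform-gated, so Windows and non-Windows builds resolve to different gpt-5.1 variants. A minimal self-contained sketch of how such cfg-gated constants behave; the names mirror the diff, the program itself is illustrative only:

// Exactly one definition survives compilation per target, mirroring the
// cfg(target_os = "windows") split in config/mod.rs.
#[cfg(target_os = "windows")]
const OPENAI_DEFAULT_MODEL: &str = "gpt-5.1";
#[cfg(not(target_os = "windows"))]
const OPENAI_DEFAULT_MODEL: &str = "gpt-5.1-codex";

fn main() {
    // Prints "gpt-5.1" on Windows builds and "gpt-5.1-codex" elsewhere.
    println!("default model: {OPENAI_DEFAULT_MODEL}");
}
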
codex-rs/core/src/model_family.rs (1 addition, 1 deletion)
@@ -132,7 +132,7 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
         model_family!(slug, "gpt-4o", needs_special_apply_patch_instructions: true)
     } else if slug.starts_with("gpt-3.5") {
         model_family!(slug, "gpt-3.5", needs_special_apply_patch_instructions: true)
-    } else if slug.starts_with("test-gpt-5-codex") {
+    } else if slug.starts_with("test-gpt-5") {

Review thread on this line:
  Collaborator: ??
  Collaborator (Author): to include test-gpt-5.1
  Collaborator: it has codex prompt
  Collaborator (Author): oh yeah I wanted to support gpt-5.1-codex
  Collaborator (Author): just for tests

         model_family!(
             slug, slug,
             supports_reasoning_summaries: true,
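
The thread above is about prefix matching: widening the test-only branch from "test-gpt-5-codex" to "test-gpt-5" lets the same arm cover the new test-gpt-5.1 and test-gpt-5.1-codex slugs. A tiny standalone sketch of that behavior; the function name is hypothetical, and only the starts_with check comes from the diff:

fn is_test_gpt5_slug(slug: &str) -> bool {
    // Matches any test slug in the gpt-5 lineage, including gpt-5.1 variants.
    slug.starts_with("test-gpt-5")
}

fn main() {
    for slug in ["test-gpt-5-codex", "test-gpt-5.1", "test-gpt-5.1-codex", "gpt-5.1"] {
        // The bare "gpt-5.1" slug does not match; it is handled by the
        // regular (non-test) branches of find_family_for_model.
        println!("{slug}: {}", is_test_gpt5_slug(slug));
    }
}
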