35 changes: 0 additions & 35 deletions codex-rs/core/tests/common/lib.rs
@@ -1,7 +1,6 @@
#![expect(clippy::expect_used)]

use codex_utils_cargo_bin::CargoBinError;
use codex_utils_cargo_bin::find_resource;
use tempfile::TempDir;

use codex_core::CodexThread;
@@ -147,40 +146,6 @@ pub fn load_sse_fixture_with_id_from_str(raw: &str, id: &str) -> String {
.collect()
}

/// Same as [`load_sse_fixture`], but replaces the placeholder `__ID__` in the
/// fixture template with the supplied identifier before parsing. This lets a
/// single JSON template be reused by multiple tests that each need a unique
/// `response_id`.
pub fn load_sse_fixture_with_id(path: impl AsRef<std::path::Path>, id: &str) -> String {
let p = path.as_ref();
let full_path = match find_resource!(p) {
Ok(p) => p,
Err(err) => panic!(
"failed to find fixture template at {:?}: {err}",
path.as_ref()
),
};

let raw = std::fs::read_to_string(full_path).expect("read fixture template");
let replaced = raw.replace("__ID__", id);
let events: Vec<serde_json::Value> =
serde_json::from_str(&replaced).expect("parse JSON fixture");
events
.into_iter()
.map(|e| {
let kind = e
.get("type")
.and_then(|v| v.as_str())
.expect("fixture event missing type");
if e.as_object().map(|o| o.len() == 1).unwrap_or(false) {
format!("event: {kind}\n\n")
} else {
format!("event: {kind}\ndata: {e}\n\n")
}
})
.collect()
}

pub async fn wait_for_event<F>(codex: &CodexThread, predicate: F) -> codex_core::protocol::EventMsg
where
F: FnMut(&codex_core::protocol::EventMsg) -> bool,
16 changes: 0 additions & 16 deletions codex-rs/core/tests/fixtures/completed_template.json

This file was deleted.
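The SSE stream this template used to provide is now assembled in code by the `core_test_support::responses` helpers, as the `client.rs` changes below show. A minimal sketch of the replacement pattern, assuming `sse` lives alongside the other `responses` helpers and that `ev_response_created`/`ev_completed` take a response id as their usage in this diff implies; the wrapper function name is hypothetical:

```rust
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_response_created;
use core_test_support::responses::mount_sse_once;
use core_test_support::responses::sse;
use wiremock::MockServer;

// Illustrative only: mirrors how the updated tests below stub one model turn.
async fn stub_single_completed_turn() -> MockServer {
    let server = MockServer::start().await;
    // One created/completed event pair for the same response id, rendered as
    // an SSE body and mounted so the next POST receives this stream.
    let _resp_mock = mount_sse_once(
        &server,
        sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
    )
    .await;
    server
}
```

Building the events at the call site removes the need for an on-disk template with an `__ID__` placeholder; each test names its response id directly.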

124 changes: 98 additions & 26 deletions codex-rs/core/tests/suite/client.rs
@@ -36,8 +36,9 @@ use codex_protocol::models::WebSearchAction;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::user_input::UserInput;
use core_test_support::load_default_config_for_test;
use core_test_support::load_sse_fixture_with_id;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_completed_with_tokens;
use core_test_support::responses::ev_response_created;
use core_test_support::responses::mount_sse_once;
use core_test_support::responses::mount_sse_once_match;
use core_test_support::responses::mount_sse_sequence;
@@ -64,11 +65,6 @@ use wiremock::matchers::method;
use wiremock::matchers::path;
use wiremock::matchers::query_param;

/// Build minimal SSE stream with completed marker using the JSON fixture.
fn sse_completed(id: &str) -> String {
load_sse_fixture_with_id("../fixtures/completed_template.json", id)
}

#[expect(clippy::unwrap_used)]
fn assert_message_role(request_body: &serde_json::Value, role: &str) {
assert_eq!(request_body["role"].as_str().unwrap(), role);
@@ -259,7 +255,11 @@ async fn resume_includes_initial_messages_and_sends_prior_items() {

// Mock server that will receive the resumed request
let server = MockServer::start().await;
let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
)
.await;

// Configure Codex to resume from our file
let codex_home = Arc::new(TempDir::new().unwrap());
@@ -377,7 +377,11 @@ async fn includes_conversation_id_and_model_headers_in_request() {
// Mock server
let server = MockServer::start().await;

let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
)
.await;

let mut builder = test_codex().with_auth(CodexAuth::from_api_key("Test API Key"));
let test = builder
@@ -418,7 +422,11 @@ async fn includes_base_instructions_override_in_request() {
skip_if_no_network!();
// Mock server
let server = MockServer::start().await;
let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
)
.await;

let mut builder = test_codex()
.with_auth(CodexAuth::from_api_key("Test API Key"))
@@ -462,7 +470,11 @@ async fn chatgpt_auth_sends_correct_request() {
// Mock server
let server = MockServer::start().await;

let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
)
.await;

let mut model_provider = built_in_model_providers()["openai"].clone();
model_provider.base_url = Some(format!("{}/api/codex", server.uri()));
@@ -524,7 +536,10 @@ async fn prefers_apikey_when_config_prefers_apikey_even_with_chatgpt_tokens() {

let first = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse_completed("resp1"), "text/event-stream");
.set_body_raw(
sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
"text/event-stream",
);

// Expect API key header, no ChatGPT account header required.
Mock::given(method("POST"))
@@ -590,7 +605,11 @@ async fn includes_user_instructions_message_in_request() {
skip_if_no_network!();
let server = MockServer::start().await;

let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
)
.await;

let mut builder = test_codex()
.with_auth(CodexAuth::from_api_key("Test API Key"))
@@ -652,7 +671,11 @@ async fn skills_append_to_instructions() {
skip_if_no_network!();
let server = MockServer::start().await;

let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
)
.await;

let codex_home = Arc::new(TempDir::new().unwrap());
let skill_dir = codex_home.path().join("skills/demo");
@@ -720,7 +743,11 @@ async fn includes_configured_effort_in_request() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;

let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
)
.await;
let TestCodex { codex, .. } = test_codex()
.with_model("gpt-5.1-codex")
.with_config(|config| {
@@ -761,7 +788,11 @@ async fn includes_no_effort_in_request() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;

let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
)
.await;
let TestCodex { codex, .. } = test_codex()
.with_model("gpt-5.1-codex")
.build(&server)
@@ -800,7 +831,11 @@ async fn includes_default_reasoning_effort_in_request_when_defined_by_model_info
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;

let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
)
.await;
let TestCodex { codex, .. } = test_codex().with_model("gpt-5.1").build(&server).await?;

codex
@@ -835,7 +870,11 @@ async fn user_turn_collaboration_mode_overrides_model_and_effort() -> anyhow::Re
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;

let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
)
.await;
let TestCodex {
codex,
config,
@@ -893,7 +932,11 @@ async fn configured_reasoning_summary_is_sent() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;

let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
)
.await;
let TestCodex { codex, .. } = test_codex()
.with_config(|config| {
config.model_reasoning_summary = ReasoningSummary::Concise;
@@ -933,7 +976,11 @@ async fn reasoning_summary_is_omitted_when_disabled() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;

let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
)
.await;
let TestCodex { codex, .. } = test_codex()
.with_config(|config| {
config.model_reasoning_summary = ReasoningSummary::None;
@@ -972,7 +1019,11 @@ async fn includes_default_verbosity_in_request() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;

let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
)
.await;
let TestCodex { codex, .. } = test_codex().with_model("gpt-5.1").build(&server).await?;

codex
@@ -1007,7 +1058,11 @@ async fn configured_verbosity_not_sent_for_models_without_support() -> anyhow::R
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;

let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
)
.await;
let TestCodex { codex, .. } = test_codex()
.with_model("gpt-5.1-codex")
.with_config(|config| {
@@ -1047,7 +1102,11 @@ async fn configured_verbosity_is_sent() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
let server = MockServer::start().await;

let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
)
.await;
let TestCodex { codex, .. } = test_codex()
.with_model("gpt-5.1")
.with_config(|config| {
@@ -1088,7 +1147,11 @@ async fn includes_developer_instructions_message_in_request() {
skip_if_no_network!();
let server = MockServer::start().await;

let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
)
.await;
let mut builder = test_codex()
.with_auth(CodexAuth::from_api_key("Test API Key"))
.with_config(|config| {
@@ -1570,7 +1633,10 @@ async fn context_window_error_sets_total_tokens_to_model_window() -> anyhow::Res
mount_sse_once_match(
&server,
body_string_contains("seed turn"),
sse_completed("resp_seed"),
sse(vec![
ev_response_created("resp_seed"),
ev_completed("resp_seed"),
]),
)
.await;

@@ -1656,7 +1722,10 @@ async fn azure_overrides_assign_properties_used_for_responses_url() {
// First request – must NOT include `previous_response_id`.
let first = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse_completed("resp1"), "text/event-stream");
.set_body_raw(
sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
"text/event-stream",
);

// Expect POST to /openai/responses with api-version query param
Mock::given(method("POST"))
@@ -1737,7 +1806,10 @@ async fn env_var_overrides_loaded_auth() {
// First request – must NOT include `previous_response_id`.
let first = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse_completed("resp1"), "text/event-stream");
.set_body_raw(
sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
"text/event-stream",
);

// Expect POST to /openai/responses with api-version query param
Mock::given(method("POST"))
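For the tests above that assert on headers or query parameters (the ChatGPT-auth, Azure, and environment-variable cases), the same `sse(...)` string is handed to wiremock directly rather than through `mount_sse_once`. A rough sketch of that wiring, using only the wiremock calls already visible in this diff; the helper function name and the `api-version` value are placeholders, not what the real tests check:

```rust
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_response_created;
use core_test_support::responses::sse;
use wiremock::matchers::method;
use wiremock::matchers::path;
use wiremock::matchers::query_param;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::ResponseTemplate;

async fn mount_raw_sse_response(server: &MockServer) {
    // Render the created/completed pair once and attach it as a raw SSE body.
    let first = ResponseTemplate::new(200)
        .insert_header("content-type", "text/event-stream")
        .set_body_raw(
            sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
            "text/event-stream",
        );

    // Match the Azure-style endpoint; the query value below is illustrative.
    Mock::given(method("POST"))
        .and(path("/openai/responses"))
        .and(query_param("api-version", "placeholder-version"))
        .respond_with(first)
        .expect(1)
        .mount(server)
        .await;
}
```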