From 9ea54d873bc3f9b060757b8d317fc59f928b36f7 Mon Sep 17 00:00:00 2001
From: Owen Lin
Date: Sat, 18 Oct 2025 21:08:49 -0700
Subject: [PATCH 1/7] [app-server] read rate limit API

---
 codex-rs/Cargo.lock                            |   3 +
 codex-rs/Cargo.toml                            |   1 +
 codex-rs/app-server-protocol/src/protocol.rs   |  29 +++
 codex-rs/app-server/Cargo.toml                 |   2 +
 .../app-server/src/codex_message_processor.rs  | 121 +++++++++
 .../app-server/tests/common/mcp_process.rs     |   5 +
 codex-rs/app-server/tests/suite/mod.rs         |   1 +
 .../app-server/tests/suite/rate_limits.rs      | 242 ++++++++++++++++++
 codex-rs/app-server/tests/suite/user_agent.rs  |   4 +-
 codex-rs/backend-client/src/client.rs          |  11 +
 codex-rs/backend-client/src/types.rs           |   4 +
 .../codex-backend-openapi-models/Cargo.toml    |   1 +
 .../src/models/mod.rs                          |  12 +
 .../src/models/rate_limit_status_details.rs    |  46 ++++
 .../src/models/rate_limit_status_payload.rs    |  65 +++++
 .../src/models/rate_limit_window_snapshot.rs   |  40 +++
 codex-rs/core/src/client.rs                    |  28 +-
 codex-rs/protocol/src/protocol.rs              |   4 +-
 18 files changed, 602 insertions(+), 17 deletions(-)
 create mode 100644 codex-rs/app-server/tests/suite/rate_limits.rs
 create mode 100644 codex-rs/codex-backend-openapi-models/src/models/rate_limit_status_details.rs
 create mode 100644 codex-rs/codex-backend-openapi-models/src/models/rate_limit_status_payload.rs
 create mode 100644 codex-rs/codex-backend-openapi-models/src/models/rate_limit_window_snapshot.rs

diff --git a/codex-rs/Cargo.lock b/codex-rs/Cargo.lock
index c700a2dd1cd..2d1052c9b83 100644
--- a/codex-rs/Cargo.lock
+++ b/codex-rs/Cargo.lock
@@ -834,8 +834,10 @@ dependencies = [
  "app_test_support",
  "assert_cmd",
  "base64",
+ "chrono",
  "codex-app-server-protocol",
  "codex-arg0",
+ "codex-backend-client",
  "codex-common",
  "codex-core",
  "codex-file-search",
@@ -929,6 +931,7 @@ version = "0.0.0"
 dependencies = [
  "serde",
  "serde_json",
+ "serde_with",
 ]
 
 [[package]]
diff --git a/codex-rs/Cargo.toml b/codex-rs/Cargo.toml
index 5286ef8702a..20c6e422e36 100644
--- a/codex-rs/Cargo.toml
+++ b/codex-rs/Cargo.toml
@@ -56,6 +56,7 @@ codex-app-server-protocol = { path = "app-server-protocol" }
 codex-apply-patch = { path = "apply-patch" }
 codex-arg0 = { path = "arg0" }
 codex-async-utils = { path = "async-utils" }
+codex-backend-client = { path = "backend-client" }
 codex-chatgpt = { path = "chatgpt" }
 codex-common = { path = "common" }
 codex-core = { path = "core" }
diff --git a/codex-rs/app-server-protocol/src/protocol.rs b/codex-rs/app-server-protocol/src/protocol.rs
index 9c10d23360d..080c1062b9e 100644
--- a/codex-rs/app-server-protocol/src/protocol.rs
+++ b/codex-rs/app-server-protocol/src/protocol.rs
@@ -14,6 +14,7 @@ use codex_protocol::parse_command::ParsedCommand;
 use codex_protocol::protocol::AskForApproval;
 use codex_protocol::protocol::EventMsg;
 use codex_protocol::protocol::FileChange;
+use codex_protocol::protocol::RateLimitSnapshot;
 use codex_protocol::protocol::ReviewDecision;
 use codex_protocol::protocol::SandboxPolicy;
 use codex_protocol::protocol::TurnAbortReason;
@@ -183,6 +184,12 @@ client_request_definitions!
{ params: ExecOneOffCommandParams, response: ExecOneOffCommandResponse, }, + #[serde(rename = "account/rateLimits/read")] + #[ts(rename = "account/rateLimits/read")] + GetAccountRateLimits { + params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>, + response: GetAccountRateLimitsResponse, + }, } #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)] @@ -422,6 +429,12 @@ pub struct ExecOneOffCommandResponse { #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] #[serde(rename_all = "camelCase")] +pub struct GetAccountRateLimitsResponse { + pub rate_limits: RateLimitSnapshot, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)] +#[serde(rename_all = "camelCase")] pub struct GetAuthStatusResponse { #[serde(skip_serializing_if = "Option::is_none")] pub auth_method: Option, @@ -970,4 +983,20 @@ mod tests { assert_eq!(payload.request_with_id(RequestId::Integer(7)), request); Ok(()) } + + #[test] + fn serialize_get_account_rate_limits() -> Result<()> { + let request = ClientRequest::GetAccountRateLimits { + request_id: RequestId::Integer(1), + params: None, + }; + assert_eq!( + json!({ + "method": "account/rateLimits/read", + "id": 1, + }), + serde_json::to_value(&request)?, + ); + Ok(()) + } } diff --git a/codex-rs/app-server/Cargo.toml b/codex-rs/app-server/Cargo.toml index 545ef4898c9..c0dd2410c7b 100644 --- a/codex-rs/app-server/Cargo.toml +++ b/codex-rs/app-server/Cargo.toml @@ -19,11 +19,13 @@ anyhow = { workspace = true } codex-arg0 = { workspace = true } codex-common = { workspace = true, features = ["cli"] } codex-core = { workspace = true } +codex-backend-client = { workspace = true } codex-file-search = { workspace = true } codex-login = { workspace = true } codex-protocol = { workspace = true } codex-app-server-protocol = { workspace = true } codex-utils-json-to-toml = { workspace = true } +chrono = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } tokio = { workspace = true, features = [ diff --git a/codex-rs/app-server/src/codex_message_processor.rs b/codex-rs/app-server/src/codex_message_processor.rs index 40e6a45fa35..6a22ac91644 100644 --- a/codex-rs/app-server/src/codex_message_processor.rs +++ b/codex-rs/app-server/src/codex_message_processor.rs @@ -9,6 +9,7 @@ use codex_app_server_protocol::ApplyPatchApprovalParams; use codex_app_server_protocol::ApplyPatchApprovalResponse; use codex_app_server_protocol::ArchiveConversationParams; use codex_app_server_protocol::ArchiveConversationResponse; +use codex_app_server_protocol::AuthMode; use codex_app_server_protocol::AuthStatusChangeNotification; use codex_app_server_protocol::ClientRequest; use codex_app_server_protocol::ConversationSummary; @@ -18,6 +19,7 @@ use codex_app_server_protocol::ExecOneOffCommandParams; use codex_app_server_protocol::ExecOneOffCommandResponse; use codex_app_server_protocol::FuzzyFileSearchParams; use codex_app_server_protocol::FuzzyFileSearchResponse; +use codex_app_server_protocol::GetAccountRateLimitsResponse; use codex_app_server_protocol::GetUserAgentResponse; use codex_app_server_protocol::GetUserSavedConfigResponse; use codex_app_server_protocol::GitDiffToRemoteResponse; @@ -49,6 +51,9 @@ use codex_app_server_protocol::SetDefaultModelParams; use codex_app_server_protocol::SetDefaultModelResponse; use codex_app_server_protocol::UserInfoResponse; use codex_app_server_protocol::UserSavedConfig; +use codex_backend_client::Client as 
BackendClient; +use codex_backend_client::types::RateLimitStatusPayload; +use codex_backend_client::types::RateLimitWindowSnapshot; use codex_core::AuthManager; use codex_core::CodexConversation; use codex_core::ConversationManager; @@ -88,6 +93,8 @@ use codex_protocol::config_types::ForcedLoginMethod; use codex_protocol::models::ContentItem; use codex_protocol::models::ResponseItem; use codex_protocol::protocol::InputMessageKind; +use codex_protocol::protocol::RateLimitSnapshot; +use codex_protocol::protocol::RateLimitWindow; use codex_protocol::protocol::USER_MESSAGE_BEGIN; use codex_utils_json_to_toml::json_to_toml; use std::collections::HashMap; @@ -240,6 +247,12 @@ impl CodexMessageProcessor { ClientRequest::ExecOneOffCommand { request_id, params } => { self.exec_one_off_command(request_id, params).await; } + ClientRequest::GetAccountRateLimits { + request_id, + params: _, + } => { + self.get_account_rate_limits(request_id).await; + } } } @@ -527,6 +540,70 @@ impl CodexMessageProcessor { self.outgoing.send_response(request_id, response).await; } + async fn get_account_rate_limits(&self, request_id: RequestId) { + match self.fetch_account_rate_limits().await { + Ok(rate_limits) => { + let response = GetAccountRateLimitsResponse { rate_limits }; + self.outgoing.send_response(request_id, response).await; + } + Err(error) => { + self.outgoing.send_error(request_id, error).await; + } + } + } + + async fn fetch_account_rate_limits(&self) -> Result { + let Some(auth) = self.auth_manager.auth() else { + return Err(JSONRPCErrorError { + code: INVALID_REQUEST_ERROR_CODE, + message: "codex account authentication required to read rate limits".to_string(), + data: None, + }); + }; + + if auth.mode != AuthMode::ChatGPT { + return Err(JSONRPCErrorError { + code: INVALID_REQUEST_ERROR_CODE, + message: "chatgpt authentication required to read rate limits".to_string(), + data: None, + }); + } + + let token = auth.get_token().await.map_err(|err| JSONRPCErrorError { + code: INTERNAL_ERROR_CODE, + message: format!("failed to read codex auth token: {err}"), + data: None, + })?; + + let mut client = + BackendClient::new(self.config.chatgpt_base_url.clone()).map_err(|err| { + JSONRPCErrorError { + code: INTERNAL_ERROR_CODE, + message: format!("failed to construct backend client: {err}"), + data: None, + } + })?; + + client = client + .with_user_agent(get_codex_user_agent()) + .with_bearer_token(token); + + if let Some(account_id) = auth.get_account_id() { + client = client.with_chatgpt_account_id(account_id); + } + + let payload = client + .get_rate_limits() + .await + .map_err(|err| JSONRPCErrorError { + code: INTERNAL_ERROR_CODE, + message: format!("failed to fetch codex rate limits: {err}"), + data: None, + })?; + + Ok(rate_limit_snapshot_from_payload(payload)) + } + async fn get_user_saved_config(&self, request_id: RequestId) { let toml_value = match load_config_as_toml(&self.config.codex_home).await { Ok(val) => val, @@ -1393,6 +1470,50 @@ async fn derive_config_from_params( Config::load_with_cli_overrides(cli_overrides, overrides).await } +fn rate_limit_snapshot_from_payload(payload: RateLimitStatusPayload) -> RateLimitSnapshot { + let Some(details) = payload + .rate_limit + .and_then(|inner| inner.map(|boxed| *boxed)) + else { + return RateLimitSnapshot { + primary: None, + secondary: None, + }; + }; + + RateLimitSnapshot { + primary: map_rate_limit_window(details.primary_window), + secondary: map_rate_limit_window(details.secondary_window), + } +} + +fn map_rate_limit_window( + window: Option>>, +) 
-> Option { + let snapshot = match window { + Some(Some(snapshot)) => *snapshot, + _ => return None, + }; + + let used_percent = f64::from(snapshot.used_percent); + let window_minutes = window_minutes_from_seconds(snapshot.limit_window_seconds); + let resets_at = snapshot.reset_at; + Some(RateLimitWindow { + used_percent, + window_minutes, + resets_at, + }) +} + +fn window_minutes_from_seconds(seconds: i32) -> Option { + if seconds <= 0 { + return None; + } + + let seconds_u64 = seconds as u64; + Some(seconds_u64.div_ceil(60)) +} + async fn on_patch_approval_response( event_id: String, receiver: oneshot::Receiver, diff --git a/codex-rs/app-server/tests/common/mcp_process.rs b/codex-rs/app-server/tests/common/mcp_process.rs index 29768df81cf..2b0f432be8e 100644 --- a/codex-rs/app-server/tests/common/mcp_process.rs +++ b/codex-rs/app-server/tests/common/mcp_process.rs @@ -236,6 +236,11 @@ impl McpProcess { self.send_request("getUserAgent", None).await } + /// Send an `account/rateLimits/read` JSON-RPC request. + pub async fn send_get_account_rate_limits_request(&mut self) -> anyhow::Result { + self.send_request("account/rateLimits/read", None).await + } + /// Send a `userInfo` JSON-RPC request. pub async fn send_user_info_request(&mut self) -> anyhow::Result { self.send_request("userInfo", None).await diff --git a/codex-rs/app-server/tests/suite/mod.rs b/codex-rs/app-server/tests/suite/mod.rs index 78ce310e749..507f71049c3 100644 --- a/codex-rs/app-server/tests/suite/mod.rs +++ b/codex-rs/app-server/tests/suite/mod.rs @@ -7,6 +7,7 @@ mod fuzzy_file_search; mod interrupt; mod list_resume; mod login; +mod rate_limits; mod send_message; mod set_default_model; mod user_agent; diff --git a/codex-rs/app-server/tests/suite/rate_limits.rs b/codex-rs/app-server/tests/suite/rate_limits.rs new file mode 100644 index 00000000000..4fc208884da --- /dev/null +++ b/codex-rs/app-server/tests/suite/rate_limits.rs @@ -0,0 +1,242 @@ +use std::path::Path; + +use app_test_support::McpProcess; +use app_test_support::to_response; +use base64::Engine; +use base64::engine::general_purpose::URL_SAFE_NO_PAD; +use chrono::Utc; +use codex_app_server_protocol::GetAccountRateLimitsResponse; +use codex_app_server_protocol::JSONRPCError; +use codex_app_server_protocol::JSONRPCResponse; +use codex_app_server_protocol::LoginApiKeyParams; +use codex_app_server_protocol::RequestId; +use codex_core::auth::AuthDotJson; +use codex_core::auth::get_auth_file; +use codex_core::auth::write_auth_json; +use codex_core::token_data::TokenData; +use codex_core::token_data::parse_id_token; +use codex_protocol::protocol::RateLimitSnapshot; +use codex_protocol::protocol::RateLimitWindow; +use pretty_assertions::assert_eq; +use serde_json::json; +use tempfile::TempDir; +use tokio::time::timeout; +use wiremock::Mock; +use wiremock::MockServer; +use wiremock::ResponseTemplate; +use wiremock::matchers::header; +use wiremock::matchers::method; +use wiremock::matchers::path; + +const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); +const INVALID_REQUEST_ERROR_CODE: i64 = -32600; + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_account_rate_limits_requires_auth() { + let codex_home = TempDir::new().unwrap(); + + let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]) + .await + .expect("spawn mcp process"); + timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()) + .await + .expect("initialize timeout") + .expect("initialize request"); + + let request_id = mcp + 
.send_get_account_rate_limits_request() + .await + .expect("send account/rateLimits/read"); + + let error: JSONRPCError = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_error_message(RequestId::Integer(request_id)), + ) + .await + .expect("account/rateLimits/read timeout") + .expect("account/rateLimits/read error"); + + assert_eq!(error.id, RequestId::Integer(request_id)); + assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE); + assert_eq!( + error.error.message, + "codex account authentication required to read rate limits" + ); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_account_rate_limits_requires_chatgpt_auth() { + let codex_home = TempDir::new().unwrap(); + + let mut mcp = McpProcess::new(codex_home.path()) + .await + .expect("spawn mcp process"); + timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()) + .await + .expect("initialize timeout") + .expect("initialize request"); + + login_with_api_key(&mut mcp, "sk-test-key").await; + + let request_id = mcp + .send_get_account_rate_limits_request() + .await + .expect("send account/rateLimits/read"); + + let error: JSONRPCError = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_error_message(RequestId::Integer(request_id)), + ) + .await + .expect("account/rateLimits/read timeout") + .expect("account/rateLimits/read error"); + + assert_eq!(error.id, RequestId::Integer(request_id)); + assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE); + assert_eq!( + error.error.message, + "chatgpt authentication required to read rate limits" + ); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_account_rate_limits_returns_snapshot() { + let codex_home = TempDir::new().unwrap(); + write_chatgpt_auth(codex_home.path(), "chatgpt-token", "account-123") + .expect("write chatgpt auth"); + + let server = MockServer::start().await; + let server_url = server.uri(); + write_chatgpt_base_url(codex_home.path(), &server_url).expect("write chatgpt base url"); + + let primary_reset_iso = "2025-01-01T00:02:00Z"; + let secondary_reset_iso = "2025-01-01T01:00:00Z"; + let response_body = json!({ + "plan_type": "pro", + "rate_limit": { + "allowed": true, + "limit_reached": false, + "primary_window": { + "used_percent": 42, + "limit_window_seconds": 3600, + "reset_after_seconds": 120, + "reset_at": primary_reset_iso, + }, + "secondary_window": { + "used_percent": 5, + "limit_window_seconds": 86400, + "reset_after_seconds": 43200, + "reset_at": secondary_reset_iso, + } + } + }); + + Mock::given(method("GET")) + .and(path("/api/codex/usage")) + .and(header("authorization", "Bearer chatgpt-token")) + .and(header("chatgpt-account-id", "account-123")) + .respond_with(ResponseTemplate::new(200).set_body_json(response_body)) + .mount(&server) + .await; + + let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]) + .await + .expect("spawn mcp process"); + timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()) + .await + .expect("initialize timeout") + .expect("initialize request"); + + let request_id = mcp + .send_get_account_rate_limits_request() + .await + .expect("send account/rateLimits/read"); + + let response: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(request_id)), + ) + .await + .expect("account/rateLimits/read timeout") + .expect("account/rateLimits/read response"); + + let received: GetAccountRateLimitsResponse = + to_response(response).expect("deserialize rate limit response"); + + let expected = 
GetAccountRateLimitsResponse { + rate_limits: RateLimitSnapshot { + primary: Some(RateLimitWindow { + used_percent: 42.0, + window_minutes: Some(60), + resets_at: Some(primary_reset_iso.to_string()), + }), + secondary: Some(RateLimitWindow { + used_percent: 5.0, + window_minutes: Some(1440), + resets_at: Some(secondary_reset_iso.to_string()), + }), + }, + }; + assert_eq!(received, expected); +} + +#[expect(clippy::expect_used)] +async fn login_with_api_key(mcp: &mut McpProcess, api_key: &str) { + let request_id = mcp + .send_login_api_key_request(LoginApiKeyParams { + api_key: api_key.to_string(), + }) + .await + .expect("send loginApiKey"); + + timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(request_id)), + ) + .await + .expect("loginApiKey timeout") + .expect("loginApiKey response"); +} + +fn write_chatgpt_base_url(codex_home: &Path, base_url: &str) -> std::io::Result<()> { + let config_toml = codex_home.join("config.toml"); + std::fs::write(config_toml, format!("chatgpt_base_url = \"{base_url}\"\n")) +} + +fn write_chatgpt_auth( + codex_home: &Path, + access_token: &str, + account_id: &str, +) -> std::io::Result<()> { + let auth_path = get_auth_file(codex_home); + let id_token_raw = encode_chatgpt_id_token("pro"); + let id_token = parse_id_token(&id_token_raw).map_err(std::io::Error::other)?; + let auth = AuthDotJson { + openai_api_key: None, + tokens: Some(TokenData { + id_token, + access_token: access_token.to_string(), + refresh_token: "refresh-token".to_string(), + account_id: Some(account_id.to_string()), + }), + last_refresh: Some(Utc::now()), + }; + write_auth_json(&auth_path, &auth) +} + +fn encode_chatgpt_id_token(plan_type: &str) -> String { + let header = json!({ "alg": "none", "typ": "JWT" }); + let payload = json!({ + "https://api.openai.com/auth": { + "chatgpt_plan_type": plan_type + } + }); + let header_b64 = URL_SAFE_NO_PAD.encode( + serde_json::to_vec(&header).unwrap_or_else(|err| panic!("serialize jwt header: {err}")), + ); + let payload_b64 = URL_SAFE_NO_PAD.encode( + serde_json::to_vec(&payload).unwrap_or_else(|err| panic!("serialize jwt payload: {err}")), + ); + let signature_b64 = URL_SAFE_NO_PAD.encode(b"signature"); + format!("{header_b64}.{payload_b64}.{signature_b64}") +} diff --git a/codex-rs/app-server/tests/suite/user_agent.rs b/codex-rs/app-server/tests/suite/user_agent.rs index 95a0b1a3e0b..9a432dda0f1 100644 --- a/codex-rs/app-server/tests/suite/user_agent.rs +++ b/codex-rs/app-server/tests/suite/user_agent.rs @@ -34,8 +34,10 @@ async fn get_user_agent_returns_current_codex_user_agent() { .expect("getUserAgent response"); let os_info = os_info::get(); + let originator = codex_core::default_client::originator(); let user_agent = format!( - "codex_cli_rs/0.0.0 ({} {}; {}) {} (codex-app-server-tests; 0.1.0)", + "{}/0.0.0 ({} {}; {}) {} (codex-app-server-tests; 0.1.0)", + originator.value.as_str(), os_info.os_type(), os_info.version(), os_info.architecture().unwrap_or("unknown"), diff --git a/codex-rs/backend-client/src/client.rs b/codex-rs/backend-client/src/client.rs index 06ad00cb806..4bb1460eb32 100644 --- a/codex-rs/backend-client/src/client.rs +++ b/codex-rs/backend-client/src/client.rs @@ -1,5 +1,6 @@ use crate::types::CodeTaskDetailsResponse; use crate::types::PaginatedListTaskListItem; +use crate::types::RateLimitStatusPayload; use crate::types::TurnAttemptsSiblingTurnsResponse; use anyhow::Result; use reqwest::header::AUTHORIZATION; @@ -138,6 +139,16 @@ impl Client { } } + pub async fn 
get_rate_limits(&self) -> Result<RateLimitStatusPayload> {
+        let url = match self.path_style {
+            PathStyle::CodexApi => format!("{}/api/codex/usage", self.base_url),
+            PathStyle::ChatGptApi => format!("{}/wham/usage", self.base_url),
+        };
+        let req = self.http.get(&url).headers(self.headers());
+        let (body, ct) = self.exec_request(req, "GET", &url).await?;
+        self.decode_json::<RateLimitStatusPayload>(&url, &ct, &body)
+    }
+
     pub async fn list_tasks(
         &self,
         limit: Option,
diff --git a/codex-rs/backend-client/src/types.rs b/codex-rs/backend-client/src/types.rs
index 8e2dfb21267..9f196f9c2f9 100644
--- a/codex-rs/backend-client/src/types.rs
+++ b/codex-rs/backend-client/src/types.rs
@@ -1,4 +1,8 @@
 pub use codex_backend_openapi_models::models::PaginatedListTaskListItem;
+pub use codex_backend_openapi_models::models::PlanType;
+pub use codex_backend_openapi_models::models::RateLimitStatusDetails;
+pub use codex_backend_openapi_models::models::RateLimitStatusPayload;
+pub use codex_backend_openapi_models::models::RateLimitWindowSnapshot;
 pub use codex_backend_openapi_models::models::TaskListItem;
 
 use serde::Deserialize;
diff --git a/codex-rs/codex-backend-openapi-models/Cargo.toml b/codex-rs/codex-backend-openapi-models/Cargo.toml
index 811ee72d8f5..cdf58108127 100644
--- a/codex-rs/codex-backend-openapi-models/Cargo.toml
+++ b/codex-rs/codex-backend-openapi-models/Cargo.toml
@@ -15,3 +15,4 @@ path = "src/lib.rs"
 [dependencies]
 serde = { version = "1", features = ["derive"] }
 serde_json = "1"
+serde_with = "3"
diff --git a/codex-rs/codex-backend-openapi-models/src/models/mod.rs b/codex-rs/codex-backend-openapi-models/src/models/mod.rs
index e2cb972f101..96348d72c2f 100644
--- a/codex-rs/codex-backend-openapi-models/src/models/mod.rs
+++ b/codex-rs/codex-backend-openapi-models/src/models/mod.rs
@@ -3,6 +3,7 @@
 // Currently export only the types referenced by the workspace
 // The process for this will change
 
+// Cloud Tasks
 pub mod code_task_details_response;
 pub use self::code_task_details_response::CodeTaskDetailsResponse;
 
@@ -20,3 +21,14 @@ pub use self::task_list_item::TaskListItem;
 
 pub mod paginated_list_task_list_item_;
 pub use self::paginated_list_task_list_item_::PaginatedListTaskListItem;
+
+// Rate Limits
+pub mod rate_limit_status_payload;
+pub use self::rate_limit_status_payload::PlanType;
+pub use self::rate_limit_status_payload::RateLimitStatusPayload;
+
+pub mod rate_limit_status_details;
+pub use self::rate_limit_status_details::RateLimitStatusDetails;
+
+pub mod rate_limit_window_snapshot;
+pub use self::rate_limit_window_snapshot::RateLimitWindowSnapshot;
diff --git a/codex-rs/codex-backend-openapi-models/src/models/rate_limit_status_details.rs b/codex-rs/codex-backend-openapi-models/src/models/rate_limit_status_details.rs
new file mode 100644
index 00000000000..ca9fdfe2406
--- /dev/null
+++ b/codex-rs/codex-backend-openapi-models/src/models/rate_limit_status_details.rs
@@ -0,0 +1,46 @@
+/*
+ * codex-backend
+ *
+ * codex-backend
+ *
+ * The version of the OpenAPI document: 0.0.1
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+use crate::models;
+use serde::Deserialize;
+use serde::Serialize;
+
+#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
+pub struct RateLimitStatusDetails {
+    #[serde(rename = "allowed")]
+    pub allowed: bool,
+    #[serde(rename = "limit_reached")]
+    pub limit_reached: bool,
+    #[serde(
+        rename = "primary_window",
+        default,
+        with = "::serde_with::rust::double_option",
+        skip_serializing_if = "Option::is_none"
+    )]
+    pub primary_window: Option<Option<Box<models::RateLimitWindowSnapshot>>>,
+    #[serde(
+        rename = "secondary_window",
+        default,
+        with = "::serde_with::rust::double_option",
+        skip_serializing_if = "Option::is_none"
+    )]
+    pub secondary_window: Option<Option<Box<models::RateLimitWindowSnapshot>>>,
+}
+
+impl RateLimitStatusDetails {
+    pub fn new(allowed: bool, limit_reached: bool) -> RateLimitStatusDetails {
+        RateLimitStatusDetails {
+            allowed,
+            limit_reached,
+            primary_window: None,
+            secondary_window: None,
+        }
+    }
+}
diff --git a/codex-rs/codex-backend-openapi-models/src/models/rate_limit_status_payload.rs b/codex-rs/codex-backend-openapi-models/src/models/rate_limit_status_payload.rs
new file mode 100644
index 00000000000..d2af76f4d7d
--- /dev/null
+++ b/codex-rs/codex-backend-openapi-models/src/models/rate_limit_status_payload.rs
@@ -0,0 +1,65 @@
+/*
+ * codex-backend
+ *
+ * codex-backend
+ *
+ * The version of the OpenAPI document: 0.0.1
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+use crate::models;
+use serde::Deserialize;
+use serde::Serialize;
+
+#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
+pub struct RateLimitStatusPayload {
+    #[serde(rename = "plan_type")]
+    pub plan_type: PlanType,
+    #[serde(
+        rename = "rate_limit",
+        default,
+        with = "::serde_with::rust::double_option",
+        skip_serializing_if = "Option::is_none"
+    )]
+    pub rate_limit: Option<Option<Box<models::RateLimitStatusDetails>>>,
+}
+
+impl RateLimitStatusPayload {
+    pub fn new(plan_type: PlanType) -> RateLimitStatusPayload {
+        RateLimitStatusPayload {
+            plan_type,
+            rate_limit: None,
+        }
+    }
+}
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
+pub enum PlanType {
+    #[serde(rename = "free")]
+    Free,
+    #[serde(rename = "go")]
+    Go,
+    #[serde(rename = "plus")]
+    Plus,
+    #[serde(rename = "pro")]
+    Pro,
+    #[serde(rename = "team")]
+    Team,
+    #[serde(rename = "business")]
+    Business,
+    #[serde(rename = "education")]
+    Education,
+    #[serde(rename = "quorum")]
+    Quorum,
+    #[serde(rename = "enterprise")]
+    Enterprise,
+    #[serde(rename = "edu")]
+    Edu,
+}
+
+impl Default for PlanType {
+    fn default() -> PlanType {
+        Self::Free
+    }
+}
diff --git a/codex-rs/codex-backend-openapi-models/src/models/rate_limit_window_snapshot.rs b/codex-rs/codex-backend-openapi-models/src/models/rate_limit_window_snapshot.rs
new file mode 100644
index 00000000000..4151ff12b0f
--- /dev/null
+++ b/codex-rs/codex-backend-openapi-models/src/models/rate_limit_window_snapshot.rs
@@ -0,0 +1,40 @@
+/*
+ * codex-backend
+ *
+ * codex-backend
+ *
+ * The version of the OpenAPI document: 0.0.1
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+use serde::Deserialize;
+use serde::Serialize;
+
+#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
+pub struct RateLimitWindowSnapshot {
+    #[serde(rename = "used_percent")]
+    pub used_percent: i32,
+    #[serde(rename = "limit_window_seconds")]
+    pub limit_window_seconds: i32,
+    #[serde(rename = "reset_after_seconds")]
+    pub reset_after_seconds: i32,
+    #[serde(rename = "reset_at", deserialize_with = "Option::deserialize")]
+    pub reset_at: Option<String>,
+}
+
+impl RateLimitWindowSnapshot {
+    pub fn new(
+        used_percent: i32,
+        limit_window_seconds: i32,
+        reset_after_seconds: i32,
+        reset_at: Option<String>,
+    ) -> RateLimitWindowSnapshot {
+        RateLimitWindowSnapshot {
+            used_percent,
+            limit_window_seconds,
+            reset_after_seconds,
+            reset_at,
+        }
+    }
+}
diff --git a/codex-rs/core/src/client.rs b/codex-rs/core/src/client.rs
index 0d48a87dcd5..5ed496215aa 100644
--- a/codex-rs/core/src/client.rs
+++ b/codex-rs/core/src/client.rs
@@ -1,17 +1,18 @@
 use std::io::BufRead;
 use std::path::Path;
+use
std::sync::Arc; use std::sync::OnceLock; use std::time::Duration; -use crate::AuthManager; -use crate::auth::CodexAuth; -use crate::error::ConnectionFailedError; -use crate::error::ResponseStreamFailed; -use crate::error::RetryLimitReachedError; -use crate::error::UnexpectedResponseError; use bytes::Bytes; +use chrono::DateTime; +use chrono::Utc; use codex_app_server_protocol::AuthMode; +use codex_otel::otel_event_manager::OtelEventManager; use codex_protocol::ConversationId; +use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig; +use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig; +use codex_protocol::models::ResponseItem; use eventsource_stream::Eventsource; use futures::prelude::*; use regex_lite::Regex; @@ -27,6 +28,8 @@ use tracing::debug; use tracing::trace; use tracing::warn; +use crate::AuthManager; +use crate::auth::CodexAuth; use crate::chat_completions::AggregateStreamExt; use crate::chat_completions::stream_chat_completions; use crate::client_common::Prompt; @@ -38,7 +41,11 @@ use crate::client_common::create_text_param_for_request; use crate::config::Config; use crate::default_client::create_client; use crate::error::CodexErr; +use crate::error::ConnectionFailedError; +use crate::error::ResponseStreamFailed; use crate::error::Result; +use crate::error::RetryLimitReachedError; +use crate::error::UnexpectedResponseError; use crate::error::UsageLimitReachedError; use crate::flags::CODEX_RS_SSE_FIXTURE; use crate::model_family::ModelFamily; @@ -52,13 +59,6 @@ use crate::protocol::TokenUsage; use crate::state::TaskKind; use crate::token_data::PlanType; use crate::util::backoff; -use chrono::DateTime; -use chrono::Utc; -use codex_otel::otel_event_manager::OtelEventManager; -use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig; -use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig; -use codex_protocol::models::ResponseItem; -use std::sync::Arc; #[derive(Debug, Deserialize)] struct ErrorResponse { @@ -628,7 +628,7 @@ fn parse_rate_limit_window( headers: &HeaderMap, used_percent_header: &str, window_minutes_header: &str, - resets_header: &str, + resets_at_header: &str, ) -> Option { let used_percent: Option = parse_header_f64(headers, used_percent_header); diff --git a/codex-rs/protocol/src/protocol.rs b/codex-rs/protocol/src/protocol.rs index e22981745c2..3cbf5fbae5b 100644 --- a/codex-rs/protocol/src/protocol.rs +++ b/codex-rs/protocol/src/protocol.rs @@ -645,13 +645,13 @@ pub struct TokenCountEvent { pub rate_limits: Option, } -#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)] +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)] pub struct RateLimitSnapshot { pub primary: Option, pub secondary: Option, } -#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)] +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)] pub struct RateLimitWindow { /// Percentage (0-100) of the window that has been consumed. 
pub used_percent: f64, From 7b52ba86a2f8e9bb2bffc9a380a2722768938c4a Mon Sep 17 00:00:00 2001 From: Owen Lin Date: Mon, 20 Oct 2025 09:40:06 -0700 Subject: [PATCH 2/7] move rate limit struct parsing into backend-client --- codex-rs/Cargo.lock | 1 + .../app-server/src/codex_message_processor.rs | 53 +------------------ codex-rs/backend-client/Cargo.toml | 1 + codex-rs/backend-client/src/client.rs | 53 ++++++++++++++++++- 4 files changed, 55 insertions(+), 53 deletions(-) diff --git a/codex-rs/Cargo.lock b/codex-rs/Cargo.lock index 2d1052c9b83..a60ca25d71f 100644 --- a/codex-rs/Cargo.lock +++ b/codex-rs/Cargo.lock @@ -919,6 +919,7 @@ version = "0.0.0" dependencies = [ "anyhow", "codex-backend-openapi-models", + "codex-protocol", "pretty_assertions", "reqwest", "serde", diff --git a/codex-rs/app-server/src/codex_message_processor.rs b/codex-rs/app-server/src/codex_message_processor.rs index 6a22ac91644..558e9383f5e 100644 --- a/codex-rs/app-server/src/codex_message_processor.rs +++ b/codex-rs/app-server/src/codex_message_processor.rs @@ -52,8 +52,6 @@ use codex_app_server_protocol::SetDefaultModelResponse; use codex_app_server_protocol::UserInfoResponse; use codex_app_server_protocol::UserSavedConfig; use codex_backend_client::Client as BackendClient; -use codex_backend_client::types::RateLimitStatusPayload; -use codex_backend_client::types::RateLimitWindowSnapshot; use codex_core::AuthManager; use codex_core::CodexConversation; use codex_core::ConversationManager; @@ -94,7 +92,6 @@ use codex_protocol::models::ContentItem; use codex_protocol::models::ResponseItem; use codex_protocol::protocol::InputMessageKind; use codex_protocol::protocol::RateLimitSnapshot; -use codex_protocol::protocol::RateLimitWindow; use codex_protocol::protocol::USER_MESSAGE_BEGIN; use codex_utils_json_to_toml::json_to_toml; use std::collections::HashMap; @@ -592,16 +589,14 @@ impl CodexMessageProcessor { client = client.with_chatgpt_account_id(account_id); } - let payload = client + client .get_rate_limits() .await .map_err(|err| JSONRPCErrorError { code: INTERNAL_ERROR_CODE, message: format!("failed to fetch codex rate limits: {err}"), data: None, - })?; - - Ok(rate_limit_snapshot_from_payload(payload)) + }) } async fn get_user_saved_config(&self, request_id: RequestId) { @@ -1470,50 +1465,6 @@ async fn derive_config_from_params( Config::load_with_cli_overrides(cli_overrides, overrides).await } -fn rate_limit_snapshot_from_payload(payload: RateLimitStatusPayload) -> RateLimitSnapshot { - let Some(details) = payload - .rate_limit - .and_then(|inner| inner.map(|boxed| *boxed)) - else { - return RateLimitSnapshot { - primary: None, - secondary: None, - }; - }; - - RateLimitSnapshot { - primary: map_rate_limit_window(details.primary_window), - secondary: map_rate_limit_window(details.secondary_window), - } -} - -fn map_rate_limit_window( - window: Option>>, -) -> Option { - let snapshot = match window { - Some(Some(snapshot)) => *snapshot, - _ => return None, - }; - - let used_percent = f64::from(snapshot.used_percent); - let window_minutes = window_minutes_from_seconds(snapshot.limit_window_seconds); - let resets_at = snapshot.reset_at; - Some(RateLimitWindow { - used_percent, - window_minutes, - resets_at, - }) -} - -fn window_minutes_from_seconds(seconds: i32) -> Option { - if seconds <= 0 { - return None; - } - - let seconds_u64 = seconds as u64; - Some(seconds_u64.div_ceil(60)) -} - async fn on_patch_approval_response( event_id: String, receiver: oneshot::Receiver, diff --git a/codex-rs/backend-client/Cargo.toml 
b/codex-rs/backend-client/Cargo.toml index a8e2ee85fb1..93c0a14f21e 100644 --- a/codex-rs/backend-client/Cargo.toml +++ b/codex-rs/backend-client/Cargo.toml @@ -13,6 +13,7 @@ serde = { version = "1", features = ["derive"] } serde_json = "1" reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] } codex-backend-openapi-models = { path = "../codex-backend-openapi-models" } +codex-protocol = { workspace = true } [dev-dependencies] pretty_assertions = "1" diff --git a/codex-rs/backend-client/src/client.rs b/codex-rs/backend-client/src/client.rs index 4bb1460eb32..78d7c3080e5 100644 --- a/codex-rs/backend-client/src/client.rs +++ b/codex-rs/backend-client/src/client.rs @@ -1,8 +1,11 @@ use crate::types::CodeTaskDetailsResponse; use crate::types::PaginatedListTaskListItem; use crate::types::RateLimitStatusPayload; +use crate::types::RateLimitWindowSnapshot; use crate::types::TurnAttemptsSiblingTurnsResponse; use anyhow::Result; +use codex_protocol::protocol::RateLimitSnapshot; +use codex_protocol::protocol::RateLimitWindow; use reqwest::header::AUTHORIZATION; use reqwest::header::CONTENT_TYPE; use reqwest::header::HeaderMap; @@ -139,14 +142,15 @@ impl Client { } } - pub async fn get_rate_limits(&self) -> Result { + pub async fn get_rate_limits(&self) -> Result { let url = match self.path_style { PathStyle::CodexApi => format!("{}/api/codex/usage", self.base_url), PathStyle::ChatGptApi => format!("{}/wham/usage", self.base_url), }; let req = self.http.get(&url).headers(self.headers()); let (body, ct) = self.exec_request(req, "GET", &url).await?; - self.decode_json::(&url, &ct, &body) + let payload: RateLimitStatusPayload = self.decode_json(&url, &ct, &body)?; + Ok(Self::rate_limit_snapshot_from_payload(payload)) } pub async fn list_tasks( @@ -252,4 +256,49 @@ impl Client { Err(e) => anyhow::bail!("Decode error for {url}: {e}; content-type={ct}; body={body}"), } } + + // rate limit helpers + fn rate_limit_snapshot_from_payload(payload: RateLimitStatusPayload) -> RateLimitSnapshot { + let Some(details) = payload + .rate_limit + .and_then(|inner| inner.map(|boxed| *boxed)) + else { + return RateLimitSnapshot { + primary: None, + secondary: None, + }; + }; + + RateLimitSnapshot { + primary: Self::map_rate_limit_window(details.primary_window), + secondary: Self::map_rate_limit_window(details.secondary_window), + } + } + + fn map_rate_limit_window( + window: Option>>, + ) -> Option { + let snapshot = match window { + Some(Some(snapshot)) => *snapshot, + _ => return None, + }; + + let used_percent = f64::from(snapshot.used_percent); + let window_minutes = Self::window_minutes_from_seconds(snapshot.limit_window_seconds); + let resets_at = snapshot.reset_at; + Some(RateLimitWindow { + used_percent, + window_minutes, + resets_at, + }) + } + + fn window_minutes_from_seconds(seconds: i32) -> Option { + if seconds <= 0 { + return None; + } + + let seconds_u64 = seconds as u64; + Some(seconds_u64.div_ceil(60)) + } } From 0e90a4a6ba87c31b9bb275630c12aac76b4acbb9 Mon Sep 17 00:00:00 2001 From: Owen Lin Date: Mon, 20 Oct 2025 09:46:00 -0700 Subject: [PATCH 3/7] use anyhow result for rate limits tests --- .../app-server/tests/suite/rate_limits.rs | 75 +++++++++++-------- 1 file changed, 42 insertions(+), 33 deletions(-) diff --git a/codex-rs/app-server/tests/suite/rate_limits.rs b/codex-rs/app-server/tests/suite/rate_limits.rs index 4fc208884da..e6d6e18adf4 100644 --- a/codex-rs/app-server/tests/suite/rate_limits.rs +++ b/codex-rs/app-server/tests/suite/rate_limits.rs @@ -1,5 
+1,7 @@ use std::path::Path; +use anyhow::Context; +use anyhow::Result; use app_test_support::McpProcess; use app_test_support::to_response; use base64::Engine; @@ -32,29 +34,29 @@ const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs const INVALID_REQUEST_ERROR_CODE: i64 = -32600; #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn get_account_rate_limits_requires_auth() { - let codex_home = TempDir::new().unwrap(); +async fn get_account_rate_limits_requires_auth() -> Result<()> { + let codex_home = TempDir::new().context("create codex home tempdir")?; let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]) .await - .expect("spawn mcp process"); + .context("spawn mcp process")?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()) .await - .expect("initialize timeout") - .expect("initialize request"); + .context("initialize timeout")? + .context("initialize request")?; let request_id = mcp .send_get_account_rate_limits_request() .await - .expect("send account/rateLimits/read"); + .context("send account/rateLimits/read")?; let error: JSONRPCError = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_error_message(RequestId::Integer(request_id)), ) .await - .expect("account/rateLimits/read timeout") - .expect("account/rateLimits/read error"); + .context("account/rateLimits/read timeout")? + .context("account/rateLimits/read error")?; assert_eq!(error.id, RequestId::Integer(request_id)); assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE); @@ -62,34 +64,36 @@ async fn get_account_rate_limits_requires_auth() { error.error.message, "codex account authentication required to read rate limits" ); + + Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn get_account_rate_limits_requires_chatgpt_auth() { - let codex_home = TempDir::new().unwrap(); +async fn get_account_rate_limits_requires_chatgpt_auth() -> Result<()> { + let codex_home = TempDir::new().context("create codex home tempdir")?; let mut mcp = McpProcess::new(codex_home.path()) .await - .expect("spawn mcp process"); + .context("spawn mcp process")?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()) .await - .expect("initialize timeout") - .expect("initialize request"); + .context("initialize timeout")? + .context("initialize request")?; - login_with_api_key(&mut mcp, "sk-test-key").await; + login_with_api_key(&mut mcp, "sk-test-key").await?; let request_id = mcp .send_get_account_rate_limits_request() .await - .expect("send account/rateLimits/read"); + .context("send account/rateLimits/read")?; let error: JSONRPCError = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_error_message(RequestId::Integer(request_id)), ) .await - .expect("account/rateLimits/read timeout") - .expect("account/rateLimits/read error"); + .context("account/rateLimits/read timeout")? 
+ .context("account/rateLimits/read error")?; assert_eq!(error.id, RequestId::Integer(request_id)); assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE); @@ -97,17 +101,19 @@ async fn get_account_rate_limits_requires_chatgpt_auth() { error.error.message, "chatgpt authentication required to read rate limits" ); + + Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn get_account_rate_limits_returns_snapshot() { - let codex_home = TempDir::new().unwrap(); +async fn get_account_rate_limits_returns_snapshot() -> Result<()> { + let codex_home = TempDir::new().context("create codex home tempdir")?; write_chatgpt_auth(codex_home.path(), "chatgpt-token", "account-123") - .expect("write chatgpt auth"); + .context("write chatgpt auth")?; let server = MockServer::start().await; let server_url = server.uri(); - write_chatgpt_base_url(codex_home.path(), &server_url).expect("write chatgpt base url"); + write_chatgpt_base_url(codex_home.path(), &server_url).context("write chatgpt base url")?; let primary_reset_iso = "2025-01-01T00:02:00Z"; let secondary_reset_iso = "2025-01-01T01:00:00Z"; @@ -141,27 +147,27 @@ async fn get_account_rate_limits_returns_snapshot() { let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]) .await - .expect("spawn mcp process"); + .context("spawn mcp process")?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()) .await - .expect("initialize timeout") - .expect("initialize request"); + .context("initialize timeout")? + .context("initialize request")?; let request_id = mcp .send_get_account_rate_limits_request() .await - .expect("send account/rateLimits/read"); + .context("send account/rateLimits/read")?; let response: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await - .expect("account/rateLimits/read timeout") - .expect("account/rateLimits/read response"); + .context("account/rateLimits/read timeout")? + .context("account/rateLimits/read response")?; let received: GetAccountRateLimitsResponse = - to_response(response).expect("deserialize rate limit response"); + to_response(response).context("deserialize rate limit response")?; let expected = GetAccountRateLimitsResponse { rate_limits: RateLimitSnapshot { @@ -178,24 +184,27 @@ async fn get_account_rate_limits_returns_snapshot() { }, }; assert_eq!(received, expected); + + Ok(()) } -#[expect(clippy::expect_used)] -async fn login_with_api_key(mcp: &mut McpProcess, api_key: &str) { +async fn login_with_api_key(mcp: &mut McpProcess, api_key: &str) -> Result<()> { let request_id = mcp .send_login_api_key_request(LoginApiKeyParams { api_key: api_key.to_string(), }) .await - .expect("send loginApiKey"); + .context("send loginApiKey")?; timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await - .expect("loginApiKey timeout") - .expect("loginApiKey response"); + .context("loginApiKey timeout")? 
+ .context("loginApiKey response")?; + + Ok(()) } fn write_chatgpt_base_url(codex_home: &Path, base_url: &str) -> std::io::Result<()> { From 1655c7ee91472d21b4ffd62073ad808350e10f3e Mon Sep 17 00:00:00 2001 From: Owen Lin Date: Mon, 20 Oct 2025 09:47:52 -0700 Subject: [PATCH 4/7] check against constant in user_agent.rs --- codex-rs/app-server/tests/suite/user_agent.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/codex-rs/app-server/tests/suite/user_agent.rs b/codex-rs/app-server/tests/suite/user_agent.rs index 9a432dda0f1..95a0b1a3e0b 100644 --- a/codex-rs/app-server/tests/suite/user_agent.rs +++ b/codex-rs/app-server/tests/suite/user_agent.rs @@ -34,10 +34,8 @@ async fn get_user_agent_returns_current_codex_user_agent() { .expect("getUserAgent response"); let os_info = os_info::get(); - let originator = codex_core::default_client::originator(); let user_agent = format!( - "{}/0.0.0 ({} {}; {}) {} (codex-app-server-tests; 0.1.0)", - originator.value.as_str(), + "codex_cli_rs/0.0.0 ({} {}; {}) {} (codex-app-server-tests; 0.1.0)", os_info.os_type(), os_info.version(), os_info.architecture().unwrap_or("unknown"), From 81e01a3d409169dcb57719bdab24dee44df8d3ad Mon Sep 17 00:00:00 2001 From: Owen Lin Date: Mon, 20 Oct 2025 10:02:20 -0700 Subject: [PATCH 5/7] helper for passing in codex auth into backend-client --- codex-rs/Cargo.lock | 1 + .../app-server/src/codex_message_processor.rs | 27 +++++-------------- codex-rs/backend-client/Cargo.toml | 1 + codex-rs/backend-client/src/client.rs | 13 +++++++++ 4 files changed, 21 insertions(+), 21 deletions(-) diff --git a/codex-rs/Cargo.lock b/codex-rs/Cargo.lock index a60ca25d71f..e8252c8efc7 100644 --- a/codex-rs/Cargo.lock +++ b/codex-rs/Cargo.lock @@ -919,6 +919,7 @@ version = "0.0.0" dependencies = [ "anyhow", "codex-backend-openapi-models", + "codex-core", "codex-protocol", "pretty_assertions", "reqwest", diff --git a/codex-rs/app-server/src/codex_message_processor.rs b/codex-rs/app-server/src/codex_message_processor.rs index 558e9383f5e..1e0e7b34d40 100644 --- a/codex-rs/app-server/src/codex_message_processor.rs +++ b/codex-rs/app-server/src/codex_message_processor.rs @@ -566,29 +566,14 @@ impl CodexMessageProcessor { }); } - let token = auth.get_token().await.map_err(|err| JSONRPCErrorError { - code: INTERNAL_ERROR_CODE, - message: format!("failed to read codex auth token: {err}"), - data: None, - })?; - - let mut client = - BackendClient::new(self.config.chatgpt_base_url.clone()).map_err(|err| { - JSONRPCErrorError { - code: INTERNAL_ERROR_CODE, - message: format!("failed to construct backend client: {err}"), - data: None, - } + let client = BackendClient::from_auth(self.config.chatgpt_base_url.clone(), &auth) + .await + .map_err(|err| JSONRPCErrorError { + code: INTERNAL_ERROR_CODE, + message: format!("failed to construct backend client: {err}"), + data: None, })?; - client = client - .with_user_agent(get_codex_user_agent()) - .with_bearer_token(token); - - if let Some(account_id) = auth.get_account_id() { - client = client.with_chatgpt_account_id(account_id); - } - client .get_rate_limits() .await diff --git a/codex-rs/backend-client/Cargo.toml b/codex-rs/backend-client/Cargo.toml index 93c0a14f21e..0cf802399ce 100644 --- a/codex-rs/backend-client/Cargo.toml +++ b/codex-rs/backend-client/Cargo.toml @@ -14,6 +14,7 @@ serde_json = "1" reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] } codex-backend-openapi-models = { path = "../codex-backend-openapi-models" } codex-protocol = { 
workspace = true } +codex-core = { workspace = true } [dev-dependencies] pretty_assertions = "1" diff --git a/codex-rs/backend-client/src/client.rs b/codex-rs/backend-client/src/client.rs index 78d7c3080e5..76203de98ed 100644 --- a/codex-rs/backend-client/src/client.rs +++ b/codex-rs/backend-client/src/client.rs @@ -4,6 +4,8 @@ use crate::types::RateLimitStatusPayload; use crate::types::RateLimitWindowSnapshot; use crate::types::TurnAttemptsSiblingTurnsResponse; use anyhow::Result; +use codex_core::auth::CodexAuth; +use codex_core::default_client::get_codex_user_agent; use codex_protocol::protocol::RateLimitSnapshot; use codex_protocol::protocol::RateLimitWindow; use reqwest::header::AUTHORIZATION; @@ -68,6 +70,17 @@ impl Client { }) } + pub async fn from_auth(base_url: impl Into, auth: &CodexAuth) -> Result { + let token = auth.get_token().await.map_err(anyhow::Error::from)?; + let mut client = Self::new(base_url)? + .with_user_agent(get_codex_user_agent()) + .with_bearer_token(token); + if let Some(account_id) = auth.get_account_id() { + client = client.with_chatgpt_account_id(account_id); + } + Ok(client) + } + pub fn with_bearer_token(mut self, token: impl Into) -> Self { self.bearer_token = Some(token.into()); self From 93bf41212beccdf997515c3e594d033bb46eb07b Mon Sep 17 00:00:00 2001 From: Owen Lin Date: Mon, 20 Oct 2025 10:17:55 -0700 Subject: [PATCH 6/7] create auth_fixtures.rs helpers --- codex-rs/Cargo.lock | 3 + codex-rs/app-server/tests/common/Cargo.toml | 3 + .../app-server/tests/common/auth_fixtures.rs | 131 ++++++++++++++++++ codex-rs/app-server/tests/common/lib.rs | 5 + .../app-server/tests/suite/rate_limits.rs | 60 ++------ codex-rs/app-server/tests/suite/user_info.rs | 45 ++---- 6 files changed, 161 insertions(+), 86 deletions(-) create mode 100644 codex-rs/app-server/tests/common/auth_fixtures.rs diff --git a/codex-rs/Cargo.lock b/codex-rs/Cargo.lock index e8252c8efc7..1c99a235b6f 100644 --- a/codex-rs/Cargo.lock +++ b/codex-rs/Cargo.lock @@ -182,7 +182,10 @@ version = "0.0.0" dependencies = [ "anyhow", "assert_cmd", + "base64", + "chrono", "codex-app-server-protocol", + "codex-core", "serde", "serde_json", "tokio", diff --git a/codex-rs/app-server/tests/common/Cargo.toml b/codex-rs/app-server/tests/common/Cargo.toml index 306b1e187ed..ece8174dd94 100644 --- a/codex-rs/app-server/tests/common/Cargo.toml +++ b/codex-rs/app-server/tests/common/Cargo.toml @@ -9,7 +9,10 @@ path = "lib.rs" [dependencies] anyhow = { workspace = true } assert_cmd = { workspace = true } +base64 = { workspace = true } +chrono = { workspace = true } codex-app-server-protocol = { workspace = true } +codex-core = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } tokio = { workspace = true, features = [ diff --git a/codex-rs/app-server/tests/common/auth_fixtures.rs b/codex-rs/app-server/tests/common/auth_fixtures.rs new file mode 100644 index 00000000000..bc1df74da27 --- /dev/null +++ b/codex-rs/app-server/tests/common/auth_fixtures.rs @@ -0,0 +1,131 @@ +use std::path::Path; + +use anyhow::Context; +use anyhow::Result; +use base64::Engine; +use base64::engine::general_purpose::URL_SAFE_NO_PAD; +use chrono::DateTime; +use chrono::Utc; +use codex_core::auth::AuthDotJson; +use codex_core::auth::get_auth_file; +use codex_core::auth::write_auth_json; +use codex_core::token_data::TokenData; +use codex_core::token_data::parse_id_token; +use serde_json::json; + +/// Builder for writing a fake ChatGPT auth.json in tests. 
+#[derive(Debug, Clone)] +pub struct ChatGptAuthFixture { + access_token: String, + refresh_token: String, + account_id: Option, + claims: ChatGptIdTokenClaims, + last_refresh: Option>>, +} + +impl ChatGptAuthFixture { + pub fn new(access_token: impl Into) -> Self { + Self { + access_token: access_token.into(), + refresh_token: "refresh-token".to_string(), + account_id: None, + claims: ChatGptIdTokenClaims::default(), + last_refresh: None, + } + } + + pub fn refresh_token(mut self, refresh_token: impl Into) -> Self { + self.refresh_token = refresh_token.into(); + self + } + + pub fn account_id(mut self, account_id: impl Into) -> Self { + self.account_id = Some(account_id.into()); + self + } + + pub fn plan_type(mut self, plan_type: impl Into) -> Self { + self.claims.plan_type = Some(plan_type.into()); + self + } + + pub fn email(mut self, email: impl Into) -> Self { + self.claims.email = Some(email.into()); + self + } + + pub fn last_refresh(mut self, last_refresh: Option>) -> Self { + self.last_refresh = Some(last_refresh); + self + } + + pub fn claims(mut self, claims: ChatGptIdTokenClaims) -> Self { + self.claims = claims; + self + } +} + +#[derive(Debug, Clone, Default)] +pub struct ChatGptIdTokenClaims { + pub email: Option, + pub plan_type: Option, +} + +impl ChatGptIdTokenClaims { + pub fn new() -> Self { + Self::default() + } + + pub fn email(mut self, email: impl Into) -> Self { + self.email = Some(email.into()); + self + } + + pub fn plan_type(mut self, plan_type: impl Into) -> Self { + self.plan_type = Some(plan_type.into()); + self + } +} + +pub fn encode_id_token(claims: &ChatGptIdTokenClaims) -> Result { + let header = json!({ "alg": "none", "typ": "JWT" }); + let mut payload = serde_json::Map::new(); + if let Some(email) = &claims.email { + payload.insert("email".to_string(), json!(email)); + } + if let Some(plan_type) = &claims.plan_type { + payload.insert( + "https://api.openai.com/auth".to_string(), + json!({ "chatgpt_plan_type": plan_type }), + ); + } + let payload = serde_json::Value::Object(payload); + + let header_b64 = + URL_SAFE_NO_PAD.encode(serde_json::to_vec(&header).context("serialize jwt header")?); + let payload_b64 = + URL_SAFE_NO_PAD.encode(serde_json::to_vec(&payload).context("serialize jwt payload")?); + let signature_b64 = URL_SAFE_NO_PAD.encode(b"signature"); + Ok(format!("{header_b64}.{payload_b64}.{signature_b64}")) +} + +pub fn write_chatgpt_auth(codex_home: &Path, fixture: ChatGptAuthFixture) -> Result<()> { + let id_token_raw = encode_id_token(&fixture.claims)?; + let id_token = parse_id_token(&id_token_raw).context("parse id token")?; + let tokens = TokenData { + id_token, + access_token: fixture.access_token, + refresh_token: fixture.refresh_token, + account_id: fixture.account_id, + }; + + let last_refresh = fixture.last_refresh.unwrap_or_else(|| Some(Utc::now())); + + let auth = AuthDotJson { + openai_api_key: None, + tokens: Some(tokens), + last_refresh, + }; + + write_auth_json(&get_auth_file(codex_home), &auth).context("write auth.json") +} diff --git a/codex-rs/app-server/tests/common/lib.rs b/codex-rs/app-server/tests/common/lib.rs index 2acb52de69f..71b27190c2f 100644 --- a/codex-rs/app-server/tests/common/lib.rs +++ b/codex-rs/app-server/tests/common/lib.rs @@ -1,7 +1,12 @@ +mod auth_fixtures; mod mcp_process; mod mock_model_server; mod responses; +pub use auth_fixtures::ChatGptAuthFixture; +pub use auth_fixtures::ChatGptIdTokenClaims; +pub use auth_fixtures::encode_id_token; +pub use auth_fixtures::write_chatgpt_auth; use 
codex_app_server_protocol::JSONRPCResponse; pub use mcp_process::McpProcess; pub use mock_model_server::create_mock_chat_completions_server; diff --git a/codex-rs/app-server/tests/suite/rate_limits.rs b/codex-rs/app-server/tests/suite/rate_limits.rs index e6d6e18adf4..6f236dcea46 100644 --- a/codex-rs/app-server/tests/suite/rate_limits.rs +++ b/codex-rs/app-server/tests/suite/rate_limits.rs @@ -1,26 +1,19 @@ -use std::path::Path; - use anyhow::Context; use anyhow::Result; +use app_test_support::ChatGptAuthFixture; use app_test_support::McpProcess; use app_test_support::to_response; -use base64::Engine; -use base64::engine::general_purpose::URL_SAFE_NO_PAD; -use chrono::Utc; +use app_test_support::write_chatgpt_auth; use codex_app_server_protocol::GetAccountRateLimitsResponse; use codex_app_server_protocol::JSONRPCError; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::LoginApiKeyParams; use codex_app_server_protocol::RequestId; -use codex_core::auth::AuthDotJson; -use codex_core::auth::get_auth_file; -use codex_core::auth::write_auth_json; -use codex_core::token_data::TokenData; -use codex_core::token_data::parse_id_token; use codex_protocol::protocol::RateLimitSnapshot; use codex_protocol::protocol::RateLimitWindow; use pretty_assertions::assert_eq; use serde_json::json; +use std::path::Path; use tempfile::TempDir; use tokio::time::timeout; use wiremock::Mock; @@ -108,8 +101,13 @@ async fn get_account_rate_limits_requires_chatgpt_auth() -> Result<()> { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_account_rate_limits_returns_snapshot() -> Result<()> { let codex_home = TempDir::new().context("create codex home tempdir")?; - write_chatgpt_auth(codex_home.path(), "chatgpt-token", "account-123") - .context("write chatgpt auth")?; + write_chatgpt_auth( + codex_home.path(), + ChatGptAuthFixture::new("chatgpt-token") + .account_id("account-123") + .plan_type("pro"), + ) + .context("write chatgpt auth")?; let server = MockServer::start().await; let server_url = server.uri(); @@ -211,41 +209,3 @@ fn write_chatgpt_base_url(codex_home: &Path, base_url: &str) -> std::io::Result< let config_toml = codex_home.join("config.toml"); std::fs::write(config_toml, format!("chatgpt_base_url = \"{base_url}\"\n")) } - -fn write_chatgpt_auth( - codex_home: &Path, - access_token: &str, - account_id: &str, -) -> std::io::Result<()> { - let auth_path = get_auth_file(codex_home); - let id_token_raw = encode_chatgpt_id_token("pro"); - let id_token = parse_id_token(&id_token_raw).map_err(std::io::Error::other)?; - let auth = AuthDotJson { - openai_api_key: None, - tokens: Some(TokenData { - id_token, - access_token: access_token.to_string(), - refresh_token: "refresh-token".to_string(), - account_id: Some(account_id.to_string()), - }), - last_refresh: Some(Utc::now()), - }; - write_auth_json(&auth_path, &auth) -} - -fn encode_chatgpt_id_token(plan_type: &str) -> String { - let header = json!({ "alg": "none", "typ": "JWT" }); - let payload = json!({ - "https://api.openai.com/auth": { - "chatgpt_plan_type": plan_type - } - }); - let header_b64 = URL_SAFE_NO_PAD.encode( - serde_json::to_vec(&header).unwrap_or_else(|err| panic!("serialize jwt header: {err}")), - ); - let payload_b64 = URL_SAFE_NO_PAD.encode( - serde_json::to_vec(&payload).unwrap_or_else(|err| panic!("serialize jwt payload: {err}")), - ); - let signature_b64 = URL_SAFE_NO_PAD.encode(b"signature"); - format!("{header_b64}.{payload_b64}.{signature_b64}") -} diff --git 
index edd041e553c..b730b77fed8 100644
--- a/codex-rs/app-server/tests/suite/user_info.rs
+++ b/codex-rs/app-server/tests/suite/user_info.rs
@@ -1,20 +1,13 @@
 use std::time::Duration;
 
-use anyhow::Context;
+use app_test_support::ChatGptAuthFixture;
 use app_test_support::McpProcess;
 use app_test_support::to_response;
-use base64::Engine;
-use base64::engine::general_purpose::URL_SAFE_NO_PAD;
+use app_test_support::write_chatgpt_auth;
 use codex_app_server_protocol::JSONRPCResponse;
 use codex_app_server_protocol::RequestId;
 use codex_app_server_protocol::UserInfoResponse;
-use codex_core::auth::AuthDotJson;
-use codex_core::auth::get_auth_file;
-use codex_core::auth::write_auth_json;
-use codex_core::token_data::IdTokenInfo;
-use codex_core::token_data::TokenData;
 use pretty_assertions::assert_eq;
-use serde_json::json;
 use tempfile::TempDir;
 use tokio::time::timeout;
 
@@ -24,22 +17,13 @@ const DEFAULT_READ_TIMEOUT: Duration = Duration::from_secs(10);
 async fn user_info_returns_email_from_auth_json() {
     let codex_home = TempDir::new().expect("create tempdir");
 
-    let auth_path = get_auth_file(codex_home.path());
-    let mut id_token = IdTokenInfo::default();
-    id_token.email = Some("user@example.com".to_string());
-    id_token.raw_jwt = encode_id_token_with_email("user@example.com").expect("encode id token");
-
-    let auth = AuthDotJson {
-        openai_api_key: None,
-        tokens: Some(TokenData {
-            id_token,
-            access_token: "access".to_string(),
-            refresh_token: "refresh".to_string(),
-            account_id: None,
-        }),
-        last_refresh: None,
-    };
-    write_auth_json(&auth_path, &auth).expect("write auth.json");
+    write_chatgpt_auth(
+        codex_home.path(),
+        ChatGptAuthFixture::new("access")
+            .refresh_token("refresh")
+            .email("user@example.com"),
+    )
+    .expect("write chatgpt auth");
 
     let mut mcp = McpProcess::new(codex_home.path())
         .await
@@ -65,14 +49,3 @@ async fn user_info_returns_email_from_auth_json() {
 
     assert_eq!(received, expected);
 }
-
-fn encode_id_token_with_email(email: &str) -> anyhow::Result<String> {
-    let header_b64 = URL_SAFE_NO_PAD.encode(
-        serde_json::to_vec(&json!({ "alg": "none", "typ": "JWT" }))
-            .context("serialize jwt header")?,
-    );
-    let payload =
-        serde_json::to_vec(&json!({ "email": email })).context("serialize jwt payload")?;
-    let payload_b64 = URL_SAFE_NO_PAD.encode(payload);
-    Ok(format!("{header_b64}.{payload_b64}.signature"))
-}

From e43ab2887bf71598ba024f406029ca14ac6108d6 Mon Sep 17 00:00:00 2001
From: Owen Lin
Date: Mon, 20 Oct 2025 13:29:13 -0700
Subject: [PATCH 7/7] update to use int timestamps

---
 codex-rs/app-server-protocol/src/protocol.rs   |  2 +-
 codex-rs/app-server/tests/suite/rate_limits.rs | 16 ++++++++++------
 codex-rs/backend-client/src/client.rs          |  8 ++++----
 .../src/models/rate_limit_window_snapshot.rs   |  6 +++---
 codex-rs/core/src/client.rs                    |  2 +-
 5 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/codex-rs/app-server-protocol/src/protocol.rs b/codex-rs/app-server-protocol/src/protocol.rs
index 080c1062b9e..3574110bd92 100644
--- a/codex-rs/app-server-protocol/src/protocol.rs
+++ b/codex-rs/app-server-protocol/src/protocol.rs
@@ -433,7 +433,7 @@ pub struct GetAccountRateLimitsResponse {
     pub rate_limits: RateLimitSnapshot,
 }
 
-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
 #[serde(rename_all = "camelCase")]
 pub struct GetAuthStatusResponse {
     #[serde(skip_serializing_if = "Option::is_none")]
"Option::is_none")] diff --git a/codex-rs/app-server/tests/suite/rate_limits.rs b/codex-rs/app-server/tests/suite/rate_limits.rs index 6f236dcea46..302fd603490 100644 --- a/codex-rs/app-server/tests/suite/rate_limits.rs +++ b/codex-rs/app-server/tests/suite/rate_limits.rs @@ -113,8 +113,12 @@ async fn get_account_rate_limits_returns_snapshot() -> Result<()> { let server_url = server.uri(); write_chatgpt_base_url(codex_home.path(), &server_url).context("write chatgpt base url")?; - let primary_reset_iso = "2025-01-01T00:02:00Z"; - let secondary_reset_iso = "2025-01-01T01:00:00Z"; + let primary_reset_timestamp = chrono::DateTime::parse_from_rfc3339("2025-01-01T00:02:00Z") + .expect("parse primary reset timestamp") + .timestamp(); + let secondary_reset_timestamp = chrono::DateTime::parse_from_rfc3339("2025-01-01T01:00:00Z") + .expect("parse secondary reset timestamp") + .timestamp(); let response_body = json!({ "plan_type": "pro", "rate_limit": { @@ -124,13 +128,13 @@ async fn get_account_rate_limits_returns_snapshot() -> Result<()> { "used_percent": 42, "limit_window_seconds": 3600, "reset_after_seconds": 120, - "reset_at": primary_reset_iso, + "reset_at": primary_reset_timestamp, }, "secondary_window": { "used_percent": 5, "limit_window_seconds": 86400, "reset_after_seconds": 43200, - "reset_at": secondary_reset_iso, + "reset_at": secondary_reset_timestamp, } } }); @@ -172,12 +176,12 @@ async fn get_account_rate_limits_returns_snapshot() -> Result<()> { primary: Some(RateLimitWindow { used_percent: 42.0, window_minutes: Some(60), - resets_at: Some(primary_reset_iso.to_string()), + resets_at: Some(primary_reset_timestamp), }), secondary: Some(RateLimitWindow { used_percent: 5.0, window_minutes: Some(1440), - resets_at: Some(secondary_reset_iso.to_string()), + resets_at: Some(secondary_reset_timestamp), }), }, }; diff --git a/codex-rs/backend-client/src/client.rs b/codex-rs/backend-client/src/client.rs index 76203de98ed..28a51598eb4 100644 --- a/codex-rs/backend-client/src/client.rs +++ b/codex-rs/backend-client/src/client.rs @@ -298,7 +298,7 @@ impl Client { let used_percent = f64::from(snapshot.used_percent); let window_minutes = Self::window_minutes_from_seconds(snapshot.limit_window_seconds); - let resets_at = snapshot.reset_at; + let resets_at = Some(i64::from(snapshot.reset_at)); Some(RateLimitWindow { used_percent, window_minutes, @@ -306,12 +306,12 @@ impl Client { }) } - fn window_minutes_from_seconds(seconds: i32) -> Option { + fn window_minutes_from_seconds(seconds: i32) -> Option { if seconds <= 0 { return None; } - let seconds_u64 = seconds as u64; - Some(seconds_u64.div_ceil(60)) + let seconds_i64 = i64::from(seconds); + Some((seconds_i64 + 59) / 60) } } diff --git a/codex-rs/codex-backend-openapi-models/src/models/rate_limit_window_snapshot.rs b/codex-rs/codex-backend-openapi-models/src/models/rate_limit_window_snapshot.rs index 4151ff12b0f..4fc04f4be66 100644 --- a/codex-rs/codex-backend-openapi-models/src/models/rate_limit_window_snapshot.rs +++ b/codex-rs/codex-backend-openapi-models/src/models/rate_limit_window_snapshot.rs @@ -19,8 +19,8 @@ pub struct RateLimitWindowSnapshot { pub limit_window_seconds: i32, #[serde(rename = "reset_after_seconds")] pub reset_after_seconds: i32, - #[serde(rename = "reset_at", deserialize_with = "Option::deserialize")] - pub reset_at: Option, + #[serde(rename = "reset_at")] + pub reset_at: i32, } impl RateLimitWindowSnapshot { @@ -28,7 +28,7 @@ impl RateLimitWindowSnapshot { used_percent: i32, limit_window_seconds: i32, reset_after_seconds: 
-        reset_at: Option<String>,
+        reset_at: i32,
     ) -> RateLimitWindowSnapshot {
         RateLimitWindowSnapshot {
             used_percent,
diff --git a/codex-rs/core/src/client.rs b/codex-rs/core/src/client.rs
index 5ed496215aa..7d864078bfc 100644
--- a/codex-rs/core/src/client.rs
+++ b/codex-rs/core/src/client.rs
@@ -634,7 +634,7 @@ fn parse_rate_limit_window(
     used_percent.and_then(|used_percent| {
         let window_minutes = parse_header_i64(headers, window_minutes_header);
-        let resets_at = parse_header_i64(headers, resets_header);
+        let resets_at = parse_header_i64(headers, resets_at_header);
 
         let has_data = used_percent != 0.0
             || window_minutes.is_some_and(|minutes| minutes != 0)