diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index 259e59ab310..d020dfe7344 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -1,4 +1,4 @@
-FROM ubuntu:22.04
+FROM ubuntu:24.04
ARG DEBIAN_FRONTEND=noninteractive
# enable 'universe' because musl-tools & clang live there
@@ -11,19 +11,17 @@ RUN apt-get update && \
RUN apt-get update && \
apt-get install -y --no-install-recommends \
build-essential curl git ca-certificates \
- pkg-config clang musl-tools libssl-dev && \
+ pkg-config clang musl-tools libssl-dev just && \
rm -rf /var/lib/apt/lists/*
-# non-root dev user
-ARG USER=dev
-ARG UID=1000
-RUN useradd -m -u $UID $USER
-USER $USER
+# Ubuntu 24.04 ships with user 'ubuntu' already created with UID 1000.
+USER ubuntu
# install Rust + musl target as dev user
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal && \
- ~/.cargo/bin/rustup target add aarch64-unknown-linux-musl
+ ~/.cargo/bin/rustup target add aarch64-unknown-linux-musl && \
+ ~/.cargo/bin/rustup component add clippy rustfmt
-ENV PATH="/home/${USER}/.cargo/bin:${PATH}"
+ENV PATH="/home/ubuntu/.cargo/bin:${PATH}"
WORKDIR /workspace
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 17aee91421f..f2768684840 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -15,15 +15,13 @@
"CARGO_TARGET_DIR": "${containerWorkspaceFolder}/codex-rs/target-arm64"
},
- "remoteUser": "dev",
+ "remoteUser": "ubuntu",
"customizations": {
"vscode": {
"settings": {
- "terminal.integrated.defaultProfile.linux": "bash"
+ "terminal.integrated.defaultProfile.linux": "bash"
},
- "extensions": [
- "rust-lang.rust-analyzer"
- ],
+ "extensions": ["rust-lang.rust-analyzer"]
}
}
}
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 24697f2f78f..9d8675fa5fd 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -74,7 +74,12 @@ jobs:
GH_TOKEN: ${{ github.token }}
run: pnpm stage-release
- - name: Ensure README.md contains only ASCII and certain Unicode code points
+ - name: Ensure root README.md contains only ASCII and certain Unicode code points
run: ./scripts/asciicheck.py README.md
- - name: Check README ToC
+ - name: Check root README ToC
run: python3 scripts/readme_toc.py README.md
+
+ - name: Ensure codex-cli/README.md contains only ASCII and certain Unicode code points
+ run: ./scripts/asciicheck.py codex-cli/README.md
+ - name: Check codex-cli/README ToC
+ run: python3 scripts/readme_toc.py codex-cli/README.md
diff --git a/.github/workflows/rust-release.yml b/.github/workflows/rust-release.yml
index 83f160757ab..6531af6a523 100644
--- a/.github/workflows/rust-release.yml
+++ b/.github/workflows/rust-release.yml
@@ -15,9 +15,6 @@ concurrency:
group: ${{ github.workflow }}
cancel-in-progress: true
-env:
- TAG_REGEX: '^rust-v[0-9]+\.[0-9]+\.[0-9]+$'
-
jobs:
tag-check:
runs-on: ubuntu-latest
@@ -33,8 +30,8 @@ jobs:
# 1. Must be a tag and match the regex
[[ "${GITHUB_REF_TYPE}" == "tag" ]] \
|| { echo "❌ Not a tag push"; exit 1; }
- [[ "${GITHUB_REF_NAME}" =~ ${TAG_REGEX} ]] \
- || { echo "❌ Tag '${GITHUB_REF_NAME}' != ${TAG_REGEX}"; exit 1; }
+ [[ "${GITHUB_REF_NAME}" =~ ^rust-v[0-9]+\.[0-9]+\.[0-9]+(-(alpha|beta)(\.[0-9]+)?)?$ ]] \
+ || { echo "❌ Tag '${GITHUB_REF_NAME}' doesn't match expected format"; exit 1; }
# 2. Extract versions
tag_ver="${GITHUB_REF_NAME#rust-v}"
@@ -160,9 +157,7 @@ jobs:
release:
needs: build
name: release
- runs-on: ubuntu-24.04
- env:
- RELEASE_TAG: codex-rs-${{ github.sha }}-${{ github.run_attempt }}-${{ github.ref_name }}
+ runs-on: ubuntu-latest
steps:
- uses: actions/download-artifact@v4
@@ -172,9 +167,19 @@ jobs:
- name: List
run: ls -R dist/
- - uses: softprops/action-gh-release@v2
+ - name: Define release name
+ id: release_name
+ run: |
+ # Extract the version from the tag name, which is in the format
+ # "rust-v0.1.0".
+ version="${GITHUB_REF_NAME#rust-v}"
+ echo "name=${version}" >> $GITHUB_OUTPUT
+
+ - name: Create GitHub Release
+ uses: softprops/action-gh-release@v2
with:
- tag_name: ${{ env.RELEASE_TAG }}
+ name: ${{ steps.release_name.outputs.name }}
+ tag_name: ${{ github.ref_name }}
files: dist/**
# For now, tag releases as "prerelease" because we are not claiming
# the Rust CLI is stable yet.
@@ -184,5 +189,5 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
- tag: ${{ env.RELEASE_TAG }}
+ tag: ${{ github.ref_name }}
config: .github/dotslash-config.json
diff --git a/README.md b/README.md
index 24f362f77f8..23eeb7c86c0 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,11 @@
OpenAI Codex CLI
Lightweight coding agent that runs in your terminal
-
npm i -g @openai/codex
+
brew install codex
-
+This is the home of the **Codex CLI**, which is a coding agent from OpenAI that runs locally on your computer. If you are looking for the _cloud-based agent_ from OpenAI, **Codex [Web]**, see .
+
+
---
@@ -14,6 +16,8 @@
- [Experimental technology disclaimer](#experimental-technology-disclaimer)
- [Quickstart](#quickstart)
+ - [OpenAI API Users](#openai-api-users)
+ - [OpenAI Plus/Pro Users](#openai-pluspro-users)
- [Why Codex?](#why-codex)
- [Security model & permissions](#security-model--permissions)
- [Platform sandboxing details](#platform-sandboxing-details)
@@ -21,24 +25,17 @@
- [CLI reference](#cli-reference)
- [Memory & project docs](#memory--project-docs)
- [Non-interactive / CI mode](#non-interactive--ci-mode)
+- [Model Context Protocol (MCP)](#model-context-protocol-mcp)
- [Tracing / verbose logging](#tracing--verbose-logging)
- [Recipes](#recipes)
- [Installation](#installation)
-- [Configuration guide](#configuration-guide)
- - [Basic configuration parameters](#basic-configuration-parameters)
- - [Custom AI provider configuration](#custom-ai-provider-configuration)
- - [History configuration](#history-configuration)
- - [Configuration examples](#configuration-examples)
- - [Full configuration example](#full-configuration-example)
- - [Custom instructions](#custom-instructions)
- - [Environment variables setup](#environment-variables-setup)
+ - [DotSlash](#dotslash)
+- [Configuration](#configuration)
- [FAQ](#faq)
- [Zero data retention (ZDR) usage](#zero-data-retention-zdr-usage)
- [Codex open source fund](#codex-open-source-fund)
- [Contributing](#contributing)
- [Development workflow](#development-workflow)
- - [Git hooks with Husky](#git-hooks-with-husky)
- - [Debugging](#debugging)
- [Writing high-impact code changes](#writing-high-impact-code-changes)
- [Opening a pull request](#opening-a-pull-request)
- [Review process](#review-process)
@@ -47,8 +44,6 @@
- [Contributor license agreement (CLA)](#contributor-license-agreement-cla)
- [Quick fixes](#quick-fixes)
- [Releasing `codex`](#releasing-codex)
- - [Alternative build options](#alternative-build-options)
- - [Nix flake development](#nix-flake-development)
- [Security & responsible AI](#security--responsible-ai)
- [License](#license)
@@ -74,51 +69,91 @@ Help us improve by filing issues or submitting PRs (see the section below for ho
Install globally:
```shell
-npm install -g @openai/codex
+brew install codex
```
+Or go to the [latest GitHub Release](https://github.com/openai/codex/releases/latest) and download the appropriate binary for your platform.
+
+### OpenAI API Users
+
Next, set your OpenAI API key as an environment variable:
```shell
export OPENAI_API_KEY="your-api-key-here"
```
-> **Note:** This command sets the key only for your current terminal session. You can add the `export` line to your shell's configuration file (e.g., `~/.zshrc`) but we recommend setting for the session. **Tip:** You can also place your API key into a `.env` file at the root of your project:
->
-> ```env
-> OPENAI_API_KEY=your-api-key-here
-> ```
->
-> The CLI will automatically load variables from `.env` (via `dotenv/config`).
+> [!NOTE]
+> This command sets the key only for your current terminal session. You can add the `export` line to your shell's configuration file (e.g., `~/.zshrc`), but we recommend setting it for the session.
+
+### OpenAI Plus/Pro Users
+
+If you have a paid OpenAI account, run the following to start the login process:
+
+```
+codex login
+```
+
+If you complete the process successfully, you should have a `~/.codex/auth.json` file that contains the credentials that Codex will use.
+
+If you encounter problems with the login flow, please comment on .
-Use --provider to use other models
-
-> Codex also allows you to use other providers that support the OpenAI Chat Completions API. You can set the provider in the config file or use the `--provider` flag. The possible options for `--provider` are:
->
-> - openai (default)
-> - openrouter
-> - azure
-> - gemini
-> - ollama
-> - mistral
-> - deepseek
-> - xai
-> - groq
-> - arceeai
-> - any other provider that is compatible with the OpenAI API
->
-> If you use a provider other than OpenAI, you will need to set the API key for the provider in the config file or in the environment variable as:
->
-> ```shell
-> export _API_KEY="your-api-key-here"
-> ```
->
-> If you use a provider not listed above, you must also set the base URL for the provider:
->
-> ```shell
-> export _BASE_URL="https://your-provider-api-base-url"
-> ```
+Use --profile to use other models
+
+Codex also allows you to use other providers that support the OpenAI Chat Completions (or Responses) API.
+
+To do so, you must first define custom [providers](./config.md#model_providers) in `~/.codex/config.toml`. For example, the provider for a standard Ollama setup would be defined as follows:
+
+```toml
+[model_providers.ollama]
+name = "Ollama"
+base_url = "http://localhost:11434/v1"
+```
+
+The `base_url` will have `/chat/completions` appended to it to build the full URL for the request.
+
+For providers that also require an `Authorization` header of the form `Bearer SECRET`, an `env_key` can be specified, which indicates the environment variable to read to use as the value of `SECRET` when making a request:
+
+```toml
+[model_providers.openrouter]
+name = "OpenRouter"
+base_url = "https://openrouter.ai/api/v1"
+env_key = "OPENROUTER_API_KEY"
+```
+
+Providers that speak the Responses API are also supported by adding `wire_api = "responses"` as part of the definition. Accessing OpenAI models via Azure is an example of such a provider, though it also requires specifying additional `query_params` that need to be appended to the request URL:
+
+```toml
+[model_providers.azure]
+name = "Azure"
+# Make sure you set the appropriate subdomain for this URL.
+base_url = "https://YOUR_PROJECT_NAME.openai.azure.com/openai"
+env_key = "AZURE_OPENAI_API_KEY" # Or "OPENAI_API_KEY", whichever you use.
+# Newer versions appear to support the responses API, see https://github.com/openai/codex/pull/1321
+query_params = { api-version = "2025-04-01-preview" }
+wire_api = "responses"
+```
+
+Once you have defined a provider you wish to use, you can configure it as your default provider as follows:
+
+```toml
+model_provider = "azure"
+```
+
+> [!TIP]
+> If you find yourself experimenting with a variety of models and providers, then you likely want to invest in defining a _profile_ for each configuration like so:
+
+```toml
+[profiles.o3]
+model_provider = "azure"
+model = "o3"
+
+[profiles.mistral]
+model_provider = "ollama"
+model = "mistral"
+```
+
+This way, you can specify one command-line argument (e.g., `--profile o3`, `--profile mistral`) to override multiple settings together.
@@ -136,7 +171,7 @@ codex "explain this codebase to me"
```
```shell
-codex --approval-mode full-auto "create the fanciest todo-list app"
+codex --full-auto "create the fanciest todo-list app"
```
That's it - Codex will scaffold a file, run it inside a sandbox, install any
@@ -162,41 +197,35 @@ And it's **fully open-source** so you can see and contribute to how it develops!
## Security model & permissions
-Codex lets you decide _how much autonomy_ the agent receives and auto-approval policy via the
-`--approval-mode` flag (or the interactive onboarding prompt):
+Codex lets you decide _how much autonomy_ you want to grant the agent. The following options can be configured independently:
-| Mode | What the agent may do without asking | Still requires approval |
-| ------------------------- | --------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- |
-| **Suggest** (default) |
Execute shell commands (network disabled, writes limited to your workdir) | - |
+- [`approval_policy`](./codex-rs/config.md#approval_policy) determines when you should be prompted to approve whether Codex can execute a command
+- [`sandbox`](./codex-rs/config.md#sandbox) determines the _sandbox policy_ that Codex uses to execute untrusted commands
-In **Full Auto** every command is run **network-disabled** and confined to the
-current working directory (plus temporary files) for defense-in-depth. Codex
-will also show a warning/confirmation if you start in **auto-edit** or
-**full-auto** while the directory is _not_ tracked by Git, so you always have a
-safety net.
+By default, Codex runs with `approval_policy = "untrusted"` and `sandbox.mode = "read-only"`, which means that:
-Coming soon: you'll be able to whitelist specific commands to auto-execute with
-the network enabled, once we're confident in additional safeguards.
+- The user is prompted to approve every command not on the set of "trusted" commands built into Codex (`cat`, `ls`, etc.)
+- Approved commands are run outside of a sandbox because, in this case, user approval implies "trust".
-### Platform sandboxing details
+However, running Codex with the `--full-auto` option changes the configuration to `approval_policy = "on-failure"` and `sandbox.mode = "workspace-write"`, which means that:
+
+- Codex does not initially ask for user approval before running an individual command.
+- However, when it runs a command, the command is run under a sandbox in which:
+ - It can read any file on the system.
+ - It can only write files under the current directory (or the directory specified via `--cd`).
+ - Network requests are completely disabled.
+- Only if the command exits with a non-zero exit code will it ask the user for approval. If granted, it will re-attempt the command outside of the sandbox. (A common case is when Codex cannot `npm install` a dependency because that requires network access.)
+
+Again, these two options can be configured independently. For example, if you want Codex to perform an "exploration" where you are happy for it to read anything it wants but you never want to be prompted, you could run Codex with `approval_policy = "never"` and `sandbox.mode = "read-only"`.
-The hardening mechanism Codex uses depends on your OS:
+### Platform sandboxing details
-- **macOS 12+** - commands are wrapped with **Apple Seatbelt** (`sandbox-exec`).
+The mechanism Codex uses to implement the sandbox policy depends on your OS:
- - Everything is placed in a read-only jail except for a small set of
- writable roots (`$PWD`, `$TMPDIR`, `~/.codex`, etc.).
- - Outbound network is _fully blocked_ by default - even if a child process
- tries to `curl` somewhere it will fail.
+- **macOS 12+** uses **Apple Seatbelt** and runs commands using `sandbox-exec` with a profile (`-p`) that corresponds to the `sandbox.mode` that was specified.
+- **Linux** uses a combination of Landlock/seccomp APIs to enforce the `sandbox` configuration.
-- **Linux** - there is no sandboxing by default.
- We recommend using Docker for sandboxing, where Codex launches itself inside a **minimal
- container image** and mounts your repo _read/write_ at the same path. A
- custom `iptables`/`ipset` firewall script denies all egress except the
- OpenAI API. This gives you deterministic, reproducible runs without needing
- root on the host. You can use the [`run_in_container.sh`](./codex-cli/scripts/run_in_container.sh) script to set up the sandbox.
+Note that when running Linux in a containerized environment such as Docker, sandboxing may not work if the host/container configuration does not support the necessary Landlock/seccomp APIs. In such cases, we recommend configuring your Docker container so that it provides the sandbox guarantees you are looking for and then running `codex` with `sandbox.mode = "danger-full-access"` (or more simply, the `--dangerously-bypass-approvals-and-sandbox` flag) within your container.
---
@@ -205,24 +234,20 @@ The hardening mechanism Codex uses depends on your OS:
| Requirement | Details |
| --------------------------- | --------------------------------------------------------------- |
| Operating systems | macOS 12+, Ubuntu 20.04+/Debian 10+, or Windows 11 **via WSL2** |
-| Node.js | **22 or newer** (LTS recommended) |
| Git (optional, recommended) | 2.23+ for built-in PR helpers |
| RAM | 4-GB minimum (8-GB recommended) |
-> Never run `sudo npm install -g`; fix npm permissions instead.
-
---
## CLI reference
-| Command | Purpose | Example |
-| ------------------------------------ | ----------------------------------- | ------------------------------------ |
-| `codex` | Interactive REPL | `codex` |
-| `codex "..."` | Initial prompt for interactive REPL | `codex "fix lint errors"` |
-| `codex -q "..."` | Non-interactive "quiet mode" | `codex -q --json "explain utils.ts"` |
-| `codex completion ` | Print shell completion script | `codex completion bash` |
+| Command | Purpose | Example |
+| ------------------ | ---------------------------------- | ------------------------------- |
+| `codex` | Interactive TUI | `codex` |
+| `codex "..."` | Initial prompt for interactive TUI | `codex "fix lint errors"` |
+| `codex exec "..."` | Non-interactive "automation mode" | `codex exec "explain utils.ts"` |
-Key flags: `--model/-m`, `--approval-mode/-a`, `--quiet/-q`, and `--notify`.
+Key flags: `--model/-m`, `--ask-for-approval/-a`.
---
@@ -234,8 +259,6 @@ You can give Codex extra instructions and guidance using `AGENTS.md` files. Code
2. `AGENTS.md` at repo root - shared project notes
3. `AGENTS.md` in the current working directory - sub-folder/feature specifics
-Disable loading of these files with `--no-project-doc` or the environment variable `CODEX_DISABLE_PROJECT_DOC=1`.
-
---
## Non-interactive / CI mode
@@ -245,21 +268,40 @@ Run Codex head-less in pipelines. Example GitHub Action step:
```yaml
- name: Update changelog via Codex
run: |
- npm install -g @openai/codex
+ npm install -g @openai/codex@native # Note: we plan to drop the need for `@native`.
export OPENAI_API_KEY="${{ secrets.OPENAI_KEY }}"
- codex -a auto-edit --quiet "update CHANGELOG for next release"
+ codex exec --full-auto "update CHANGELOG for next release"
+```
+
+## Model Context Protocol (MCP)
+
+The Codex CLI can be configured to leverage MCP servers by defining an [`mcp_servers`](./codex-rs/config.md#mcp_servers) section in `~/.codex/config.toml`. It is intended to mirror how tools such as Claude and Cursor define `mcpServers` in their respective JSON config files, though the Codex format is slightly different since it uses TOML rather than JSON, e.g.:
+
+```toml
+# IMPORTANT: the top-level key is `mcp_servers` rather than `mcpServers`.
+[mcp_servers.server-name]
+command = "npx"
+args = ["-y", "mcp-server"]
+env = { "API_KEY" = "value" }
```
-Set `CODEX_QUIET_MODE=1` to silence interactive UI noise.
+> [!TIP]
+> It is somewhat experimental, but the Codex CLI can also be run as an MCP _server_ via `codex mcp`. If you launch it with an MCP client such as `npx @modelcontextprotocol/inspector codex mcp` and send it a `tools/list` request, you will see that there is only one tool, `codex`, that accepts a grab-bag of inputs, including a catch-all `config` map for anything you might want to override. Feel free to play around with it and provide feedback via GitHub issues.
## Tracing / verbose logging
-Setting the environment variable `DEBUG=true` prints full API request and response details:
+Because Codex is written in Rust, it honors the `RUST_LOG` environment variable to configure its logging behavior.
-```shell
-DEBUG=true codex
+The TUI defaults to `RUST_LOG=codex_core=info,codex_tui=info` and log messages are written to `~/.codex/log/codex-tui.log`, so you can leave the following running in a separate terminal to monitor log messages as they are written:
+
+```
+tail -F ~/.codex/log/codex-tui.log
```
+By comparison, the non-interactive mode (`codex exec`) defaults to `RUST_LOG=error`, but messages are printed inline, so there is no need to monitor a separate file.
+
+See the Rust documentation on [`RUST_LOG`](https://docs.rs/env_logger/latest/env_logger/#enabling-logging) for more information on the configuration options.
+
---
## Recipes
@@ -281,201 +323,70 @@ Below are a few bite-size examples you can copy-paste. Replace the text in quote
## Installation
-From npm (Recommended)
+From brew (Recommended)
```bash
-npm install -g @openai/codex
-# or
-yarn global add @openai/codex
-# or
-bun install -g @openai/codex
-# or
-pnpm add -g @openai/codex
+brew install codex
```
-
+Or go to the [latest GitHub Release](https://github.com/openai/codex/releases/latest) and download the appropriate binary for your platform.
-
-Build from source
+Admittedly, each GitHub Release contains many executables, but in practice, you likely want one of these:
-```bash
-# Clone the repository and navigate to the CLI package
-git clone https://github.com/openai/codex.git
-cd codex/codex-cli
-
-# Enable corepack
-corepack enable
-
-# Install dependencies and build
-pnpm install
-pnpm build
-
-# Linux-only: download prebuilt sandboxing binaries (requires gh and zstd).
-./scripts/install_native_deps.sh
+- macOS
+ - Apple Silicon/arm64: `codex-aarch64-apple-darwin.tar.gz`
+ - x86_64 (older Mac hardware): `codex-x86_64-apple-darwin.tar.gz`
+- Linux
+ - x86_64: `codex-x86_64-unknown-linux-musl.tar.gz`
+ - arm64: `codex-aarch64-unknown-linux-musl.tar.gz`
-# Get the usage and the options
-node ./dist/cli.js --help
+Each archive contains a single entry with the platform baked into the name (e.g., `codex-x86_64-unknown-linux-musl`), so you likely want to rename it to `codex` after extracting it.
-# Run the locally-built CLI directly
-node ./dist/cli.js
+### DotSlash
-# Or link the command globally for convenience
-pnpm link
-```
+The GitHub Release also contains a [DotSlash](https://dotslash-cli.com/) file for the Codex CLI named `codex`. Using a DotSlash file makes it possible to make a lightweight commit to source control to ensure all contributors use the same version of an executable, regardless of what platform they use for development.
----
-
-## Configuration guide
-
-Codex configuration files can be placed in the `~/.codex/` directory, supporting both YAML and JSON formats.
-
-### Basic configuration parameters
-
-| Parameter | Type | Default | Description | Available Options |
-| ------------------- | ------- | ---------- | -------------------------------- | ---------------------------------------------------------------------------------------------- |
-| `model` | string | `o4-mini` | AI model to use | Any model name supporting OpenAI API |
-| `approvalMode` | string | `suggest` | AI assistant's permission mode | `suggest` (suggestions only) `auto-edit` (automatic edits) `full-auto` (fully automatic) |
-| `fullAutoErrorMode` | string | `ask-user` | Error handling in full-auto mode | `ask-user` (prompt for user input) `ignore-and-continue` (ignore and proceed) |
-| `notify` | boolean | `true` | Enable desktop notifications | `true`/`false` |
-
-### Custom AI provider configuration
-
-In the `providers` object, you can configure multiple AI service providers. Each provider requires the following parameters:
-
-| Parameter | Type | Description | Example |
-| --------- | ------ | --------------------------------------- | ----------------------------- |
-| `name` | string | Display name of the provider | `"OpenAI"` |
-| `baseURL` | string | API service URL | `"https://api.openai.com/v1"` |
-| `envKey` | string | Environment variable name (for API key) | `"OPENAI_API_KEY"` |
-
-### History configuration
-
-In the `history` object, you can configure conversation history settings:
-
-| Parameter | Type | Description | Example Value |
-| ------------------- | ------- | ------------------------------------------------------ | ------------- |
-| `maxSize` | number | Maximum number of history entries to save | `1000` |
-| `saveHistory` | boolean | Whether to save history | `true` |
-| `sensitivePatterns` | array | Patterns of sensitive information to filter in history | `[]` |
+
+Build from source
-### Configuration examples
+```bash
+# Clone the repository and navigate to the root of the Cargo workspace.
+git clone https://github.com/openai/codex.git
+cd codex/codex-rs
-1. YAML format (save as `~/.codex/config.yaml`):
+# Install the Rust toolchain, if necessary.
+curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+source "$HOME/.cargo/env"
+rustup component add rustfmt
+rustup component add clippy
-```yaml
-model: o4-mini
-approvalMode: suggest
-fullAutoErrorMode: ask-user
-notify: true
-```
+# Build Codex.
+cargo build
-2. JSON format (save as `~/.codex/config.json`):
+# Launch the TUI with a sample prompt.
+cargo run --bin codex -- "explain this codebase to me"
-```json
-{
- "model": "o4-mini",
- "approvalMode": "suggest",
- "fullAutoErrorMode": "ask-user",
- "notify": true
-}
-```
+# After making changes, ensure the code is clean.
+cargo fmt -- --config imports_granularity=Item
+cargo clippy --tests
-### Full configuration example
-
-Below is a comprehensive example of `config.json` with multiple custom providers:
-
-```json
-{
- "model": "o4-mini",
- "provider": "openai",
- "providers": {
- "openai": {
- "name": "OpenAI",
- "baseURL": "https://api.openai.com/v1",
- "envKey": "OPENAI_API_KEY"
- },
- "azure": {
- "name": "AzureOpenAI",
- "baseURL": "https://YOUR_PROJECT_NAME.openai.azure.com/openai",
- "envKey": "AZURE_OPENAI_API_KEY"
- },
- "openrouter": {
- "name": "OpenRouter",
- "baseURL": "https://openrouter.ai/api/v1",
- "envKey": "OPENROUTER_API_KEY"
- },
- "gemini": {
- "name": "Gemini",
- "baseURL": "https://generativelanguage.googleapis.com/v1beta/openai",
- "envKey": "GEMINI_API_KEY"
- },
- "ollama": {
- "name": "Ollama",
- "baseURL": "http://localhost:11434/v1",
- "envKey": "OLLAMA_API_KEY"
- },
- "mistral": {
- "name": "Mistral",
- "baseURL": "https://api.mistral.ai/v1",
- "envKey": "MISTRAL_API_KEY"
- },
- "deepseek": {
- "name": "DeepSeek",
- "baseURL": "https://api.deepseek.com",
- "envKey": "DEEPSEEK_API_KEY"
- },
- "xai": {
- "name": "xAI",
- "baseURL": "https://api.x.ai/v1",
- "envKey": "XAI_API_KEY"
- },
- "groq": {
- "name": "Groq",
- "baseURL": "https://api.groq.com/openai/v1",
- "envKey": "GROQ_API_KEY"
- },
- "arceeai": {
- "name": "ArceeAI",
- "baseURL": "https://conductor.arcee.ai/v1",
- "envKey": "ARCEEAI_API_KEY"
- }
- },
- "history": {
- "maxSize": 1000,
- "saveHistory": true,
- "sensitivePatterns": []
- }
-}
+# Run the tests.
+cargo test
```
-### Custom instructions
-
-You can create a `~/.codex/AGENTS.md` file to define custom guidance for the agent:
-
-```markdown
-- Always respond with emojis
-- Only use git commands when explicitly requested
-```
+
-### Environment variables setup
+---
-For each AI provider, you need to set the corresponding API key in your environment variables. For example:
+## Configuration
-```bash
-# OpenAI
-export OPENAI_API_KEY="your-api-key-here"
+Codex supports a rich set of configuration options documented in [`codex-rs/config.md`](./codex-rs/config.md).
-# Azure OpenAI
-export AZURE_OPENAI_API_KEY="your-azure-api-key-here"
-export AZURE_OPENAI_API_VERSION="2025-03-01-preview" (Optional)
+By default, Codex loads its configuration from `~/.codex/config.toml`.
-# OpenRouter
-export OPENROUTER_API_KEY="your-openrouter-key-here"
-
-# Similarly for other providers
-```
+Though `--config` can be used to set/override ad-hoc config values for individual invocations of `codex`.
---
@@ -524,7 +435,13 @@ Codex CLI **does** support OpenAI organizations with [Zero Data Retention (ZDR)]
OpenAI rejected the request. Error details: Status: 400, Code: unsupported_parameter, Type: invalid_request_error, Message: 400 Previous response cannot be used for this organization due to Zero Data Retention.
```
-You may need to upgrade to a more recent version with: `npm i -g @openai/codex@latest`
+Ensure you are running `codex` with `--config disable_response_storage=true` or add this line to `~/.codex/config.toml` to avoid specifying the command line option each time:
+
+```toml
+disable_response_storage = true
+```
+
+See [the configuration documentation on `disable_response_storage`](./codex-rs/config.md#disable_response_storage) for details.
---
@@ -549,51 +466,7 @@ More broadly we welcome contributions - whether you are opening your very first
- Create a _topic branch_ from `main` - e.g. `feat/interactive-prompt`.
- Keep your changes focused. Multiple unrelated fixes should be opened as separate PRs.
-- Use `pnpm test:watch` during development for super-fast feedback.
-- We use **Vitest** for unit tests, **ESLint** + **Prettier** for style, and **TypeScript** for type-checking.
-- Before pushing, run the full test/type/lint suite:
-
-### Git hooks with Husky
-
-This project uses [Husky](https://typicode.github.io/husky/) to enforce code quality checks:
-
-- **Pre-commit hook**: Automatically runs lint-staged to format and lint files before committing
-- **Pre-push hook**: Runs tests and type checking before pushing to the remote
-
-These hooks help maintain code quality and prevent pushing code with failing tests. For more details, see [HUSKY.md](./codex-cli/HUSKY.md).
-
-```bash
-pnpm test && pnpm run lint && pnpm run typecheck
-```
-
-- If you have **not** yet signed the Contributor License Agreement (CLA), add a PR comment containing the exact text
-
- ```text
- I have read the CLA Document and I hereby sign the CLA
- ```
-
- The CLA-Assistant bot will turn the PR status green once all authors have signed.
-
-```bash
-# Watch mode (tests rerun on change)
-pnpm test:watch
-
-# Type-check without emitting files
-pnpm typecheck
-
-# Automatically fix lint + prettier issues
-pnpm lint:fix
-pnpm format:fix
-```
-
-### Debugging
-
-To debug the CLI with a visual debugger, do the following in the `codex-cli` folder:
-
-- Run `pnpm run build` to build the CLI, which will generate `cli.js.map` alongside `cli.js` in the `dist` folder.
-- Run the CLI with `node --inspect-brk ./dist/cli.js` The program then waits until a debugger is attached before proceeding. Options:
- - In VS Code, choose **Debug: Attach to Node Process** from the command palette and choose the option in the dropdown with debug port `9229` (likely the first option)
- - Go to in Chrome and find **localhost:9229** and click **trace**
+- Following the [development setup](#development-workflow) instructions above, ensure your change is free of lint warnings and test failures.
### Writing high-impact code changes
@@ -605,7 +478,7 @@ To debug the CLI with a visual debugger, do the following in the `codex-cli` fol
### Opening a pull request
- Fill in the PR template (or include similar information) - **What? Why? How?**
-- Run **all** checks locally (`npm test && npm run lint && npm run typecheck`). CI failures that could have been caught locally slow down the process.
+- Run **all** checks locally (`cargo test && cargo clippy --tests && cargo fmt -- --config imports_granularity=Item`). CI failures that could have been caught locally slow down the process.
- Make sure your branch is up-to-date with `main` and that you have resolved merge conflicts.
- Mark the PR as **Ready for review** only when you believe it is in a merge-able state.
@@ -652,73 +525,22 @@ The **DCO check** blocks merges until every commit in the PR carries the footer
### Releasing `codex`
-To publish a new version of the CLI you first need to stage the npm package. A
-helper script in `codex-cli/scripts/` does all the heavy lifting. Inside the
-`codex-cli` folder run:
-
-```bash
-# Classic, JS implementation that includes small, native binaries for Linux sandboxing.
-pnpm stage-release
-
-# Optionally specify the temp directory to reuse between runs.
-RELEASE_DIR=$(mktemp -d)
-pnpm stage-release --tmp "$RELEASE_DIR"
-
-# "Fat" package that additionally bundles the native Rust CLI binaries for
-# Linux. End-users can then opt-in at runtime by setting CODEX_RUST=1.
-pnpm stage-release --native
-```
-
-Go to the folder where the release is staged and verify that it works as intended. If so, run the following from the temp folder:
-
-```
-cd "$RELEASE_DIR"
-npm publish
-```
+_For admins only._
-### Alternative build options
+Make sure you are on `main` and have no local changes. Then run:
-#### Nix flake development
-
-Prerequisite: Nix >= 2.4 with flakes enabled (`experimental-features = nix-command flakes` in `~/.config/nix/nix.conf`).
-
-Enter a Nix development shell:
-
-```bash
-# Use either one of the commands according to which implementation you want to work with
-nix develop .#codex-cli # For entering codex-cli specific shell
-nix develop .#codex-rs # For entering codex-rs specific shell
-```
-
-This shell includes Node.js, installs dependencies, builds the CLI, and provides a `codex` command alias.
-
-Build and run the CLI directly:
-
-```bash
-# Use either one of the commands according to which implementation you want to work with
-nix build .#codex-cli # For building codex-cli
-nix build .#codex-rs # For building codex-rs
-./result/bin/codex --help
+```shell
+VERSION=0.2.0 # Can also be 0.2.0-alpha.1 or any valid Rust version.
+./codex-rs/scripts/create_github_release.sh "$VERSION"
```
-Run the CLI via the flake app:
+This will make a local commit on top of `main` with `version` set to `$VERSION` in `codex-rs/Cargo.toml` (note that on `main`, we leave the version as `version = "0.0.0"`).
-```bash
-# Use either one of the commands according to which implementation you want to work with
-nix run .#codex-cli # For running codex-cli
-nix run .#codex-rs # For running codex-rs
-```
+This will push the commit using the tag `rust-v${VERSION}`, which in turn kicks off [the release workflow](.github/workflows/rust-release.yml). This will create a new GitHub Release named `$VERSION`.
-Use direnv with flakes
+If everything looks good in the generated GitHub Release, uncheck the **pre-release** box so it is the latest release.
-If you have direnv installed, you can use the following `.envrc` to automatically enter the Nix shell when you `cd` into the project directory:
-
-```bash
-cd codex-rs
-echo "use flake ../flake.nix#codex-cli" >> .envrc && direnv allow
-cd codex-cli
-echo "use flake ../flake.nix#codex-rs" >> .envrc && direnv allow
-```
+Create a PR to update [`Formula/c/codex.rb`](https://github.com/Homebrew/homebrew-core/blob/main/Formula/c/codex.rb) on Homebrew.
---
diff --git a/codex-cli/README.md b/codex-cli/README.md
new file mode 100644
index 00000000000..e988b384ab2
--- /dev/null
+++ b/codex-cli/README.md
@@ -0,0 +1,736 @@
+<h1 align="center">OpenAI Codex CLI</h1>
+<p align="center">Lightweight coding agent that runs in your terminal</p>
+<p align="center"><code>npm i -g @openai/codex</code></p>
+
+> [!IMPORTANT]
+> This is the documentation for the _legacy_ TypeScript implementation of the Codex CLI. It has been superseded by the _Rust_ implementation. See the [README in the root of the Codex repository](https://github.com/openai/codex/blob/main/README.md) for details.
+
+
+
+---
+
+
+Table of contents
+
+
+
+- [Experimental technology disclaimer](#experimental-technology-disclaimer)
+- [Quickstart](#quickstart)
+- [Why Codex?](#why-codex)
+- [Security model & permissions](#security-model--permissions)
+ - [Platform sandboxing details](#platform-sandboxing-details)
+- [System requirements](#system-requirements)
+- [CLI reference](#cli-reference)
+- [Memory & project docs](#memory--project-docs)
+- [Non-interactive / CI mode](#non-interactive--ci-mode)
+- [Tracing / verbose logging](#tracing--verbose-logging)
+- [Recipes](#recipes)
+- [Installation](#installation)
+- [Configuration guide](#configuration-guide)
+ - [Basic configuration parameters](#basic-configuration-parameters)
+ - [Custom AI provider configuration](#custom-ai-provider-configuration)
+ - [History configuration](#history-configuration)
+ - [Configuration examples](#configuration-examples)
+ - [Full configuration example](#full-configuration-example)
+ - [Custom instructions](#custom-instructions)
+ - [Environment variables setup](#environment-variables-setup)
+- [FAQ](#faq)
+- [Zero data retention (ZDR) usage](#zero-data-retention-zdr-usage)
+- [Codex open source fund](#codex-open-source-fund)
+- [Contributing](#contributing)
+ - [Development workflow](#development-workflow)
+ - [Git hooks with Husky](#git-hooks-with-husky)
+ - [Debugging](#debugging)
+ - [Writing high-impact code changes](#writing-high-impact-code-changes)
+ - [Opening a pull request](#opening-a-pull-request)
+ - [Review process](#review-process)
+ - [Community values](#community-values)
+ - [Getting help](#getting-help)
+ - [Contributor license agreement (CLA)](#contributor-license-agreement-cla)
+ - [Quick fixes](#quick-fixes)
+ - [Releasing `codex`](#releasing-codex)
+ - [Alternative build options](#alternative-build-options)
+ - [Nix flake development](#nix-flake-development)
+- [Security & responsible AI](#security--responsible-ai)
+- [License](#license)
+
+
+
+
+
+---
+
+## Experimental technology disclaimer
+
+Codex CLI is an experimental project under active development. It is not yet stable, may contain bugs, incomplete features, or undergo breaking changes. We're building it in the open with the community and welcome:
+
+- Bug reports
+- Feature requests
+- Pull requests
+- Good vibes
+
+Help us improve by filing issues or submitting PRs (see the section below for how to contribute)!
+
+## Quickstart
+
+Install globally:
+
+```shell
+npm install -g @openai/codex
+```
+
+Next, set your OpenAI API key as an environment variable:
+
+```shell
+export OPENAI_API_KEY="your-api-key-here"
+```
+
+> **Note:** This command sets the key only for your current terminal session. You can add the `export` line to your shell's configuration file (e.g., `~/.zshrc`) but we recommend setting for the session. **Tip:** You can also place your API key into a `.env` file at the root of your project:
+>
+> ```env
+> OPENAI_API_KEY=your-api-key-here
+> ```
+>
+> The CLI will automatically load variables from `.env` (via `dotenv/config`).
+
+
+Use --provider to use other models
+
+> Codex also allows you to use other providers that support the OpenAI Chat Completions API. You can set the provider in the config file or use the `--provider` flag. The possible options for `--provider` are:
+>
+> - openai (default)
+> - openrouter
+> - azure
+> - gemini
+> - ollama
+> - mistral
+> - deepseek
+> - xai
+> - groq
+> - arceeai
+> - any other provider that is compatible with the OpenAI API
+>
+> If you use a provider other than OpenAI, you will need to set the API key for the provider in the config file or in the environment variable as:
+>
+> ```shell
+> export <provider>_API_KEY="your-api-key-here"
+> ```
+>
+> If you use a provider not listed above, you must also set the base URL for the provider:
+>
+> ```shell
+> export <provider>_BASE_URL="https://your-provider-api-base-url"
+> ```
+
+
+
+
+Run interactively:
+
+```shell
+codex
+```
+
+Or, run with a prompt as input (and optionally in `Full Auto` mode):
+
+```shell
+codex "explain this codebase to me"
+```
+
+```shell
+codex --approval-mode full-auto "create the fanciest todo-list app"
+```
+
+That's it - Codex will scaffold a file, run it inside a sandbox, install any
+missing dependencies, and show you the live result. Approve the changes and
+they'll be committed to your working directory.
+
+---
+
+## Why Codex?
+
+Codex CLI is built for developers who already **live in the terminal** and want
+ChatGPT-level reasoning **plus** the power to actually run code, manipulate
+files, and iterate - all under version control. In short, it's _chat-driven
+development_ that understands and executes your repo.
+
+- **Zero setup** - bring your OpenAI API key and it just works!
+- **Full auto-approval, while safe + secure** by running network-disabled and directory-sandboxed
+- **Multimodal** - pass in screenshots or diagrams to implement features ✨
+
+And it's **fully open-source** so you can see and contribute to how it develops!
+
+---
+
+## Security model & permissions
+
+Codex lets you decide _how much autonomy_ the agent receives and auto-approval policy via the
+`--approval-mode` flag (or the interactive onboarding prompt):
+
+| Mode | What the agent may do without asking | Still requires approval |
+| ------------------------- | --------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- |
+| **Suggest** (default)     | Read any file in the repo                                                                           | All file **writes/patches**; **any** arbitrary shell commands                                    |
+| **Auto Edit**             | Read files **and** apply-patch writes to files                                                      | **All** shell commands                                                                           |
+| **Full Auto**             | Read/write files; execute shell commands (network disabled, writes limited to your workdir)         | -                                                                                                |
+
+In **Full Auto** every command is run **network-disabled** and confined to the
+current working directory (plus temporary files) for defense-in-depth. Codex
+will also show a warning/confirmation if you start in **auto-edit** or
+**full-auto** while the directory is _not_ tracked by Git, so you always have a
+safety net.
+
+Coming soon: you'll be able to whitelist specific commands to auto-execute with
+the network enabled, once we're confident in additional safeguards.
+
+### Platform sandboxing details
+
+The hardening mechanism Codex uses depends on your OS:
+
+- **macOS 12+** - commands are wrapped with **Apple Seatbelt** (`sandbox-exec`).
+
+ - Everything is placed in a read-only jail except for a small set of
+ writable roots (`$PWD`, `$TMPDIR`, `~/.codex`, etc.).
+ - Outbound network is _fully blocked_ by default - even if a child process
+ tries to `curl` somewhere it will fail.
+
+- **Linux** - there is no sandboxing by default.
+ We recommend using Docker for sandboxing, where Codex launches itself inside a **minimal
+ container image** and mounts your repo _read/write_ at the same path. A
+ custom `iptables`/`ipset` firewall script denies all egress except the
+ OpenAI API. This gives you deterministic, reproducible runs without needing
+ root on the host. You can use the [`run_in_container.sh`](../codex-cli/scripts/run_in_container.sh) script to set up the sandbox.
+
+---
+
+## System requirements
+
+| Requirement | Details |
+| --------------------------- | --------------------------------------------------------------- |
+| Operating systems | macOS 12+, Ubuntu 20.04+/Debian 10+, or Windows 11 **via WSL2** |
+| Node.js | **22 or newer** (LTS recommended) |
+| Git (optional, recommended) | 2.23+ for built-in PR helpers |
+| RAM | 4-GB minimum (8-GB recommended) |
+
+> Never run `sudo npm install -g`; fix npm permissions instead.
+
+---
+
+## CLI reference
+
+| Command | Purpose | Example |
+| ------------------------------------ | ----------------------------------- | ------------------------------------ |
+| `codex` | Interactive REPL | `codex` |
+| `codex "..."` | Initial prompt for interactive REPL | `codex "fix lint errors"` |
+| `codex -q "..."` | Non-interactive "quiet mode" | `codex -q --json "explain utils.ts"` |
+| `codex completion <bash\|zsh\|fish>` | Print shell completion script       | `codex completion bash`              |
+
+Key flags: `--model/-m`, `--approval-mode/-a`, `--quiet/-q`, and `--notify`.
+
+---
+
+## Memory & project docs
+
+You can give Codex extra instructions and guidance using `AGENTS.md` files. Codex looks for `AGENTS.md` files in the following places, and merges them top-down:
+
+1. `~/.codex/AGENTS.md` - personal global guidance
+2. `AGENTS.md` at repo root - shared project notes
+3. `AGENTS.md` in the current working directory - sub-folder/feature specifics
+
+Disable loading of these files with `--no-project-doc` or the environment variable `CODEX_DISABLE_PROJECT_DOC=1`.
+
+---
+
+## Non-interactive / CI mode
+
+Run Codex head-less in pipelines. Example GitHub Action step:
+
+```yaml
+- name: Update changelog via Codex
+ run: |
+ npm install -g @openai/codex
+ export OPENAI_API_KEY="${{ secrets.OPENAI_KEY }}"
+ codex -a auto-edit --quiet "update CHANGELOG for next release"
+```
+
+Set `CODEX_QUIET_MODE=1` to silence interactive UI noise.
+
+## Tracing / verbose logging
+
+Setting the environment variable `DEBUG=true` prints full API request and response details:
+
+```shell
+DEBUG=true codex
+```
+
+---
+
+## Recipes
+
+Below are a few bite-size examples you can copy-paste. Replace the text in quotes with your own task. See the [prompting guide](https://github.com/openai/codex/blob/main/codex-cli/examples/prompting_guide.md) for more tips and usage patterns.
+
+| ✨ | What you type | What happens |
+| --- | ------------------------------------------------------------------------------- | -------------------------------------------------------------------------- |
+| 1 | `codex "Refactor the Dashboard component to React Hooks"` | Codex rewrites the class component, runs `npm test`, and shows the diff. |
+| 2 | `codex "Generate SQL migrations for adding a users table"` | Infers your ORM, creates migration files, and runs them in a sandboxed DB. |
+| 3 | `codex "Write unit tests for utils/date.ts"` | Generates tests, executes them, and iterates until they pass. |
+| 4 | `codex "Bulk-rename *.jpeg -> *.jpg with git mv"` | Safely renames files and updates imports/usages. |
+| 5 | `codex "Explain what this regex does: ^(?=.*[A-Z]).{8,}$"` | Outputs a step-by-step human explanation. |
+| 6 | `codex "Carefully review this repo, and propose 3 high impact well-scoped PRs"` | Suggests impactful PRs in the current codebase. |
+| 7 | `codex "Look for vulnerabilities and create a security review report"` | Finds and explains security bugs. |
+
+---
+
+## Installation
+
+
+From npm (Recommended)
+
+```bash
+npm install -g @openai/codex
+# or
+yarn global add @openai/codex
+# or
+bun install -g @openai/codex
+# or
+pnpm add -g @openai/codex
+```
+
+
+
+
+Build from source
+
+```bash
+# Clone the repository and navigate to the CLI package
+git clone https://github.com/openai/codex.git
+cd codex/codex-cli
+
+# Enable corepack
+corepack enable
+
+# Install dependencies and build
+pnpm install
+pnpm build
+
+# Linux-only: download prebuilt sandboxing binaries (requires gh and zstd).
+./scripts/install_native_deps.sh
+
+# Get the usage and the options
+node ./dist/cli.js --help
+
+# Run the locally-built CLI directly
+node ./dist/cli.js
+
+# Or link the command globally for convenience
+pnpm link
+```
+
+
+
+---
+
+## Configuration guide
+
+Codex configuration files can be placed in the `~/.codex/` directory, supporting both YAML and JSON formats.
+
+### Basic configuration parameters
+
+| Parameter | Type | Default | Description | Available Options |
+| ------------------- | ------- | ---------- | -------------------------------- | ---------------------------------------------------------------------------------------------- |
+| `model` | string | `o4-mini` | AI model to use | Any model name supporting OpenAI API |
+| `approvalMode` | string | `suggest` | AI assistant's permission mode | `suggest` (suggestions only) `auto-edit` (automatic edits) `full-auto` (fully automatic) |
+| `fullAutoErrorMode` | string | `ask-user` | Error handling in full-auto mode | `ask-user` (prompt for user input) `ignore-and-continue` (ignore and proceed) |
+| `notify` | boolean | `true` | Enable desktop notifications | `true`/`false` |
+
+### Custom AI provider configuration
+
+In the `providers` object, you can configure multiple AI service providers. Each provider requires the following parameters:
+
+| Parameter | Type | Description | Example |
+| --------- | ------ | --------------------------------------- | ----------------------------- |
+| `name` | string | Display name of the provider | `"OpenAI"` |
+| `baseURL` | string | API service URL | `"https://api.openai.com/v1"` |
+| `envKey` | string | Environment variable name (for API key) | `"OPENAI_API_KEY"` |
+
+### History configuration
+
+In the `history` object, you can configure conversation history settings:
+
+| Parameter | Type | Description | Example Value |
+| ------------------- | ------- | ------------------------------------------------------ | ------------- |
+| `maxSize` | number | Maximum number of history entries to save | `1000` |
+| `saveHistory` | boolean | Whether to save history | `true` |
+| `sensitivePatterns` | array | Patterns of sensitive information to filter in history | `[]` |
+
+### Configuration examples
+
+1. YAML format (save as `~/.codex/config.yaml`):
+
+```yaml
+model: o4-mini
+approvalMode: suggest
+fullAutoErrorMode: ask-user
+notify: true
+```
+
+2. JSON format (save as `~/.codex/config.json`):
+
+```json
+{
+ "model": "o4-mini",
+ "approvalMode": "suggest",
+ "fullAutoErrorMode": "ask-user",
+ "notify": true
+}
+```
+
+### Full configuration example
+
+Below is a comprehensive example of `config.json` with multiple custom providers:
+
+```json
+{
+ "model": "o4-mini",
+ "provider": "openai",
+ "providers": {
+ "openai": {
+ "name": "OpenAI",
+ "baseURL": "https://api.openai.com/v1",
+ "envKey": "OPENAI_API_KEY"
+ },
+ "azure": {
+ "name": "AzureOpenAI",
+ "baseURL": "https://YOUR_PROJECT_NAME.openai.azure.com/openai",
+ "envKey": "AZURE_OPENAI_API_KEY"
+ },
+ "openrouter": {
+ "name": "OpenRouter",
+ "baseURL": "https://openrouter.ai/api/v1",
+ "envKey": "OPENROUTER_API_KEY"
+ },
+ "gemini": {
+ "name": "Gemini",
+ "baseURL": "https://generativelanguage.googleapis.com/v1beta/openai",
+ "envKey": "GEMINI_API_KEY"
+ },
+ "ollama": {
+ "name": "Ollama",
+ "baseURL": "http://localhost:11434/v1",
+ "envKey": "OLLAMA_API_KEY"
+ },
+ "mistral": {
+ "name": "Mistral",
+ "baseURL": "https://api.mistral.ai/v1",
+ "envKey": "MISTRAL_API_KEY"
+ },
+ "deepseek": {
+ "name": "DeepSeek",
+ "baseURL": "https://api.deepseek.com",
+ "envKey": "DEEPSEEK_API_KEY"
+ },
+ "xai": {
+ "name": "xAI",
+ "baseURL": "https://api.x.ai/v1",
+ "envKey": "XAI_API_KEY"
+ },
+ "groq": {
+ "name": "Groq",
+ "baseURL": "https://api.groq.com/openai/v1",
+ "envKey": "GROQ_API_KEY"
+ },
+ "arceeai": {
+ "name": "ArceeAI",
+ "baseURL": "https://conductor.arcee.ai/v1",
+ "envKey": "ARCEEAI_API_KEY"
+ }
+ },
+ "history": {
+ "maxSize": 1000,
+ "saveHistory": true,
+ "sensitivePatterns": []
+ }
+}
+```
+
+### Custom instructions
+
+You can create a `~/.codex/AGENTS.md` file to define custom guidance for the agent:
+
+```markdown
+- Always respond with emojis
+- Only use git commands when explicitly requested
+```
+
+### Environment variables setup
+
+For each AI provider, you need to set the corresponding API key in your environment variables. For example:
+
+```bash
+# OpenAI
+export OPENAI_API_KEY="your-api-key-here"
+
+# Azure OpenAI
+export AZURE_OPENAI_API_KEY="your-azure-api-key-here"
+export AZURE_OPENAI_API_VERSION="2025-04-01-preview" # Optional
+
+# OpenRouter
+export OPENROUTER_API_KEY="your-openrouter-key-here"
+
+# Similarly for other providers
+```
+
+---
+
+## FAQ
+
+
+OpenAI released a model called Codex in 2021 - is this related?
+
+In 2021, OpenAI released Codex, an AI system designed to generate code from natural language prompts. That original Codex model was deprecated as of March 2023 and is separate from the CLI tool.
+
+
+
+
+Which models are supported?
+
+Any model available with [Responses API](https://platform.openai.com/docs/api-reference/responses). The default is `o4-mini`, but pass `--model gpt-4.1` or set `model: gpt-4.1` in your config file to override.
+
+
+
+Why does o3 or o4-mini not work for me?
+
+It's possible that your [API account needs to be verified](https://help.openai.com/en/articles/10910291-api-organization-verification) in order to start streaming responses and seeing chain of thought summaries from the API. If you're still running into issues, please let us know!
+
+
+
+
+How do I stop Codex from editing my files?
+
+Codex runs model-generated commands in a sandbox. If a proposed command or file change doesn't look right, you can simply type **n** to deny the command or give the model feedback.
+
+
+
+Does it work on Windows?
+
+Not directly. It requires [Windows Subsystem for Linux (WSL2)](https://learn.microsoft.com/en-us/windows/wsl/install) - Codex has been tested on macOS and Linux with Node 22.
+
+
+
+---
+
+## Zero data retention (ZDR) usage
+
+Codex CLI **does** support OpenAI organizations with [Zero Data Retention (ZDR)](https://platform.openai.com/docs/guides/your-data#zero-data-retention) enabled. If your OpenAI organization has Zero Data Retention enabled and you still encounter errors such as:
+
+```
+OpenAI rejected the request. Error details: Status: 400, Code: unsupported_parameter, Type: invalid_request_error, Message: 400 Previous response cannot be used for this organization due to Zero Data Retention.
+```
+
+You may need to upgrade to a more recent version with: `npm i -g @openai/codex@latest`
+
+---
+
+## Codex open source fund
+
+We're excited to launch a **$1 million initiative** supporting open source projects that use Codex CLI and other OpenAI models.
+
+- Grants are awarded up to **$25,000** API credits.
+- Applications are reviewed **on a rolling basis**.
+
+**Interested? [Apply here](https://openai.com/form/codex-open-source-fund/).**
+
+---
+
+## Contributing
+
+This project is under active development and the code will likely change pretty significantly. We'll update this message once that's complete!
+
+More broadly we welcome contributions - whether you are opening your very first pull request or you're a seasoned maintainer. At the same time we care about reliability and long-term maintainability, so the bar for merging code is intentionally **high**. The guidelines below spell out what "high-quality" means in practice and should make the whole process transparent and friendly.
+
+### Development workflow
+
+- Create a _topic branch_ from `main` - e.g. `feat/interactive-prompt`.
+- Keep your changes focused. Multiple unrelated fixes should be opened as separate PRs.
+- Use `pnpm test:watch` during development for super-fast feedback.
+- We use **Vitest** for unit tests, **ESLint** + **Prettier** for style, and **TypeScript** for type-checking.
+- Before pushing, run the full test/type/lint suite:
+
+### Git hooks with Husky
+
+This project uses [Husky](https://typicode.github.io/husky/) to enforce code quality checks:
+
+- **Pre-commit hook**: Automatically runs lint-staged to format and lint files before committing
+- **Pre-push hook**: Runs tests and type checking before pushing to the remote
+
+These hooks help maintain code quality and prevent pushing code with failing tests. For more details, see [HUSKY.md](./HUSKY.md).
+
+```bash
+pnpm test && pnpm run lint && pnpm run typecheck
+```
+
+- If you have **not** yet signed the Contributor License Agreement (CLA), add a PR comment containing the exact text
+
+ ```text
+ I have read the CLA Document and I hereby sign the CLA
+ ```
+
+ The CLA-Assistant bot will turn the PR status green once all authors have signed.
+
+```bash
+# Watch mode (tests rerun on change)
+pnpm test:watch
+
+# Type-check without emitting files
+pnpm typecheck
+
+# Automatically fix lint + prettier issues
+pnpm lint:fix
+pnpm format:fix
+```
+
+### Debugging
+
+To debug the CLI with a visual debugger, do the following in the `codex-cli` folder:
+
+- Run `pnpm run build` to build the CLI, which will generate `cli.js.map` alongside `cli.js` in the `dist` folder.
+- Run the CLI with `node --inspect-brk ./dist/cli.js` The program then waits until a debugger is attached before proceeding. Options:
+ - In VS Code, choose **Debug: Attach to Node Process** from the command palette and choose the option in the dropdown with debug port `9229` (likely the first option)
+  - Go to `chrome://inspect` in Chrome and find **localhost:9229** and click **trace**
+
+### Writing high-impact code changes
+
+1. **Start with an issue.** Open a new one or comment on an existing discussion so we can agree on the solution before code is written.
+2. **Add or update tests.** Every new feature or bug-fix should come with test coverage that fails before your change and passes afterwards. 100% coverage is not required, but aim for meaningful assertions.
+3. **Document behaviour.** If your change affects user-facing behaviour, update the README, inline help (`codex --help`), or relevant example projects.
+4. **Keep commits atomic.** Each commit should compile and the tests should pass. This makes reviews and potential rollbacks easier.
+
+### Opening a pull request
+
+- Fill in the PR template (or include similar information) - **What? Why? How?**
+- Run **all** checks locally (`npm test && npm run lint && npm run typecheck`). CI failures that could have been caught locally slow down the process.
+- Make sure your branch is up-to-date with `main` and that you have resolved merge conflicts.
+- Mark the PR as **Ready for review** only when you believe it is in a merge-able state.
+
+### Review process
+
+1. One maintainer will be assigned as a primary reviewer.
+2. We may ask for changes - please do not take this personally. We value the work, we just also value consistency and long-term maintainability.
+3. When there is consensus that the PR meets the bar, a maintainer will squash-and-merge.
+
+### Community values
+
+- **Be kind and inclusive.** Treat others with respect; we follow the [Contributor Covenant](https://www.contributor-covenant.org/).
+- **Assume good intent.** Written communication is hard - err on the side of generosity.
+- **Teach & learn.** If you spot something confusing, open an issue or PR with improvements.
+
+### Getting help
+
+If you run into problems setting up the project, would like feedback on an idea, or just want to say _hi_ - please open a Discussion or jump into the relevant issue. We are happy to help.
+
+Together we can make Codex CLI an incredible tool. **Happy hacking!** :rocket:
+
+### Contributor license agreement (CLA)
+
+All contributors **must** accept the CLA. The process is lightweight:
+
+1. Open your pull request.
+2. Paste the following comment (or reply `recheck` if you've signed before):
+
+ ```text
+ I have read the CLA Document and I hereby sign the CLA
+ ```
+
+3. The CLA-Assistant bot records your signature in the repo and marks the status check as passed.
+
+No special Git commands, email attachments, or commit footers required.
+
+#### Quick fixes
+
+| Scenario | Command |
+| ----------------- | ------------------------------------------------ |
+| Amend last commit | `git commit --amend -s --no-edit && git push -f` |
+
+The **DCO check** blocks merges until every commit in the PR carries the footer (with squash this is just the one).
+
+### Releasing `codex`
+
+To publish a new version of the CLI you first need to stage the npm package. A
+helper script in `codex-cli/scripts/` does all the heavy lifting. Inside the
+`codex-cli` folder run:
+
+```bash
+# Classic, JS implementation that includes small, native binaries for Linux sandboxing.
+pnpm stage-release
+
+# Optionally specify the temp directory to reuse between runs.
+RELEASE_DIR=$(mktemp -d)
+pnpm stage-release --tmp "$RELEASE_DIR"
+
+# "Fat" package that additionally bundles the native Rust CLI binaries for
+# Linux. End-users can then opt-in at runtime by setting CODEX_RUST=1.
+pnpm stage-release --native
+```
+
+Go to the folder where the release is staged and verify that it works as intended. If so, run the following from the temp folder:
+
+```
+cd "$RELEASE_DIR"
+npm publish
+```
+
+### Alternative build options
+
+#### Nix flake development
+
+Prerequisite: Nix >= 2.4 with flakes enabled (`experimental-features = nix-command flakes` in `~/.config/nix/nix.conf`).
+
+Enter a Nix development shell:
+
+```bash
+# Use either one of the commands according to which implementation you want to work with
+nix develop .#codex-cli # For entering codex-cli specific shell
+nix develop .#codex-rs # For entering codex-rs specific shell
+```
+
+This shell includes Node.js, installs dependencies, builds the CLI, and provides a `codex` command alias.
+
+Build and run the CLI directly:
+
+```bash
+# Use either one of the commands according to which implementation you want to work with
+nix build .#codex-cli # For building codex-cli
+nix build .#codex-rs # For building codex-rs
+./result/bin/codex --help
+```
+
+Run the CLI via the flake app:
+
+```bash
+# Use either one of the commands according to which implementation you want to work with
+nix run .#codex-cli # For running codex-cli
+nix run .#codex-rs # For running codex-rs
+```
+
+Use direnv with flakes
+
+If you have direnv installed, you can use the following `.envrc` to automatically enter the Nix shell when you `cd` into the project directory:
+
+```bash
+cd codex-rs
+echo "use flake ../flake.nix#codex-cli" >> .envrc && direnv allow
+cd codex-cli
+echo "use flake ../flake.nix#codex-rs" >> .envrc && direnv allow
+```
+
+---
+
+## Security & responsible AI
+
+Have you discovered a vulnerability or have concerns about model output? Please e-mail **security@openai.com** and we will respond promptly.
+
+---
+
+## License
+
+This repository is licensed under the [Apache-2.0 License](LICENSE).
diff --git a/codex-cli/src/cli.tsx b/codex-cli/src/cli.tsx
index c7e5d9ff318..0442a6c3770 100644
--- a/codex-cli/src/cli.tsx
+++ b/codex-cli/src/cli.tsx
@@ -45,6 +45,7 @@ import { createInputItem } from "./utils/input-utils";
import { initLogger } from "./utils/logger/log";
import { isModelSupportedForResponses } from "./utils/model-utils.js";
import { parseToolCall } from "./utils/parsers";
+import { providers } from "./utils/providers";
import { onExit, setInkRenderer } from "./utils/terminal";
import chalk from "chalk";
import { spawnSync } from "child_process";
@@ -327,26 +328,44 @@ try {
// ignore errors
}
-if (cli.flags.login) {
- apiKey = await fetchApiKey(client.issuer, client.client_id);
- try {
- const home = os.homedir();
- const authDir = path.join(home, ".codex");
- const authFile = path.join(authDir, "auth.json");
- if (fs.existsSync(authFile)) {
- const data = JSON.parse(fs.readFileSync(authFile, "utf-8"));
- savedTokens = data.tokens;
+// Get provider-specific API key if not OpenAI
+if (provider.toLowerCase() !== "openai") {
+ const providerInfo = providers[provider.toLowerCase()];
+ if (providerInfo) {
+ const providerApiKey = process.env[providerInfo.envKey];
+ if (providerApiKey) {
+ apiKey = providerApiKey;
}
- } catch {
- /* ignore */
}
-} else if (!apiKey) {
- apiKey = await fetchApiKey(client.issuer, client.client_id);
}
+
+// Only proceed with OpenAI auth flow if:
+// 1. Provider is OpenAI and no API key is set, or
+// 2. Login flag is explicitly set
+if (provider.toLowerCase() === "openai" && !apiKey) {
+ if (cli.flags.login) {
+ apiKey = await fetchApiKey(client.issuer, client.client_id);
+ try {
+ const home = os.homedir();
+ const authDir = path.join(home, ".codex");
+ const authFile = path.join(authDir, "auth.json");
+ if (fs.existsSync(authFile)) {
+ const data = JSON.parse(fs.readFileSync(authFile, "utf-8"));
+ savedTokens = data.tokens;
+ }
+ } catch {
+ /* ignore */
+ }
+ } else {
+ apiKey = await fetchApiKey(client.issuer, client.client_id);
+ }
+}
+
// Ensure the API key is available as an environment variable for legacy code
process.env["OPENAI_API_KEY"] = apiKey;
-if (cli.flags.free) {
+// Only attempt credit redemption for OpenAI provider
+if (cli.flags.free && provider.toLowerCase() === "openai") {
// eslint-disable-next-line no-console
console.log(`${chalk.bold("codex --free")} attempting to redeem credits...`);
if (!savedTokens?.refresh_token) {
@@ -379,13 +398,18 @@ if (!apiKey && !NO_API_KEY_REQUIRED.has(provider.toLowerCase())) {
? `You can create a key here: ${chalk.bold(
chalk.underline("https://platform.openai.com/account/api-keys"),
)}\n`
- : provider.toLowerCase() === "gemini"
+ : provider.toLowerCase() === "azure"
? `You can create a ${chalk.bold(
- `${provider.toUpperCase()}_API_KEY`,
- )} ` + `in the ${chalk.bold(`Google AI Studio`)}.\n`
- : `You can create a ${chalk.bold(
- `${provider.toUpperCase()}_API_KEY`,
- )} ` + `in the ${chalk.bold(`${provider}`)} dashboard.\n`
+ `${provider.toUpperCase()}_OPENAI_API_KEY`,
+ )} ` +
+ `in Azure AI Foundry portal at ${chalk.bold(chalk.underline("https://ai.azure.com"))}.\n`
+ : provider.toLowerCase() === "gemini"
+ ? `You can create a ${chalk.bold(
+ `${provider.toUpperCase()}_API_KEY`,
+ )} ` + `in the ${chalk.bold(`Google AI Studio`)}.\n`
+ : `You can create a ${chalk.bold(
+ `${provider.toUpperCase()}_API_KEY`,
+ )} ` + `in the ${chalk.bold(`${provider}`)} dashboard.\n`
}`,
);
process.exit(1);
diff --git a/codex-cli/src/utils/agent/agent-loop.ts b/codex-cli/src/utils/agent/agent-loop.ts
index cc57239b40f..8a5adbeb23f 100644
--- a/codex-cli/src/utils/agent/agent-loop.ts
+++ b/codex-cli/src/utils/agent/agent-loop.ts
@@ -800,7 +800,8 @@ export class AgentLoop {
const responseCall =
!this.config.provider ||
- this.config.provider?.toLowerCase() === "openai"
+ this.config.provider?.toLowerCase() === "openai" ||
+ this.config.provider?.toLowerCase() === "azure"
? (params: ResponseCreateParams) =>
this.oai.responses.create(params)
: (params: ResponseCreateParams) =>
@@ -1188,7 +1189,8 @@ export class AgentLoop {
const responseCall =
!this.config.provider ||
- this.config.provider?.toLowerCase() === "openai"
+ this.config.provider?.toLowerCase() === "openai" ||
+ this.config.provider?.toLowerCase() === "azure"
? (params: ResponseCreateParams) =>
this.oai.responses.create(params)
: (params: ResponseCreateParams) =>
diff --git a/codex-cli/src/utils/config.ts b/codex-cli/src/utils/config.ts
index 51761bf6d4d..3fafdb44e8f 100644
--- a/codex-cli/src/utils/config.ts
+++ b/codex-cli/src/utils/config.ts
@@ -69,7 +69,7 @@ export const OPENAI_BASE_URL = process.env["OPENAI_BASE_URL"] || "";
export let OPENAI_API_KEY = process.env["OPENAI_API_KEY"] || "";
export const AZURE_OPENAI_API_VERSION =
- process.env["AZURE_OPENAI_API_VERSION"] || "2025-03-01-preview";
+ process.env["AZURE_OPENAI_API_VERSION"] || "2025-04-01-preview";
export const DEFAULT_REASONING_EFFORT = "high";
export const OPENAI_ORGANIZATION = process.env["OPENAI_ORGANIZATION"] || "";
diff --git a/codex-cli/tests/agent-azure-responses-endpoint.test.ts b/codex-cli/tests/agent-azure-responses-endpoint.test.ts
new file mode 100644
index 00000000000..aecf587150c
--- /dev/null
+++ b/codex-cli/tests/agent-azure-responses-endpoint.test.ts
@@ -0,0 +1,107 @@
+/**
+ * tests/agent-azure-responses-endpoint.test.ts
+ *
+ * Verifies that AgentLoop calls the `/responses` endpoint when provider is set to Azure.
+ */
+
+import { describe, it, expect, vi, beforeEach } from "vitest";
+
+// Fake stream that yields a completed response event
+class FakeStream {
+ async *[Symbol.asyncIterator]() {
+ yield {
+ type: "response.completed",
+ response: { id: "azure_resp", status: "completed", output: [] },
+ } as any;
+ }
+}
+
+let lastCreateParams: any = null;
+
+vi.mock("openai", () => {
+ class FakeDefaultClient {
+ public responses = {
+ create: async (params: any) => {
+ lastCreateParams = params;
+ return new FakeStream();
+ },
+ };
+ }
+ class FakeAzureClient {
+ public responses = {
+ create: async (params: any) => {
+ lastCreateParams = params;
+ return new FakeStream();
+ },
+ };
+ }
+ class APIConnectionTimeoutError extends Error {}
+ return {
+ __esModule: true,
+ default: FakeDefaultClient,
+ AzureOpenAI: FakeAzureClient,
+ APIConnectionTimeoutError,
+ };
+});
+
+// Stub approvals to bypass command approval logic
+vi.mock("../src/approvals.js", () => ({
+ __esModule: true,
+ alwaysApprovedCommands: new Set(),
+ canAutoApprove: () => ({ type: "auto-approve", runInSandbox: false }),
+ isSafeCommand: () => null,
+}));
+
+// Stub format-command to avoid formatting side effects
+vi.mock("../src/format-command.js", () => ({
+ __esModule: true,
+ formatCommandForDisplay: (cmd: Array<string>) => cmd.join(" "),
+}));
+
+// Stub internal logging to keep output clean
+vi.mock("../src/utils/agent/log.js", () => ({
+ __esModule: true,
+ log: () => {},
+ isLoggingEnabled: () => false,
+}));
+
+import { AgentLoop } from "../src/utils/agent/agent-loop.js";
+
+describe("AgentLoop Azure provider responses endpoint", () => {
+ beforeEach(() => {
+ lastCreateParams = null;
+ });
+
+ it("calls the /responses endpoint when provider is azure", async () => {
+ const cfg: any = {
+ model: "test-model",
+ provider: "azure",
+ instructions: "",
+ disableResponseStorage: false,
+ notify: false,
+ };
+ const loop = new AgentLoop({
+ additionalWritableRoots: [],
+ model: cfg.model,
+ config: cfg,
+ instructions: cfg.instructions,
+ approvalPolicy: { mode: "suggest" } as any,
+ onItem: () => {},
+ onLoading: () => {},
+ getCommandConfirmation: async () => ({ review: "yes" }) as any,
+ onLastResponseId: () => {},
+ });
+
+ await loop.run([
+ {
+ type: "message",
+ role: "user",
+ content: [{ type: "input_text", text: "hello" }],
+ },
+ ]);
+
+ expect(lastCreateParams).not.toBeNull();
+ expect(lastCreateParams.model).toBe(cfg.model);
+ expect(Array.isArray(lastCreateParams.input)).toBe(true);
+ });
+});
diff --git a/codex-rs/Cargo.lock b/codex-rs/Cargo.lock
index 66b4fa3e00d..035f37e5527 100644
--- a/codex-rs/Cargo.lock
+++ b/codex-rs/Cargo.lock
@@ -691,13 +691,25 @@ dependencies = [
"tempfile",
]
+[[package]]
+name = "codex-file-search"
+version = "0.0.0"
+dependencies = [
+ "anyhow",
+ "clap",
+ "ignore",
+ "nucleo-matcher",
+ "serde",
+ "serde_json",
+ "tokio",
+]
+
[[package]]
name = "codex-linux-sandbox"
version = "0.0.0"
dependencies = [
"anyhow",
"clap",
- "codex-common",
"codex-core",
"landlock",
"libc",
@@ -759,6 +771,7 @@ dependencies = [
"codex-ansi-escape",
"codex-common",
"codex-core",
+ "codex-file-search",
"codex-linux-sandbox",
"codex-login",
"color-eyre",
@@ -1602,6 +1615,19 @@ version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2"
+[[package]]
+name = "globset"
+version = "0.4.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "54a1028dfc5f5df5da8a56a73e6c153c9a9708ec57232470703592a3f18e49f5"
+dependencies = [
+ "aho-corasick",
+ "bstr",
+ "log",
+ "regex-automata 0.4.9",
+ "regex-syntax 0.8.5",
+]
+
[[package]]
name = "h2"
version = "0.4.9"
@@ -1986,6 +2012,22 @@ dependencies = [
"icu_properties",
]
+[[package]]
+name = "ignore"
+version = "0.4.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d89fd380afde86567dfba715db065673989d6253f42b88179abd3eae47bda4b"
+dependencies = [
+ "crossbeam-deque",
+ "globset",
+ "log",
+ "memchr",
+ "regex-automata 0.4.9",
+ "same-file",
+ "walkdir",
+ "winapi-util",
+]
+
[[package]]
name = "image"
version = "0.25.6"
@@ -2578,6 +2620,16 @@ dependencies = [
"winapi",
]
+[[package]]
+name = "nucleo-matcher"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf33f538733d1a5a3494b836ba913207f14d9d4a1d3cd67030c5061bdd2cac85"
+dependencies = [
+ "memchr",
+ "unicode-segmentation",
+]
+
[[package]]
name = "num-bigint"
version = "0.4.6"
@@ -4363,6 +4415,7 @@ dependencies = [
"bytes",
"libc",
"mio",
+ "parking_lot",
"pin-project-lite",
"signal-hook-registry",
"socket2",
diff --git a/codex-rs/Cargo.toml b/codex-rs/Cargo.toml
index 6991a6223a7..eba43e548be 100644
--- a/codex-rs/Cargo.toml
+++ b/codex-rs/Cargo.toml
@@ -8,6 +8,7 @@ members = [
"core",
"exec",
"execpolicy",
+ "file-search",
"linux-sandbox",
"login",
"mcp-client",
@@ -36,3 +37,6 @@ lto = "fat"
# Because we bundle some of these executables with the TypeScript CLI, we
# remove everything to make the binary as small as possible.
strip = "symbols"
+
+# See https://github.com/openai/codex/issues/1411 for details.
+codegen-units = 1
diff --git a/codex-rs/cli/src/debug_sandbox.rs b/codex-rs/cli/src/debug_sandbox.rs
index deacca5f280..a21cd4e73ef 100644
--- a/codex-rs/cli/src/debug_sandbox.rs
+++ b/codex-rs/cli/src/debug_sandbox.rs
@@ -1,7 +1,6 @@
use std::path::PathBuf;
use codex_common::CliConfigOverrides;
-use codex_common::SandboxPermissionOption;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::exec::StdioPolicy;
@@ -20,13 +19,11 @@ pub async fn run_command_under_seatbelt(
) -> anyhow::Result<()> {
let SeatbeltCommand {
full_auto,
- sandbox,
config_overrides,
command,
} = command;
run_command_under_sandbox(
full_auto,
- sandbox,
command,
config_overrides,
codex_linux_sandbox_exe,
@@ -41,13 +38,11 @@ pub async fn run_command_under_landlock(
) -> anyhow::Result<()> {
let LandlockCommand {
full_auto,
- sandbox,
config_overrides,
command,
} = command;
run_command_under_sandbox(
full_auto,
- sandbox,
command,
config_overrides,
codex_linux_sandbox_exe,
@@ -63,13 +58,12 @@ enum SandboxType {
async fn run_command_under_sandbox(
full_auto: bool,
- sandbox: SandboxPermissionOption,
command: Vec<String>,
config_overrides: CliConfigOverrides,
codex_linux_sandbox_exe: Option<PathBuf>,
sandbox_type: SandboxType,
) -> anyhow::Result<()> {
- let sandbox_policy = create_sandbox_policy(full_auto, sandbox);
+ let sandbox_policy = create_sandbox_policy(full_auto);
let cwd = std::env::current_dir()?;
let config = Config::load_with_cli_overrides(
config_overrides
@@ -110,13 +104,10 @@ async fn run_command_under_sandbox(
handle_exit_status(status);
}
-pub fn create_sandbox_policy(full_auto: bool, sandbox: SandboxPermissionOption) -> SandboxPolicy {
+pub fn create_sandbox_policy(full_auto: bool) -> SandboxPolicy {
if full_auto {
- SandboxPolicy::new_full_auto_policy()
+ SandboxPolicy::new_workspace_write_policy()
} else {
- match sandbox.permissions.map(Into::into) {
- Some(sandbox_policy) => sandbox_policy,
- None => SandboxPolicy::new_read_only_policy(),
- }
+ SandboxPolicy::new_read_only_policy()
}
}
diff --git a/codex-rs/cli/src/lib.rs b/codex-rs/cli/src/lib.rs
index fa78d18ab43..c6d80c0adfa 100644
--- a/codex-rs/cli/src/lib.rs
+++ b/codex-rs/cli/src/lib.rs
@@ -5,7 +5,6 @@ pub mod proto;
use clap::Parser;
use codex_common::CliConfigOverrides;
-use codex_common::SandboxPermissionOption;
#[derive(Debug, Parser)]
pub struct SeatbeltCommand {
@@ -13,9 +12,6 @@ pub struct SeatbeltCommand {
#[arg(long = "full-auto", default_value_t = false)]
pub full_auto: bool,
- #[clap(flatten)]
- pub sandbox: SandboxPermissionOption,
-
#[clap(skip)]
pub config_overrides: CliConfigOverrides,
@@ -30,9 +26,6 @@ pub struct LandlockCommand {
#[arg(long = "full-auto", default_value_t = false)]
pub full_auto: bool,
- #[clap(flatten)]
- pub sandbox: SandboxPermissionOption,
-
#[clap(skip)]
pub config_overrides: CliConfigOverrides,
diff --git a/codex-rs/common/Cargo.toml b/codex-rs/common/Cargo.toml
index b4b658dabf1..eff7a6c0b47 100644
--- a/codex-rs/common/Cargo.toml
+++ b/codex-rs/common/Cargo.toml
@@ -16,3 +16,4 @@ serde = { version = "1", optional = true }
# Separate feature so that `clap` is not a mandatory dependency.
cli = ["clap", "toml", "serde"]
elapsed = []
+sandbox_summary = []
diff --git a/codex-rs/common/src/approval_mode_cli_arg.rs b/codex-rs/common/src/approval_mode_cli_arg.rs
index 199541148a0..a74ceb2b813 100644
--- a/codex-rs/common/src/approval_mode_cli_arg.rs
+++ b/codex-rs/common/src/approval_mode_cli_arg.rs
@@ -1,27 +1,23 @@
//! Standard type to use with the `--approval-mode` CLI option.
//! Available when the `cli` feature is enabled for the crate.
-use clap::ArgAction;
-use clap::Parser;
use clap::ValueEnum;
-use codex_core::config::parse_sandbox_permission_with_base_path;
use codex_core::protocol::AskForApproval;
-use codex_core::protocol::SandboxPermission;
#[derive(Clone, Copy, Debug, ValueEnum)]
#[value(rename_all = "kebab-case")]
pub enum ApprovalModeCliArg {
+ /// Only run "trusted" commands (e.g. ls, cat, sed) without asking for user
+ /// approval. Will escalate to the user if the model proposes a command that
+ /// is not in the "trusted" set.
+ Untrusted,
+
/// Run all commands without asking for user approval.
/// Only asks for approval if a command fails to execute, in which case it
/// will escalate to the user to ask for un-sandboxed execution.
OnFailure,
- /// Only run "known safe" commands (e.g. ls, cat, sed) without
- /// asking for user approval. Will escalate to the user if the model
- /// proposes a command that is not allow-listed.
- UnlessAllowListed,
-
/// Never ask for user approval
/// Execution failures are immediately returned to the model.
Never,
@@ -30,44 +26,9 @@ pub enum ApprovalModeCliArg {
impl From<ApprovalModeCliArg> for AskForApproval {
fn from(value: ApprovalModeCliArg) -> Self {
match value {
+ ApprovalModeCliArg::Untrusted => AskForApproval::UnlessTrusted,
ApprovalModeCliArg::OnFailure => AskForApproval::OnFailure,
- ApprovalModeCliArg::UnlessAllowListed => AskForApproval::UnlessAllowListed,
ApprovalModeCliArg::Never => AskForApproval::Never,
}
}
}
-
-#[derive(Parser, Debug)]
-pub struct SandboxPermissionOption {
- /// Specify this flag multiple times to specify the full set of permissions
- /// to grant to Codex.
- ///
- /// ```shell
- /// codex -s disk-full-read-access \
- /// -s disk-write-cwd \
- /// -s disk-write-platform-user-temp-folder \
- /// -s disk-write-platform-global-temp-folder
- /// ```
- ///
- /// Note disk-write-folder takes a value:
- ///
- /// ```shell
- /// -s disk-write-folder=$HOME/.pyenv/shims
- /// ```
- ///
- /// These permissions are quite broad and should be used with caution:
- ///
- /// ```shell
- /// -s disk-full-write-access
- /// -s network-full-access
- /// ```
- #[arg(long = "sandbox-permission", short = 's', action = ArgAction::Append, value_parser = parse_sandbox_permission)]
- pub permissions: Option<Vec<SandboxPermission>>,
-}
-
-/// Custom value-parser so we can keep the CLI surface small *and*
-/// still handle the parameterised `disk-write-folder` case.
-fn parse_sandbox_permission(raw: &str) -> std::io::Result<SandboxPermission> {
- let base_path = std::env::current_dir()?;
- parse_sandbox_permission_with_base_path(raw, base_path)
-}
diff --git a/codex-rs/common/src/lib.rs b/codex-rs/common/src/lib.rs
index c2283640cb3..18ed49e5a7d 100644
--- a/codex-rs/common/src/lib.rs
+++ b/codex-rs/common/src/lib.rs
@@ -6,11 +6,14 @@ pub mod elapsed;
#[cfg(feature = "cli")]
pub use approval_mode_cli_arg::ApprovalModeCliArg;
-#[cfg(feature = "cli")]
-pub use approval_mode_cli_arg::SandboxPermissionOption;
#[cfg(any(feature = "cli", test))]
mod config_override;
#[cfg(feature = "cli")]
pub use config_override::CliConfigOverrides;
+
+mod sandbox_summary;
+
+#[cfg(feature = "sandbox_summary")]
+pub use sandbox_summary::summarize_sandbox_policy;
diff --git a/codex-rs/common/src/sandbox_summary.rs b/codex-rs/common/src/sandbox_summary.rs
new file mode 100644
index 00000000000..3d33d928365
--- /dev/null
+++ b/codex-rs/common/src/sandbox_summary.rs
@@ -0,0 +1,28 @@
+use codex_core::protocol::SandboxPolicy;
+
+pub fn summarize_sandbox_policy(sandbox_policy: &SandboxPolicy) -> String {
+ match sandbox_policy {
+ SandboxPolicy::DangerFullAccess => "danger-full-access".to_string(),
+ SandboxPolicy::ReadOnly => "read-only".to_string(),
+ SandboxPolicy::WorkspaceWrite {
+ writable_roots,
+ network_access,
+ } => {
+ let mut summary = "workspace-write".to_string();
+ if !writable_roots.is_empty() {
+ summary.push_str(&format!(
+ " [{}]",
+ writable_roots
+ .iter()
+ .map(|p| p.to_string_lossy())
+ .collect::<Vec<_>>()
+ .join(", ")
+ ));
+ }
+ if *network_access {
+ summary.push_str(" (network access enabled)");
+ }
+ summary
+ }
+ }
+}
diff --git a/codex-rs/config.md b/codex-rs/config.md
index ffa735ff212..f7e72581ab5 100644
--- a/codex-rs/config.md
+++ b/codex-rs/config.md
@@ -20,59 +20,73 @@ The model that Codex should use.
model = "o3" # overrides the default of "codex-mini-latest"
```
-## model_provider
+## model_providers
-Codex comes bundled with a number of "model providers" predefined. This config value is a string that indicates which provider to use. You can also define your own providers via `model_providers`.
+This option lets you override and amend the default set of model providers bundled with Codex. This value is a map where the key is the value to use with `model_provider` to select the corresponding provider.
-For example, if you are running ollama with Mistral locally, then you would need to add the following to your config:
+For example, if you wanted to add a provider that uses the OpenAI 4o model via the chat completions API, then you could add the following configuration:
```toml
-model = "mistral"
-model_provider = "ollama"
+# Recall that in TOML, root keys must be listed before tables.
+model = "gpt-4o"
+model_provider = "openai-chat-completions"
+
+[model_providers.openai-chat-completions]
+# Name of the provider that will be displayed in the Codex UI.
+name = "OpenAI using Chat Completions"
+# The path `/chat/completions` will be amended to this URL to make the POST
+# request for the chat completions.
+base_url = "https://api.openai.com/v1"
+# If `env_key` is set, identifies an environment variable that must be set when
+# using Codex with this provider. The value of the environment variable must be
+# non-empty and will be used in the `Bearer TOKEN` HTTP header for the POST request.
+env_key = "OPENAI_API_KEY"
+# Valid values for wire_api are "chat" and "responses". Defaults to "chat" if omitted.
+wire_api = "chat"
+# If necessary, extra query params that need to be added to the URL.
+# See the Azure example below.
+query_params = {}
```
-because the following definition for `ollama` is included in Codex:
+Note this makes it possible to use Codex CLI with non-OpenAI models, so long as they use a wire API that is compatible with the OpenAI chat completions API. For example, you could define the following provider to use Codex CLI with Ollama running locally:
```toml
[model_providers.ollama]
name = "Ollama"
base_url = "http://localhost:11434/v1"
-wire_api = "chat"
```
-This option defaults to `"openai"` and the corresponding provider is defined as follows:
+Or a third-party provider (using a distinct environment variable for the API key):
```toml
-[model_providers.openai]
-name = "OpenAI"
-base_url = "https://api.openai.com/v1"
-env_key = "OPENAI_API_KEY"
-wire_api = "responses"
+[model_providers.mistral]
+name = "Mistral"
+base_url = "https://api.mistral.ai/v1"
+env_key = "MISTRAL_API_KEY"
```
-## model_providers
+Note that Azure requires `api-version` to be passed as a query parameter, so be sure to specify it as part of `query_params` when defining the Azure provider:
-This option lets you override and amend the default set of model providers bundled with Codex. This value is a map where the key is the value to use with `model_provider` to select the correspodning provider.
+```toml
+[model_providers.azure]
+name = "Azure"
+# Make sure you set the appropriate subdomain for this URL.
+base_url = "https://YOUR_PROJECT_NAME.openai.azure.com/openai"
+env_key = "AZURE_OPENAI_API_KEY" # Or "OPENAI_API_KEY", whichever you use.
+query_params = { api-version = "2025-04-01-preview" }
+```
-For example, if you wanted to add a provider that uses the OpenAI 4o model via the chat completions API, then you
+## model_provider
-```toml
-# Recall that in TOML, root keys must be listed before tables.
-model = "gpt-4o"
-model_provider = "openai-chat-completions"
+Identifies which provider to use from the `model_providers` map. Defaults to `"openai"`.
-[model_providers.openai-chat-completions]
-# Name of the provider that will be displayed in the Codex UI.
-name = "OpenAI using Chat Completions"
-# The path `/chat/completions` will be amended to this URL to make the POST
-# request for the chat completions.
-base_url = "https://api.openai.com/v1"
-# If `env_key` is set, identifies an environment variable that must be set when
-# using Codex with this provider. The value of the environment variable must be
-# non-empty and will be used in the `Bearer TOKEN` HTTP header for the POST request.
-env_key = "OPENAI_API_KEY"
-# valid values for wire_api are "chat" and "responses".
-wire_api = "chat"
+Note that if you override `model_provider`, then you likely want to override
+`model`, as well. For example, if you are running ollama with Mistral locally,
+then you would need to add the following to your config in addition to the new entry in the `model_providers` map:
+
+```toml
+model = "mistral"
+model_provider = "ollama"
```
## approval_policy
@@ -80,8 +94,13 @@ wire_api = "chat"
Determines when the user should be prompted to approve whether Codex can execute a command:
```toml
-# This is analogous to --suggest in the TypeScript Codex CLI
-approval_policy = "unless-allow-listed"
+# Codex has hardcoded logic that defines a set of "trusted" commands.
+# Setting the approval_policy to `untrusted` means that Codex will prompt the
+# user before running a command not in the "trusted" set.
+#
+# See https://github.com/openai/codex/issues/1260 for the plan to enable
+# end-users to define their own trusted commands.
+approval_policy = "untrusted"
```
```toml
@@ -106,7 +125,6 @@ Here is an example of a `config.toml` that defines multiple profiles:
```toml
model = "o3"
approval_policy = "unless-allow-listed"
-sandbox_permissions = ["disk-full-read-access"]
disable_response_storage = false
# Setting `profile` is equivalent to specifying `--profile o3` on the command
@@ -170,31 +188,42 @@ To disable reasoning summaries, set `model_reasoning_summary` to `"none"` in you
model_reasoning_summary = "none" # disable reasoning summaries
```
-## sandbox_permissions
+## sandbox
+
+The `sandbox` configuration determines the _sandbox policy_ that Codex uses to execute untrusted commands. The `mode` determines the "base policy." Currently, only `workspace-write` supports additional configuration options, but this may change in the future.
-List of permissions to grant to the sandbox that Codex uses to execute untrusted commands:
+The default policy is `read-only`, which means commands can read any file on disk, but attempts to write a file or access the network will be blocked.
```toml
-# This is comparable to --full-auto in the TypeScript Codex CLI, though
-# specifying `disk-write-platform-global-temp-folder` adds /tmp as a writable
-# folder in addition to $TMPDIR.
-sandbox_permissions = [
- "disk-full-read-access",
- "disk-write-platform-user-temp-folder",
- "disk-write-platform-global-temp-folder",
- "disk-write-cwd",
-]
+[sandbox]
+mode = "read-only"
```
-To add additional writable folders, use `disk-write-folder`, which takes a parameter (this can be specified multiple times):
+A more relaxed policy is `workspace-write`. When specified, the current working directory for the Codex task will be writable (as well as `$TMPDIR` on macOS). Note that the CLI defaults to using `cwd` where it was spawned, though this can be overridden using `--cwd/-C`.
```toml
-sandbox_permissions = [
- # ...
- "disk-write-folder=/Users/mbolin/.pyenv/shims",
+[sandbox]
+mode = "workspace-write"
+
+# By default, only the cwd for the Codex session will be writable (and $TMPDIR on macOS),
+# but you can specify additional writable folders in this array.
+writable_roots = [
+ "/tmp",
]
+network_access = false # Like read-only, this also defaults to false and can be omitted.
+```
+
+To disable sandboxing altogether, specify `danger-full-access` like so:
+
+```toml
+[sandbox]
+mode = "danger-full-access"
```
+This is reasonable to use if Codex is running in an environment that provides its own sandboxing (such as a Docker container) such that further sandboxing is unnecessary.
+
+Though using this option may also be necessary if you try to use Codex in environments where its native sandboxing mechanisms are unsupported, such as older Linux kernels or on Windows.
+
## mcp_servers
Defines the list of MCP servers that Codex can consult for tool use. Currently, only servers that are launched by executing a program that communicate over stdio are supported. For servers that use the SSE transport, consider an adapter like [mcp-proxy](https://github.com/sparfenyuk/mcp-proxy).
@@ -392,6 +421,16 @@ Setting `hide_agent_reasoning` to `true` suppresses these events in **both** the
hide_agent_reasoning = true # defaults to false
```
+## model_context_window
+
+The size of the context window for the model, in tokens.
+
+In general, Codex knows the context window for the most common OpenAI models, but if you are using a new model with an old version of the Codex CLI, then you can use `model_context_window` to tell Codex what value to use to determine how much context is left during a conversation.
+
+## model_max_output_tokens
+
+This is analogous to `model_context_window`, but for the maximum number of output tokens for the model.
+
## project_doc_max_bytes
Maximum number of bytes to read from an `AGENTS.md` file to include in the instructions sent with the first turn of a session. Defaults to 32 KiB.
diff --git a/codex-rs/core/src/chat_completions.rs b/codex-rs/core/src/chat_completions.rs
index f381c72e513..ce2ab0539b0 100644
--- a/codex-rs/core/src/chat_completions.rs
+++ b/codex-rs/core/src/chat_completions.rs
@@ -114,8 +114,7 @@ pub(crate) async fn stream_chat_completions(
"tools": tools_json,
});
- let base_url = provider.base_url.trim_end_matches('/');
- let url = format!("{}/chat/completions", base_url);
+ let url = provider.get_full_url();
debug!(
"POST to {url}: {}",
@@ -215,6 +214,7 @@ where
let _ = tx_event
.send(Ok(ResponseEvent::Completed {
response_id: String::new(),
+ token_usage: None,
}))
.await;
return;
@@ -232,6 +232,7 @@ where
let _ = tx_event
.send(Ok(ResponseEvent::Completed {
response_id: String::new(),
+ token_usage: None,
}))
.await;
return;
@@ -317,6 +318,7 @@ where
let _ = tx_event
.send(Ok(ResponseEvent::Completed {
response_id: String::new(),
+ token_usage: None,
}))
.await;
@@ -394,7 +396,10 @@ where
// Not an assistant message – forward immediately.
return Poll::Ready(Some(Ok(ResponseEvent::OutputItemDone(item))));
}
- Poll::Ready(Some(Ok(ResponseEvent::Completed { response_id }))) => {
+ Poll::Ready(Some(Ok(ResponseEvent::Completed {
+ response_id,
+ token_usage,
+ }))) => {
if !this.cumulative.is_empty() {
let aggregated_item = crate::models::ResponseItem::Message {
role: "assistant".to_string(),
@@ -404,7 +409,10 @@ where
};
// Buffer Completed so it is returned *after* the aggregated message.
- this.pending_completed = Some(ResponseEvent::Completed { response_id });
+ this.pending_completed = Some(ResponseEvent::Completed {
+ response_id,
+ token_usage,
+ });
return Poll::Ready(Some(Ok(ResponseEvent::OutputItemDone(
aggregated_item,
@@ -412,8 +420,16 @@ where
}
// Nothing aggregated – forward Completed directly.
- return Poll::Ready(Some(Ok(ResponseEvent::Completed { response_id })));
- } // No other `Ok` variants exist at the moment, continue polling.
+ return Poll::Ready(Some(Ok(ResponseEvent::Completed {
+ response_id,
+ token_usage,
+ })));
+ }
+ Poll::Ready(Some(Ok(ResponseEvent::Created))) => {
+ // These events are exclusive to the Responses API and
+ // will never appear in a Chat Completions stream.
+ continue;
+ }
}
}
}
@@ -427,7 +443,7 @@ pub(crate) trait AggregateStreamExt: Stream> + Size
///
/// ```ignore
/// OutputItemDone()
- /// Completed { .. }
+ /// Completed
/// ```
///
/// No other `OutputItemDone` events will be seen by the caller.
diff --git a/codex-rs/core/src/client.rs b/codex-rs/core/src/client.rs
index aff838887a2..91a84bf380c 100644
--- a/codex-rs/core/src/client.rs
+++ b/codex-rs/core/src/client.rs
@@ -35,6 +35,7 @@ use crate::model_provider_info::ModelProviderInfo;
use crate::model_provider_info::WireApi;
use crate::models::ResponseItem;
use crate::openai_tools::create_tools_json_for_responses_api;
+use crate::protocol::TokenUsage;
use crate::util::backoff;
#[derive(Clone)]
@@ -122,9 +123,7 @@ impl ModelClient {
stream: true,
};
- let base_url = self.provider.base_url.clone();
- let base_url = base_url.trim_end_matches('/');
- let url = format!("{}/responses", base_url);
+ let url = self.provider.get_full_url();
trace!("POST to {url}: {}", serde_json::to_string(&payload)?);
let mut attempt = 0;
@@ -167,7 +166,7 @@ impl ModelClient {
// negligible.
if !(status == StatusCode::TOO_MANY_REQUESTS || status.is_server_error()) {
// Surface the error body to callers. Use `unwrap_or_default` per Clippy.
- let body = (res.text().await).unwrap_or_default();
+ let body = res.text().await.unwrap_or_default();
return Err(CodexErr::UnexpectedStatus(status, body));
}
@@ -207,9 +206,44 @@ struct SseEvent {
item: Option,
}
+#[derive(Debug, Deserialize)]
+struct ResponseCreated {}
+
#[derive(Debug, Deserialize)]
struct ResponseCompleted {
id: String,
+ usage: Option<ResponseCompletedUsage>,
+}
+
+#[derive(Debug, Deserialize)]
+struct ResponseCompletedUsage {
+ input_tokens: u64,
+ input_tokens_details: Option<ResponseCompletedInputTokensDetails>,
+ output_tokens: u64,
+ output_tokens_details: Option<ResponseCompletedOutputTokensDetails>,
+ total_tokens: u64,
+}
+
+impl From<ResponseCompletedUsage> for TokenUsage {
+ fn from(val: ResponseCompletedUsage) -> Self {
+ TokenUsage {
+ input_tokens: val.input_tokens,
+ cached_input_tokens: val.input_tokens_details.map(|d| d.cached_tokens),
+ output_tokens: val.output_tokens,
+ reasoning_output_tokens: val.output_tokens_details.map(|d| d.reasoning_tokens),
+ total_tokens: val.total_tokens,
+ }
+ }
+}
+
+#[derive(Debug, Deserialize)]
+struct ResponseCompletedInputTokensDetails {
+ cached_tokens: u64,
+}
+
+#[derive(Debug, Deserialize)]
+struct ResponseCompletedOutputTokensDetails {
+ reasoning_tokens: u64,
}
async fn process_sse<S>(stream: S, tx_event: mpsc::Sender<Result<ResponseEvent>>)
@@ -221,7 +255,7 @@ where
// If the stream stays completely silent for an extended period treat it as disconnected.
let idle_timeout = *OPENAI_STREAM_IDLE_TIMEOUT_MS;
// The response id returned from the "complete" message.
- let mut response_id = None;
+ let mut response_completed: Option<ResponseCompleted> = None;
loop {
let sse = match timeout(idle_timeout, stream.next()).await {
@@ -233,9 +267,15 @@ where
return;
}
Ok(None) => {
- match response_id {
- Some(response_id) => {
- let event = ResponseEvent::Completed { response_id };
+ match response_completed {
+ Some(ResponseCompleted {
+ id: response_id,
+ usage,
+ }) => {
+ let event = ResponseEvent::Completed {
+ response_id,
+ token_usage: usage.map(Into::into),
+ };
let _ = tx_event.send(Ok(event)).await;
}
None => {
@@ -296,12 +336,17 @@ where
return;
}
}
+ "response.created" => {
+ if event.response.is_some() {
+ let _ = tx_event.send(Ok(ResponseEvent::Created {})).await;
+ }
+ }
// Final response completed – includes array of output items & id
"response.completed" => {
if let Some(resp_val) = event.response {
match serde_json::from_value::<ResponseCompleted>(resp_val) {
Ok(r) => {
- response_id = Some(r.id);
+ response_completed = Some(r);
}
Err(e) => {
debug!("failed to parse ResponseCompleted: {e}");
@@ -311,7 +356,6 @@ where
};
}
"response.content_part.done"
- | "response.created"
| "response.function_call_arguments.delta"
| "response.in_progress"
| "response.output_item.added"
diff --git a/codex-rs/core/src/client_common.rs b/codex-rs/core/src/client_common.rs
index a2633475dfe..b08880a0df4 100644
--- a/codex-rs/core/src/client_common.rs
+++ b/codex-rs/core/src/client_common.rs
@@ -2,6 +2,7 @@ use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::error::Result;
use crate::models::ResponseItem;
+use crate::protocol::TokenUsage;
use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS;
use futures::Stream;
use serde::Serialize;
@@ -50,8 +51,12 @@ impl Prompt {
#[derive(Debug)]
pub enum ResponseEvent {
+ Created,
OutputItemDone(ResponseItem),
- Completed { response_id: String },
+ Completed {
+ response_id: String,
+ token_usage: Option<TokenUsage>,
+ },
}
#[derive(Debug, Serialize)]
diff --git a/codex-rs/core/src/codex.rs b/codex-rs/core/src/codex.rs
index 2837dd032e5..ec6e0bd185b 100644
--- a/codex-rs/core/src/codex.rs
+++ b/codex-rs/core/src/codex.rs
@@ -1,6 +1,7 @@
// Poisoned mutex should fail the program
#![allow(clippy::unwrap_used)]
+use std::borrow::Cow;
use std::collections::HashMap;
use std::collections::HashSet;
use std::path::Path;
@@ -188,7 +189,7 @@ pub(crate) struct Session {
/// Optional rollout recorder for persisting the conversation transcript so
/// sessions can be replayed or inspected later.
- rollout: Mutex