diff --git a/README.md b/README.md
index 668a03acaf..31813acdb8 100644
--- a/README.md
+++ b/README.md
@@ -65,7 +65,7 @@ Codex CLI supports [MCP servers](./docs/advanced.md#model-context-protocol-mcp).
 
 ### Configuration
 
-Codex CLI supports a rich set of configuration options, with preferences stored in `~/.codex/config.toml`. For full configuration options, see [Configuration](./docs/config.md).
+Codex CLI supports a rich set of configuration options, with preferences stored in `~/.codex/config.toml`. For a starter file you can copy, see [config.toml.example](./docs/config.toml.example). For full configuration options, see [Configuration](./docs/config.md).
 
 ---
 
@@ -99,4 +99,3 @@ Codex CLI supports a rich set of configuration options, with preferences stored
 ## License
 
 This repository is licensed under the [Apache-2.0 License](LICENSE).
-
diff --git a/codex-rs/README.md b/codex-rs/README.md
index 390f5d31aa..2a4782ec80 100644
--- a/codex-rs/README.md
+++ b/codex-rs/README.md
@@ -19,7 +19,7 @@ While we are [working to close the gap between the TypeScript and Rust implement
 
 ### Config
 
-Codex supports a rich set of configuration options. Note that the Rust CLI uses `config.toml` instead of `config.json`. See [`docs/config.md`](../docs/config.md) for details.
+Codex supports a rich set of configuration options. Note that the Rust CLI uses `config.toml` instead of `config.json`. A copy‑ready starter file is available at [`docs/config.toml.example`](../docs/config.toml.example). For full details, see [`docs/config.md`](../docs/config.md).
 
 ### Model Context Protocol Support
 
diff --git a/docs/advanced.md b/docs/advanced.md
index 26f735991f..7074b9b311 100644
--- a/docs/advanced.md
+++ b/docs/advanced.md
@@ -39,4 +39,7 @@ env = { "API_KEY" = "value" }
 ```
 
 > [!TIP]
-> It is somewhat experimental, but the Codex CLI can also be run as an MCP _server_ via `codex mcp`. If you launch it with an MCP client such as `npx @modelcontextprotocol/inspector codex mcp` and send it a `tools/list` request, you will see that there is only one tool, `codex`, that accepts a grab-bag of inputs, including a catch-all `config` map for anything you might want to override. Feel free to play around with it and provide feedback via GitHub issues.
\ No newline at end of file
+> Looking for a starter config you can copy? See [`config.toml.example`](./config.toml.example).
+
+> [!TIP]
+> It is somewhat experimental, but the Codex CLI can also be run as an MCP _server_ via `codex mcp`. If you launch it with an MCP client such as `npx @modelcontextprotocol/inspector codex mcp` and send it a `tools/list` request, you will see that there is only one tool, `codex`, that accepts a grab-bag of inputs, including a catch-all `config` map for anything you might want to override. Feel free to play around with it and provide feedback via GitHub issues.
diff --git a/docs/config.md b/docs/config.md
index 7d0bd7d2b8..85c4ac880a 100644
--- a/docs/config.md
+++ b/docs/config.md
@@ -11,6 +11,20 @@ Codex supports several mechanisms for setting config values:
 - If `value` cannot be parsed as a valid TOML value, it is treated as a string value. This means that both `-c model="o3"` and `-c model=o3` are equivalent.
 - The `$CODEX_HOME/config.toml` configuration file where the `CODEX_HOME` environment value defaults to `~/.codex`. (Note `CODEX_HOME` will also be where logs and other Codex-related information are stored.)
 
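+For example, the following two invocations are equivalent; the single quotes in the first keep the inner TOML quotes away from the shell:
+
+```shell
+codex -c 'model="o3"'
+codex -c model=o3
+```
+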
+Quick links:
+
+- Example config to copy: [config.toml.example](./config.toml.example)
+- Sandbox details: [docs/sandbox.md](./sandbox.md)
+- Authentication options: [docs/authentication.md](./authentication.md)
+- See examples: [Examples](#examples)
+
 Both the `--config` flag and the `config.toml` file support the following options:
 
 ## model
@@ -18,7 +25,7 @@ Both the `--config` flag and the `config.toml` file support the following option
 The model that Codex should use.
 
 ```toml
-model = "o3" # overrides the default of "gpt-5"
+model = "gpt-5" # default; set explicitly to be clear
 ```
 
 ## model_providers
@@ -93,6 +100,25 @@ http_headers = { "X-Example-Header" = "example-value" }
 env_http_headers = { "X-Example-Features" = "EXAMPLE_FEATURES" }
 ```
 
+### Built-in providers
+
+Codex ships with two built-in providers:
+
+- `openai`:
+  - Uses the Responses API (requests end with `/responses`).
+  - Default endpoint depends on auth mode:
+    - ChatGPT sign‑in: `https://chatgpt.com/backend-api/codex/responses`.
+    - API key: `https://api.openai.com/v1/responses`.
+  - If `OPENAI_BASE_URL` is set, it overrides the base URL for both modes; Codex still appends `/responses`.
+  - When available, adds headers sourced from env: `OpenAI-Organization` (`OPENAI_ORGANIZATION`) and `OpenAI-Project` (`OPENAI_PROJECT`).
+  - Requires OpenAI auth (ChatGPT login or API key).
+- `oss`:
+  - Uses Chat Completions (`/v1/chat/completions`).
+  - Defaults to `http://localhost:11434/v1` (or `CODEX_OSS_BASE_URL` if set).
+  - Intended for local OSS endpoints (e.g., Ollama-compatible servers).
+
+If you want Chat Completions against OpenAI’s API, define your own provider (e.g., `openai-chat-completions`) as shown above and set `model_provider = "openai-chat-completions"`.
+
 ### Per-provider network tuning
 
 The following optional settings control retry behaviour and streaming idle timeouts **per model provider**. They must be specified inside the corresponding `[model_providers.<id>]` block in `config.toml`. (Older releases accepted top‑level keys; those are now ignored.)
 
@@ -105,9 +131,9 @@ name = "OpenAI"
 base_url = "https://api.openai.com/v1"
 env_key = "OPENAI_API_KEY"
 # network tuning overrides (all optional; falls back to built‑in defaults)
-request_max_retries = 4 # retry failed HTTP requests
-stream_max_retries = 10 # retry dropped SSE streams
-stream_idle_timeout_ms = 300000 # 5m idle timeout
+request_max_retries = 4 # retry failed HTTP requests (default)
+stream_max_retries = 5 # retry dropped SSE streams (default)
+stream_idle_timeout_ms = 300000 # 5m idle timeout (default)
 ```
 
 #### request_max_retries
@@ -116,7 +142,7 @@ How many times Codex will retry a failed HTTP request to the model provider. Def
 
 #### stream_max_retries
 
-Number of times Codex will attempt to reconnect when a streaming response is interrupted. Defaults to `10`.
+Number of times Codex will attempt to reconnect when a streaming response is interrupted. Defaults to `5`.
 
 #### stream_idle_timeout_ms
 
@@ -135,9 +161,11 @@ model_provider = "ollama"
 model = "mistral"
 ```
 
+Tip: The default model is `"gpt-5"` and the default provider is `"openai"`.
+
 ## approval_policy
 
-Determines when the user should be prompted to approve whether Codex can execute a command:
+Determines when the user should be prompted to approve a command Codex wants to execute (default: `"on-request"`):
 
 ```toml
 # Codex has hardcoded logic that defines a set of "trusted" commands.
@@ -181,7 +209,7 @@ Here is an example of a `config.toml` that defines multiple profiles:
 
 ```toml
 model = "o3"
-approval_policy = "unless-allow-listed"
+approval_policy = "untrusted"
 disable_response_storage = false
 
 # Setting `profile` is equivalent to specifying `--profile o3` on the command
@@ -217,7 +245,7 @@ Users can specify config values at multiple levels. Order of precedence is as fo
 
 1. custom command-line argument, e.g., `--model o3`
 2. as part of a profile, where the `--profile` is specified via a CLI (or in the config file itself)
 3. as an entry in `config.toml`, e.g., `model = "o3"`
-4. the default value that comes with Codex CLI (i.e., Codex CLI defaults to `gpt-5`)
+4. the default value that comes with Codex CLI (e.g., `model = "gpt-5"`, `approval_policy = "on-request"`)
 
 ## model_reasoning_effort
 
@@ -388,13 +416,13 @@ set = { CI = "1" }
 include_only = ["PATH", "HOME"]
 ```
 
-| Field | Type | Default | Description |
-| ------------------------- | -------------------------- | ------- | --------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `inherit` | string | `all` | Starting template for the environment:<br>`all` (clone full parent env), `core` (`HOME`, `PATH`, `USER`, …), or `none` (start empty). |
-| `ignore_default_excludes` | boolean | `false` | When `false`, Codex removes any var whose **name** contains `KEY`, `SECRET`, or `TOKEN` (case-insensitive) before other rules run. |
-| `exclude` | array | `[]` | Case-insensitive glob patterns to drop after the default filter.<br>Examples: `"AWS_*"`, `"AZURE_*"`. |
Examples: `"AWS_*"`, `"AZURE_*"`. | +| Field | Type | Default | Description | +| ------------------------- | -------------------- | ------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | +| `inherit` | string | `all` | Starting template for the environment: `all` (clone full parent env), `core` (`HOME`, `PATH`, `USER`, …), or `none` (start empty). | +| `ignore_default_excludes` | boolean | `false` | When `false`, Codex removes any var whose name contains `KEY` or `TOKEN` (case-insensitive) before other rules run. | +| `exclude` | array | `[]` | Case-insensitive glob patterns to drop after the default filter. Examples: `"AWS_*"`, `"AZURE_*"`. | | `set` | table | `{}` | Explicit key/value overrides or additions – always win over inherited values. | -| `include_only` | array | `[]` | If non-empty, a whitelist of patterns; only variables that match _one_ pattern survive the final step. (Generally used with `inherit = "all"`.) | +| `include_only` | array | `[]` | If non-empty, a whitelist of patterns; only variables that match one pattern survive the final step. (Generally used with `inherit = "all"`.) | The patterns are **glob style**, not full regular expressions: `*` matches any number of characters, `?` matches exactly one, and character classes like @@ -568,17 +596,17 @@ Options that are specific to the TUI. | Key | Type / Values | Notes | | --- | --- | --- | -| `model` | string | Model to use (e.g., `gpt-5`). | -| `model_provider` | string | Provider id from `model_providers` (default: `openai`). | -| `model_context_window` | number | Context window tokens. | -| `model_max_output_tokens` | number | Max output tokens. | -| `approval_policy` | `untrusted` | `on-failure` | `on-request` | `never` | When to prompt for approval. | -| `sandbox_mode` | `read-only` | `workspace-write` | `danger-full-access` | OS sandbox policy. | +| `model` | string | Model name (default: `"gpt-5"`). | +| `model_provider` | string | Provider id from `model_providers` (default: `"openai"`). | +| `model_context_window` | number | Context window tokens (optional; inferred for common models). | +| `model_max_output_tokens` | number | Max output tokens (optional; inferred for common models). | +| `approval_policy` | one of `"untrusted"`, `"on-failure"`, `"on-request"`, `"never"` | When to prompt for approval (default: `"on-request"`). | +| `sandbox_mode` | one of `"read-only"`, `"workspace-write"`, `"danger-full-access"` | OS sandbox policy (default: `"read-only"`). See `docs/sandbox.md`. | | `sandbox_workspace_write.writable_roots` | array | Extra writable roots in workspace‑write. | -| `sandbox_workspace_write.network_access` | boolean | Allow network in workspace‑write (default: false). | -| `sandbox_workspace_write.exclude_tmpdir_env_var` | boolean | Exclude `$TMPDIR` from writable roots (default: false). | -| `sandbox_workspace_write.exclude_slash_tmp` | boolean | Exclude `/tmp` from writable roots (default: false). | -| `disable_response_storage` | boolean | Required for ZDR orgs. | +| `sandbox_workspace_write.network_access` | boolean | Allow network in workspace‑write (default: `false`). | +| `sandbox_workspace_write.exclude_tmpdir_env_var` | boolean | Exclude `$TMPDIR` from writable roots (default: `false`). | +| `sandbox_workspace_write.exclude_slash_tmp` | boolean | Exclude `/tmp` from writable roots (default: `false`). | +| `disable_response_storage` | boolean | Required for ZDR orgs (default: `false`). 
 | `notify` | array | External program for notifications. |
 | `instructions` | string | Currently ignored; use `experimental_instructions_file` or `AGENTS.md`. |
 | `mcp_servers.<id>.command` | string | MCP server launcher command. |
@@ -587,31 +615,133 @@ Options that are specific to the TUI.
 | `model_providers.<id>.name` | string | Display name. |
 | `model_providers.<id>.base_url` | string | API base URL. |
 | `model_providers.<id>.env_key` | string | Env var for API key. |
-| `model_providers.<id>.wire_api` | `chat` | `responses` | Protocol used (default: `chat`). |
+| `model_providers.<id>.wire_api` | `"chat"` or `"responses"` | Protocol used (default: `"chat"`). |
 | `model_providers.<id>.query_params` | map | Extra query params (e.g., Azure `api-version`). |
 | `model_providers.<id>.http_headers` | map | Additional static headers. |
 | `model_providers.<id>.env_http_headers` | map | Headers sourced from env vars. |
-| `model_providers.<id>.request_max_retries` | number | Per‑provider HTTP retry count (default: 4). |
-| `model_providers.<id>.stream_max_retries` | number | SSE stream retry count (default: 5). |
-| `model_providers.<id>.stream_idle_timeout_ms` | number | SSE idle timeout (ms) (default: 300000). |
-| `project_doc_max_bytes` | number | Max bytes to read from `AGENTS.md`. |
+| `model_providers.<id>.request_max_retries` | number | Per‑provider HTTP retry count (default: `4`). |
+| `model_providers.<id>.stream_max_retries` | number | SSE stream retry count (default: `5`). |
+| `model_providers.<id>.stream_idle_timeout_ms` | number | SSE idle timeout (ms) (default: `300000`). |
+| `project_doc_max_bytes` | number | Max bytes to read from `AGENTS.md` (default: `32768`). |
 | `profile` | string | Active profile name. |
 | `profiles.<name>.*` | various | Profile‑scoped overrides of the same keys. |
-| `history.persistence` | `save-all` | `none` | History file persistence (default: `save-all`). |
+| `history.persistence` | `"save-all"` or `"none"` | History file persistence (default: `"save-all"`). |
 | `history.max_bytes` | number | Currently ignored (not enforced). |
-| `file_opener` | `vscode` | `vscode-insiders` | `windsurf` | `cursor` | `none` | URI scheme for clickable citations (default: `vscode`). |
+| `file_opener` | `"vscode"`, `"vscode-insiders"`, `"windsurf"`, `"cursor"`, `"none"` | URI scheme for clickable citations (default: `"vscode"`). |
 | `tui` | table | TUI‑specific options (reserved). |
-| `hide_agent_reasoning` | boolean | Hide model reasoning events. |
-| `show_raw_agent_reasoning` | boolean | Show raw reasoning (when available). |
-| `model_reasoning_effort` | `minimal` | `low` | `medium` | `high` | Responses API reasoning effort. |
-| `model_reasoning_summary` | `auto` | `concise` | `detailed` | `none` | Reasoning summaries. |
-| `model_verbosity` | `low` | `medium` | `high` | GPT‑5 text verbosity (Responses API). |
-| `model_supports_reasoning_summaries` | boolean | Force‑enable reasoning summaries. |
-| `chatgpt_base_url` | string | Base URL for ChatGPT auth flow. |
+| `hide_agent_reasoning` | boolean | Hide model reasoning events (default: `false`). |
+| `show_raw_agent_reasoning` | boolean | Show raw reasoning when available (default: `false`). |
+| `model_reasoning_effort` | `"minimal"`, `"low"`, `"medium"`, `"high"` | Responses API reasoning effort (default: `"medium"`). |
+| `model_reasoning_summary` | `"auto"`, `"concise"`, `"detailed"`, `"none"` | Reasoning summaries (default: `"auto"`). |
+| `model_verbosity` | `"low"`, `"medium"`, `"high"` | GPT‑5 text verbosity (Responses API; optional). |
+| `model_supports_reasoning_summaries` | boolean | Force‑enable reasoning summaries (default: `false`). |
+| `chatgpt_base_url` | string | Base URL for ChatGPT auth flow (default: `"https://chatgpt.com/backend-api/"`). |
 | `experimental_resume` | string (path) | Resume JSONL path (internal/experimental). |
 | `experimental_instructions_file` | string (path) | Replace built‑in instructions (experimental). |
-| `experimental_use_exec_command_tool` | boolean | Use experimental exec command tool. |
+| `experimental_use_exec_command_tool` | boolean | Use experimental exec command tool (default: `false`). |
 | `responses_originator_header_internal_override` | string | Override `originator` header value. |
 | `projects.<path>.trust_level` | string | Mark project/worktree as trusted (only `"trusted"` is recognized). |
-| `preferred_auth_method` | `chatgpt` | `apikey` | Select default auth method (default: `chatgpt`). |
-| `tools.web_search` | boolean | Enable web search tool (alias: `web_search_request`) (default: false). |
+| `preferred_auth_method` | `"chatgpt"` or `"apikey"` | Select default auth method (default: `"chatgpt"`). |
+| `tools.web_search` | boolean | Enable web search tool (alias: `web_search_request`) (default: `false`). |
+| `tools.view_image` | boolean | Enable image attachment tool (default: `true`). |
+
+---
+
+## Examples
+
+Minimal (ChatGPT login; defaults shown explicitly):
+
+```toml
+# ~/.codex/config.toml
+model = "gpt-5"
+approval_policy = "on-request"
+sandbox_mode = "read-only"
+```
+
+OpenAI via Chat Completions (user-defined provider):
+
+```toml
+model = "gpt-4o"
+model_provider = "openai-chat-completions"
+
+[model_providers.openai-chat-completions]
+name = "OpenAI using Chat Completions"
+base_url = "https://api.openai.com/v1"
+env_key = "OPENAI_API_KEY"
+wire_api = "chat"
+# request_max_retries = 4
+# stream_max_retries = 5
+# stream_idle_timeout_ms = 300000
+```
+
+Azure (api-version required):
+
+```toml
+model = "gpt-4o"
+model_provider = "azure"
+
+[model_providers.azure]
+name = "Azure"
+base_url = "https://YOUR_PROJECT_NAME.openai.azure.com/openai"
+env_key = "AZURE_OPENAI_API_KEY"
+query_params = { api-version = "2025-04-01-preview" }
+wire_api = "chat"
+```
+
+Local OSS (e.g., Ollama-compatible on localhost):
+
+```toml
+model = "llama3.1"
+model_provider = "oss" # built-in, Chat Completions-compatible
+```
+
+Workspace-write sandbox with optional network:
+
+```toml
+sandbox_mode = "workspace-write"
+
+[sandbox_workspace_write]
+writable_roots = ["/Users/YOU/.pyenv/shims"]
+network_access = false
+exclude_tmpdir_env_var = false
+exclude_slash_tmp = false
+```
+
+MCP server:
+
+```toml
+[mcp_servers.my-server]
+command = "npx"
+args = ["-y", "mcp-server"]
+env = { "API_KEY" = "value" }
+```
+
+Profiles and precedence:
+
+```toml
+model = "o3"
+approval_policy = "untrusted"
+profile = "o3"
+
+[profiles.o3]
+model = "o3"
+model_provider = "openai"
+approval_policy = "never"
+model_reasoning_effort = "high"
+model_reasoning_summary = "detailed"
+
+[profiles.gpt3]
+model = "gpt-3.5-turbo"
+model_provider = "openai-chat-completions"
+```
+
+Shell environment policy:
+
+```toml
+[shell_environment_policy]
+inherit = "core" # all|core|none (default: all)
+ignore_default_excludes = false # filters *KEY*, *TOKEN*
+exclude = ["AWS_*", "AZURE_*"]
+set = { CI = "1" }
+include_only = ["PATH", "HOME"]
+```
diff --git a/docs/config.toml.example b/docs/config.toml.example
new file mode 100644
index 0000000000..06fdbb99e4
--- /dev/null
+++ b/docs/config.toml.example
@@ -0,0 +1,136 @@
+# Codex CLI configuration example
+#
+# Copy or rename this file to: ~/.codex/config.toml
+# Then edit values to match your setup.
+
+# --- Core settings ---------------------------------------------------------
+
+# Model to use (default is "gpt-5").
+model = "gpt-5"
+
+# When to ask for permission to run commands: "untrusted", "on-failure",
+# "on-request" (default), or "never".
+approval_policy = "on-request"
+
+# Sandboxing policy for command execution: "read-only" (default),
+# "workspace-write", or "danger-full-access".
+sandbox_mode = "read-only"
+
+# URI scheme for clickable file citations: "vscode" (default), "vscode-insiders",
+# "windsurf", "cursor", or "none" to disable hyperlinks.
+file_opener = "vscode"
+
+# Optional: toggle visibility of agent reasoning messages in output.
+# hide_agent_reasoning = false
+# show_raw_agent_reasoning = false
+
+# Optional: Responses API reasoning controls (when supported by the model).
+# model_reasoning_effort = "medium" # minimal|low|medium|high
+# model_reasoning_summary = "auto" # auto|concise|detailed|none
+# model_verbosity = "medium" # low|medium|high (GPT‑5 family)
+
+# Optional: disable server-side response storage (required for ZDR accounts).
+# disable_response_storage = false
+
+# --- Sandbox: workspace-write options -------------------------------------
+
+# Only used when sandbox_mode = "workspace-write".
+# [sandbox_workspace_write]
+# writable_roots = ["/Users/YOU/.pyenv/shims"]
+# network_access = false
+# exclude_tmpdir_env_var = false
+# exclude_slash_tmp = false
+
+# --- Shell environment policy ---------------------------------------------
+
+# Control which environment variables are passed to spawned processes.
+[shell_environment_policy]
+# inherit can be: "all" (default), "core", or "none"
+inherit = "all"
+# When false (default), filter out names containing KEY or TOKEN (case-insensitive)
+ignore_default_excludes = false
+# Additional excludes (case-insensitive globs), e.g., "AWS_*", "AZURE_*"
+exclude = []
+# Force-set / override values
+set = {}
+# If non-empty, only variables matching one of these patterns are kept
+include_only = []
+
+# --- History ---------------------------------------------------------------
+
+[history]
+# "save-all" (default) or "none"
+persistence = "save-all"
+# max_bytes is currently ignored (not enforced)
+# max_bytes = 1048576
+
+# --- Tools -----------------------------------------------------------------
+
+[tools]
+# Enable web search (alias: web_search_request). Default is false.
+web_search = false
+# Enable the image attachment tool. Default is true when unset.
+# view_image = true
+
+# --- Providers -------------------------------------------------------------
+
+# Built-in providers:
+# • openai (Responses API) — default provider; base can be overridden with OPENAI_BASE_URL
+# • oss (Chat Completions) — defaults to http://localhost:11434/v1 or CODEX_OSS_BASE_URL
+
+# To use OpenAI Chat Completions instead of Responses, define a provider:
+# [model_providers.openai-chat-completions]
+# name = "OpenAI using Chat Completions"
+# base_url = "https://api.openai.com/v1"
+# env_key = "OPENAI_API_KEY"
+# wire_api = "chat"
+# request_max_retries = 4
+# stream_max_retries = 5
+# stream_idle_timeout_ms = 300000
+
+# Azure requires api-version in query params and typically uses Chat Completions:
+# [model_providers.azure]
+# name = "Azure"
+# base_url = "https://YOUR_PROJECT_NAME.openai.azure.com/openai"
+# env_key = "AZURE_OPENAI_API_KEY"
+# query_params = { api-version = "2025-04-01-preview" }
+# wire_api = "chat"
+
+# --- Profiles --------------------------------------------------------------
+
+# Uncomment to select a default profile (can be overridden with --profile)
+# profile = "o3"
+
+[profiles.o3]
+model = "o3"
+model_provider = "openai"
+approval_policy = "never"
+model_reasoning_effort = "high"
+model_reasoning_summary = "detailed"
+
+[profiles.gpt3]
+model = "gpt-3.5-turbo"
+model_provider = "openai-chat-completions"
+
+# --- MCP Servers -----------------------------------------------------------
+
+# Example MCP server (commented out so copy-paste won't try to start it):
+# [mcp_servers.my-server]
+# command = "npx"
+# args = ["-y", "mcp-server"]
+# env = { "API_KEY" = "value" }
+
+# --- Notifications ---------------------------------------------------------
+
+# Program is executed with a single JSON argument when a turn completes.
+# Example on macOS using a custom script:
+# notify = ["python3", "/Users/YOU/.codex/notify.py"]
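+
+# --- Project trust ----------------------------------------------------------
+
+# Optional: mark a project/worktree as trusted (only "trusted" is recognized;
+# see projects.<path>.trust_level in docs/config.md). The path below is a
+# hypothetical example; use your own project directory.
+# [projects."/Users/YOU/code/my-project"]
+# trust_level = "trusted"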