diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
new file mode 100644
index 000000000..259e59ab3
--- /dev/null
+++ b/.devcontainer/Dockerfile
@@ -0,0 +1,29 @@
+FROM ubuntu:22.04
+
+ARG DEBIAN_FRONTEND=noninteractive
+# enable 'universe' because musl-tools & clang live there
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+      software-properties-common && \
+    add-apt-repository --yes universe
+
+# now install build deps
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+      build-essential curl git ca-certificates \
+      pkg-config clang musl-tools libssl-dev && \
+    rm -rf /var/lib/apt/lists/*
+
+# non-root dev user
+ARG USER=dev
+ARG UID=1000
+RUN useradd -m -u $UID $USER
+USER $USER
+
+# install Rust + musl target as dev user
+RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal && \
+    ~/.cargo/bin/rustup target add aarch64-unknown-linux-musl
+
+ENV PATH="/home/${USER}/.cargo/bin:${PATH}"
+
+WORKDIR /workspace
diff --git a/.devcontainer/README.md b/.devcontainer/README.md
new file mode 100644
index 000000000..58e4458a0
--- /dev/null
+++ b/.devcontainer/README.md
@@ -0,0 +1,38 @@
+# Containerized Development
+
+We provide the following options to facilitate Codex development in a container. This is particularly useful for verifying the Linux build when working on a macOS host.
+
+## Docker
+
+To build the Docker image locally for x64 and then run it with the repo mounted under `/workspace`:
+
+```shell
+CODEX_DOCKER_IMAGE_NAME=codex-linux-dev
+docker build --platform=linux/amd64 -t "$CODEX_DOCKER_IMAGE_NAME" ./.devcontainer
+docker run --platform=linux/amd64 --rm -it -e CARGO_TARGET_DIR=/workspace/codex-rs/target-amd64 -v "$PWD":/workspace -w /workspace/codex-rs "$CODEX_DOCKER_IMAGE_NAME"
+```
+
+Note that `/workspace/codex-rs/target` will contain the binaries built for your host platform, so we include `-e CARGO_TARGET_DIR=/workspace/codex-rs/target-amd64` in the `docker run` command so that the binaries built inside your container are written to a separate directory.
+
+For arm64, specify `--platform=linux/arm64` instead for both `docker build` and `docker run`.
+
+Currently, the `Dockerfile` works for both x64 and arm64 Linux, though you need to run `rustup target add x86_64-unknown-linux-musl` yourself to install the musl toolchain for x64.
+
+## VS Code
+
+VS Code recognizes the `devcontainer.json` file and gives you the option to develop Codex in a container. Currently, `devcontainer.json` builds and runs the `arm64` flavor of the container.
+
+From the integrated terminal in VS Code, you can build either flavor of the `arm64` build (GNU or musl):
+
+```shell
+cargo build --target aarch64-unknown-linux-musl
+cargo build --target aarch64-unknown-linux-gnu
+```
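+
+If you are instead in the x64 container from the Docker section above, the analogous builds look like this (a sketch; note the extra `rustup target add`, since the image only pre-installs the aarch64 musl target):
+
+```shell
+rustup target add x86_64-unknown-linux-musl
+cargo build --target x86_64-unknown-linux-musl
+cargo build --target x86_64-unknown-linux-gnu
+```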
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 000000000..17aee9142
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,29 @@
+{
+  "name": "Codex",
+  "build": {
+    "dockerfile": "Dockerfile",
+    "context": "..",
+    "platform": "linux/arm64"
+  },
+
+  /* Force VS Code to run the container as arm64 in
+     case your host is x86 (or vice-versa). */
+  "runArgs": ["--platform=linux/arm64"],
+
+  "containerEnv": {
+    "RUST_BACKTRACE": "1",
+    "CARGO_TARGET_DIR": "${containerWorkspaceFolder}/codex-rs/target-arm64"
+  },
+
+  "remoteUser": "dev",
+  "customizations": {
+    "vscode": {
+      "settings": {
+        "terminal.integrated.defaultProfile.linux": "bash"
+      },
+      "extensions": [
+        "rust-lang.rust-analyzer"
+      ]
+    }
+  }
+}
diff --git a/.github/actions/codex/.gitignore b/.github/actions/codex/.gitignore
new file mode 100644
index 000000000..2ccbe4656
--- /dev/null
+++ b/.github/actions/codex/.gitignore
@@ -0,0 +1 @@
+/node_modules/
diff --git a/.github/actions/codex/.prettierrc.toml b/.github/actions/codex/.prettierrc.toml
new file mode 100644
index 000000000..4c58c583e
--- /dev/null
+++ b/.github/actions/codex/.prettierrc.toml
@@ -0,0 +1,8 @@
+printWidth = 80
+quoteProps = "consistent"
+semi = true
+tabWidth = 2
+trailingComma = "all"
+
+# Preserve existing behavior for markdown/text wrapping.
+proseWrap = "preserve"
diff --git a/.github/actions/codex/README.md b/.github/actions/codex/README.md
new file mode 100644
index 000000000..a0be8ecb6
--- /dev/null
+++ b/.github/actions/codex/README.md
@@ -0,0 +1,157 @@
+# openai/codex-action
+
+`openai/codex-action` is a GitHub Action that facilitates the use of [Codex](https://github.com/openai/codex) on GitHub issues and pull requests. Using the action, you associate **labels** with prompts so that Codex runs with the appropriate prompt for the given context. Codex will respond by posting comments or creating PRs, whichever you specify!
+
+Here is a sample workflow that uses `openai/codex-action`:
+
+```yaml
+name: Codex
+
+on:
+  issues:
+    types: [opened, labeled]
+  pull_request:
+    branches: [main]
+    types: [labeled]
+
+jobs:
+  codex:
+    if: ... # optional, but can be effective in conserving CI resources
+    runs-on: ubuntu-latest
+    # TODO(mbolin): Need to verify if/when `write` is necessary.
+    permissions:
+      contents: write
+      issues: write
+      pull-requests: write
+    steps:
+      # By default, Codex runs network disabled using --full-auto, so perform
+      # any setup that requires network (such as installing dependencies)
+      # before openai/codex-action.
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Run Codex
+        uses: openai/codex-action@latest
+        with:
+          openai_api_key: ${{ secrets.CODEX_OPENAI_API_KEY }}
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+```
+
+See sample usage in [`codex.yml`](../../workflows/codex.yml).
+
+## Triggering the Action
+
+Using the sample workflow above, we have:
+
+```yaml
+on:
+  issues:
+    types: [opened, labeled]
+  pull_request:
+    branches: [main]
+    types: [labeled]
+```
+
+which means our workflow will be triggered when any of the following events occur:
+
+- a label is added to an issue
+- a label is added to a pull request against the `main` branch
+
+### Label-Based Triggers
+
+To define a GitHub label that should trigger Codex, create a file named `.github/codex/labels/LABEL-NAME.md` in your repository, where `LABEL-NAME` is the name of the label. The content of the file is the prompt template to use when the label is added (see more on [Prompt Template Variables](#prompt-template-variables) below).
+
+For example, if the file `.github/codex/labels/codex-review.md` exists, then:
+
+- Adding the `codex-review` label will trigger the workflow containing the `openai/codex-action` GitHub Action.
+- When `openai/codex-action` starts, it will replace the `codex-review` label with `codex-review-in-progress`.
+- When `openai/codex-action` is finished, it will replace the `codex-review-in-progress` label with `codex-review-completed`.
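+
+For illustration, a minimal label file such as `.github/codex/labels/codex-triage.md` might contain nothing more than a short instruction plus the issue context (a sketch; the placeholders are described under [Prompt Template Variables](#prompt-template-variables)):
+
+```markdown
+Troubleshoot whether the reported issue is valid and summarize your findings in a comment.
+
+### {CODEX_ACTION_ISSUE_TITLE}
+
+{CODEX_ACTION_ISSUE_BODY}
+```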
+
+If Codex sees that either `codex-review-in-progress` or `codex-review-completed` is already present, it will not perform the action.
+
+As determined by the [default config](./src/default-label-config.ts), Codex will act on the following labels by default:
+
+- Adding the `codex-code-review` label to a pull request will have Codex review the PR and post its review as a comment.
+- Adding the `codex-investigate-issue` label to an issue will have Codex investigate the issue and report its findings as a comment.
+- Adding the `codex-attempt-fix` label to an issue will have Codex attempt to fix the issue and create a PR with the fix, if any.
+
+## Action Inputs
+
+The `openai/codex-action` GitHub Action takes the following inputs:
+
+### `openai_api_key` (required)
+
+Set your `OPENAI_API_KEY` as a [repository secret](https://docs.github.com/en/actions/security-for-github-actions/security-guides/using-secrets-in-github-actions). See **Secrets and variables**, then **Actions**, in the settings for your GitHub repo.
+
+Note that the secret name does not have to be `OPENAI_API_KEY`. For example, you might want to name it `CODEX_OPENAI_API_KEY` and then configure it on `openai/codex-action` as follows:
+
+```yaml
+openai_api_key: ${{ secrets.CODEX_OPENAI_API_KEY }}
+```
+
+### `github_token` (required)
+
+This is required so that Codex can post a comment or create a PR. Set this value on the action as follows:
+
+```yaml
+github_token: ${{ secrets.GITHUB_TOKEN }}
+```
+
+### `codex_args`
+
+A whitespace-delimited list of arguments to pass to Codex. Defaults to `--config hide_agent_reasoning=true --full-auto`. For example, if you want to override the default model to use `o3`:
+
+```yaml
+codex_args: "--full-auto --model o3"
+```
+
+For more complex configurations, use the `codex_home` input.
+
+### `codex_home`
+
+If set, the value to use for the `$CODEX_HOME` environment variable when running Codex. As explained [in the docs](https://github.com/openai/codex/tree/main/codex-rs#readme), this folder can contain the `config.toml` to configure Codex, custom instructions, and log files.
+
+This should be a relative path within your repo.
+
+## Prompt Template Variables
+
+Prompt templates (whether defined via label files, `"prompt"`, or `"promptPath"`) are populated and passed to Codex in response to certain events. All template variables are of the form `{CODEX_ACTION_...}`; the supported values are defined below.
+
+### `CODEX_ACTION_ISSUE_TITLE`
+
+If the action was triggered on a GitHub issue, this is the issue title.
+
+Specifically, it is read as `.issue.title` from the `$GITHUB_EVENT_PATH`.
+
+### `CODEX_ACTION_ISSUE_BODY`
+
+If the action was triggered on a GitHub issue, this is the issue body.
+
+Specifically, it is read as `.issue.body` from the `$GITHUB_EVENT_PATH`.
+
+### `CODEX_ACTION_GITHUB_EVENT_PATH`
+
+The value of the `$GITHUB_EVENT_PATH` environment variable, which is the path to the file that contains the JSON payload for the event that triggered the workflow. Codex can use `jq` to read only the fields of interest from this file.
+
+### `CODEX_ACTION_PR_DIFF`
+
+If the action was triggered on a pull request, this is the diff between the base and head commits of the PR. It is the output from `git diff`.
+
+Note that the content of the diff could be quite large, so it is generally safer to point Codex at `CODEX_ACTION_GITHUB_EVENT_PATH` and let it decide how it wants to explore the change.
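+
+For example, a prompt template could have Codex pull just the refs of interest out of the payload with `jq` (an illustrative sketch):
+
+```shell
+# Read the PR's base and head refs from the event payload.
+jq -r '.pull_request.base.ref, .pull_request.head.ref' "$GITHUB_EVENT_PATH"
+```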
diff --git a/.github/actions/codex/action.yml b/.github/actions/codex/action.yml
new file mode 100644
index 000000000..f0af1cb3e
--- /dev/null
+++ b/.github/actions/codex/action.yml
@@ -0,0 +1,124 @@
+name: "Codex [reusable action]"
+description: "A reusable action that runs Codex."
+
+inputs:
+  openai_api_key:
+    description: "The value to use as the OPENAI_API_KEY environment variable when running Codex."
+    required: true
+  trigger_phrase:
+    description: "Text to trigger Codex from a PR/issue body or comment."
+    required: false
+    default: ""
+  github_token:
+    description: "Token so Codex can comment on the PR or issue."
+    required: true
+  codex_args:
+    description: "A whitespace-delimited list of arguments to pass to Codex. Due to limitations in YAML, arguments with spaces are not supported. For more complex configurations, use the `codex_home` input."
+    required: false
+    default: "--config hide_agent_reasoning=true --full-auto"
+  codex_home:
+    description: "Value to use as the CODEX_HOME environment variable when running Codex."
+    required: false
+  codex_release_tag:
+    description: "The tag of the Codex release to download and run."
+    required: false
+    default: "codex-rs-ca8e97fcbcb991e542b8689f2d4eab9d30c399d6-1-rust-v0.0.2505302325"
+
+runs:
+  using: "composite"
+  steps:
+    # Do this in Bash so we do not even bother to install Bun if the sender
+    # does not have write access to the repo.
+    - name: Verify user has write access to the repo.
+      env:
+        GH_TOKEN: ${{ github.token }}
+      shell: bash
+      run: |
+        set -euo pipefail
+
+        PERMISSION=$(gh api \
+          "/repos/${GITHUB_REPOSITORY}/collaborators/${{ github.event.sender.login }}/permission" \
+          | jq -r '.permission')
+
+        if [[ "$PERMISSION" != "admin" && "$PERMISSION" != "write" ]]; then
+          exit 1
+        fi
+
+    - name: Download Codex
+      env:
+        GH_TOKEN: ${{ github.token }}
+      shell: bash
+      run: |
+        set -euo pipefail
+
+        # Determine OS/arch and corresponding Codex artifact name.
+        uname_s=$(uname -s)
+        uname_m=$(uname -m)
+
+        case "$uname_s" in
+          Linux*) os="linux" ;;
+          Darwin*) os="apple-darwin" ;;
+          *) echo "Unsupported operating system: $uname_s"; exit 1 ;;
+        esac
+
+        case "$uname_m" in
+          x86_64*) arch="x86_64" ;;
+          arm64*|aarch64*) arch="aarch64" ;;
+          *) echo "Unsupported architecture: $uname_m"; exit 1 ;;
+        esac
+
+        # linux builds differentiate between musl and gnu.
+        if [[ "$os" == "linux" ]]; then
+          if [[ "$arch" == "x86_64" ]]; then
+            triple="${arch}-unknown-linux-musl"
+          else
+            # Only other supported linux build is aarch64 gnu.
+            triple="${arch}-unknown-linux-gnu"
+          fi
+        else
+          # macOS
+          triple="${arch}-apple-darwin"
+        fi
+
+        # Note that if we start baking version numbers into the artifact name,
+        # we will need to update this action.yml file to match.
+        artifact="codex-exec-${triple}.tar.gz"
+
+        gh release download ${{ inputs.codex_release_tag }} --repo openai/codex \
+          --pattern "$artifact" --output - \
+          | tar xzO > /usr/local/bin/codex-exec
+        chmod +x /usr/local/bin/codex-exec
+
+        # Display Codex version to confirm binary integrity; ensure we point it
+        # at the checked-out repository via --cd so that any subsequent commands
+        # use the correct working directory.
+        codex-exec --cd "$GITHUB_WORKSPACE" --version
+
+    - name: Install Bun
+      uses: oven-sh/setup-bun@v2
+      with:
+        bun-version: 1.2.11
+
+    - name: Install dependencies
+      shell: bash
+      run: |
+        cd ${{ github.action_path }}
+        bun install --production
+
+    - name: Run Codex
+      shell: bash
+      run: bun run ${{ github.action_path }}/src/main.ts
+      # Process args plus environment variables are typically capped at 128 KiB
+      # in total, so the values below should fit within that limit.
+      env:
+        INPUT_CODEX_ARGS: ${{ inputs.codex_args || '' }}
+        INPUT_CODEX_HOME: ${{ inputs.codex_home || '' }}
+        INPUT_TRIGGER_PHRASE: ${{ inputs.trigger_phrase || '' }}
+        OPENAI_API_KEY: ${{ inputs.openai_api_key }}
+        GITHUB_TOKEN: ${{ inputs.github_token }}
+        GITHUB_EVENT_ACTION: ${{ github.event.action || '' }}
+        GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name || '' }}
+        GITHUB_EVENT_ISSUE_NUMBER: ${{ github.event.issue.number || '' }}
+        GITHUB_EVENT_ISSUE_BODY: ${{ github.event.issue.body || '' }}
+        GITHUB_EVENT_REVIEW_BODY: ${{ github.event.review.body || '' }}
+        GITHUB_EVENT_COMMENT_BODY: ${{ github.event.comment.body || '' }}
diff --git a/.github/actions/codex/bun.lock b/.github/actions/codex/bun.lock
new file mode 100644
index 000000000..11b791654
--- /dev/null
+++ b/.github/actions/codex/bun.lock
@@ -0,0 +1,85 @@
+{
+  "lockfileVersion": 1,
+  "workspaces": {
+    "": {
+      "name": "codex-action",
+      "dependencies": {
+        "@actions/core": "^1.11.1",
+        "@actions/github": "^6.0.1",
+      },
+      "devDependencies": {
+        "@types/bun": "^1.2.11",
+        "@types/node": "^22.15.21",
+        "prettier": "^3.5.3",
+        "typescript": "^5.8.3",
+      },
+    },
+  },
+  "packages": {
+    "@actions/core": ["@actions/core@1.11.1", "", { "dependencies": { "@actions/exec": "^1.1.1", "@actions/http-client": "^2.0.1" } }, "sha512-hXJCSrkwfA46Vd9Z3q4cpEpHB1rL5NG04+/rbqW9d3+CSvtB1tYe8UTpAlixa1vj0m/ULglfEK2UKxMGxCxv5A=="],
+
+    "@actions/exec": ["@actions/exec@1.1.1", "", { "dependencies": { "@actions/io": "^1.0.1" } }, "sha512-+sCcHHbVdk93a0XT19ECtO/gIXoxvdsgQLzb2fE2/5sIZmWQuluYyjPQtrtTHdU1YzTZ7bAPN4sITq2xi1679w=="],
+
+    "@actions/github": ["@actions/github@6.0.1", "", { "dependencies": { "@actions/http-client": "^2.2.0", "@octokit/core": "^5.0.1", "@octokit/plugin-paginate-rest": "^9.2.2", "@octokit/plugin-rest-endpoint-methods": "^10.4.0", "@octokit/request": "^8.4.1", "@octokit/request-error": "^5.1.1", "undici": "^5.28.5" } }, "sha512-xbZVcaqD4XnQAe35qSQqskb3SqIAfRyLBrHMd/8TuL7hJSz2QtbDwnNM8zWx4zO5l2fnGtseNE3MbEvD7BxVMw=="],
+
+    "@actions/http-client": ["@actions/http-client@2.2.3", "", { "dependencies": { "tunnel": "^0.0.6", "undici": "^5.25.4" } }, "sha512-mx8hyJi/hjFvbPokCg4uRd4ZX78t+YyRPtnKWwIl+RzNaVuFpQHfmlGVfsKEJN8LwTCvL+DfVgAM04XaHkm6bA=="],
+
+    "@actions/io": ["@actions/io@1.1.3", "", {}, "sha512-wi9JjgKLYS7U/z8PPbco+PvTb/nRWjeoFlJ1Qer83k/3C5PHQi28hiVdeE2kHXmIL99mQFawx8qt/JPjZilJ8Q=="],
+
+    "@fastify/busboy": ["@fastify/busboy@2.1.1", "", {}, "sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA=="],
+
+    "@octokit/auth-token": ["@octokit/auth-token@4.0.0", "", {}, "sha512-tY/msAuJo6ARbK6SPIxZrPBms3xPbfwBrulZe0Wtr/DIY9lje2HeV1uoebShn6mx7SjCHif6EjMvoREj+gZ+SA=="],
+
+    "@octokit/core": ["@octokit/core@5.2.1", "", { "dependencies": { "@octokit/auth-token": "^4.0.0", "@octokit/graphql": "^7.1.0", "@octokit/request": "^8.4.1", "@octokit/request-error": "^5.1.1", "@octokit/types": "^13.0.0", "before-after-hook": "^2.2.0", "universal-user-agent": "^6.0.0" } }, "sha512-dKYCMuPO1bmrpuogcjQ8z7ICCH3FP6WmxpwC03yjzGfZhj9fTJg6+bS1+UAplekbN2C+M61UNllGOOoAfGCrdQ=="],
+
+    "@octokit/endpoint": ["@octokit/endpoint@9.0.6", "", { "dependencies": { "@octokit/types": "^13.1.0", "universal-user-agent": "^6.0.0" } }, "sha512-H1fNTMA57HbkFESSt3Y9+FBICv+0jFceJFPWDePYlR/iMGrwM5ph+Dd4XRQs+8X+PUFURLQgX9ChPfhJ/1uNQw=="],
+
+    "@octokit/graphql": ["@octokit/graphql@7.1.1", "", { "dependencies": { "@octokit/request": "^8.4.1", "@octokit/types": "^13.0.0", "universal-user-agent": "^6.0.0" } }, "sha512-3mkDltSfcDUoa176nlGoA32RGjeWjl3K7F/BwHwRMJUW/IteSa4bnSV8p2ThNkcIcZU2umkZWxwETSSCJf2Q7g=="],
+
+    "@octokit/openapi-types": ["@octokit/openapi-types@24.2.0", "", {}, "sha512-9sIH3nSUttelJSXUrmGzl7QUBFul0/mB8HRYl3fOlgHbIWG+WnYDXU3v/2zMtAvuzZ/ed00Ei6on975FhBfzrg=="],
+
+    "@octokit/plugin-paginate-rest": ["@octokit/plugin-paginate-rest@9.2.2", "", { "dependencies": { "@octokit/types": "^12.6.0" }, "peerDependencies": { "@octokit/core": "5" } }, "sha512-u3KYkGF7GcZnSD/3UP0S7K5XUFT2FkOQdcfXZGZQPGv3lm4F2Xbf71lvjldr8c1H3nNbF+33cLEkWYbokGWqiQ=="],
+
+    "@octokit/plugin-rest-endpoint-methods": ["@octokit/plugin-rest-endpoint-methods@10.4.1", "", { "dependencies": { "@octokit/types": "^12.6.0" }, "peerDependencies": { "@octokit/core": "5" } }, "sha512-xV1b+ceKV9KytQe3zCVqjg+8GTGfDYwaT1ATU5isiUyVtlVAO3HNdzpS4sr4GBx4hxQ46s7ITtZrAsxG22+rVg=="],
+
+    "@octokit/request": ["@octokit/request@8.4.1", "", { "dependencies": { "@octokit/endpoint": "^9.0.6", "@octokit/request-error": "^5.1.1", "@octokit/types": "^13.1.0", "universal-user-agent": "^6.0.0" } }, "sha512-qnB2+SY3hkCmBxZsR/MPCybNmbJe4KAlfWErXq+rBKkQJlbjdJeS85VI9r8UqeLYLvnAenU8Q1okM/0MBsAGXw=="],
+
+    "@octokit/request-error": ["@octokit/request-error@5.1.1", "", { "dependencies": { "@octokit/types": "^13.1.0", "deprecation": "^2.0.0", "once": "^1.4.0" } }, "sha512-v9iyEQJH6ZntoENr9/yXxjuezh4My67CBSu9r6Ve/05Iu5gNgnisNWOsoJHTP6k0Rr0+HQIpnH+kyammu90q/g=="],
+
+    "@octokit/types": ["@octokit/types@13.10.0", "", { "dependencies": { "@octokit/openapi-types": "^24.2.0" } }, "sha512-ifLaO34EbbPj0Xgro4G5lP5asESjwHracYJvVaPIyXMuiuXLlhic3S47cBdTb+jfODkTE5YtGCLt3Ay3+J97sA=="],
+
+    "@types/bun": ["@types/bun@1.2.13", "", { "dependencies": { "bun-types": "1.2.13" } }, "sha512-u6vXep/i9VBxoJl3GjZsl/BFIsvML8DfVDO0RYLEwtSZSp981kEO1V5NwRcO1CPJ7AmvpbnDCiMKo3JvbDEjAg=="],
+
+    "@types/node": ["@types/node@22.15.21", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-EV/37Td6c+MgKAbkcLG6vqZ2zEYHD7bvSrzqqs2RIhbA6w3x+Dqz8MZM3sP6kGTeLrdoOgKZe+Xja7tUB2DNkQ=="],
+
+    "before-after-hook": ["before-after-hook@2.2.3", "", {}, "sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ=="],
+
+    "bun-types": ["bun-types@1.2.13", "", { "dependencies": { "@types/node": "*" } }, "sha512-rRjA1T6n7wto4gxhAO/ErZEtOXyEZEmnIHQfl0Dt1QQSB4QV0iP6BZ9/YB5fZaHFQ2dwHFrmPaRQ9GGMX01k9Q=="],
+
+    "deprecation": ["deprecation@2.3.1", "", {}, "sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ=="],
+
+    "once": ["once@1.4.0", "", { "dependencies": { "wrappy": "1" } }, "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w=="],
+
+    "prettier": ["prettier@3.5.3", "", { "bin": { "prettier": "bin/prettier.cjs" } }, "sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw=="],
+
+    "tunnel": ["tunnel@0.0.6", "", {}, "sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg=="],
+
+    "typescript": ["typescript@5.8.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ=="],
+
+    "undici": ["undici@5.29.0", "", { "dependencies": { "@fastify/busboy": "^2.0.0" } }, "sha512-raqeBD6NQK4SkWhQzeYKd1KmIG6dllBOTt55Rmkt4HtI9mwdWtJljnrXjAFUBLTSN67HWrOIZ3EPF4kjUw80Bg=="],
+
+    "undici-types": ["undici-types@6.21.0", "", {}, "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ=="],
+
+    "universal-user-agent": ["universal-user-agent@6.0.1", "", {}, "sha512-yCzhz6FN2wU1NiiQRogkTQszlQSlpWaw8SvVegAc+bDxbzHgh1vX8uIe8OYyMH6DwH+sdTJsgMl36+mSMdRJIQ=="],
+
+    "wrappy": ["wrappy@1.0.2", "", {}, "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="],
+
+    "@octokit/plugin-paginate-rest/@octokit/types": ["@octokit/types@12.6.0", "", { "dependencies": { "@octokit/openapi-types": "^20.0.0" } }, "sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw=="],
+
+    "@octokit/plugin-rest-endpoint-methods/@octokit/types": ["@octokit/types@12.6.0", "", { "dependencies": { "@octokit/openapi-types": "^20.0.0" } }, "sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw=="],
+
+    "@octokit/plugin-paginate-rest/@octokit/types/@octokit/openapi-types": ["@octokit/openapi-types@20.0.0", "", {}, "sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA=="],
+
+    "@octokit/plugin-rest-endpoint-methods/@octokit/types/@octokit/openapi-types": ["@octokit/openapi-types@20.0.0", "", {}, "sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA=="],
+  }
+}
diff --git a/.github/actions/codex/package.json b/.github/actions/codex/package.json
new file mode 100644
index 000000000..bb35ee3a4
--- /dev/null
+++ b/.github/actions/codex/package.json
@@ -0,0 +1,21 @@
+{
+  "name": "codex-action",
+  "version": "0.0.0",
+  "private": true,
+  "scripts": {
+    "format": "prettier --check src",
+    "format:fix": "prettier --write src",
+    "test": "bun test",
+    "typecheck": "tsc"
+  },
+  "dependencies": {
+    "@actions/core": "^1.11.1",
+    "@actions/github": "^6.0.1"
+  },
+  "devDependencies": {
+    "@types/bun": "^1.2.11",
+    "@types/node": "^22.15.21",
+    "prettier": "^3.5.3",
+    "typescript": "^5.8.3"
+  }
+}
diff --git a/.github/actions/codex/src/add-reaction.ts b/.github/actions/codex/src/add-reaction.ts
new file mode 100644
index 000000000..85026dd9a
--- /dev/null
+++ b/.github/actions/codex/src/add-reaction.ts
@@ -0,0 +1,85 @@
+import * as github from "@actions/github";
+import type { EnvContext } from "./env-context";
+
+/**
+ * Add an "eyes" reaction to the entity (issue, issue comment, or pull request
+ * review comment) that triggered the current Codex invocation.
+ *
+ * The purpose is to provide immediate feedback to the user – similar to the
+ * *-in-progress label flow – indicating that the bot has acknowledged the
+ * request and is working on it.
+ *
+ * We attempt to add the reaction best suited for the current GitHub event:
+ *
+ * • issues → POST /repos/{owner}/{repo}/issues/{issue_number}/reactions
+ * • issue_comment → POST /repos/{owner}/{repo}/issues/comments/{comment_id}/reactions
+ * • pull_request_review_comment → POST /repos/{owner}/{repo}/pulls/comments/{comment_id}/reactions
+ *
+ * If the specific target is unavailable (e.g. unexpected payload shape) we
+ * silently skip instead of failing the whole action because the reaction is
+ * merely cosmetic.
+ */
+export async function addEyesReaction(ctx: EnvContext): Promise<void> {
+  const octokit = ctx.getOctokit();
+  const { owner, repo } = github.context.repo;
+  const eventName = github.context.eventName;
+
+  try {
+    switch (eventName) {
+      case "issue_comment": {
+        const commentId = (github.context.payload as any)?.comment?.id;
+        if (commentId) {
+          await octokit.rest.reactions.createForIssueComment({
+            owner,
+            repo,
+            comment_id: commentId,
+            content: "eyes",
+          });
+          return;
+        }
+        break;
+      }
+      case "pull_request_review_comment": {
+        const commentId = (github.context.payload as any)?.comment?.id;
+        if (commentId) {
+          await octokit.rest.reactions.createForPullRequestReviewComment({
+            owner,
+            repo,
+            comment_id: commentId,
+            content: "eyes",
+          });
+          return;
+        }
+        break;
+      }
+      case "issues": {
+        const issueNumber = github.context.issue.number;
+        if (issueNumber) {
+          await octokit.rest.reactions.createForIssue({
+            owner,
+            repo,
+            issue_number: issueNumber,
+            content: "eyes",
+          });
+          return;
+        }
+        break;
+      }
+      default: {
+        // Fallback: try to react to the issue/PR if we have a number.
+        const issueNumber = github.context.issue.number;
+        if (issueNumber) {
+          await octokit.rest.reactions.createForIssue({
+            owner,
+            repo,
+            issue_number: issueNumber,
+            content: "eyes",
+          });
+        }
+      }
+    }
+  } catch (error) {
+    // Do not fail the action if reaction creation fails – log and continue.
+    console.warn(`Failed to add "eyes" reaction: ${error}`);
+  }
+}
diff --git a/.github/actions/codex/src/comment.ts b/.github/actions/codex/src/comment.ts
new file mode 100644
index 000000000..6e2833aff
--- /dev/null
+++ b/.github/actions/codex/src/comment.ts
@@ -0,0 +1,53 @@
+import type { EnvContext } from "./env-context";
+import { runCodex } from "./run-codex";
+import { postComment } from "./post-comment";
+import { addEyesReaction } from "./add-reaction";
+
+/**
+ * Handle `issue_comment` and `pull_request_review_comment` events once we know
+ * the action is supported.
+ */
+export async function onComment(ctx: EnvContext): Promise<void> {
+  const triggerPhrase = ctx.tryGet("INPUT_TRIGGER_PHRASE");
+  if (!triggerPhrase) {
+    console.warn("Empty trigger phrase: skipping.");
+    return;
+  }
+
+  // Attempt to get the body of the comment from the environment. Depending on
+  // the event type either `GITHUB_EVENT_COMMENT_BODY` (issue & PR comments) or
+  // `GITHUB_EVENT_REVIEW_BODY` (PR reviews) is set.
+  const commentBody =
+    ctx.tryGetNonEmpty("GITHUB_EVENT_COMMENT_BODY") ??
+    ctx.tryGetNonEmpty("GITHUB_EVENT_REVIEW_BODY") ??
+    ctx.tryGetNonEmpty("GITHUB_EVENT_ISSUE_BODY");
+
+  if (!commentBody) {
+    console.warn("Comment body not found in environment: skipping.");
+    return;
+  }
+
+  // Check if the trigger phrase is present.
+  if (!commentBody.includes(triggerPhrase)) {
+    console.log(
+      `Trigger phrase '${triggerPhrase}' not found: nothing to do for this comment.`,
+    );
+    return;
+  }
+
+  // Derive the prompt by removing the trigger phrase. Remove only the first
+  // occurrence to keep any additional occurrences that might be meaningful.
+  const prompt = commentBody.replace(triggerPhrase, "").trim();
+
+  if (prompt.length === 0) {
+    console.warn("Prompt is empty after removing trigger phrase: skipping");
+    return;
+  }
+
+  // Provide immediate feedback that we are working on the request.
+  await addEyesReaction(ctx);
+
+  // Run Codex and post the response as a new comment.
+  const lastMessage = await runCodex(prompt, ctx);
+  await postComment(lastMessage, ctx);
+}
diff --git a/.github/actions/codex/src/config.ts b/.github/actions/codex/src/config.ts
new file mode 100644
index 000000000..1f98f946a
--- /dev/null
+++ b/.github/actions/codex/src/config.ts
@@ -0,0 +1,11 @@
+import { readdirSync, statSync } from "fs";
+import * as path from "path";
+
+export interface Config {
+  labels: Record<string, LabelConfig>;
+}
+
+export interface LabelConfig {
+  /** Returns the prompt template. */
+  getPromptTemplate(): string;
+}
diff --git a/.github/actions/codex/src/default-label-config.ts b/.github/actions/codex/src/default-label-config.ts
new file mode 100644
index 000000000..270f1f9c5
--- /dev/null
+++ b/.github/actions/codex/src/default-label-config.ts
@@ -0,0 +1,44 @@
+import type { Config } from "./config";
+
+export function getDefaultConfig(): Config {
+  return {
+    labels: {
+      "codex-investigate-issue": {
+        getPromptTemplate: () =>
+          `
+Troubleshoot whether the reported issue is valid.
+
+Provide a concise and respectful comment summarizing the findings.
+
+### {CODEX_ACTION_ISSUE_TITLE}
+
+{CODEX_ACTION_ISSUE_BODY}
+`.trim(),
+      },
+      "codex-code-review": {
+        getPromptTemplate: () =>
+          `
+Review this PR and respond with a very concise final message, formatted in Markdown.
+
+There should be a summary of the changes (1-2 sentences) and a few bullet points if necessary.
+
+Then provide the **review** (1-2 sentences plus bullet points, friendly tone).
+
+{CODEX_ACTION_GITHUB_EVENT_PATH} contains the JSON that triggered this GitHub workflow. It contains the \`base\` and \`head\` refs that define this PR. Both refs are available locally.
+`.trim(),
+      },
+      "codex-attempt-fix": {
+        getPromptTemplate: () =>
+          `
+Attempt to solve the reported issue.
+
+If a code change is required, create a new branch, commit the fix, and open a pull-request that resolves the problem.
+
+### {CODEX_ACTION_ISSUE_TITLE}
+
+{CODEX_ACTION_ISSUE_BODY}
+`.trim(),
+      },
+    },
+  };
+}
diff --git a/.github/actions/codex/src/env-context.ts b/.github/actions/codex/src/env-context.ts
new file mode 100644
index 000000000..9c18e0e6a
--- /dev/null
+++ b/.github/actions/codex/src/env-context.ts
@@ -0,0 +1,116 @@
+/*
+ * Centralised access to environment variables used by the Codex GitHub
+ * Action.
+ *
+ * To enable proper unit-testing we avoid reading from `process.env` at module
+ * initialisation time. Instead an `EnvContext` object is created (usually from
+ * the real `process.env`) and passed around explicitly or – where that is not
+ * yet practical – imported as the shared `defaultContext` singleton. Tests can
+ * create their own context backed by a stubbed map of variables without having
+ * to mutate global state.
+ */
+
+import { fail } from "./fail";
+import * as github from "@actions/github";
+
+export interface EnvContext {
+  /**
+   * Return the value for a given environment variable or terminate the action
+   * via `fail` if it is missing / empty.
+   */
+  get(name: string): string;
+
+  /**
+   * Attempt to read an environment variable. Returns the value when present;
+   * otherwise returns undefined (does not call `fail`).
+   */
+  tryGet(name: string): string | undefined;
+
+  /**
+   * Attempt to read an environment variable. Returns the non-empty string
+   * value, or null if the variable is unset or the empty string.
+ */ + tryGetNonEmpty(name: string): string | null; + + /** + * Return a memoised Octokit instance authenticated via the token resolved + * from the provided argument (when defined) or the environment variables + * `GITHUB_TOKEN`/`GH_TOKEN`. + * + * Subsequent calls return the same cached instance to avoid spawning + * multiple REST clients within a single action run. + */ + getOctokit(token?: string): ReturnType; +} + +/** Internal helper – *not* exported. */ +function _getRequiredEnv( + name: string, + env: Record, +): string | undefined { + const value = env[name]; + + // Avoid leaking secrets into logs while still logging non-secret variables. + if (name.endsWith("KEY") || name.endsWith("TOKEN")) { + if (value) { + console.log(`value for ${name} was found`); + } + } else { + console.log(`${name}=${value}`); + } + + return value; +} + +/** Create a context backed by the supplied environment map (defaults to `process.env`). */ +export function createEnvContext( + env: Record = process.env, +): EnvContext { + // Lazily instantiated Octokit client – shared across this context. + let cachedOctokit: ReturnType | null = null; + + return { + get(name: string): string { + const value = _getRequiredEnv(name, env); + if (value == null) { + fail(`Missing required environment variable: ${name}`); + } + return value; + }, + + tryGet(name: string): string | undefined { + return _getRequiredEnv(name, env); + }, + + tryGetNonEmpty(name: string): string | null { + const value = _getRequiredEnv(name, env); + return value == null || value === "" ? null : value; + }, + + getOctokit(token?: string) { + if (cachedOctokit) { + return cachedOctokit; + } + + // Determine the token to authenticate with. + const githubToken = token ?? env["GITHUB_TOKEN"] ?? env["GH_TOKEN"]; + + if (!githubToken) { + fail( + "Unable to locate a GitHub token. `github_token` should have been set on the action.", + ); + } + + cachedOctokit = github.getOctokit(githubToken!); + return cachedOctokit; + }, + }; +} + +/** + * Shared context built from the actual `process.env`. Production code that is + * not yet refactored to receive a context explicitly may import and use this + * singleton. Tests should avoid the singleton and instead pass their own + * context to the functions they exercise. + */ +export const defaultContext: EnvContext = createEnvContext(); diff --git a/.github/actions/codex/src/fail.ts b/.github/actions/codex/src/fail.ts new file mode 100644 index 000000000..924d70095 --- /dev/null +++ b/.github/actions/codex/src/fail.ts @@ -0,0 +1,4 @@ +export function fail(message: string): never { + console.error(message); + process.exit(1); +} diff --git a/.github/actions/codex/src/git-helpers.ts b/.github/actions/codex/src/git-helpers.ts new file mode 100644 index 000000000..001ccde35 --- /dev/null +++ b/.github/actions/codex/src/git-helpers.ts @@ -0,0 +1,149 @@ +import { spawnSync } from "child_process"; +import * as github from "@actions/github"; +import { EnvContext } from "./env-context"; + +function runGit(args: string[], silent = true): string { + console.info(`Running git ${args.join(" ")}`); + const res = spawnSync("git", args, { + encoding: "utf8", + stdio: silent ? ["ignore", "pipe", "pipe"] : "inherit", + }); + if (res.error) { + throw res.error; + } + if (res.status !== 0) { + // Return stderr so caller may handle; else throw. 
+ throw new Error( + `git ${args.join(" ")} failed with code ${res.status}: ${res.stderr}`, + ); + } + return res.stdout.trim(); +} + +function stageAllChanges() { + runGit(["add", "-A"]); +} + +function hasStagedChanges(): boolean { + const res = spawnSync("git", ["diff", "--cached", "--quiet", "--exit-code"]); + return res.status !== 0; +} + +function ensureOnBranch( + issueNumber: number, + protectedBranches: string[], + suggestedSlug?: string, +): string { + let branch = ""; + try { + branch = runGit(["symbolic-ref", "--short", "-q", "HEAD"]); + } catch { + branch = ""; + } + + // If detached HEAD or on a protected branch, create a new branch. + if (!branch || protectedBranches.includes(branch)) { + if (suggestedSlug) { + const safeSlug = suggestedSlug + .toLowerCase() + .replace(/[^\w\s-]/g, "") + .trim() + .replace(/\s+/g, "-"); + branch = `codex-fix-${issueNumber}-${safeSlug}`; + } else { + branch = `codex-fix-${issueNumber}-${Date.now()}`; + } + runGit(["switch", "-c", branch]); + } + return branch; +} + +function commitIfNeeded(issueNumber: number) { + if (hasStagedChanges()) { + runGit([ + "commit", + "-m", + `fix: automated fix for #${issueNumber} via Codex`, + ]); + } +} + +function pushBranch(branch: string, githubToken: string, ctx: EnvContext) { + const repoSlug = ctx.get("GITHUB_REPOSITORY"); // owner/repo + const remoteUrl = `https://x-access-token:${githubToken}@github.com/${repoSlug}.git`; + + runGit(["push", "--force-with-lease", "-u", remoteUrl, `HEAD:${branch}`]); +} + +/** + * If this returns a string, it is the URL of the created PR. + */ +export async function maybePublishPRForIssue( + issueNumber: number, + lastMessage: string, + ctx: EnvContext, +): Promise { + // Only proceed if GITHUB_TOKEN available. + const githubToken = + ctx.tryGetNonEmpty("GITHUB_TOKEN") ?? ctx.tryGetNonEmpty("GH_TOKEN"); + if (!githubToken) { + console.warn("No GitHub token - skipping PR creation."); + return undefined; + } + + // Print `git status` for debugging. + runGit(["status"]); + + // Stage any remaining changes so they can be committed and pushed. + stageAllChanges(); + + const octokit = ctx.getOctokit(githubToken); + + const { owner, repo } = github.context.repo; + + // Determine default branch to treat as protected. + let defaultBranch = "main"; + try { + const repoInfo = await octokit.rest.repos.get({ owner, repo }); + defaultBranch = repoInfo.data.default_branch ?? "main"; + } catch (e) { + console.warn(`Failed to get default branch, assuming 'main': ${e}`); + } + + const sanitizedMessage = lastMessage.replace(/\u2022/g, "-"); + const [summaryLine] = sanitizedMessage.split(/\r?\n/); + const branch = ensureOnBranch(issueNumber, [defaultBranch, "master"], summaryLine); + commitIfNeeded(issueNumber); + pushBranch(branch, githubToken, ctx); + + // Try to find existing PR for this branch + const headParam = `${owner}:${branch}`; + const existing = await octokit.rest.pulls.list({ + owner, + repo, + head: headParam, + state: "open", + }); + if (existing.data.length > 0) { + return existing.data[0].html_url; + } + + // Determine base branch (default to main) + let baseBranch = "main"; + try { + const repoInfo = await octokit.rest.repos.get({ owner, repo }); + baseBranch = repoInfo.data.default_branch ?? 
"main"; + } catch (e) { + console.warn(`Failed to get default branch, assuming 'main': ${e}`); + } + + const pr = await octokit.rest.pulls.create({ + owner, + repo, + title: summaryLine, + head: branch, + base: baseBranch, + body: sanitizedMessage, + }); + return pr.data.html_url; +} diff --git a/.github/actions/codex/src/git-user.ts b/.github/actions/codex/src/git-user.ts new file mode 100644 index 000000000..bd84a61a7 --- /dev/null +++ b/.github/actions/codex/src/git-user.ts @@ -0,0 +1,16 @@ +export function setGitHubActionsUser(): void { + const commands = [ + ["git", "config", "--global", "user.name", "github-actions[bot]"], + [ + "git", + "config", + "--global", + "user.email", + "41898282+github-actions[bot]@users.noreply.github.com", + ], + ]; + + for (const command of commands) { + Bun.spawnSync(command); + } +} diff --git a/.github/actions/codex/src/github-workspace.ts b/.github/actions/codex/src/github-workspace.ts new file mode 100644 index 000000000..8a1f7cae5 --- /dev/null +++ b/.github/actions/codex/src/github-workspace.ts @@ -0,0 +1,11 @@ +import * as pathMod from "path"; +import { EnvContext } from "./env-context"; + +export function resolveWorkspacePath(path: string, ctx: EnvContext): string { + if (pathMod.isAbsolute(path)) { + return path; + } else { + const workspace = ctx.get("GITHUB_WORKSPACE"); + return pathMod.join(workspace, path); + } +} diff --git a/.github/actions/codex/src/load-config.ts b/.github/actions/codex/src/load-config.ts new file mode 100644 index 000000000..f225e81a0 --- /dev/null +++ b/.github/actions/codex/src/load-config.ts @@ -0,0 +1,56 @@ +import type { Config, LabelConfig } from "./config"; + +import { getDefaultConfig } from "./default-label-config"; +import { readFileSync, readdirSync, statSync } from "fs"; +import * as path from "path"; + +/** + * Build an in-memory configuration object by scanning the repository for + * Markdown templates located in `.github/codex/labels`. + * + * Each `*.md` file in that directory represents a label that can trigger the + * Codex GitHub Action. The filename **without** the extension is interpreted + * as the label name, e.g. `codex-review.md` ➜ `codex-review`. + * + * For every such label we derive the corresponding `doneLabel` by appending + * the suffix `-completed`. + */ +export function loadConfig(workspace: string): Config { + const labelsDir = path.join(workspace, ".github", "codex", "labels"); + + let entries: string[]; + try { + entries = readdirSync(labelsDir); + } catch { + // If the directory is missing, return the default configuration. 
+ return getDefaultConfig(); + } + + const labels: Record = {}; + + for (const entry of entries) { + if (!entry.endsWith(".md")) { + continue; + } + + const fullPath = path.join(labelsDir, entry); + + if (!statSync(fullPath).isFile()) { + continue; + } + + const labelName = entry.slice(0, -3); // trim ".md" + + labels[labelName] = new FileLabelConfig(fullPath); + } + + return { labels }; +} + +class FileLabelConfig implements LabelConfig { + constructor(private readonly promptPath: string) {} + + getPromptTemplate(): string { + return readFileSync(this.promptPath, "utf8"); + } +} diff --git a/.github/actions/codex/src/main.ts b/.github/actions/codex/src/main.ts new file mode 100755 index 000000000..a334c6891 --- /dev/null +++ b/.github/actions/codex/src/main.ts @@ -0,0 +1,80 @@ +#!/usr/bin/env bun + +import type { Config } from "./config"; + +import { defaultContext, EnvContext } from "./env-context"; +import { loadConfig } from "./load-config"; +import { setGitHubActionsUser } from "./git-user"; +import { onLabeled } from "./process-label"; +import { ensureBaseAndHeadCommitsForPRAreAvailable } from "./prompt-template"; +import { performAdditionalValidation } from "./verify-inputs"; +import { onComment } from "./comment"; +import { onReview } from "./review"; + +async function main(): Promise { + const ctx: EnvContext = defaultContext; + + // Build the configuration dynamically by scanning `.github/codex/labels`. + const GITHUB_WORKSPACE = ctx.get("GITHUB_WORKSPACE"); + const config: Config = loadConfig(GITHUB_WORKSPACE); + + // Optionally perform additional validation of prompt template files. + performAdditionalValidation(config, GITHUB_WORKSPACE); + + const GITHUB_EVENT_NAME = ctx.get("GITHUB_EVENT_NAME"); + const GITHUB_EVENT_ACTION = ctx.get("GITHUB_EVENT_ACTION"); + + // Set user.name and user.email to a bot before Codex runs, just in case it + // creates a commit. + setGitHubActionsUser(); + + switch (GITHUB_EVENT_NAME) { + case "issues": { + if (GITHUB_EVENT_ACTION === "labeled") { + await onLabeled(config, ctx); + return; + } else if (GITHUB_EVENT_ACTION === "opened") { + await onComment(ctx); + return; + } + break; + } + case "issue_comment": { + if (GITHUB_EVENT_ACTION === "created") { + await onComment(ctx); + return; + } + break; + } + case "pull_request": { + if (GITHUB_EVENT_ACTION === "labeled") { + await ensureBaseAndHeadCommitsForPRAreAvailable(ctx); + await onLabeled(config, ctx); + return; + } + break; + } + case "pull_request_review": { + await ensureBaseAndHeadCommitsForPRAreAvailable(ctx); + if (GITHUB_EVENT_ACTION === "submitted") { + await onReview(ctx); + return; + } + break; + } + case "pull_request_review_comment": { + await ensureBaseAndHeadCommitsForPRAreAvailable(ctx); + if (GITHUB_EVENT_ACTION === "created") { + await onComment(ctx); + return; + } + break; + } + } + + console.warn( + `Unsupported action '${GITHUB_EVENT_ACTION}' for event '${GITHUB_EVENT_NAME}'.`, + ); +} + +main(); diff --git a/.github/actions/codex/src/post-comment.ts b/.github/actions/codex/src/post-comment.ts new file mode 100644 index 000000000..914fd0d32 --- /dev/null +++ b/.github/actions/codex/src/post-comment.ts @@ -0,0 +1,62 @@ +import { fail } from "./fail"; +import * as github from "@actions/github"; +import { EnvContext } from "./env-context"; + +/** + * Post a comment to the issue / pull request currently in scope. + * + * Provide the environment context so that token lookup (inside getOctokit) does + * not rely on global state. 
+ */ +export async function postComment( + commentBody: string, + ctx: EnvContext, +): Promise { + // Append a footer with a link back to the workflow run, if available. + const footer = buildWorkflowRunFooter(ctx); + const bodyWithFooter = footer ? `${commentBody}${footer}` : commentBody; + + const octokit = ctx.getOctokit(); + console.info("Got Octokit instance for posting comment"); + const { owner, repo } = github.context.repo; + const issueNumber = github.context.issue.number; + + if (!issueNumber) { + console.warn( + "No issue or pull_request number found in GitHub context; skipping comment creation.", + ); + return; + } + + try { + console.info("Calling octokit.rest.issues.createComment()"); + await octokit.rest.issues.createComment({ + owner, + repo, + issue_number: issueNumber, + body: bodyWithFooter, + }); + } catch (error) { + fail(`Failed to create comment via GitHub API: ${error}`); + } +} + +/** + * Helper to build a Markdown fragment linking back to the workflow run that + * generated the current comment. Returns `undefined` if required environment + * variables are missing – e.g. when running outside of GitHub Actions – so we + * can gracefully skip the footer in those cases. + */ +function buildWorkflowRunFooter(ctx: EnvContext): string | undefined { + const serverUrl = + ctx.tryGetNonEmpty("GITHUB_SERVER_URL") ?? "https://github.com"; + const repository = ctx.tryGetNonEmpty("GITHUB_REPOSITORY"); + const runId = ctx.tryGetNonEmpty("GITHUB_RUN_ID"); + + if (!repository || !runId) { + return undefined; + } + + const url = `${serverUrl}/${repository}/actions/runs/${runId}`; + return `\n\n---\n*[_View workflow run_](${url})*`; +} diff --git a/.github/actions/codex/src/process-label.ts b/.github/actions/codex/src/process-label.ts new file mode 100644 index 000000000..4b4361e11 --- /dev/null +++ b/.github/actions/codex/src/process-label.ts @@ -0,0 +1,195 @@ +import { fail } from "./fail"; +import { EnvContext } from "./env-context"; +import { renderPromptTemplate } from "./prompt-template"; + +import { postComment } from "./post-comment"; +import { runCodex } from "./run-codex"; + +import * as github from "@actions/github"; +import { Config, LabelConfig } from "./config"; +import { maybePublishPRForIssue } from "./git-helpers"; + +export async function onLabeled( + config: Config, + ctx: EnvContext, +): Promise { + const GITHUB_EVENT_LABEL_NAME = ctx.get("GITHUB_EVENT_LABEL_NAME"); + const labelConfig = config.labels[GITHUB_EVENT_LABEL_NAME] as + | LabelConfig + | undefined; + if (!labelConfig) { + fail( + `Label \`${GITHUB_EVENT_LABEL_NAME}\` not found in config: ${JSON.stringify(config)}`, + ); + } + + await processLabelConfig(ctx, GITHUB_EVENT_LABEL_NAME, labelConfig); +} + +/** + * Wrapper that handles `-in-progress` and `-completed` semantics around the core lint/fix/review + * processing. It will: + * + * - Skip execution if the `-in-progress` or `-completed` label is already present. + * - Mark the PR/issue as `-in-progress`. + * - After successful execution, mark the PR/issue as `-completed`. 
+ */ +async function processLabelConfig( + ctx: EnvContext, + label: string, + labelConfig: LabelConfig, +): Promise { + const octokit = ctx.getOctokit(); + const { owner, repo, issueNumber, labelNames } = + await getCurrentLabels(octokit); + + const inProgressLabel = `${label}-in-progress`; + const completedLabel = `${label}-completed`; + for (const markerLabel of [inProgressLabel, completedLabel]) { + if (labelNames.includes(markerLabel)) { + console.log( + `Label '${markerLabel}' already present on issue/PR #${issueNumber}. Skipping Codex action.`, + ); + + // Clean up: remove the triggering label to avoid confusion and re-runs. + await addAndRemoveLabels(octokit, { + owner, + repo, + issueNumber, + remove: markerLabel, + }); + + return; + } + } + + // Mark the PR/issue as in progress. + await addAndRemoveLabels(octokit, { + owner, + repo, + issueNumber, + add: inProgressLabel, + remove: label, + }); + + // Run the core Codex processing. + await processLabel(ctx, label, labelConfig); + + // Mark the PR/issue as completed. + await addAndRemoveLabels(octokit, { + owner, + repo, + issueNumber, + add: completedLabel, + remove: inProgressLabel, + }); +} + +async function processLabel( + ctx: EnvContext, + label: string, + labelConfig: LabelConfig, +): Promise { + const template = labelConfig.getPromptTemplate(); + const populatedTemplate = await renderPromptTemplate(template, ctx); + + // Always run Codex and post the resulting message as a comment. + let commentBody = await runCodex(populatedTemplate, ctx); + + // Current heuristic: only try to create a PR if "attempt" or "fix" is in the + // label name. (Yes, we plan to evolve this.) + if (label.indexOf("fix") !== -1 || label.indexOf("attempt") !== -1) { + console.info(`label ${label} indicates we should attempt to create a PR`); + const prUrl = await maybeFixIssue(ctx, commentBody); + if (prUrl) { + commentBody += `\n\n---\nOpened pull request: ${prUrl}`; + } + } else { + console.info( + `label ${label} does not indicate we should attempt to create a PR`, + ); + } + + await postComment(commentBody, ctx); +} + +async function maybeFixIssue( + ctx: EnvContext, + lastMessage: string, +): Promise { + // Attempt to create a PR out of any changes Codex produced. + const issueNumber = github.context.issue.number!; // exists for issues triggering this path + try { + return await maybePublishPRForIssue(issueNumber, lastMessage, ctx); + } catch (e) { + console.warn(`Failed to publish PR: ${e}`); + } +} + +async function getCurrentLabels( + octokit: ReturnType, +): Promise<{ + owner: string; + repo: string; + issueNumber: number; + labelNames: Array; +}> { + const { owner, repo } = github.context.repo; + const issueNumber = github.context.issue.number; + + if (!issueNumber) { + fail("No issue or pull_request number found in GitHub context."); + } + + const { data: issueData } = await octokit.rest.issues.get({ + owner, + repo, + issue_number: issueNumber, + }); + + const labelNames = + issueData.labels?.map((label: any) => + typeof label === "string" ? label : label.name, + ) ?? 
+
+  return { owner, repo, issueNumber, labelNames };
+}
+
+async function addAndRemoveLabels(
+  octokit: ReturnType<typeof github.getOctokit>,
+  opts: {
+    owner: string;
+    repo: string;
+    issueNumber: number;
+    add?: string;
+    remove?: string;
+  },
+): Promise<void> {
+  const { owner, repo, issueNumber, add, remove } = opts;
+
+  if (add) {
+    try {
+      await octokit.rest.issues.addLabels({
+        owner,
+        repo,
+        issue_number: issueNumber,
+        labels: [add],
+      });
+    } catch (error) {
+      console.warn(`Failed to add label '${add}': ${error}`);
+    }
+  }
+
+  if (remove) {
+    try {
+      await octokit.rest.issues.removeLabel({
+        owner,
+        repo,
+        issue_number: issueNumber,
+        name: remove,
+      });
+    } catch (error) {
+      console.warn(`Failed to remove label '${remove}': ${error}`);
+    }
+  }
+}
diff --git a/.github/actions/codex/src/prompt-template.ts b/.github/actions/codex/src/prompt-template.ts
new file mode 100644
index 000000000..aa52dd2af
--- /dev/null
+++ b/.github/actions/codex/src/prompt-template.ts
@@ -0,0 +1,284 @@
+/*
+ * Utilities to render Codex prompt templates.
+ *
+ * A template is a Markdown (or plain-text) file that may contain one or more
+ * placeholders of the form `{CODEX_ACTION_...}`. At runtime these
+ * placeholders are substituted with dynamically generated content. Each
+ * placeholder is resolved **exactly once** even if it appears multiple times
+ * in the same template.
+ */
+
+import { readFile } from "fs/promises";
+
+import { EnvContext } from "./env-context";
+
+// ---------------------------------------------------------------------------
+// Helpers
+// ---------------------------------------------------------------------------
+
+/**
+ * Lazily caches parsed `$GITHUB_EVENT_PATH` contents keyed by the file path so
+ * we only hit the filesystem once per unique event payload.
+ */
+const githubEventDataCache: Map<string, Promise<any>> = new Map();
+
+function getGitHubEventData(ctx: EnvContext): Promise<any> {
+  const eventPath = ctx.get("GITHUB_EVENT_PATH");
+  let cached = githubEventDataCache.get(eventPath);
+  if (!cached) {
+    cached = readFile(eventPath, "utf8").then((raw) => JSON.parse(raw));
+    githubEventDataCache.set(eventPath, cached);
+  }
+  return cached;
+}
+
+async function runCommand(args: Array<string>): Promise<string> {
+  const result = Bun.spawnSync(args, {
+    stdout: "pipe",
+    stderr: "pipe",
+  });
+
+  if (result.success) {
+    return result.stdout.toString();
+  }
+
+  console.error(`Error running ${JSON.stringify(args)}: ${result.stderr}`);
+  return "";
+}
+
+// ---------------------------------------------------------------------------
+// Public API
+// ---------------------------------------------------------------------------
+
+// Regex that captures the variable name without the surrounding { } braces.
+const VAR_REGEX = /\{(CODEX_ACTION_[A-Z0-9_]+)\}/g;
+
+// Cache individual placeholder values so each one is resolved at most once per
+// process even if many templates reference it.
+const placeholderCache: Map<string, Promise<string>> = new Map();
+
+/**
+ * Parse a template string, resolve all placeholders and return the rendered
+ * result.
+ */
+export async function renderPromptTemplate(
+  template: string,
+  ctx: EnvContext,
+): Promise<string> {
+  // ---------------------------------------------------------------------
+  // 1) Gather all *unique* placeholders present in the template.
+  // ---------------------------------------------------------------------
+  const variables = new Set<string>();
+  for (const match of template.matchAll(VAR_REGEX)) {
+    variables.add(match[1]);
+  }
+
+  // ---------------------------------------------------------------------
+  // 2) Kick off (or reuse) async resolution for each variable.
+  // ---------------------------------------------------------------------
+  for (const variable of variables) {
+    if (!placeholderCache.has(variable)) {
+      placeholderCache.set(variable, resolveVariable(variable, ctx));
+    }
+  }
+
+  // ---------------------------------------------------------------------
+  // 3) Await completion so we can perform a simple synchronous replace below.
+  // ---------------------------------------------------------------------
+  const resolvedEntries: [string, string][] = [];
+  for (const [key, promise] of placeholderCache.entries()) {
+    resolvedEntries.push([key, await promise]);
+  }
+  const resolvedMap = new Map(resolvedEntries);
+
+  // ---------------------------------------------------------------------
+  // 4) Replace each occurrence. We use replace with a callback to ensure
+  //    correct substitution even if variable names overlap (they shouldn't,
+  //    but better safe than sorry).
+  // ---------------------------------------------------------------------
+  return template.replace(VAR_REGEX, (_, varName: string) => {
+    return resolvedMap.get(varName) ?? "";
+  });
+}
+
+export async function ensureBaseAndHeadCommitsForPRAreAvailable(
+  ctx: EnvContext,
+): Promise<{ baseSha: string; headSha: string } | null> {
+  const prShas = await getPrShas(ctx);
+  if (prShas == null) {
+    console.warn("Unable to resolve PR branches");
+    return null;
+  }
+
+  const event = await getGitHubEventData(ctx);
+  const pr = event.pull_request;
+  if (!pr) {
+    console.warn("event.pull_request is not defined - unexpected");
+    return null;
+  }
+
+  const workspace = ctx.get("GITHUB_WORKSPACE");
+
+  // Refs (branch names)
+  const baseRef: string | undefined = pr.base?.ref;
+  const headRef: string | undefined = pr.head?.ref;
+
+  // Clone URLs
+  const baseRemoteUrl: string | undefined = pr.base?.repo?.clone_url;
+  const headRemoteUrl: string | undefined = pr.head?.repo?.clone_url;
+
+  if (!baseRef || !headRef || !baseRemoteUrl || !headRemoteUrl) {
+    console.warn(
+      "Missing PR ref or remote URL information - cannot fetch commits",
+    );
+    return null;
+  }
+
+  // Ensure we have the base branch.
+  await runCommand([
+    "git",
+    "-C",
+    workspace,
+    "fetch",
+    "--no-tags",
+    "origin",
+    baseRef,
+  ]);
+
+  // Ensure we have the head branch.
+  if (headRemoteUrl === baseRemoteUrl) {
+    // Same repository – the commit is available from `origin`.
+    await runCommand([
+      "git",
+      "-C",
+      workspace,
+      "fetch",
+      "--no-tags",
+      "origin",
+      headRef,
+    ]);
+  } else {
+    // Fork – make sure a `pr` remote exists that points at the fork. Attempting
+    // to add a remote that already exists causes git to error, so we swallow
+    // any non-zero exit codes from that specific command.
+    await runCommand([
+      "git",
+      "-C",
+      workspace,
+      "remote",
+      "add",
+      "pr",
+      headRemoteUrl,
+    ]);
+
+    // Whether adding succeeded or the remote already existed, attempt to fetch
+    // the head ref from the `pr` remote.
+    await runCommand([
+      "git",
+      "-C",
+      workspace,
+      "fetch",
+      "--no-tags",
+      "pr",
+      headRef,
+    ]);
+  }
+
+  return prShas;
+}
+
+// ---------------------------------------------------------------------------
+// Internal helpers – still exported for use by other modules.
+// ---------------------------------------------------------------------------
+
+export async function resolvePrDiff(ctx: EnvContext): Promise<string> {
+  const prShas = await ensureBaseAndHeadCommitsForPRAreAvailable(ctx);
+  if (prShas == null) {
+    console.warn("Unable to resolve PR branches");
+    return "";
+  }
+
+  const workspace = ctx.get("GITHUB_WORKSPACE");
+  const { baseSha, headSha } = prShas;
+  return runCommand([
+    "git",
+    "-C",
+    workspace,
+    "diff",
+    "--color=never",
+    `${baseSha}..${headSha}`,
+  ]);
+}
+
+// ---------------------------------------------------------------------------
+// Placeholder resolution
+// ---------------------------------------------------------------------------
+
+async function resolveVariable(name: string, ctx: EnvContext): Promise<string> {
+  switch (name) {
+    case "CODEX_ACTION_ISSUE_TITLE": {
+      const event = await getGitHubEventData(ctx);
+      const issue = event.issue ?? event.pull_request;
+      return issue?.title ?? "";
+    }
+
+    case "CODEX_ACTION_ISSUE_BODY": {
+      const event = await getGitHubEventData(ctx);
+      const issue = event.issue ?? event.pull_request;
+      return issue?.body ?? "";
+    }
+
+    case "CODEX_ACTION_GITHUB_EVENT_PATH": {
+      return ctx.get("GITHUB_EVENT_PATH");
+    }
+
+    case "CODEX_ACTION_BASE_REF": {
+      const event = await getGitHubEventData(ctx);
+      return event?.pull_request?.base?.ref ?? "";
+    }
+
+    case "CODEX_ACTION_HEAD_REF": {
+      const event = await getGitHubEventData(ctx);
+      return event?.pull_request?.head?.ref ?? "";
+    }
+
+    case "CODEX_ACTION_PR_DIFF": {
+      return resolvePrDiff(ctx);
+    }
+
+    // -------------------------------------------------------------------
+    // Add new template variables here.
+    // -------------------------------------------------------------------
+
+    default: {
+      // Unknown variable – leave it blank to avoid leaking placeholders to the
+      // final prompt. The alternative would be to `fail()` here, but silently
+      // ignoring unknown placeholders is more forgiving and better matches the
+      // behaviour of typical template engines.
+      console.warn(`Unknown template variable: ${name}`);
+      return "";
+    }
+  }
+}
+
+async function getPrShas(
+  ctx: EnvContext,
+): Promise<{ baseSha: string; headSha: string } | null> {
+  const event = await getGitHubEventData(ctx);
+  const pr = event.pull_request;
+  if (!pr) {
+    console.warn("event.pull_request is not defined");
+    return null;
+  }
+
+  // Prefer explicit SHAs if available to avoid relying on local branch names.
+  const baseSha: string | undefined = pr.base?.sha;
+  const headSha: string | undefined = pr.head?.sha;
+
+  if (!baseSha || !headSha) {
+    console.warn("one of base or head is not defined on event.pull_request");
+    return null;
+  }
+
+  return { baseSha, headSha };
+}
diff --git a/.github/actions/codex/src/review.ts b/.github/actions/codex/src/review.ts
new file mode 100644
index 000000000..64f826dcc
--- /dev/null
+++ b/.github/actions/codex/src/review.ts
@@ -0,0 +1,42 @@
+import type { EnvContext } from "./env-context";
+import { runCodex } from "./run-codex";
+import { postComment } from "./post-comment";
+import { addEyesReaction } from "./add-reaction";
+
+/**
+ * Handle `pull_request_review` events. We treat the review body the same way
+ * as a normal comment.
diff --git a/.github/actions/codex/src/review.ts b/.github/actions/codex/src/review.ts
new file mode 100644
index 000000000..64f826dcc
--- /dev/null
+++ b/.github/actions/codex/src/review.ts
@@ -0,0 +1,42 @@
+import type { EnvContext } from "./env-context";
+import { runCodex } from "./run-codex";
+import { postComment } from "./post-comment";
+import { addEyesReaction } from "./add-reaction";
+
+/**
+ * Handle `pull_request_review` events. We treat the review body the same way
+ * as a normal comment.
+ */
+export async function onReview(ctx: EnvContext): Promise<void> {
+  const triggerPhrase = ctx.tryGet("INPUT_TRIGGER_PHRASE");
+  if (!triggerPhrase) {
+    console.warn("Empty trigger phrase: skipping.");
+    return;
+  }
+
+  const reviewBody = ctx.tryGet("GITHUB_EVENT_REVIEW_BODY");
+
+  if (!reviewBody) {
+    console.warn("Review body not found in environment: skipping.");
+    return;
+  }
+
+  if (!reviewBody.includes(triggerPhrase)) {
+    console.log(
+      `Trigger phrase '${triggerPhrase}' not found: nothing to do for this review.`,
+    );
+    return;
+  }
+
+  const prompt = reviewBody.replace(triggerPhrase, "").trim();
+
+  if (prompt.length === 0) {
+    console.warn("Prompt is empty after removing trigger phrase: skipping.");
+    return;
+  }
+
+  await addEyesReaction(ctx);
+
+  const lastMessage = await runCodex(prompt, ctx);
+  await postComment(lastMessage, ctx);
+}
diff --git a/.github/actions/codex/src/run-codex.ts b/.github/actions/codex/src/run-codex.ts
new file mode 100644
index 000000000..2c851823e
--- /dev/null
+++ b/.github/actions/codex/src/run-codex.ts
@@ -0,0 +1,56 @@
+import { fail } from "./fail";
+import { EnvContext } from "./env-context";
+import { tmpdir } from "os";
+import { join } from "node:path";
+import { readFile, mkdtemp } from "fs/promises";
+import { resolveWorkspacePath } from "./github-workspace";
+
+/**
+ * Runs the Codex CLI with the provided prompt and returns the output written
+ * to the "last message" file.
+ */
+export async function runCodex(
+  prompt: string,
+  ctx: EnvContext,
+): Promise<string> {
+  const OPENAI_API_KEY = ctx.get("OPENAI_API_KEY");
+
+  const tempDirPath = await mkdtemp(join(tmpdir(), "codex-"));
+  const lastMessageOutput = join(tempDirPath, "codex-prompt.md");
+
+  const args = ["/usr/local/bin/codex-exec"];
+
+  const inputCodexArgs = ctx.tryGet("INPUT_CODEX_ARGS")?.trim();
+  if (inputCodexArgs) {
+    args.push(...inputCodexArgs.split(/\s+/));
+  }
+
+  args.push("--output-last-message", lastMessageOutput, prompt);
+
+  const env: Record<string, string | undefined> = {
+    ...process.env,
+    OPENAI_API_KEY,
+  };
+  const INPUT_CODEX_HOME = ctx.tryGet("INPUT_CODEX_HOME");
+  if (INPUT_CODEX_HOME) {
+    env.CODEX_HOME = resolveWorkspacePath(INPUT_CODEX_HOME, ctx);
+  }
+
+  console.log(`Running Codex: ${JSON.stringify(args)}`);
+  const result = Bun.spawnSync(args, {
+    stdout: "inherit",
+    stderr: "inherit",
+    env,
+  });
+
+  if (!result.success) {
+    fail(`Codex failed: see above for details.`);
+  }
+
+  // Read the output generated by Codex.
+  let lastMessage: string;
+  try {
+    lastMessage = await readFile(lastMessageOutput, "utf8");
+  } catch (err) {
+    fail(`Failed to read Codex output at '${lastMessageOutput}': ${err}`);
+  }
+
+  return lastMessage;
+}
diff --git a/.github/actions/codex/src/verify-inputs.ts b/.github/actions/codex/src/verify-inputs.ts
new file mode 100644
index 000000000..bfc5dcda8
--- /dev/null
+++ b/.github/actions/codex/src/verify-inputs.ts
@@ -0,0 +1,33 @@
+// Validate the inputs passed to the composite action.
+// The script currently ensures that the provided configuration file exists and
+// matches the expected schema.
+
+import type { Config } from "./config";
+
+import { existsSync } from "fs";
+import * as path from "path";
+import { fail } from "./fail";
+
+export function performAdditionalValidation(config: Config, workspace: string) {
+  // Additional validation: ensure referenced prompt files exist and are Markdown.
+  for (const [label, details] of Object.entries(config.labels)) {
+    // Determine which prompt key is present (the schema guarantees exactly one).
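+    // For example (illustrative config shape, not a schema excerpt), an entry
+    // like
+    //   labels: { "codex-review": { prompt: ".github/codex/labels/codex-review.md" } }
+    // resolves to that Markdown file below: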
+ const promptPathStr = + (details as any).prompt ?? (details as any).promptPath; + + if (promptPathStr) { + const promptPath = path.isAbsolute(promptPathStr) + ? promptPathStr + : path.join(workspace, promptPathStr); + + if (!existsSync(promptPath)) { + fail(`Prompt file for label '${label}' not found: ${promptPath}`); + } + if (!promptPath.endsWith(".md")) { + fail( + `Prompt file for label '${label}' must be a .md file (got ${promptPathStr}).`, + ); + } + } + } +} diff --git a/.github/actions/codex/tsconfig.json b/.github/actions/codex/tsconfig.json new file mode 100644 index 000000000..c05c2955b --- /dev/null +++ b/.github/actions/codex/tsconfig.json @@ -0,0 +1,15 @@ +{ + "compilerOptions": { + "lib": ["ESNext"], + "target": "ESNext", + "module": "ESNext", + "moduleDetection": "force", + "moduleResolution": "bundler", + + "noEmit": true, + "strict": true, + "skipLibCheck": true + }, + + "include": ["src"] +} diff --git a/.github/codex/home/config.toml b/.github/codex/home/config.toml new file mode 100644 index 000000000..bb1b362bb --- /dev/null +++ b/.github/codex/home/config.toml @@ -0,0 +1,3 @@ +model = "o3" + +# Consider setting [mcp_servers] here! diff --git a/.github/codex/labels/codex-attempt.md b/.github/codex/labels/codex-attempt.md new file mode 100644 index 000000000..b2a3e93af --- /dev/null +++ b/.github/codex/labels/codex-attempt.md @@ -0,0 +1,9 @@ +Attempt to solve the reported issue. + +If a code change is required, create a new branch, commit the fix, and open a pull request that resolves the problem. + +Here is the original GitHub issue that triggered this run: + +### {CODEX_ACTION_ISSUE_TITLE} + +{CODEX_ACTION_ISSUE_BODY} diff --git a/.github/codex/labels/codex-review.md b/.github/codex/labels/codex-review.md new file mode 100644 index 000000000..7c6c14ad5 --- /dev/null +++ b/.github/codex/labels/codex-review.md @@ -0,0 +1,7 @@ +Review this PR and respond with a very concise final message, formatted in Markdown. + +There should be a summary of the changes (1-2 sentences) and a few bullet points if necessary. + +Then provide the **review** (1-2 sentences plus bullet points, friendly tone). + +{CODEX_ACTION_GITHUB_EVENT_PATH} contains the JSON that triggered this GitHub workflow. It contains the `base` and `head` refs that define this PR. Both refs are available locally. diff --git a/.github/codex/labels/codex-triage.md b/.github/codex/labels/codex-triage.md new file mode 100644 index 000000000..46ed36241 --- /dev/null +++ b/.github/codex/labels/codex-triage.md @@ -0,0 +1,7 @@ +Troubleshoot whether the reported issue is valid. + +Provide a concise and respectful comment summarizing the findings. 
+ +### {CODEX_ACTION_ISSUE_TITLE} + +{CODEX_ACTION_ISSUE_BODY} diff --git a/.github/dotslash-config.json b/.github/dotslash-config.json index 7ed1f9a60..1e32001e6 100644 --- a/.github/dotslash-config.json +++ b/.github/dotslash-config.json @@ -5,7 +5,7 @@ "macos-aarch64": { "regex": "^codex-exec-aarch64-apple-darwin\\.zst$", "path": "codex-exec" }, "macos-x86_64": { "regex": "^codex-exec-x86_64-apple-darwin\\.zst$", "path": "codex-exec" }, "linux-x86_64": { "regex": "^codex-exec-x86_64-unknown-linux-musl\\.zst$", "path": "codex-exec" }, - "linux-aarch64": { "regex": "^codex-exec-aarch64-unknown-linux-gnu\\.zst$", "path": "codex-exec" } + "linux-aarch64": { "regex": "^codex-exec-aarch64-unknown-linux-musl\\.zst$", "path": "codex-exec" } } }, @@ -14,14 +14,14 @@ "macos-aarch64": { "regex": "^codex-aarch64-apple-darwin\\.zst$", "path": "codex" }, "macos-x86_64": { "regex": "^codex-x86_64-apple-darwin\\.zst$", "path": "codex" }, "linux-x86_64": { "regex": "^codex-x86_64-unknown-linux-musl\\.zst$", "path": "codex" }, - "linux-aarch64": { "regex": "^codex-aarch64-unknown-linux-gnu\\.zst$", "path": "codex" } + "linux-aarch64": { "regex": "^codex-aarch64-unknown-linux-musl\\.zst$", "path": "codex" } } }, "codex-linux-sandbox": { "platforms": { "linux-x86_64": { "regex": "^codex-linux-sandbox-x86_64-unknown-linux-musl\\.zst$", "path": "codex-linux-sandbox" }, - "linux-aarch64": { "regex": "^codex-linux-sandbox-aarch64-unknown-linux-gnu\\.zst$", "path": "codex-linux-sandbox" } + "linux-aarch64": { "regex": "^codex-linux-sandbox-aarch64-unknown-linux-musl\\.zst$", "path": "codex-linux-sandbox" } } } } diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 24697f2f7..b32de962f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -78,3 +78,10 @@ jobs: run: ./scripts/asciicheck.py README.md - name: Check README ToC run: python3 scripts/readme_toc.py README.md + + build-nix: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: cachix/install-nix-action@v31 + - run: nix build diff --git a/.github/workflows/codex.yml b/.github/workflows/codex.yml new file mode 100644 index 000000000..a105581bb --- /dev/null +++ b/.github/workflows/codex.yml @@ -0,0 +1,95 @@ +name: Codex + +on: + issues: + types: [opened, labeled] + pull_request: + branches: [main] + types: [labeled] + +jobs: + codex: + # This `if` check provides complex filtering logic to avoid running Codex + # on every PR. Admittedly, one thing this does not verify is whether the + # sender has write access to the repo: that must be done as part of a + # runtime step. + # + # Note the label values should match the ones in the .github/codex/labels + # folder. + if: | + (github.event_name == 'issues' && ( + (github.event.action == 'labeled' && (github.event.label.name == 'codex-attempt' || github.event.label.name == 'codex-triage')) + )) || + (github.event_name == 'pull_request' && github.event.action == 'labeled' && github.event.label.name == 'codex-review') + runs-on: ubuntu-latest + permissions: + contents: write # can push or create branches + issues: write # for comments + labels on issues/PRs + pull-requests: write # for PR comments/labels + steps: + # TODO: Consider adding an optional mode (--dry-run?) to actions/codex + # that verifies whether Codex should actually be run for this event. + # (For example, it may be rejected because the sender does not have + # write access to the repo.) The benefit would be two-fold: + # 1. 
As the first step of this job, it gives us a chance to add a reaction + # or comment to the PR/issue ASAP to "ack" the request. + # 2. It saves resources by skipping the clone and setup steps below if + # Codex is not going to run. + + - name: Checkout repository + uses: actions/checkout@v4 + + # We install the dependencies like we would for an ordinary CI job, + # particularly because Codex will not have network access to install + # these dependencies. + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: 22 + + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + version: 10.8.1 + run_install: false + + - name: Get pnpm store directory + id: pnpm-cache + shell: bash + run: | + echo "store_path=$(pnpm store path --silent)" >> $GITHUB_OUTPUT + + - name: Setup pnpm cache + uses: actions/cache@v4 + with: + path: ${{ steps.pnpm-cache.outputs.store_path }} + key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }} + restore-keys: | + ${{ runner.os }}-pnpm-store- + + - name: Install dependencies + run: pnpm install + + - uses: dtolnay/rust-toolchain@1.87 + with: + targets: x86_64-unknown-linux-gnu + components: clippy + + - uses: actions/cache@v4 + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + ${{ github.workspace }}/codex-rs/target/ + key: cargo-ubuntu-24.04-x86_64-unknown-linux-gnu-${{ hashFiles('**/Cargo.lock') }} + + # Note it is possible that the `verify` step internal to Run Codex will + # fail, in which case the work to setup the repo was worthless :( + - name: Run Codex + uses: ./.github/actions/codex + with: + openai_api_key: ${{ secrets.CODEX_OPENAI_API_KEY }} + github_token: ${{ secrets.GITHUB_TOKEN }} + codex_home: ./.github/codex/home diff --git a/.github/workflows/rust-ci.yml b/.github/workflows/rust-ci.yml index f0eadaf25..3c836b8a0 100644 --- a/.github/workflows/rust-ci.yml +++ b/.github/workflows/rust-ci.yml @@ -55,6 +55,10 @@ jobs: target: x86_64-unknown-linux-musl - runner: ubuntu-24.04 target: x86_64-unknown-linux-gnu + - runner: ubuntu-24.04-arm + target: aarch64-unknown-linux-musl + - runner: ubuntu-24.04-arm + target: aarch64-unknown-linux-gnu - runner: windows-latest target: x86_64-pc-windows-msvc @@ -75,7 +79,7 @@ jobs: ${{ github.workspace }}/codex-rs/target/ key: cargo-${{ matrix.runner }}-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }} - - if: ${{ matrix.target == 'x86_64-unknown-linux-musl' }} + - if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}} name: Install musl build tools run: | sudo apt install -y musl-tools pkg-config diff --git a/.github/workflows/rust-release.yml b/.github/workflows/rust-release.yml index bb5ca67ce..83f160757 100644 --- a/.github/workflows/rust-release.yml +++ b/.github/workflows/rust-release.yml @@ -69,6 +69,8 @@ jobs: target: x86_64-unknown-linux-musl - runner: ubuntu-24.04 target: x86_64-unknown-linux-gnu + - runner: ubuntu-24.04-arm + target: aarch64-unknown-linux-musl - runner: ubuntu-24.04-arm target: aarch64-unknown-linux-gnu @@ -88,7 +90,7 @@ jobs: ${{ github.workspace }}/codex-rs/target/ key: cargo-release-${{ matrix.runner }}-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }} - - if: ${{ matrix.target == 'x86_64-unknown-linux-musl' }} + - if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}} name: Install musl build tools run: | sudo apt install -y musl-tools pkg-config @@ -105,7 +107,10 @@ jobs: cp target/${{ 
matrix.target }}/release/codex-exec "$dest/codex-exec-${{ matrix.target }}" cp target/${{ matrix.target }}/release/codex "$dest/codex-${{ matrix.target }}" - - if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'x86_64-unknown-linux-gnu' || matrix.target == 'aarch64-unknown-linux-gnu' }} + # After https://github.com/openai/codex/pull/1228 is merged and a new + # release is cut with an artifacts built after that PR, the `-gnu` + # variants can go away as we will only use the `-musl` variants. + - if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'x86_64-unknown-linux-gnu' || matrix.target == 'aarch64-unknown-linux-gnu' || matrix.target == 'aarch64-unknown-linux-musl' }} name: Stage Linux-only artifacts shell: bash run: | diff --git a/codex-cli/bin/codex.js b/codex-cli/bin/codex.js index 1bfb9f5d5..45a597b27 100755 --- a/codex-cli/bin/codex.js +++ b/codex-cli/bin/codex.js @@ -46,7 +46,7 @@ if (wantsNative) { targetTriple = "x86_64-unknown-linux-musl"; break; case "arm64": - targetTriple = "aarch64-unknown-linux-gnu"; + targetTriple = "aarch64-unknown-linux-musl"; break; default: break; diff --git a/codex-cli/scripts/install_native_deps.sh b/codex-cli/scripts/install_native_deps.sh index 09c155322..01253d5d1 100755 --- a/codex-cli/scripts/install_native_deps.sh +++ b/codex-cli/scripts/install_native_deps.sh @@ -65,7 +65,7 @@ mkdir -p "$BIN_DIR" # Until we start publishing stable GitHub releases, we have to grab the binaries # from the GitHub Action that created them. Update the URL below to point to the # appropriate workflow run: -WORKFLOW_URL="https://github.com/openai/codex/actions/runs/15280451034" +WORKFLOW_URL="https://github.com/openai/codex/actions/runs/15483730027" WORKFLOW_ID="${WORKFLOW_URL##*/}" ARTIFACTS_DIR="$(mktemp -d)" @@ -78,7 +78,7 @@ gh run download --dir "$ARTIFACTS_DIR" --repo openai/codex "$WORKFLOW_ID" zstd -d "$ARTIFACTS_DIR/x86_64-unknown-linux-musl/codex-linux-sandbox-x86_64-unknown-linux-musl.zst" \ -o "$BIN_DIR/codex-linux-sandbox-x64" -zstd -d "$ARTIFACTS_DIR/aarch64-unknown-linux-gnu/codex-linux-sandbox-aarch64-unknown-linux-gnu.zst" \ +zstd -d "$ARTIFACTS_DIR/aarch64-unknown-linux-musl/codex-linux-sandbox-aarch64-unknown-linux-musl.zst" \ -o "$BIN_DIR/codex-linux-sandbox-arm64" if [[ "$INCLUDE_RUST" -eq 1 ]]; then @@ -86,8 +86,8 @@ if [[ "$INCLUDE_RUST" -eq 1 ]]; then zstd -d "$ARTIFACTS_DIR/x86_64-unknown-linux-musl/codex-x86_64-unknown-linux-musl.zst" \ -o "$BIN_DIR/codex-x86_64-unknown-linux-musl" # ARM64 Linux - zstd -d "$ARTIFACTS_DIR/aarch64-unknown-linux-gnu/codex-aarch64-unknown-linux-gnu.zst" \ - -o "$BIN_DIR/codex-aarch64-unknown-linux-gnu" + zstd -d "$ARTIFACTS_DIR/aarch64-unknown-linux-musl/codex-aarch64-unknown-linux-musl.zst" \ + -o "$BIN_DIR/codex-aarch64-unknown-linux-musl" # x64 macOS zstd -d "$ARTIFACTS_DIR/x86_64-apple-darwin/codex-x86_64-apple-darwin.zst" \ -o "$BIN_DIR/codex-x86_64-apple-darwin" diff --git a/codex-cli/scripts/stage_release.sh b/codex-cli/scripts/stage_release.sh index 9e251b905..cf2701c21 100755 --- a/codex-cli/scripts/stage_release.sh +++ b/codex-cli/scripts/stage_release.sh @@ -17,7 +17,7 @@ # When --native is supplied we copy the linux-sandbox binaries (as before) and # additionally fetch / unpack the two Rust targets that we currently support: # - x86_64-unknown-linux-musl -# - aarch64-unknown-linux-gnu +# - aarch64-unknown-linux-musl # # NOTE: This script is intended to be run from the repository root via # `pnpm --filter codex-cli stage-release ...` or inside codex-cli with 
the diff --git a/codex-cli/src/utils/get-api-key.tsx b/codex-cli/src/utils/get-api-key.tsx index 4817e396a..520f92efd 100644 --- a/codex-cli/src/utils/get-api-key.tsx +++ b/codex-cli/src/utils/get-api-key.tsx @@ -382,6 +382,8 @@ async function handleCallback( const exchanged = (await exchangeRes.json()) as { access_token: string; + // NOTE(mbolin): I did not see the "key" property set in practice. Note + // this property is not read by the code. key: string; }; diff --git a/codex-rs/.gitignore b/codex-rs/.gitignore index b83d22266..e99625376 100644 --- a/codex-rs/.gitignore +++ b/codex-rs/.gitignore @@ -1 +1,7 @@ /target/ + +# Recommended value of CARGO_TARGET_DIR when using Docker as explained in .devcontainer/README.md. +/target-amd64/ + +# Value of CARGO_TARGET_DIR when using .devcontainer/devcontainer.json. +/target-arm64/ diff --git a/codex-rs/Cargo.lock b/codex-rs/Cargo.lock index 309c671e7..66b4fa3e0 100644 --- a/codex-rs/Cargo.lock +++ b/codex-rs/Cargo.lock @@ -54,6 +54,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "aligned-vec" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4aa90d7ce82d4be67b64039a3d588d38dbcc6736577de4a847025ce5b0c468d1" + [[package]] name = "allocative" version = "0.3.4" @@ -177,6 +183,29 @@ version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" +[[package]] +name = "arbitrary" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" + +[[package]] +name = "arg_enum_proc_macro" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ae92a5119aa49cdbcf6b9f893fe4e1d98b04ccbf82ee0584ad948a44a734dea" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + [[package]] name = "ascii-canvas" version = "3.0.0" @@ -247,6 +276,29 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +[[package]] +name = "av1-grain" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f3efb2ca85bc610acfa917b5aaa36f3fcbebed5b3182d7f877b02531c4b80c8" +dependencies = [ + "anyhow", + "arrayvec", + "log", + "nom", + "num-rational", + "v_frame", +] + +[[package]] +name = "avif-serialize" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98922d6a4cfbcb08820c69d8eeccc05bb1f29bfa06b4f5b1dbfe9a868bd7608e" +dependencies = [ + "arrayvec", +] + [[package]] name = "backtrace" version = "0.3.71" @@ -304,6 +356,12 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" +[[package]] +name = "bit_field" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc827186963e592360843fb5ba4b973e145841266c1357f7180c43526f2e5b61" + [[package]] name = "bitflags" version = "1.3.2" @@ -316,6 +374,12 @@ version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" +[[package]] +name = "bitstream-io" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6099cdc01846bc367c4e7dd630dc5966dccf36b652fae7a74e17b640411a91b2" + [[package]] name = "bstr" version = "1.12.0" @@ -327,6 +391,12 @@ dependencies = [ "serde", ] +[[package]] +name = "built" +version = "0.7.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56ed6191a7e78c36abdb16ab65341eefd73d64d303fffccdbb00d51e4205967b" + [[package]] name = "bumpalo" version = "3.17.0" @@ -339,12 +409,24 @@ version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ce89b21cab1437276d2650d57e971f9d548a2d9037cc231abdc0562b97498ce" +[[package]] +name = "bytemuck" +version = "1.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9134a6ef01ce4b366b50689c94f82c14bc72bc5d0386829828a2e2752ef7958c" + [[package]] name = "byteorder" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" +[[package]] +name = "byteorder-lite" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f1fe948ff07f4bd06c30984e69f5b4899c516a3ef74f34df92a2df2ab535495" + [[package]] name = "bytes" version = "1.10.1" @@ -372,9 +454,21 @@ version = "1.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e3a13707ac958681c13b39b458c073d0d9bc8a22cb1b2f4c8e55eb72c13f362" dependencies = [ + "jobserver", + "libc", "shlex", ] +[[package]] +name = "cfg-expr" +version = "0.15.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d067ad48b8650848b989a59a86c6c36a995d02d2bf778d45c3c5d57bc2718f02" +dependencies = [ + "smallvec", + "target-lexicon", +] + [[package]] name = "cfg-if" version = "1.0.0" @@ -473,7 +567,6 @@ version = "0.0.0" dependencies = [ "anyhow", "pretty_assertions", - "regex", "serde_json", "similar", "tempfile", @@ -492,6 +585,7 @@ dependencies = [ "codex-core", "codex-exec", "codex-linux-sandbox", + "codex-login", "codex-mcp-server", "codex-tui", "serde_json", @@ -506,6 +600,8 @@ version = "0.0.0" dependencies = [ "clap", "codex-core", + "serde", + "toml", ] [[package]] @@ -518,6 +614,7 @@ dependencies = [ "base64 0.21.7", "bytes", "codex-apply-patch", + "codex-login", "codex-mcp-client", "dirs", "env-flags", @@ -534,11 +631,13 @@ dependencies = [ "path-absolutize", "predicates", "pretty_assertions", - "rand", + "rand 0.9.1", "reqwest", "seccompiler", "serde", "serde_json", + "strum 0.27.1", + "strum_macros 0.27.1", "tempfile", "thiserror 2.0.12", "time", @@ -584,7 +683,7 @@ dependencies = [ "log", "multimap", "path-absolutize", - "regex", + "regex-lite", "serde", "serde_json", "serde_with", @@ -607,6 +706,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "codex-login" +version = "0.0.0" +dependencies = [ + "chrono", + "reqwest", + "serde", + "serde_json", + "tokio", +] + [[package]] name = "codex-mcp-client" version = "0.0.0" @@ -634,6 +744,7 @@ dependencies = [ "serde", "serde_json", "tokio", + "toml", "tracing", "tracing-subscriber", ] @@ -643,19 +754,23 @@ name = "codex-tui" version = "0.0.0" dependencies = [ "anyhow", + "base64 0.22.1", "clap", "codex-ansi-escape", "codex-common", "codex-core", "codex-linux-sandbox", + "codex-login", "color-eyre", "crossterm", + "image", "lazy_static", "mcp-types", "path-clean", 
"pretty_assertions", "ratatui", - "regex", + "ratatui-image", + "regex-lite", "serde_json", "shlex", "strum 0.27.1", @@ -667,6 +782,7 @@ dependencies = [ "tui-input", "tui-markdown", "tui-textarea", + "unicode-segmentation", "uuid", ] @@ -697,6 +813,12 @@ dependencies = [ "tracing-error", ] +[[package]] +name = "color_quant" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" + [[package]] name = "colorchoice" version = "1.0.3" @@ -769,6 +891,25 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-utils" version = "0.8.21" @@ -1172,6 +1313,21 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "exr" +version = "1.73.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83197f59927b46c04a183a619b7c29df34e63e63c7869320862268c0ef687e0" +dependencies = [ + "bit_field", + "half", + "lebe", + "miniz_oxide 0.8.8", + "rayon-core", + "smallvec", + "zune-inflate", +] + [[package]] name = "eyre" version = "0.6.12" @@ -1199,6 +1355,15 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "fdeflate" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e6853b52649d4ac5c0bd02320cddc5ba956bdb407c4b75a2c6b75bf51500f8c" +dependencies = [ + "simd-adler32", +] + [[package]] name = "fixedbitset" version = "0.4.2" @@ -1415,6 +1580,16 @@ dependencies = [ "wasi 0.14.2+wasi-0.2.4", ] +[[package]] +name = "gif" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fb2d69b19215e18bb912fa30f7ce15846e301408695e44e0ef719f1da9e19f2" +dependencies = [ + "color_quant", + "weezl", +] + [[package]] name = "gimli" version = "0.28.1" @@ -1446,6 +1621,16 @@ dependencies = [ "tracing", ] +[[package]] +name = "half" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" +dependencies = [ + "cfg-if", + "crunchy", +] + [[package]] name = "hashbrown" version = "0.12.3" @@ -1638,7 +1823,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core", + "windows-core 0.61.0", ] [[package]] @@ -1768,6 +1953,12 @@ dependencies = [ "syn 2.0.100", ] +[[package]] +name = "icy_sixel" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccc0a9c4770bc47b0a933256a496cfb8b6531f753ea9bccb19c6dff0ff7273fc" + [[package]] name = "ident_case" version = "1.0.1" @@ -1795,6 +1986,45 @@ dependencies = [ "icu_properties", ] +[[package]] +name = "image" +version = "0.25.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db35664ce6b9810857a38a906215e75a9c879f0696556a39f59c62829710251a" +dependencies = [ + "bytemuck", + "byteorder-lite", + "color_quant", + "exr", + "gif", + "image-webp", + "num-traits", + "png", + "qoi", + "ravif", + "rayon", + "rgb", + "tiff", + "zune-core", + "zune-jpeg", +] 
+ +[[package]] +name = "image-webp" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b77d01e822461baa8409e156015a1d91735549f0f2c17691bd2d996bef238f7f" +dependencies = [ + "byteorder-lite", + "quick-error", +] + +[[package]] +name = "imgref" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0263a3d970d5c054ed9312c0057b4f3bde9c0b33836d3637361d4a9e6e7a408" + [[package]] name = "indenter" version = "0.3.3" @@ -1842,6 +2072,17 @@ dependencies = [ "syn 2.0.100", ] +[[package]] +name = "interpolate_name" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c34819042dc3d3971c46c2190835914dfbe0c3c13f61449b2997f4e9722dfa60" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + [[package]] name = "inventory" version = "0.3.20" @@ -1883,6 +2124,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.13.0" @@ -1931,6 +2181,22 @@ dependencies = [ "syn 2.0.100", ] +[[package]] +name = "jobserver" +version = "0.1.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" +dependencies = [ + "getrandom 0.3.2", + "libc", +] + +[[package]] +name = "jpeg-decoder" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5d4a7da358eff58addd2877a45865158f0d78c911d43a5784ceb7bbf52833b0" + [[package]] name = "js-sys" version = "0.3.77" @@ -1989,12 +2255,28 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +[[package]] +name = "lebe" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03087c2bad5e1034e8cace5926dec053fb3790248370865f5117a7d0213354c8" + [[package]] name = "libc" version = "0.2.172" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" +[[package]] +name = "libfuzzer-sys" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf78f52d400cf2d84a3a973a78a592b4adc535739e0a5597a0da6f0c357adc75" +dependencies = [ + "arbitrary", + "cc", +] + [[package]] name = "libredox" version = "0.1.3" @@ -2068,6 +2350,15 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "loop9" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fae87c125b03c1d2c0150c90365d7d6bcc53fb73a9acaef207d2d065860f062" +dependencies = [ + "imgref", +] + [[package]] name = "lru" version = "0.12.5" @@ -2105,6 +2396,16 @@ dependencies = [ "regex-automata 0.1.10", ] +[[package]] +name = "maybe-rayon" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea1f30cedd69f0a2954655f7188c6a834246d2bcf1e315e2ac40c4b24dc9519" +dependencies = [ + "cfg-if", + "rayon", +] + [[package]] name = "mcp-types" version = "0.0.0" @@ -2166,6 +2467,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" dependencies = [ "adler2", + "simd-adler32", ] [[package]] 
@@ -2254,6 +2556,12 @@ dependencies = [ "nom", ] +[[package]] +name = "noop_proc_macro" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0676bb32a98c1a483ce53e500a81ad9c3d5b3f7c920c28c24e9cb0980d0b5bc8" + [[package]] name = "normalize-line-endings" version = "0.3.0" @@ -2286,6 +2594,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +[[package]] +name = "num-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + [[package]] name = "num-integer" version = "0.1.46" @@ -2295,6 +2614,17 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-rational" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + [[package]] name = "num-traits" version = "0.2.19" @@ -2564,6 +2894,19 @@ dependencies = [ "time", ] +[[package]] +name = "png" +version = "0.17.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82151a2fc869e011c153adc57cf2789ccb8d9906ce52c0b39a6b5697749d7526" +dependencies = [ + "bitflags 1.3.2", + "crc32fast", + "fdeflate", + "flate2", + "miniz_oxide 0.8.8", +] + [[package]] name = "portable-atomic" version = "1.11.0" @@ -2658,6 +3001,25 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "profiling" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afbdc74edc00b6f6a218ca6a5364d6226a259d4b8ea1af4a0ea063f27e179f4d" +dependencies = [ + "profiling-procmacros", +] + +[[package]] +name = "profiling-procmacros" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a65f2e60fbf1063868558d69c6beacf412dc755f9fc020f514b7955fc914fe30" +dependencies = [ + "quote", + "syn 2.0.100", +] + [[package]] name = "pulldown-cmark" version = "0.13.0" @@ -2677,6 +3039,21 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "007d8adb5ddab6f8e3f491ac63566a7d5002cc7ed73901f72057943fa71ae1ae" +[[package]] +name = "qoi" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f6d64c71eb498fe9eae14ce4ec935c555749aef511cca85b5568910d6e48001" +dependencies = [ + "bytemuck", +] + +[[package]] +name = "quick-error" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" + [[package]] name = "quick-xml" version = "0.32.0" @@ -2711,14 +3088,35 @@ dependencies = [ "nibble_vec", ] +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + [[package]] name = "rand" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97" dependencies = [ - "rand_chacha", - "rand_core", + "rand_chacha 0.9.0", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", ] [[package]] @@ -2728,7 +3126,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.16", ] [[package]] @@ -2761,6 +3168,92 @@ dependencies = [ "unicode-width 0.2.0", ] +[[package]] +name = "ratatui-image" +version = "8.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3f1d31464920104b247593f008158372d2fdb8165e93a4299cdd6f994448c9a" +dependencies = [ + "base64 0.21.7", + "icy_sixel", + "image", + "rand 0.8.5", + "ratatui", + "rustix 0.38.44", + "thiserror 1.0.69", + "windows", +] + +[[package]] +name = "rav1e" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd87ce80a7665b1cce111f8a16c1f3929f6547ce91ade6addf4ec86a8dda5ce9" +dependencies = [ + "arbitrary", + "arg_enum_proc_macro", + "arrayvec", + "av1-grain", + "bitstream-io", + "built", + "cfg-if", + "interpolate_name", + "itertools 0.12.1", + "libc", + "libfuzzer-sys", + "log", + "maybe-rayon", + "new_debug_unreachable", + "noop_proc_macro", + "num-derive", + "num-traits", + "once_cell", + "paste", + "profiling", + "rand 0.8.5", + "rand_chacha 0.3.1", + "simd_helpers", + "system-deps", + "thiserror 1.0.69", + "v_frame", + "wasm-bindgen", +] + +[[package]] +name = "ravif" +version = "0.11.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6a5f31fcf7500f9401fea858ea4ab5525c99f2322cfcee732c0e6c74208c0c6" +dependencies = [ + "avif-serialize", + "imgref", + "loop9", + "quick-error", + "rav1e", + "rayon", + "rgb", +] + +[[package]] +name = "rayon" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + [[package]] name = "redox_syscall" version = "0.5.11" @@ -2844,6 +3337,12 @@ dependencies = [ "regex-syntax 0.8.5", ] +[[package]] +name = "regex-lite" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a" + [[package]] name = "regex-syntax" version = "0.6.29" @@ -2908,6 +3407,12 @@ dependencies = [ "windows-registry", ] +[[package]] +name = "rgb" +version = "0.8.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57397d16646700483b67d2dd6511d79318f9d057fdbd21a4066aeac8b41d310a" + [[package]] name = "ring" version = "0.17.14" @@ -3345,6 +3850,21 @@ dependencies = [ "libc", ] +[[package]] +name = "simd-adler32" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" + +[[package]] 
+name = "simd_helpers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95890f873bec569a0362c235787f3aca6e1e887302ba4840839bcc6459c42da6" +dependencies = [ + "quote", +] + [[package]] name = "simdutf8" version = "0.1.5" @@ -3652,6 +4172,25 @@ dependencies = [ "libc", ] +[[package]] +name = "system-deps" +version = "6.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3e535eb8dded36d55ec13eddacd30dec501792ff23a0b1682c38601b8cf2349" +dependencies = [ + "cfg-expr", + "heck", + "pkg-config", + "toml", + "version-compare", +] + +[[package]] +name = "target-lexicon" +version = "0.12.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" + [[package]] name = "tempfile" version = "3.19.1" @@ -3751,6 +4290,17 @@ dependencies = [ "once_cell", ] +[[package]] +name = "tiff" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba1310fcea54c6a9a4fd1aad794ecc02c31682f6bfbecdf460bf19533eed1e3e" +dependencies = [ + "flate2", + "jpeg-decoder", + "weezl", +] + [[package]] name = "time" version = "0.3.41" @@ -4175,6 +4725,17 @@ dependencies = [ "serde", ] +[[package]] +name = "v_frame" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6f32aaa24bacd11e488aa9ba66369c7cd514885742c9fe08cfe85884db3e92b" +dependencies = [ + "aligned-vec", + "num-traits", + "wasm-bindgen", +] + [[package]] name = "valuable" version = "0.1.1" @@ -4187,6 +4748,12 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" +[[package]] +name = "version-compare" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "852e951cb7832cb45cb1169900d19760cfa39b82bc0ea9c0e5a14ae88411c98b" + [[package]] name = "version_check" version = "0.9.5" @@ -4330,6 +4897,12 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "weezl" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a751b3277700db47d3e574514de2eced5e54dc8a5436a3bf7a0b248b2cee16f3" + [[package]] name = "wildmatch" version = "2.4.0" @@ -4367,19 +4940,53 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" +dependencies = [ + "windows-core 0.58.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-core" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +dependencies = [ + "windows-implement 0.58.0", + "windows-interface 0.58.0", + "windows-result 0.2.0", + "windows-strings 0.1.0", + "windows-targets 0.52.6", +] + [[package]] name = "windows-core" version = "0.61.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4763c1de310c86d75a878046489e2e5ba02c649d185f21c67d4cf8a56d098980" dependencies = [ - "windows-implement", - "windows-interface", + "windows-implement 0.60.0", + "windows-interface 0.59.1", "windows-link", - "windows-result", + "windows-result 0.3.2", 
"windows-strings 0.4.0", ] +[[package]] +name = "windows-implement" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + [[package]] name = "windows-implement" version = "0.60.0" @@ -4391,6 +4998,17 @@ dependencies = [ "syn 2.0.100", ] +[[package]] +name = "windows-interface" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + [[package]] name = "windows-interface" version = "0.59.1" @@ -4414,11 +5032,20 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" dependencies = [ - "windows-result", + "windows-result 0.3.2", "windows-strings 0.3.1", "windows-targets 0.53.0", ] +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + [[package]] name = "windows-result" version = "0.3.2" @@ -4428,6 +5055,16 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result 0.2.0", + "windows-targets 0.52.6", +] + [[package]] name = "windows-strings" version = "0.3.1" @@ -4773,3 +5410,27 @@ dependencies = [ "quote", "syn 2.0.100", ] + +[[package]] +name = "zune-core" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f423a2c17029964870cfaabb1f13dfab7d092a62a29a89264f4d36990ca414a" + +[[package]] +name = "zune-inflate" +version = "0.2.54" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73ab332fe2f6680068f3582b16a24f90ad7096d5d39b974d1c0aff0125116f02" +dependencies = [ + "simd-adler32", +] + +[[package]] +name = "zune-jpeg" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99a5bab8d7dedf81405c4bb1f2b83ea057643d9cb28778cea9eecddeedd2e028" +dependencies = [ + "zune-core", +] diff --git a/codex-rs/Cargo.toml b/codex-rs/Cargo.toml index 5af55f45c..6991a6223 100644 --- a/codex-rs/Cargo.toml +++ b/codex-rs/Cargo.toml @@ -9,6 +9,7 @@ members = [ "exec", "execpolicy", "linux-sandbox", + "login", "mcp-client", "mcp-server", "mcp-types", diff --git a/codex-rs/README.md b/codex-rs/README.md index a0e3f5846..caa21639f 100644 --- a/codex-rs/README.md +++ b/codex-rs/README.md @@ -1,392 +1,69 @@ -# codex-rs +# Codex CLI (Rust Implementation) -April 24, 2025 +We provide Codex CLI as a standalone, native executable to ensure a zero-dependency install. -Today, Codex CLI is written in TypeScript and requires Node.js 22+ to run it. For a number of users, this runtime requirement inhibits adoption: they would be better served by a standalone executable. As maintainers, we want Codex to run efficiently in a wide range of environments with minimal overhead. We also want to take advantage of operating system-specific APIs to provide better sandboxing, where possible. 
+## Installing Codex -To that end, we are moving forward with a Rust implementation of Codex CLI contained in this folder, which has the following benefits: +Today, the easiest way to install Codex is via `npm`, though we plan to publish Codex to other package managers soon. -- The CLI compiles to small, standalone, platform-specific binaries. -- Can make direct, native calls to [seccomp](https://man7.org/linux/man-pages/man2/seccomp.2.html) and [landlock](https://man7.org/linux/man-pages/man7/landlock.7.html) in order to support sandboxing on Linux. -- No runtime garbage collection, resulting in lower memory consumption and better, more predictable performance. - -Currently, the Rust implementation is materially behind the TypeScript implementation in functionality, so continue to use the TypeScript implementation for the time being. We will publish native executables via GitHub Releases as soon as we feel the Rust version is usable. - -## Code Organization - -This folder is the root of a Cargo workspace. It contains quite a bit of experimental code, but here are the key crates: - -- [`core/`](./core) contains the business logic for Codex. Ultimately, we hope this to be a library crate that is generally useful for building other Rust/native applications that use Codex. -- [`exec/`](./exec) "headless" CLI for use in automation. -- [`tui/`](./tui) CLI that launches a fullscreen TUI built with [Ratatui](https://ratatui.rs/). -- [`cli/`](./cli) CLI multitool that provides the aforementioned CLIs via subcommands. - -## Config - -The CLI can be configured via a file named `config.toml`. By default, configuration is read from `~/.codex/config.toml`, though the `CODEX_HOME` environment variable can be used to specify a directory other than `~/.codex`. - -The `config.toml` file supports the following options: - -### model - -The model that Codex should use. - -```toml -model = "o3" # overrides the default of "o4-mini" -``` - -### model_provider - -Codex comes bundled with a number of "model providers" predefined. This config value is a string that indicates which provider to use. You can also define your own providers via `model_providers`. - -For example, if you are running ollama with Mistral locally, then you would need to add the following to your config: - -```toml -model = "mistral" -model_provider = "ollama" -``` - -because the following definition for `ollama` is included in Codex: - -```toml -[model_providers.ollama] -name = "Ollama" -base_url = "http://localhost:11434/v1" -wire_api = "chat" -``` - -This option defaults to `"openai"` and the corresponding provider is defined as follows: - -```toml -[model_providers.openai] -name = "OpenAI" -base_url = "https://api.openai.com/v1" -env_key = "OPENAI_API_KEY" -wire_api = "responses" +```shell +npm i -g @openai/codex@native +codex ``` -### model_providers +You can also download a platform-specific release directly from our [GitHub Releases](https://github.com/openai/codex/releases). -This option lets you override and amend the default set of model providers bundled with Codex. This value is a map where the key is the value to use with `model_provider` to select the correspodning provider. 
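+
+As a quick smoke test after installing (an illustrative check, not an official step), confirm the binary is on your `PATH`:
+
+```shell
+codex --help
+```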
+## What's new in the Rust CLI

-For example, if you wanted to add a provider that uses the OpenAI 4o model via the chat completions API, then you
+While we are [working to close the gap between the TypeScript and Rust implementations of Codex CLI](https://github.com/openai/codex/issues/1262), note that the Rust CLI has a number of features that the TypeScript CLI does not!

-```toml
-# Recall that in TOML, root keys must be listed before tables.
-model = "gpt-4o"
-model_provider = "openai-chat-completions"
+### Config

-[model_providers.openai-chat-completions]
-# Name of the provider that will be displayed in the Codex UI.
-name = "OpenAI using Chat Completions"
-# The path `/chat/completions` will be amended to this URL to make the POST
-# request for the chat completions.
-base_url = "https://api.openai.com/v1"
-# If `env_key` is set, identifies an environment variable that must be set when
-# using Codex with this provider. The value of the environment variable must be
-# non-empty and will be used in the `Bearer TOKEN` HTTP header for the POST request.
-env_key = "OPENAI_API_KEY"
-# valid values for wire_api are "chat" and "responses".
-wire_api = "chat"
-```
+Codex supports a rich set of configuration options. Note that the Rust CLI uses `config.toml` instead of `config.json`. See [`config.md`](./config.md) for details.

-### approval_policy
+### Model Context Protocol Support

-Determines when the user should be prompted to approve whether Codex can execute a command:
+Codex CLI functions as an MCP client that can connect to MCP servers on startup. See the [`mcp_servers`](./config.md#mcp_servers) section in the configuration documentation for details.

-```toml
-# This is analogous to --suggest in the TypeScript Codex CLI
-approval_policy = "unless-allow-listed"
-```
+It is still experimental, but you can also launch Codex as an MCP _server_ by running `codex mcp`. Use the [`@modelcontextprotocol/inspector`](https://github.com/modelcontextprotocol/inspector) to try it out:

-```toml
-# If the command fails when run in the sandbox, Codex asks for permission to
-# retry the command outside the sandbox.
-approval_policy = "on-failure"
+```shell
+npx @modelcontextprotocol/inspector codex mcp
 ```

-```toml
-# User is never prompted: if the command fails, Codex will automatically try
-# something out. Note the `exec` subcommand always uses this mode.
-approval_policy = "never"
-```

-### profiles
+### Notifications

-A _profile_ is a collection of configuration values that can be set together. Multiple profiles can be defined in `config.toml` and you can specify the one you
-want to use at runtime via the `--profile` flag.
+You can enable notifications by configuring a script that is run whenever the agent finishes a turn. The [notify documentation](./config.md#notify) includes a detailed example that explains how to get desktop notifications via [terminal-notifier](https://github.com/julienXX/terminal-notifier) on macOS.

-Here is an example of a `config.toml` that defines multiple profiles:
+### `codex exec` to run Codex programmatically/non-interactively

-```toml
-model = "o3"
-approval_policy = "unless-allow-listed"
-sandbox_permissions = ["disk-full-read-access"]
-disable_response_storage = false
+To run Codex non-interactively, run `codex exec PROMPT` (you can also pass the prompt via `stdin`) and Codex will work on your task until it decides that it is done and exits. Output is printed to the terminal directly. You can set the `RUST_LOG` environment variable to see more about what's going on.
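+
+For example, a one-shot run with extra logging might look like the following (an illustrative invocation; the prompt text is made up):
+
+```shell
+RUST_LOG=info codex exec "summarize the changes on this branch"
+```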
-# Setting `profile` is equivalent to specifying `--profile o3` on the command
-# line, though the `--profile` flag can still be used to override this value.
-profile = "o3"
+### `--cd`/`-C` flag

-[model_providers.openai-chat-completions]
-name = "OpenAI using Chat Completions"
-base_url = "https://api.openai.com/v1"
-env_key = "OPENAI_API_KEY"
-wire_api = "chat"
+Sometimes it is not convenient to `cd` to the directory you want Codex to use as the "working root" before running Codex. Fortunately, `codex` supports a `--cd` option so you can specify whatever folder you want. You can confirm that Codex is honoring `--cd` by double-checking the **workdir** it reports in the TUI at the start of a new session.

-[profiles.o3]
-model = "o3"
-model_provider = "openai"
-approval_policy = "never"
+### Experimenting with the Codex Sandbox

-[profiles.gpt3]
-model = "gpt-3.5-turbo"
-model_provider = "openai-chat-completions"
+To see what happens when a command is run under the sandbox provided by Codex, we provide the following subcommands in Codex CLI:

-[profiles.zdr]
-model = "o3"
-model_provider = "openai"
-approval_policy = "on-failure"
-disable_response_storage = true
 ```
+# macOS
+codex debug seatbelt [-s SANDBOX_PERMISSION]... [COMMAND]...
-
-Users can specify config values at multiple levels. Order of precedence is as follows:
-
-1. custom command-line argument, e.g., `--model o3`
-2. as part of a profile, where the `--profile` is specified via a CLI (or in the config file itself)
-3. as an entry in `config.toml`, e.g., `model = "o3"`
-4. the default value that comes with Codex CLI (i.e., Codex CLI defaults to `o4-mini`)
-
-### sandbox_permissions
-
-List of permissions to grant to the sandbox that Codex uses to execute untrusted commands:
+# Linux
+codex debug landlock [-s SANDBOX_PERMISSION]... [COMMAND]...
 ```

-```toml
-# This is comparable to --full-auto in the TypeScript Codex CLI, though
-# specifying `disk-write-platform-global-temp-folder` adds /tmp as a writable
-# folder in addition to $TMPDIR.
-sandbox_permissions = [
-  "disk-full-read-access",
-  "disk-write-platform-user-temp-folder",
-  "disk-write-platform-global-temp-folder",
-  "disk-write-cwd",
-]
-```
+You can experiment with different values of `-s` to see what permissions the `COMMAND` needs to execute successfully.

-To add additional writable folders, use `disk-write-folder`, which takes a parameter (this can be specified multiple times):
+Note that the exact API for the `-s` flag is currently in flux. See https://github.com/openai/codex/issues/1248 for details.

-```toml
-sandbox_permissions = [
-  # ...
-  "disk-write-folder=/Users/mbolin/.pyenv/shims",
-]
 ```

-### mcp_servers
-
-Defines the list of MCP servers that Codex can consult for tool use. Currently, only servers that are launched by executing a program that communicate over stdio are supported. For servers that use the SSE transport, consider an adapter like [mcp-proxy](https://github.com/sparfenyuk/mcp-proxy).
-
-**Note:** Codex may cache the list of tools and resources from an MCP server so that Codex can include this information in context at startup without spawning all the servers. This is designed to save resources by loading MCP servers lazily.
-
-This config option is comparable to how Claude and Cursor define `mcpServers` in their respective JSON config files, though because Codex uses TOML for its config language, the format is slightly different.
For example, the following config in JSON: - -```json -{ - "mcpServers": { - "server-name": { - "command": "npx", - "args": ["-y", "mcp-server"], - "env": { - "API_KEY": "value" - } - } - } -} -``` - -Should be represented as follows in `~/.codex/config.toml`: - -```toml -# IMPORTANT: the top-level key is `mcp_servers` rather than `mcpServers`. -[mcp_servers.server-name] -command = "npx" -args = ["-y", "mcp-server"] -env = { "API_KEY" = "value" } -``` - -### disable_response_storage - -Currently, customers whose accounts are set to use Zero Data Retention (ZDR) must set `disable_response_storage` to `true` so that Codex uses an alternative to the Responses API that works with ZDR: - -```toml -disable_response_storage = true -``` +You can experiment with different values of `-s` to see what permissions the `COMMAND` needs to execute successfully. -### shell_environment_policy +Note that the exact API for the `-s` flag is currently in flux. See https://github.com/openai/codex/issues/1248 for details. -Codex spawns subprocesses (e.g. when executing a `local_shell` tool-call suggested by the assistant). By default it passes **only a minimal core subset** of your environment to those subprocesses to avoid leaking credentials. You can tune this behavior via the **`shell_environment_policy`** block in -`config.toml`: - -```toml -[shell_environment_policy] -# inherit can be "core" (default), "all", or "none" -inherit = "core" -# set to true to *skip* the filter for `"*KEY*"` and `"*TOKEN*"` -ignore_default_excludes = false -# exclude patterns (case-insensitive globs) -exclude = ["AWS_*", "AZURE_*"] -# force-set / override values -set = { CI = "1" } -# if provided, *only* vars matching these patterns are kept -include_only = ["PATH", "HOME"] -``` - -| Field | Type | Default | Description | -| ------------------------- | -------------------------- | ------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | -| `inherit` | string | `core` | Starting template for the environment:
`core` (`HOME`, `PATH`, `USER`, …), `all` (clone full parent env), or `none` (start empty). | -| `ignore_default_excludes` | boolean | `false` | When `false`, Codex removes any var whose **name** contains `KEY`, `SECRET`, or `TOKEN` (case-insensitive) before other rules run. | -| `exclude` | array<string> | `[]` | Case-insensitive glob patterns to drop after the default filter.
Examples: `"AWS_*"`, `"AZURE_*"`. | -| `set` | table<string,string> | `{}` | Explicit key/value overrides or additions – always win over inherited values. | -| `include_only` | array<string> | `[]` | If non-empty, a whitelist of patterns; only variables that match _one_ pattern survive the final step. (Generally used with `inherit = "all"`.) | - -The patterns are **glob style**, not full regular expressions: `*` matches any -number of characters, `?` matches exactly one, and character classes like -`[A-Z]`/`[^0-9]` are supported. Matching is always **case-insensitive**. This -syntax is documented in code as `EnvironmentVariablePattern` (see -`core/src/config_types.rs`). - -If you just need a clean slate with a few custom entries you can write: - -```toml -[shell_environment_policy] -inherit = "none" -set = { PATH = "/usr/bin", MY_FLAG = "1" } -``` - -Currently, `CODEX_SANDBOX_NETWORK_DISABLED=1` is also added to the environment, assuming network is disabled. This is not configurable. - -### notify - -Specify a program that will be executed to get notified about events generated by Codex. Note that the program will receive the notification argument as a string of JSON, e.g.: - -```json -{ - "type": "agent-turn-complete", - "turn-id": "12345", - "input-messages": ["Rename `foo` to `bar` and update the callsites."], - "last-assistant-message": "Rename complete and verified `cargo build` succeeds." -} -``` - -The `"type"` property will always be set. Currently, `"agent-turn-complete"` is the only notification type that is supported. - -As an example, here is a Python script that parses the JSON and decides whether to show a desktop push notification using [terminal-notifier](https://github.com/julienXX/terminal-notifier) on macOS: - -```python -#!/usr/bin/env python3 - -import json -import subprocess -import sys - - -def main() -> int: - if len(sys.argv) != 2: - print("Usage: notify.py ") - return 1 - - try: - notification = json.loads(sys.argv[1]) - except json.JSONDecodeError: - return 1 - - match notification_type := notification.get("type"): - case "agent-turn-complete": - assistant_message = notification.get("last-assistant-message") - if assistant_message: - title = f"Codex: {assistant_message}" - else: - title = "Codex: Turn Complete!" - input_messages = notification.get("input_messages", []) - message = " ".join(input_messages) - title += message - case _: - print(f"not sending a push notification for: {notification_type}") - return 0 - - subprocess.check_output( - [ - "terminal-notifier", - "-title", - title, - "-message", - message, - "-group", - "codex", - "-ignoreDnD", - "-activate", - "com.googlecode.iterm2", - ] - ) - - return 0 - - -if __name__ == "__main__": - sys.exit(main()) -``` - -To have Codex use this script for notifications, you would configure it via `notify` in `~/.codex/config.toml` using the appropriate path to `notify.py` on your computer: - -```toml -notify = ["python3", "/Users/mbolin/.codex/notify.py"] -``` - -### history - -By default, Codex CLI records messages sent to the model in `$CODEX_HOME/history.jsonl`. Note that on UNIX, the file permissions are set to `o600`, so it should only be readable and writable by the owner. - -To disable this behavior, configure `[history]` as follows: - -```toml -[history] -persistence = "none" # "save-all" is the default value -``` - -### file_opener - -Identifies the editor/URI scheme to use for hyperlinking citations in model output. 
If set, citations to files in the model output will be hyperlinked using the specified URI scheme so they can be ctrl/cmd-clicked from the terminal to open them. - -For example, if the model output includes a reference such as `【F:/home/user/project/main.py†L42-L50】`, then this would be rewritten to link to the URI `vscode://file/home/user/project/main.py:42`. - -Note this is **not** a general editor setting (like `$EDITOR`), as it only accepts a fixed set of values: - -- `"vscode"` (default) -- `"vscode-insiders"` -- `"windsurf"` -- `"cursor"` -- `"none"` to explicitly disable this feature - -Currently, `"vscode"` is the default, though Codex does not verify VS Code is installed. As such, `file_opener` may default to `"none"` or something else in the future. - -### project_doc_max_bytes - -Maximum number of bytes to read from an `AGENTS.md` file to include in the instructions sent with the first turn of a session. Defaults to 32 KiB. - -### tui +## Code Organization -Options that are specific to the TUI. +This folder is the root of a Cargo workspace. It contains quite a bit of experimental code, but here are the key crates: -```toml -[tui] -# This will make it so that Codex does not try to process mouse events, which -# means your Terminal's native drag-to-text to text selection and copy/paste -# should work. The tradeoff is that Codex will not receive any mouse events, so -# it will not be possible to use the mouse to scroll conversation history. -# -# Note that most terminals support holding down a modifier key when using the -# mouse to support text selection. For example, even if Codex mouse capture is -# enabled (i.e., this is set to `false`), you can still hold down alt while -# dragging the mouse to select text. -disable_mouse_capture = true # defaults to `false` -``` +- [`core/`](./core) contains the business logic for Codex. Ultimately, we hope this to be a library crate that is generally useful for building other Rust/native applications that use Codex. +- [`exec/`](./exec) "headless" CLI for use in automation. +- [`tui/`](./tui) CLI that launches a fullscreen TUI built with [Ratatui](https://ratatui.rs/). +- [`cli/`](./cli) CLI multitool that provides the aforementioned CLIs via subcommands. diff --git a/codex-rs/apply-patch/Cargo.toml b/codex-rs/apply-patch/Cargo.toml index 66935b202..1de09f86d 100644 --- a/codex-rs/apply-patch/Cargo.toml +++ b/codex-rs/apply-patch/Cargo.toml @@ -12,7 +12,6 @@ workspace = true [dependencies] anyhow = "1" -regex = "1.11.1" serde_json = "1.0.110" similar = "2.7.0" thiserror = "2.0.12" diff --git a/codex-rs/apply-patch/apply_patch_tool_instructions.md b/codex-rs/apply-patch/apply_patch_tool_instructions.md new file mode 100644 index 000000000..3c51d9cfb --- /dev/null +++ b/codex-rs/apply-patch/apply_patch_tool_instructions.md @@ -0,0 +1,40 @@ +To edit files, ALWAYS use the `shell` tool with `apply_patch` CLI. `apply_patch` effectively allows you to execute a diff/patch against a file, but the format of the diff specification is unique to this task, so pay careful attention to these instructions. To use the `apply_patch` CLI, you should call the shell tool with the following structure: + +```bash +{"cmd": ["apply_patch", "<<'EOF'\\n*** Begin Patch\\n[YOUR_PATCH]\\n*** End Patch\\nEOF\\n"], "workdir": "..."} +``` + +Where [YOUR_PATCH] is the actual content of your patch, specified in the following V4A diff format. + +*** [ACTION] File: [path/to/file] -> ACTION can be one of Add, Update, or Delete. 
+For each snippet of code that needs to be changed, repeat the following:
+[context_before] -> See below for further instructions on context.
+- [old_code] -> Precede the old code with a minus sign.
++ [new_code] -> Precede the new, replacement code with a plus sign.
+[context_after] -> See below for further instructions on context.
+
+For instructions on [context_before] and [context_after]:
+- By default, show 3 lines of code immediately above and 3 lines immediately below each change. If a change is within 3 lines of a previous change, do NOT duplicate the first change’s [context_after] lines in the second change’s [context_before] lines.
+- If 3 lines of context are insufficient to uniquely identify the snippet of code within the file, use the @@ operator to indicate the class or function to which the snippet belongs. For instance, we might have:
+@@ class BaseClass
+[3 lines of pre-context]
+- [old_code]
++ [new_code]
+[3 lines of post-context]
+
+- If a code block is repeated so many times in a class or function that even a single `@@` statement and 3 lines of context cannot uniquely identify the snippet of code, you can use multiple `@@` statements to jump to the right context. For instance:
+
+@@ class BaseClass
+@@ def method():
+[3 lines of pre-context]
+- [old_code]
++ [new_code]
+[3 lines of post-context]
+
+Note, then, that we do not use line numbers in this diff format, as the context is enough to uniquely identify code. An example of a message that you might pass as "input" to this function, in order to apply a patch, is shown below.
+
+```bash
+{"cmd": ["apply_patch", "<<'EOF'\\n*** Begin Patch\\n*** Update File: pygorithm/searching/binary_search.py\\n@@ class BaseClass\\n@@ def search():\\n- pass\\n+ raise NotImplementedError()\\n@@ class Subclass\\n@@ def search():\\n- pass\\n+ raise NotImplementedError()\\n*** End Patch\\nEOF\\n"], "workdir": "..."}
+```
+
+File references can only be relative, NEVER ABSOLUTE. After the apply_patch command is run, it will always say "Done!", regardless of whether the patch was successfully applied or not. However, you can determine whether there are issues and errors by looking at any warnings or logging lines printed BEFORE the "Done!" is output.
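+
+As a minimal end-to-end illustration (the file name and content here are hypothetical), an Add File patch consists solely of `+` lines:
+
+```bash
+{"cmd": ["apply_patch", "<<'EOF'\\n*** Begin Patch\\n*** Add File: hello.txt\\n+Hello, world!\\n*** End Patch\\nEOF\\n"], "workdir": "..."}
+```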
diff --git a/codex-rs/apply-patch/src/lib.rs b/codex-rs/apply-patch/src/lib.rs
index fcbc97b4f..5a5290bff 100644
--- a/codex-rs/apply-patch/src/lib.rs
+++ b/codex-rs/apply-patch/src/lib.rs
@@ -19,6 +19,9 @@ use tree_sitter::LanguageError;
 use tree_sitter::Parser;
 use tree_sitter_bash::LANGUAGE as BASH;
 
+/// Detailed instructions for gpt-4.1 on how to use the `apply_patch` tool.
+pub const APPLY_PATCH_TOOL_INSTRUCTIONS: &str = include_str!("../apply_patch_tool_instructions.md");
+
 #[derive(Debug, Error, PartialEq)]
 pub enum ApplyPatchError {
     #[error(transparent)]
diff --git a/codex-rs/apply-patch/src/parser.rs b/codex-rs/apply-patch/src/parser.rs
index 391255def..d07691a49 100644
--- a/codex-rs/apply-patch/src/parser.rs
+++ b/codex-rs/apply-patch/src/parser.rs
@@ -37,7 +37,15 @@ const EOF_MARKER: &str = "*** End of File";
 const CHANGE_CONTEXT_MARKER: &str = "@@ ";
 const EMPTY_CHANGE_CONTEXT_MARKER: &str = "@@";
 
-#[derive(Debug, PartialEq, Error)]
+/// Currently, the only OpenAI model that knowingly requires lenient parsing is
+/// gpt-4.1. While we could try to require everyone to pass in a strictness
+/// param when invoking apply_patch, it is a pain to thread it through all of
+/// the call sites, so we resign ourselves to allowing lenient parsing for all
+/// models. See [`ParseMode::Lenient`] for details on the exceptions we make
+/// for gpt-4.1.
+const PARSE_IN_STRICT_MODE: bool = false;
+
+#[derive(Debug, PartialEq, Error, Clone)]
 pub enum ParseError {
     #[error("invalid patch: {0}")]
     InvalidPatchError(String),
@@ -46,7 +54,7 @@ pub enum ParseError {
 }
 use ParseError::*;
 
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Clone)]
 #[allow(clippy::enum_variant_names)]
 pub enum Hunk {
     AddFile {
@@ -78,7 +86,7 @@ impl Hunk {
 
 use Hunk::*;
 
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Clone)]
 pub struct UpdateFileChunk {
     /// A single line of context used to narrow down the position of the chunk
     /// (this is usually a class, method, or function definition.)
@@ -95,19 +103,68 @@ pub struct UpdateFileChunk {
 }
 
 pub fn parse_patch(patch: &str) -> Result<Vec<Hunk>, ParseError> {
+    let mode = if PARSE_IN_STRICT_MODE {
+        ParseMode::Strict
+    } else {
+        ParseMode::Lenient
+    };
+    parse_patch_text(patch, mode)
+}
+
+enum ParseMode {
+    /// Parse the patch text argument as is.
+    Strict,
+
+    /// GPT-4.1 is known to formulate the `command` array for the `local_shell`
+    /// tool call for an `apply_patch` call using something like the following:
+    ///
+    /// ```json
+    /// [
+    ///   "apply_patch",
+    ///   "<<'EOF'\n*** Begin Patch\n*** Update File: README.md\n@@...\n*** End Patch\nEOF\n",
+    /// ]
+    /// ```
+    ///
+    /// This is a problem because `local_shell` is a bit of a misnomer: the
+    /// `command` is not invoked by passing the arguments to a shell like Bash,
+    /// but is invoked using something akin to `execvpe(3)`.
+    ///
+    /// This is significant in this case because where a shell would interpret
+    /// `<<'EOF'...` as a heredoc and pass the contents via stdin (which is
+    /// fine, as `apply_patch` is specified to read from stdin if no argument is
+    /// passed), `execvpe(3)` interprets the heredoc as a literal string. To get
+    /// the `local_shell` tool to run a command the way a shell would, the
+    /// `command` array must be something like:
+    ///
+    /// ```json
+    /// [
+    ///   "bash",
+    ///   "-lc",
+    ///   "apply_patch <<'EOF'\n*** Begin Patch\n*** Update File: README.md\n@@...\n*** End Patch\nEOF\n",
+    /// ]
+    /// ```
+    ///
+    /// In lenient mode, we check if the argument to `apply_patch` starts with
+    /// `<<'EOF'` and ends with `EOF\n`. If so, we strip off these markers,
+    /// trim() the result, and treat what is left as the patch text.
+    Lenient,
+}
+
+fn parse_patch_text(patch: &str, mode: ParseMode) -> Result<Vec<Hunk>, ParseError> {
     let lines: Vec<&str> = patch.trim().lines().collect();
-    if lines.is_empty() || lines[0] != BEGIN_PATCH_MARKER {
-        return Err(InvalidPatchError(String::from(
-            "The first line of the patch must be '*** Begin Patch'",
-        )));
-    }
-    let last_line_index = lines.len() - 1;
-    if lines[last_line_index] != END_PATCH_MARKER {
-        return Err(InvalidPatchError(String::from(
-            "The last line of the patch must be '*** End Patch'",
-        )));
-    }
+    let lines: &[&str] = match check_patch_boundaries_strict(&lines) {
+        Ok(()) => &lines,
+        Err(e) => match mode {
+            ParseMode::Strict => {
+                return Err(e);
+            }
+            ParseMode::Lenient => check_patch_boundaries_lenient(&lines, e)?,
+        },
+    };
+    let mut hunks: Vec<Hunk> = Vec::new();
+    // The above checks ensure that lines.len() >= 2.
+    let last_line_index = lines.len().saturating_sub(1);
     let mut remaining_lines = &lines[1..last_line_index];
     let mut line_number = 2;
     while !remaining_lines.is_empty() {
@@ -119,6 +176,64 @@ pub fn parse_patch(patch: &str) -> Result<Vec<Hunk>, ParseError> {
     Ok(hunks)
 }
 
+/// Checks the start and end lines of the patch text for `apply_patch`,
+/// returning an error if they do not match the expected markers.
+fn check_patch_boundaries_strict(lines: &[&str]) -> Result<(), ParseError> {
+    let (first_line, last_line) = match lines {
+        [] => (None, None),
+        [first] => (Some(first), Some(first)),
+        [first, .., last] => (Some(first), Some(last)),
+    };
+    check_start_and_end_lines_strict(first_line, last_line)
+}
+
+/// If we are in lenient mode, we check whether the first line starts with
+/// `<<'EOF'` and the last line is `EOF`. If so, we strip off those heredoc
+/// markers and check the inner lines against the strict patch boundaries.
+fn check_patch_boundaries_lenient<'a>(
+    original_lines: &'a [&'a str],
+    original_parse_error: ParseError,
+) -> Result<&'a [&'a str], ParseError> {
+    match original_lines {
+        [first, .., last] => {
+            if first.starts_with("<<'EOF'")
+                && last == &"EOF"
+                && original_lines.len() >= 4
+            {
+                let inner_lines = &original_lines[1..original_lines.len() - 1];
+                match check_patch_boundaries_strict(inner_lines) {
+                    Ok(()) => Ok(inner_lines),
+                    Err(e) => Err(e),
+                }
+            } else {
+                Err(original_parse_error)
+            }
+        }
+        _ => Err(original_parse_error),
+    }
+}
+
+fn check_start_and_end_lines_strict(
+    first_line: Option<&&str>,
+    last_line: Option<&&str>,
+) -> Result<(), ParseError> {
+    match (first_line, last_line) {
+        (Some(&first), Some(&last)) if first == BEGIN_PATCH_MARKER && last == END_PATCH_MARKER => {
+            Ok(())
+        }
+        (Some(&first), _) if first != BEGIN_PATCH_MARKER => Err(InvalidPatchError(String::from(
+            "The first line of the patch must be '*** Begin Patch'",
+        ))),
+        _ => Err(InvalidPatchError(String::from(
+            "The last line of the patch must be '*** End Patch'",
+        ))),
+    }
+}
+
 /// Attempts to parse a single hunk from the start of lines.
 /// Returns the parsed hunk and the number of lines parsed (or a ParseError).
 fn parse_one_hunk(lines: &[&str], line_number: usize) -> Result<(Hunk, usize), ParseError> {
@@ -312,22 +427,23 @@ fn parse_update_file_chunk(
 
 #[test]
 fn test_parse_patch() {
     assert_eq!(
-        parse_patch("bad"),
+        parse_patch_text("bad", ParseMode::Strict),
         Err(InvalidPatchError(
             "The first line of the patch must be '*** Begin Patch'".to_string()
         ))
     );
     assert_eq!(
-        parse_patch("*** Begin Patch\nbad"),
+        parse_patch_text("*** Begin Patch\nbad", ParseMode::Strict),
         Err(InvalidPatchError(
             "The last line of the patch must be '*** End Patch'".to_string()
         ))
     );
     assert_eq!(
-        parse_patch(
+        parse_patch_text(
             "*** Begin Patch\n\
              *** Update File: test.py\n\
-             *** End Patch"
+             *** End Patch",
+            ParseMode::Strict
         ),
         Err(InvalidHunkError {
             message: "Update file hunk for path 'test.py' is empty".to_string(),
@@ -335,14 +451,15 @@ fn test_parse_patch() {
         })
     );
     assert_eq!(
-        parse_patch(
+        parse_patch_text(
             "*** Begin Patch\n\
-             *** End Patch"
+             *** End Patch",
+            ParseMode::Strict
         ),
         Ok(Vec::new())
     );
     assert_eq!(
-        parse_patch(
+        parse_patch_text(
             "*** Begin Patch\n\
              *** Add File: path/add.py\n\
              +abc\n\
@@ -353,7 +470,8 @@ fn test_parse_patch() {
              @@ def f():\n\
              - pass\n\
              + return 123\n\
-             *** End Patch"
+             *** End Patch",
+            ParseMode::Strict
         ),
         Ok(vec![
             AddFile {
@@ -377,14 +495,15 @@ fn test_parse_patch() {
     );
     // Update hunk followed by another hunk (Add File).
assert_eq!( - parse_patch( + parse_patch_text( "*** Begin Patch\n\ *** Update File: file.py\n\ @@\n\ +line\n\ *** Add File: other.py\n\ +content\n\ - *** End Patch" + *** End Patch", + ParseMode::Strict ), Ok(vec![ UpdateFile { @@ -407,12 +526,13 @@ fn test_parse_patch() { // Update hunk without an explicit @@ header for the first chunk should parse. // Use a raw string to preserve the leading space diff marker on the context line. assert_eq!( - parse_patch( + parse_patch_text( r#"*** Begin Patch *** Update File: file2.py import foo +bar *** End Patch"#, + ParseMode::Strict ), Ok(vec![UpdateFile { path: PathBuf::from("file2.py"), @@ -427,6 +547,80 @@ fn test_parse_patch() { ); } +#[test] +fn test_parse_patch_lenient() { + let patch_text = r#"*** Begin Patch +*** Update File: file2.py + import foo ++bar +*** End Patch"#; + let expected_patch = vec![UpdateFile { + path: PathBuf::from("file2.py"), + move_path: None, + chunks: vec![UpdateFileChunk { + change_context: None, + old_lines: vec!["import foo".to_string()], + new_lines: vec!["import foo".to_string(), "bar".to_string()], + is_end_of_file: false, + }], + }]; + let expected_error = + InvalidPatchError("The first line of the patch must be '*** Begin Patch'".to_string()); + + let patch_text_in_heredoc = format!("<, + config_overrides: CliConfigOverrides, codex_linux_sandbox_exe: Option, sandbox_type: SandboxType, ) -> anyhow::Result<()> { let sandbox_policy = create_sandbox_policy(full_auto, sandbox); let cwd = std::env::current_dir()?; - let config = Config::load_with_overrides(ConfigOverrides { - sandbox_policy: Some(sandbox_policy), - codex_linux_sandbox_exe, - ..Default::default() - })?; + let config = Config::load_with_cli_overrides( + config_overrides + .parse_overrides() + .map_err(anyhow::Error::msg)?, + ConfigOverrides { + sandbox_policy: Some(sandbox_policy), + codex_linux_sandbox_exe, + ..Default::default() + }, + )?; let stdio_policy = StdioPolicy::Inherit; let env = create_env(&config.shell_environment_policy); diff --git a/codex-rs/cli/src/lib.rs b/codex-rs/cli/src/lib.rs index bf85c98c8..fa78d18ab 100644 --- a/codex-rs/cli/src/lib.rs +++ b/codex-rs/cli/src/lib.rs @@ -1,8 +1,10 @@ pub mod debug_sandbox; mod exit_status; +pub mod login; pub mod proto; use clap::Parser; +use codex_common::CliConfigOverrides; use codex_common::SandboxPermissionOption; #[derive(Debug, Parser)] @@ -14,6 +16,9 @@ pub struct SeatbeltCommand { #[clap(flatten)] pub sandbox: SandboxPermissionOption, + #[clap(skip)] + pub config_overrides: CliConfigOverrides, + /// Full command args to run under seatbelt. #[arg(trailing_var_arg = true)] pub command: Vec, @@ -28,6 +33,9 @@ pub struct LandlockCommand { #[clap(flatten)] pub sandbox: SandboxPermissionOption, + #[clap(skip)] + pub config_overrides: CliConfigOverrides, + /// Full command args to run under landlock. #[arg(trailing_var_arg = true)] pub command: Vec, diff --git a/codex-rs/cli/src/login.rs b/codex-rs/cli/src/login.rs new file mode 100644 index 000000000..af3fb667f --- /dev/null +++ b/codex-rs/cli/src/login.rs @@ -0,0 +1,35 @@ +use codex_common::CliConfigOverrides; +use codex_core::config::Config; +use codex_core::config::ConfigOverrides; +use codex_login::login_with_chatgpt; + +pub async fn run_login_with_chatgpt(cli_config_overrides: CliConfigOverrides) -> ! 
{ + let cli_overrides = match cli_config_overrides.parse_overrides() { + Ok(v) => v, + Err(e) => { + eprintln!("Error parsing -c overrides: {e}"); + std::process::exit(1); + } + }; + + let config_overrides = ConfigOverrides::default(); + let config = match Config::load_with_cli_overrides(cli_overrides, config_overrides) { + Ok(config) => config, + Err(e) => { + eprintln!("Error loading configuration: {e}"); + std::process::exit(1); + } + }; + + let capture_output = false; + match login_with_chatgpt(&config.codex_home, capture_output).await { + Ok(_) => { + eprintln!("Successfully logged in"); + std::process::exit(0); + } + Err(e) => { + eprintln!("Error logging in: {e}"); + std::process::exit(1); + } + } +} diff --git a/codex-rs/cli/src/main.rs b/codex-rs/cli/src/main.rs index 8f44962e6..0e9ba0182 100644 --- a/codex-rs/cli/src/main.rs +++ b/codex-rs/cli/src/main.rs @@ -1,7 +1,9 @@ use clap::Parser; use codex_cli::LandlockCommand; use codex_cli::SeatbeltCommand; +use codex_cli::login::run_login_with_chatgpt; use codex_cli::proto; +use codex_common::CliConfigOverrides; use codex_exec::Cli as ExecCli; use codex_tui::Cli as TuiCli; use std::path::PathBuf; @@ -19,6 +21,9 @@ use crate::proto::ProtoCli; subcommand_negates_reqs = true )] struct MultitoolCli { + #[clap(flatten)] + pub config_overrides: CliConfigOverrides, + #[clap(flatten)] interactive: TuiCli, @@ -32,6 +37,9 @@ enum Subcommand { #[clap(visible_alias = "e")] Exec(ExecCli), + /// Login with ChatGPT. + Login(LoginCommand), + /// Experimental: run Codex as an MCP server. Mcp, @@ -59,7 +67,10 @@ enum DebugCommand { } #[derive(Debug, Parser)] -struct ReplProto {} +struct LoginCommand { + #[clap(skip)] + config_overrides: CliConfigOverrides, +} fn main() -> anyhow::Result<()> { codex_linux_sandbox::run_with_sandbox(|codex_linux_sandbox_exe| async move { @@ -73,28 +84,38 @@ async fn cli_main(codex_linux_sandbox_exe: Option) -> anyhow::Result<() match cli.subcommand { None => { - codex_tui::run_main(cli.interactive, codex_linux_sandbox_exe)?; + let mut tui_cli = cli.interactive; + prepend_config_flags(&mut tui_cli.config_overrides, cli.config_overrides); + codex_tui::run_main(tui_cli, codex_linux_sandbox_exe)?; } - Some(Subcommand::Exec(exec_cli)) => { + Some(Subcommand::Exec(mut exec_cli)) => { + prepend_config_flags(&mut exec_cli.config_overrides, cli.config_overrides); codex_exec::run_main(exec_cli, codex_linux_sandbox_exe).await?; } Some(Subcommand::Mcp) => { codex_mcp_server::run_main(codex_linux_sandbox_exe).await?; } - Some(Subcommand::Proto(proto_cli)) => { + Some(Subcommand::Login(mut login_cli)) => { + prepend_config_flags(&mut login_cli.config_overrides, cli.config_overrides); + run_login_with_chatgpt(login_cli.config_overrides).await; + } + Some(Subcommand::Proto(mut proto_cli)) => { + prepend_config_flags(&mut proto_cli.config_overrides, cli.config_overrides); proto::run_main(proto_cli).await?; } Some(Subcommand::Debug(debug_args)) => match debug_args.cmd { - DebugCommand::Seatbelt(seatbelt_command) => { + DebugCommand::Seatbelt(mut seatbelt_cli) => { + prepend_config_flags(&mut seatbelt_cli.config_overrides, cli.config_overrides); codex_cli::debug_sandbox::run_command_under_seatbelt( - seatbelt_command, + seatbelt_cli, codex_linux_sandbox_exe, ) .await?; } - DebugCommand::Landlock(landlock_command) => { + DebugCommand::Landlock(mut landlock_cli) => { + prepend_config_flags(&mut landlock_cli.config_overrides, cli.config_overrides); codex_cli::debug_sandbox::run_command_under_landlock( - landlock_command, + landlock_cli, 
codex_linux_sandbox_exe,
                )
                .await?;
@@ -104,3 +125,14 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()>
 
     Ok(())
 }
+
+/// Prepend root-level overrides so they have lower precedence than
+/// CLI-specific ones specified after the subcommand (if any).
+fn prepend_config_flags(
+    subcommand_config_overrides: &mut CliConfigOverrides,
+    cli_config_overrides: CliConfigOverrides,
+) {
+    subcommand_config_overrides
+        .raw_overrides
+        .splice(0..0, cli_config_overrides.raw_overrides);
+}
diff --git a/codex-rs/cli/src/proto.rs b/codex-rs/cli/src/proto.rs
index 6dbe049cc..148699552 100644
--- a/codex-rs/cli/src/proto.rs
+++ b/codex-rs/cli/src/proto.rs
@@ -2,6 +2,7 @@ use std::io::IsTerminal;
 use std::sync::Arc;
 
 use clap::Parser;
+use codex_common::CliConfigOverrides;
 use codex_core::Codex;
 use codex_core::config::Config;
 use codex_core::config::ConfigOverrides;
@@ -13,9 +14,12 @@ use tracing::error;
 use tracing::info;
 
 #[derive(Debug, Parser)]
-pub struct ProtoCli {}
+pub struct ProtoCli {
+    #[clap(skip)]
+    pub config_overrides: CliConfigOverrides,
+}
 
-pub async fn run_main(_opts: ProtoCli) -> anyhow::Result<()> {
+pub async fn run_main(opts: ProtoCli) -> anyhow::Result<()> {
     if std::io::stdin().is_terminal() {
         anyhow::bail!("Protocol mode expects stdin to be a pipe, not a terminal");
     }
@@ -24,7 +28,12 @@ pub async fn run_main(_opts: ProtoCli) -> anyhow::Result<()> {
         .with_writer(std::io::stderr)
         .init();
 
-    let config = Config::load_with_overrides(ConfigOverrides::default())?;
+    let ProtoCli { config_overrides } = opts;
+    let overrides_vec = config_overrides
+        .parse_overrides()
+        .map_err(anyhow::Error::msg)?;
+
+    let config = Config::load_with_cli_overrides(overrides_vec, ConfigOverrides::default())?;
     let ctrl_c = notify_on_sigint();
     let (codex, _init_id) = Codex::spawn(config, ctrl_c.clone()).await?;
     let codex = Arc::new(codex);
diff --git a/codex-rs/common/Cargo.toml b/codex-rs/common/Cargo.toml
index 95e4a5318..b4b658dab 100644
--- a/codex-rs/common/Cargo.toml
+++ b/codex-rs/common/Cargo.toml
@@ -9,8 +9,10 @@ workspace = true
 [dependencies]
 clap = { version = "4", features = ["derive", "wrap_help"], optional = true }
 codex-core = { path = "../core" }
+toml = { version = "0.8", optional = true }
+serde = { version = "1", optional = true }
 
 [features]
 # Separate feature so that `clap` is not a mandatory dependency.
-cli = ["clap"]
+cli = ["clap", "toml", "serde"]
 elapsed = []
diff --git a/codex-rs/common/src/config_override.rs b/codex-rs/common/src/config_override.rs
new file mode 100644
index 000000000..610195d6d
--- /dev/null
+++ b/codex-rs/common/src/config_override.rs
@@ -0,0 +1,170 @@
+//! Support for `-c key=value` overrides shared across Codex CLI tools.
+//!
+//! This module provides a [`CliConfigOverrides`] struct that can be embedded
+//! into a `clap`-derived CLI struct using `#[clap(flatten)]`. Each occurrence
+//! of `-c key=value` (or `--config key=value`) will be collected as a raw
+//! string. Helper methods are provided to convert the raw strings into
+//! key/value pairs as well as to apply them onto a mutable
+//! `toml::Value` representing the configuration tree.
+
+use clap::ArgAction;
+use clap::Parser;
+use serde::de::Error as SerdeError;
+use toml::Value;
+
+/// CLI option that captures arbitrary configuration overrides specified as
+/// `-c key=value`. It intentionally keeps both halves **unparsed** so that the
+/// calling code can decide how to interpret the right-hand side.
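+/// For example, `-c model=o3` is captured here as the raw string `model=o3`
+/// and is only split into a key/value pair when `parse_overrides` is called.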
+#[derive(Parser, Debug, Default, Clone)]
+pub struct CliConfigOverrides {
+    /// Override a configuration value that would otherwise be loaded from
+    /// `~/.codex/config.toml`. Use a dotted path (`foo.bar.baz`) to override
+    /// nested values. The `value` portion is parsed as TOML. If it fails to
+    /// parse as TOML, the raw string is used as a literal.
+    ///
+    /// Examples:
+    /// - `-c model="o3"`
+    /// - `-c 'sandbox_permissions=["disk-full-read-access"]'`
+    /// - `-c shell_environment_policy.inherit=all`
+    #[arg(
+        short = 'c',
+        long = "config",
+        value_name = "key=value",
+        action = ArgAction::Append,
+        global = true,
+    )]
+    pub raw_overrides: Vec<String>,
+}
+
+impl CliConfigOverrides {
+    /// Parse the raw strings captured from the CLI into a list of `(path,
+    /// value)` tuples where `value` is a `toml::Value`.
+    pub fn parse_overrides(&self) -> Result<Vec<(String, Value)>, String> {
+        self.raw_overrides
+            .iter()
+            .map(|s| {
+                // Only split on the *first* '=' so values are free to contain
+                // the character.
+                let mut parts = s.splitn(2, '=');
+                let key = match parts.next() {
+                    Some(k) => k.trim(),
+                    None => return Err("Override missing key".to_string()),
+                };
+                let value_str = parts
+                    .next()
+                    .ok_or_else(|| format!("Invalid override (missing '='): {s}"))?
+                    .trim();
+
+                if key.is_empty() {
+                    return Err(format!("Empty key in override: {s}"));
+                }
+
+                // Attempt to parse as TOML. If that fails, treat it as a raw
+                // string. This allows convenient usage such as
+                // `-c model=o3` without the quotes.
+                let value: Value = match parse_toml_value(value_str) {
+                    Ok(v) => v,
+                    Err(_) => Value::String(value_str.to_string()),
+                };
+
+                Ok((key.to_string(), value))
+            })
+            .collect()
+    }
+
+    /// Apply all parsed overrides onto `target`. Intermediate objects will be
+    /// created as necessary. Values located at the destination path will be
+    /// replaced.
+    pub fn apply_on_value(&self, target: &mut Value) -> Result<(), String> {
+        let overrides = self.parse_overrides()?;
+        for (path, value) in overrides {
+            apply_single_override(target, &path, value);
+        }
+        Ok(())
+    }
+}
+
+/// Apply a single override onto `root`, creating intermediate objects as
+/// necessary.
+fn apply_single_override(root: &mut Value, path: &str, value: Value) {
+    use toml::value::Table;
+
+    let parts: Vec<&str> = path.split('.').collect();
+    let mut current = root;
+
+    for (i, part) in parts.iter().enumerate() {
+        let is_last = i == parts.len() - 1;
+
+        if is_last {
+            match current {
+                Value::Table(tbl) => {
+                    tbl.insert((*part).to_string(), value);
+                }
+                _ => {
+                    let mut tbl = Table::new();
+                    tbl.insert((*part).to_string(), value);
+                    *current = Value::Table(tbl);
+                }
+            }
+            return;
+        }
+
+        // Traverse or create intermediate table.
+        match current {
+            Value::Table(tbl) => {
+                current = tbl
+                    .entry((*part).to_string())
+                    .or_insert_with(|| Value::Table(Table::new()));
+            }
+            _ => {
+                *current = Value::Table(Table::new());
+                if let Value::Table(tbl) = current {
+                    current = tbl
+                        .entry((*part).to_string())
+                        .or_insert_with(|| Value::Table(Table::new()));
+                }
+            }
+        }
+    }
+}
+
+fn parse_toml_value(raw: &str) -> Result<Value, toml::de::Error> {
+    let wrapped = format!("_x_ = {raw}");
+    let table: toml::Table = toml::from_str(&wrapped)?;
+    table
+        .get("_x_")
+        .cloned()
+        .ok_or_else(|| SerdeError::custom("missing sentinel key"))
+}
+
+#[cfg(all(test, feature = "cli"))]
+#[allow(clippy::expect_used, clippy::unwrap_used)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn parses_basic_scalar() {
+        let v = parse_toml_value("42").expect("parse");
+        assert_eq!(v.as_integer(), Some(42));
+    }
+
+    #[test]
+    fn fails_on_unquoted_string() {
+        assert!(parse_toml_value("hello").is_err());
+    }
+
+    #[test]
+    fn parses_array() {
+        let v = parse_toml_value("[1, 2, 3]").expect("parse");
+        let arr = v.as_array().expect("array");
+        assert_eq!(arr.len(), 3);
+    }
+
+    #[test]
+    fn parses_inline_table() {
+        let v = parse_toml_value("{a = 1, b = 2}").expect("parse");
+        let tbl = v.as_table().expect("table");
+        assert_eq!(tbl.get("a").unwrap().as_integer(), Some(1));
+        assert_eq!(tbl.get("b").unwrap().as_integer(), Some(2));
+    }
+}
diff --git a/codex-rs/common/src/lib.rs b/codex-rs/common/src/lib.rs
index 253371888..c2283640c 100644
--- a/codex-rs/common/src/lib.rs
+++ b/codex-rs/common/src/lib.rs
@@ -8,3 +8,9 @@ pub mod elapsed;
 pub use approval_mode_cli_arg::ApprovalModeCliArg;
 #[cfg(feature = "cli")]
 pub use approval_mode_cli_arg::SandboxPermissionOption;
+
+#[cfg(any(feature = "cli", test))]
+mod config_override;
+
+#[cfg(feature = "cli")]
+pub use config_override::CliConfigOverrides;
diff --git a/codex-rs/config.md b/codex-rs/config.md
new file mode 100644
index 000000000..ffa735ff2
--- /dev/null
+++ b/codex-rs/config.md
@@ -0,0 +1,415 @@
+# Config
+
+Codex supports several mechanisms for setting config values:
+
+- Config-specific command-line flags, such as `--model o3` (highest precedence).
+- A generic `-c`/`--config` flag that takes a `key=value` pair, such as `--config model="o3"`.
+  - The key can contain dots to set a value deeper than the root, e.g. `--config model_providers.openai.wire_api="chat"`.
+  - Values can contain objects, such as `--config shell_environment_policy.include_only=["PATH", "HOME", "USER"]`.
+  - For consistency with `config.toml`, values are in TOML format rather than JSON format, so use `{a = 1, b = 2}` rather than `{"a": 1, "b": 2}`.
+  - If `value` cannot be parsed as a valid TOML value, it is treated as a string value. This means that both `-c model="o3"` and `-c model=o3` are equivalent.
+- The `$CODEX_HOME/config.toml` configuration file where the `CODEX_HOME` environment variable defaults to `~/.codex`. (Note `CODEX_HOME` will also be where logs and other Codex-related information are stored.)
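+
+For example, the following are interchangeable ways to set the model from the command line (an illustrative sketch; the last two are equivalent because an unquoted value that fails TOML parsing is treated as a string):
+
+```shell
+codex --model o3
+codex -c model="o3"
+codex -c model=o3
+```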
+Both the `--config` flag and the `config.toml` file support the following options:
+
+## model
+
+The model that Codex should use.
+
+```toml
+model = "o3"  # overrides the default of "codex-mini-latest"
+```
+
+## model_provider
+
+Codex comes bundled with a number of "model providers" predefined. This config value is a string that indicates which provider to use. You can also define your own providers via `model_providers`.
+
+For example, if you are running ollama with Mistral locally, then you would need to add the following to your config:
+
+```toml
+model = "mistral"
+model_provider = "ollama"
+```
+
+because the following definition for `ollama` is included in Codex:
+
+```toml
+[model_providers.ollama]
+name = "Ollama"
+base_url = "http://localhost:11434/v1"
+wire_api = "chat"
+```
+
+This option defaults to `"openai"` and the corresponding provider is defined as follows:
+
+```toml
+[model_providers.openai]
+name = "OpenAI"
+base_url = "https://api.openai.com/v1"
+env_key = "OPENAI_API_KEY"
+wire_api = "responses"
+```
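+
+Because `model_provider` is itself an ordinary config value, it can also be switched for a single run with the generic `-c` flag (an illustrative sketch):
+
+```shell
+codex -c model_provider=ollama -c model=mistral
+```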
+
+## model_providers
+
+This option lets you override and amend the default set of model providers bundled with Codex. This value is a map where the key is the value to use with `model_provider` to select the corresponding provider.
+
+For example, if you wanted to add a provider that uses the OpenAI 4o model via the chat completions API, you could add the following configuration:
+
+```toml
+# Recall that in TOML, root keys must be listed before tables.
+model = "gpt-4o"
+model_provider = "openai-chat-completions"
+
+[model_providers.openai-chat-completions]
+# Name of the provider that will be displayed in the Codex UI.
+name = "OpenAI using Chat Completions"
+# The path `/chat/completions` will be amended to this URL to make the POST
+# request for the chat completions.
+base_url = "https://api.openai.com/v1"
+# If `env_key` is set, identifies an environment variable that must be set when
+# using Codex with this provider. The value of the environment variable must be
+# non-empty and will be used in the `Bearer TOKEN` HTTP header for the POST request.
+env_key = "OPENAI_API_KEY"
+# valid values for wire_api are "chat" and "responses".
+wire_api = "chat"
+```
+
+## approval_policy
+
+Determines when the user should be prompted to approve whether Codex can execute a command:
+
+```toml
+# This is analogous to --suggest in the TypeScript Codex CLI
+approval_policy = "unless-allow-listed"
+```
+
+```toml
+# If the command fails when run in the sandbox, Codex asks for permission to
+# retry the command outside the sandbox.
+approval_policy = "on-failure"
+```
+
+```toml
+# User is never prompted: if the command fails, Codex will automatically try
+# something out. Note the `exec` subcommand always uses this mode.
+approval_policy = "never"
+```
+
+## profiles
+
+A _profile_ is a collection of configuration values that can be set together. Multiple profiles can be defined in `config.toml`, and you can specify the one you want to use at runtime via the `--profile` flag.
+
+Here is an example of a `config.toml` that defines multiple profiles:
+
+```toml
+model = "o3"
+approval_policy = "unless-allow-listed"
+sandbox_permissions = ["disk-full-read-access"]
+disable_response_storage = false
+
+# Setting `profile` is equivalent to specifying `--profile o3` on the command
+# line, though the `--profile` flag can still be used to override this value.
+profile = "o3"
+
+[model_providers.openai-chat-completions]
+name = "OpenAI using Chat Completions"
+base_url = "https://api.openai.com/v1"
+env_key = "OPENAI_API_KEY"
+wire_api = "chat"
+
+[profiles.o3]
+model = "o3"
+model_provider = "openai"
+approval_policy = "never"
+
+[profiles.gpt3]
+model = "gpt-3.5-turbo"
+model_provider = "openai-chat-completions"
+
+[profiles.zdr]
+model = "o3"
+model_provider = "openai"
+approval_policy = "on-failure"
+disable_response_storage = true
+```
+
+Users can specify config values at multiple levels. Order of precedence is as follows:
+
+1. custom command-line argument, e.g., `--model o3`
+2. as part of a profile, where the profile is selected via the `--profile` CLI flag (or the `profile` key in the config file itself)
+3. as an entry in `config.toml`, e.g., `model = "o3"`
+4. the default value that comes with Codex CLI (i.e., Codex CLI defaults to `codex-mini-latest`)
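+
+For example, given the `config.toml` above, the following invocations illustrate that precedence (hypothetical commands):
+
+```shell
+codex --profile gpt3             # model = "gpt-3.5-turbo" (from the profile)
+codex --profile gpt3 --model o3  # model = "o3" (the CLI flag beats the profile)
+codex                            # model = "o3" (profile = "o3" is set in config.toml)
+```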
+
+## model_reasoning_effort
+
+If the model name starts with `"o"` (as in `"o3"` or `"o4-mini"`) or `"codex"`, reasoning is enabled by default when using the Responses API. As explained in the [OpenAI Platform documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning), this can be set to:
+
+- `"low"`
+- `"medium"` (default)
+- `"high"`
+
+To disable reasoning, set `model_reasoning_effort` to `"none"` in your config:
+
+```toml
+model_reasoning_effort = "none"  # disable reasoning
+```
+
+## model_reasoning_summary
+
+If the model name starts with `"o"` (as in `"o3"` or `"o4-mini"`) or `"codex"`, reasoning is enabled by default when using the Responses API. As explained in the [OpenAI Platform documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries), this can be set to:
+
+- `"auto"` (default)
+- `"concise"`
+- `"detailed"`
+
+To disable reasoning summaries, set `model_reasoning_summary` to `"none"` in your config:
+
+```toml
+model_reasoning_summary = "none"  # disable reasoning summaries
+```
+
+## sandbox_permissions
+
+List of permissions to grant to the sandbox that Codex uses to execute untrusted commands:
+
+```toml
+# This is comparable to --full-auto in the TypeScript Codex CLI, though
+# specifying `disk-write-platform-global-temp-folder` adds /tmp as a writable
+# folder in addition to $TMPDIR.
+sandbox_permissions = [
+    "disk-full-read-access",
+    "disk-write-platform-user-temp-folder",
+    "disk-write-platform-global-temp-folder",
+    "disk-write-cwd",
+]
+```
+
+To add additional writable folders, use `disk-write-folder`, which takes a parameter (this can be specified multiple times):
+
+```toml
+sandbox_permissions = [
+    # ...
+    "disk-write-folder=/Users/mbolin/.pyenv/shims",
+]
+```
+
+## mcp_servers
+
+Defines the list of MCP servers that Codex can consult for tool use. Currently, only servers that are launched by executing a program that communicates over stdio are supported. For servers that use the SSE transport, consider an adapter like [mcp-proxy](https://github.com/sparfenyuk/mcp-proxy).
+
+**Note:** Codex may cache the list of tools and resources from an MCP server so that Codex can include this information in context at startup without spawning all the servers. This is designed to save resources by loading MCP servers lazily.
+
+This config option is comparable to how Claude and Cursor define `mcpServers` in their respective JSON config files, though because Codex uses TOML for its config language, the format is slightly different. For example, the following config in JSON:
+
+```json
+{
+  "mcpServers": {
+    "server-name": {
+      "command": "npx",
+      "args": ["-y", "mcp-server"],
+      "env": {
+        "API_KEY": "value"
+      }
+    }
+  }
+}
+```
+
+Should be represented as follows in `~/.codex/config.toml`:
+
+```toml
+# IMPORTANT: the top-level key is `mcp_servers` rather than `mcpServers`.
+[mcp_servers.server-name] +command = "npx" +args = ["-y", "mcp-server"] +env = { "API_KEY" = "value" } +``` + +## disable_response_storage + +Currently, customers whose accounts are set to use Zero Data Retention (ZDR) must set `disable_response_storage` to `true` so that Codex uses an alternative to the Responses API that works with ZDR: + +```toml +disable_response_storage = true +``` + +## shell_environment_policy + +Codex spawns subprocesses (e.g. when executing a `local_shell` tool-call suggested by the assistant). By default it passes **only a minimal core subset** of your environment to those subprocesses to avoid leaking credentials. You can tune this behavior via the **`shell_environment_policy`** block in +`config.toml`: + +```toml +[shell_environment_policy] +# inherit can be "core" (default), "all", or "none" +inherit = "core" +# set to true to *skip* the filter for `"*KEY*"` and `"*TOKEN*"` +ignore_default_excludes = false +# exclude patterns (case-insensitive globs) +exclude = ["AWS_*", "AZURE_*"] +# force-set / override values +set = { CI = "1" } +# if provided, *only* vars matching these patterns are kept +include_only = ["PATH", "HOME"] +``` + +| Field | Type | Default | Description | +| ------------------------- | -------------------------- | ------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | +| `inherit` | string | `core` | Starting template for the environment:
`core` (`HOME`, `PATH`, `USER`, …), `all` (clone full parent env), or `none` (start empty). | +| `ignore_default_excludes` | boolean | `false` | When `false`, Codex removes any var whose **name** contains `KEY`, `SECRET`, or `TOKEN` (case-insensitive) before other rules run. | +| `exclude` | array<string> | `[]` | Case-insensitive glob patterns to drop after the default filter.
Examples: `"AWS_*"`, `"AZURE_*"`. | +| `set` | table<string,string> | `{}` | Explicit key/value overrides or additions – always win over inherited values. | +| `include_only` | array<string> | `[]` | If non-empty, a whitelist of patterns; only variables that match _one_ pattern survive the final step. (Generally used with `inherit = "all"`.) | + +The patterns are **glob style**, not full regular expressions: `*` matches any +number of characters, `?` matches exactly one, and character classes like +`[A-Z]`/`[^0-9]` are supported. Matching is always **case-insensitive**. This +syntax is documented in code as `EnvironmentVariablePattern` (see +`core/src/config_types.rs`). + +If you just need a clean slate with a few custom entries you can write: + +```toml +[shell_environment_policy] +inherit = "none" +set = { PATH = "/usr/bin", MY_FLAG = "1" } +``` + +Currently, `CODEX_SANDBOX_NETWORK_DISABLED=1` is also added to the environment, assuming network is disabled. This is not configurable. + +## notify + +Specify a program that will be executed to get notified about events generated by Codex. Note that the program will receive the notification argument as a string of JSON, e.g.: + +```json +{ + "type": "agent-turn-complete", + "turn-id": "12345", + "input-messages": ["Rename `foo` to `bar` and update the callsites."], + "last-assistant-message": "Rename complete and verified `cargo build` succeeds." +} +``` + +The `"type"` property will always be set. Currently, `"agent-turn-complete"` is the only notification type that is supported. + +As an example, here is a Python script that parses the JSON and decides whether to show a desktop push notification using [terminal-notifier](https://github.com/julienXX/terminal-notifier) on macOS: + +```python +#!/usr/bin/env python3 + +import json +import subprocess +import sys + + +def main() -> int: + if len(sys.argv) != 2: + print("Usage: notify.py ") + return 1 + + try: + notification = json.loads(sys.argv[1]) + except json.JSONDecodeError: + return 1 + + match notification_type := notification.get("type"): + case "agent-turn-complete": + assistant_message = notification.get("last-assistant-message") + if assistant_message: + title = f"Codex: {assistant_message}" + else: + title = "Codex: Turn Complete!" + input_messages = notification.get("input_messages", []) + message = " ".join(input_messages) + title += message + case _: + print(f"not sending a push notification for: {notification_type}") + return 0 + + subprocess.check_output( + [ + "terminal-notifier", + "-title", + title, + "-message", + message, + "-group", + "codex", + "-ignoreDnD", + "-activate", + "com.googlecode.iterm2", + ] + ) + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) +``` + +To have Codex use this script for notifications, you would configure it via `notify` in `~/.codex/config.toml` using the appropriate path to `notify.py` on your computer: + +```toml +notify = ["python3", "/Users/mbolin/.codex/notify.py"] +``` + +## history + +By default, Codex CLI records messages sent to the model in `$CODEX_HOME/history.jsonl`. Note that on UNIX, the file permissions are set to `o600`, so it should only be readable and writable by the owner. + +To disable this behavior, configure `[history]` as follows: + +```toml +[history] +persistence = "none" # "save-all" is the default value +``` + +## file_opener + +Identifies the editor/URI scheme to use for hyperlinking citations in model output. 
If set, citations to files in the model output will be hyperlinked using the specified URI scheme so they can be ctrl/cmd-clicked from the terminal to open them.
+
+For example, if the model output includes a reference such as `【F:/home/user/project/main.py†L42-L50】`, then this would be rewritten to link to the URI `vscode://file/home/user/project/main.py:42`.
+
+Note this is **not** a general editor setting (like `$EDITOR`), as it only accepts a fixed set of values:
+
+- `"vscode"` (default)
+- `"vscode-insiders"`
+- `"windsurf"`
+- `"cursor"`
+- `"none"` to explicitly disable this feature
+
+Currently, `"vscode"` is the default, though Codex does not verify VS Code is installed. As such, `file_opener` may default to `"none"` or something else in the future.
+
+## hide_agent_reasoning
+
+Codex intermittently emits "reasoning" events that show the model’s internal "thinking" before it produces a final answer. Some users may find these events distracting, especially in CI logs or minimal terminal output.
+
+Setting `hide_agent_reasoning` to `true` suppresses these events in **both** the TUI and the headless `exec` subcommand:
+
+```toml
+hide_agent_reasoning = true  # defaults to false
+```
+
+## project_doc_max_bytes
+
+Maximum number of bytes to read from an `AGENTS.md` file to include in the instructions sent with the first turn of a session. Defaults to 32 KiB.
+
+## tui
+
+Options that are specific to the TUI.
+
+```toml
+[tui]
+# This will make it so that Codex does not try to process mouse events, which
+# means your terminal's native drag-to-select text selection and copy/paste
+# should work. The tradeoff is that Codex will not receive any mouse events, so
+# it will not be possible to use the mouse to scroll conversation history.
+#
+# Note that most terminals support holding down a modifier key when using the
+# mouse to support text selection. For example, even if Codex mouse capture is
+# enabled (i.e., this is set to `false`), you can still hold down alt while
+# dragging the mouse to select text.
+disable_mouse_capture = true  # defaults to `false`
+```
diff --git a/codex-rs/core/Cargo.toml b/codex-rs/core/Cargo.toml
index 468729498..a3ea33654 100644
--- a/codex-rs/core/Cargo.toml
+++ b/codex-rs/core/Cargo.toml
@@ -16,6 +16,7 @@ async-channel = "2.3.1"
 base64 = "0.21"
 bytes = "1.10.1"
 codex-apply-patch = { path = "../apply-patch" }
+codex-login = { path = "../login" }
 codex-mcp-client = { path = "../mcp-client" }
 dirs = "6"
 env-flags = "0.1.1"
@@ -31,6 +32,8 @@ rand = "0.9"
 reqwest = { version = "0.12", features = ["json", "stream"] }
 serde = { version = "1", features = ["derive"] }
 serde_json = "1"
+strum = "0.27.1"
+strum_macros = "0.27.1"
 thiserror = "2.0.12"
 time = { version = "0.3", features = ["formatting", "local-offset", "macros"] }
 tokio = { version = "1", features = [
@@ -56,6 +59,10 @@ seccompiler = "0.5.0"
 [target.x86_64-unknown-linux-musl.dependencies]
 openssl-sys = { version = "*", features = ["vendored"] }
 
+# Build OpenSSL from source for musl builds.
+[target.aarch64-unknown-linux-musl.dependencies] +openssl-sys = { version = "*", features = ["vendored"] } + [dev-dependencies] assert_cmd = "2" maplit = "1.0.2" diff --git a/codex-rs/core/src/chat_completions.rs b/codex-rs/core/src/chat_completions.rs index 7760c48fb..f381c72e5 100644 --- a/codex-rs/core/src/chat_completions.rs +++ b/codex-rs/core/src/chat_completions.rs @@ -25,10 +25,10 @@ use crate::flags::OPENAI_REQUEST_MAX_RETRIES; use crate::flags::OPENAI_STREAM_IDLE_TIMEOUT_MS; use crate::models::ContentItem; use crate::models::ResponseItem; +use crate::openai_tools::create_tools_json_for_chat_completions_api; use crate::util::backoff; -/// Implementation for the classic Chat Completions API. This is intentionally -/// minimal: we only stream back plain assistant text. +/// Implementation for the classic Chat Completions API. pub(crate) async fn stream_chat_completions( prompt: &Prompt, model: &str, @@ -38,35 +38,89 @@ pub(crate) async fn stream_chat_completions( // Build messages array let mut messages = Vec::::new(); - let full_instructions = prompt.get_full_instructions(); + let full_instructions = prompt.get_full_instructions(model); messages.push(json!({"role": "system", "content": full_instructions})); for item in &prompt.input { - if let ResponseItem::Message { role, content } = item { - let mut text = String::new(); - for c in content { - match c { - ContentItem::InputText { text: t } | ContentItem::OutputText { text: t } => { - text.push_str(t); + match item { + ResponseItem::Message { role, content } => { + let mut text = String::new(); + for c in content { + match c { + ContentItem::InputText { text: t } + | ContentItem::OutputText { text: t } => { + text.push_str(t); + } + _ => {} } - _ => {} } + messages.push(json!({"role": role, "content": text})); + } + ResponseItem::FunctionCall { + name, + arguments, + call_id, + } => { + messages.push(json!({ + "role": "assistant", + "content": null, + "tool_calls": [{ + "id": call_id, + "type": "function", + "function": { + "name": name, + "arguments": arguments, + } + }] + })); + } + ResponseItem::LocalShellCall { + id, + call_id: _, + status, + action, + } => { + // Confirm with API team. + messages.push(json!({ + "role": "assistant", + "content": null, + "tool_calls": [{ + "id": id.clone().unwrap_or_else(|| "".to_string()), + "type": "local_shell_call", + "status": status, + "action": action, + }] + })); + } + ResponseItem::FunctionCallOutput { call_id, output } => { + messages.push(json!({ + "role": "tool", + "tool_call_id": call_id, + "content": output.content, + })); + } + ResponseItem::Reasoning { .. } | ResponseItem::Other => { + // Omit these items from the conversation history. + continue; } - messages.push(json!({"role": role, "content": text})); } } + let tools_json = create_tools_json_for_chat_completions_api(prompt, model)?; let payload = json!({ "model": model, "messages": messages, - "stream": true + "stream": true, + "tools": tools_json, }); let base_url = provider.base_url.trim_end_matches('/'); let url = format!("{}/chat/completions", base_url); - debug!(url, "POST (chat)"); - trace!("request payload: {}", payload); + debug!( + "POST to {url}: {}", + serde_json::to_string_pretty(&payload).unwrap_or_default() + ); let api_key = provider.api_key()?; let mut attempt = 0; @@ -134,6 +188,21 @@ where let idle_timeout = *OPENAI_STREAM_IDLE_TIMEOUT_MS; + // State to accumulate a function call across streaming chunks. 
+ // OpenAI may split the `arguments` string over multiple `delta` events + // until the chunk whose `finish_reason` is `tool_calls` is emitted. We + // keep collecting the pieces here and forward a single + // `ResponseItem::FunctionCall` once the call is complete. + #[derive(Default)] + struct FunctionCallState { + name: Option, + arguments: String, + call_id: Option, + active: bool, + } + + let mut fn_call_state = FunctionCallState::default(); + loop { let sse = match timeout(idle_timeout, stream.next()).await { Ok(Some(Ok(ev))) => ev, @@ -173,23 +242,89 @@ where Ok(v) => v, Err(_) => continue, }; + trace!("chat_completions received SSE chunk: {chunk:?}"); + + let choice_opt = chunk.get("choices").and_then(|c| c.get(0)); + + if let Some(choice) = choice_opt { + // Handle assistant content tokens. + if let Some(content) = choice + .get("delta") + .and_then(|d| d.get("content")) + .and_then(|c| c.as_str()) + { + let item = ResponseItem::Message { + role: "assistant".to_string(), + content: vec![ContentItem::OutputText { + text: content.to_string(), + }], + }; + + let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await; + } + + // Handle streaming function / tool calls. + if let Some(tool_calls) = choice + .get("delta") + .and_then(|d| d.get("tool_calls")) + .and_then(|tc| tc.as_array()) + { + if let Some(tool_call) = tool_calls.first() { + // Mark that we have an active function call in progress. + fn_call_state.active = true; + + // Extract call_id if present. + if let Some(id) = tool_call.get("id").and_then(|v| v.as_str()) { + fn_call_state.call_id.get_or_insert_with(|| id.to_string()); + } + + // Extract function details if present. + if let Some(function) = tool_call.get("function") { + if let Some(name) = function.get("name").and_then(|n| n.as_str()) { + fn_call_state.name.get_or_insert_with(|| name.to_string()); + } + + if let Some(args_fragment) = + function.get("arguments").and_then(|a| a.as_str()) + { + fn_call_state.arguments.push_str(args_fragment); + } + } + } + } + + // Emit end-of-turn when finish_reason signals completion. + if let Some(finish_reason) = choice.get("finish_reason").and_then(|v| v.as_str()) { + match finish_reason { + "tool_calls" if fn_call_state.active => { + // Build the FunctionCall response item. + let item = ResponseItem::FunctionCall { + name: fn_call_state.name.clone().unwrap_or_else(|| "".to_string()), + arguments: fn_call_state.arguments.clone(), + call_id: fn_call_state.call_id.clone().unwrap_or_else(String::new), + }; + + // Emit it downstream. + let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await; + } + "stop" => { + // Regular turn without tool-call. + } + _ => {} + } - let content_opt = chunk - .get("choices") - .and_then(|c| c.get(0)) - .and_then(|c| c.get("delta")) - .and_then(|d| d.get("content")) - .and_then(|c| c.as_str()); - - if let Some(content) = content_opt { - let item = ResponseItem::Message { - role: "assistant".to_string(), - content: vec![ContentItem::OutputText { - text: content.to_string(), - }], - }; - - let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await; + // Emit Completed regardless of reason so the agent can advance. + let _ = tx_event + .send(Ok(ResponseEvent::Completed { + response_id: String::new(), + })) + .await; + + // Prepare for potential next turn (should not happen in same stream). + // fn_call_state = FunctionCallState::default(); + + return; // End processing for this SSE stream. 
+ } } } } @@ -236,9 +371,14 @@ where Poll::Ready(None) => return Poll::Ready(None), Poll::Ready(Some(Err(e))) => return Poll::Ready(Some(Err(e))), Poll::Ready(Some(Ok(ResponseEvent::OutputItemDone(item)))) => { - // Accumulate *assistant* text but do not emit yet. - if let crate::models::ResponseItem::Message { role, content } = &item { - if role == "assistant" { + // If this is an incremental assistant message chunk, accumulate but + // do NOT emit yet. Forward any other item (e.g. FunctionCall) right + // away so downstream consumers see it. + + let is_assistant_delta = matches!(&item, crate::models::ResponseItem::Message { role, .. } if role == "assistant"); + + if is_assistant_delta { + if let crate::models::ResponseItem::Message { content, .. } = &item { if let Some(text) = content.iter().find_map(|c| match c { crate::models::ContentItem::OutputText { text } => Some(text), _ => None, @@ -246,10 +386,13 @@ where this.cumulative.push_str(text); } } + + // Swallow partial assistant chunk; keep polling. + continue; } - // Swallow partial event; keep polling. - continue; + // Not an assistant message – forward immediately. + return Poll::Ready(Some(Ok(ResponseEvent::OutputItemDone(item)))); } Poll::Ready(Some(Ok(ResponseEvent::Completed { response_id }))) => { if !this.cumulative.is_empty() { diff --git a/codex-rs/core/src/client.rs b/codex-rs/core/src/client.rs index 57534e2f9..aff838887 100644 --- a/codex-rs/core/src/client.rs +++ b/codex-rs/core/src/client.rs @@ -1,7 +1,5 @@ -use std::collections::BTreeMap; use std::io::BufRead; use std::path::Path; -use std::sync::LazyLock; use std::time::Duration; use bytes::Bytes; @@ -11,7 +9,6 @@ use reqwest::StatusCode; use serde::Deserialize; use serde::Serialize; use serde_json::Value; -use serde_json::json; use tokio::sync::mpsc; use tokio::time::timeout; use tokio_util::io::ReaderStream; @@ -21,12 +18,13 @@ use tracing::warn; use crate::chat_completions::AggregateStreamExt; use crate::chat_completions::stream_chat_completions; -use crate::client_common::Payload; use crate::client_common::Prompt; -use crate::client_common::Reasoning; use crate::client_common::ResponseEvent; use crate::client_common::ResponseStream; -use crate::client_common::Summary; +use crate::client_common::ResponsesApiRequest; +use crate::client_common::create_reasoning_param_for_request; +use crate::config_types::ReasoningEffort as ReasoningEffortConfig; +use crate::config_types::ReasoningSummary as ReasoningSummaryConfig; use crate::error::CodexErr; use crate::error::EnvVarError; use crate::error::Result; @@ -36,84 +34,31 @@ use crate::flags::OPENAI_STREAM_IDLE_TIMEOUT_MS; use crate::model_provider_info::ModelProviderInfo; use crate::model_provider_info::WireApi; use crate::models::ResponseItem; +use crate::openai_tools::create_tools_json_for_responses_api; use crate::util::backoff; -/// When serialized as JSON, this produces a valid "Tool" in the OpenAI -/// Responses API. 
-/// When serialized as JSON, this produces a valid "Tool" in the OpenAI
-/// Responses API.
-#[derive(Debug, Clone, Serialize)]
-#[serde(tag = "type")]
-enum OpenAiTool {
-    #[serde(rename = "function")]
-    Function(ResponsesApiTool),
-    #[serde(rename = "local_shell")]
-    LocalShell {},
-}
-
-#[derive(Debug, Clone, Serialize)]
-struct ResponsesApiTool {
-    name: &'static str,
-    description: &'static str,
-    strict: bool,
-    parameters: JsonSchema,
-}
-
-/// Generic JSON‑Schema subset needed for our tool definitions
-#[derive(Debug, Clone, Serialize)]
-#[serde(tag = "type", rename_all = "lowercase")]
-enum JsonSchema {
-    String,
-    Number,
-    Array {
-        items: Box<JsonSchema>,
-    },
-    Object {
-        properties: BTreeMap<String, JsonSchema>,
-        required: &'static [&'static str],
-        #[serde(rename = "additionalProperties")]
-        additional_properties: bool,
-    },
-}
-
-/// Tool usage specification
-static DEFAULT_TOOLS: LazyLock<Vec<OpenAiTool>> = LazyLock::new(|| {
-    let mut properties = BTreeMap::new();
-    properties.insert(
-        "command".to_string(),
-        JsonSchema::Array {
-            items: Box::new(JsonSchema::String),
-        },
-    );
-    properties.insert("workdir".to_string(), JsonSchema::String);
-    properties.insert("timeout".to_string(), JsonSchema::Number);
-
-    vec![OpenAiTool::Function(ResponsesApiTool {
-        name: "shell",
-        description: "Runs a shell command, and returns its output.",
-        strict: false,
-        parameters: JsonSchema::Object {
-            properties,
-            required: &["command"],
-            additional_properties: false,
-        },
-    })]
-});
-
-static DEFAULT_CODEX_MODEL_TOOLS: LazyLock<Vec<OpenAiTool>> =
-    LazyLock::new(|| vec![OpenAiTool::LocalShell {}]);
-
 #[derive(Clone)]
 pub struct ModelClient {
     model: String,
     client: reqwest::Client,
     provider: ModelProviderInfo,
+    effort: ReasoningEffortConfig,
+    summary: ReasoningSummaryConfig,
 }
 
 impl ModelClient {
-    pub fn new(model: impl ToString, provider: ModelProviderInfo) -> Self {
+    pub fn new(
+        model: impl ToString,
+        provider: ModelProviderInfo,
+        effort: ReasoningEffortConfig,
+        summary: ReasoningSummaryConfig,
+    ) -> Self {
         Self {
             model: model.to_string(),
            client: reqwest::Client::new(),
             provider,
+            effort,
+            summary,
         }
     }
 
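A minimal sketch of the updated call site (it mirrors the `submission_loop` change in codex.rs later in this diff; the defaults come from the config types added below):

```rust
let client = ModelClient::new(
    config.model.clone(),
    config.model_provider.clone(),
    config.model_reasoning_effort,   // defaults to ReasoningEffort::Medium
    config.model_reasoning_summary,  // defaults to ReasoningSummary::Auto
);
```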
@@ -161,38 +106,17 @@ impl ModelClient {
             return stream_from_fixture(path).await;
         }
 
-        // Assemble tool list: built-in tools + any extra tools from the prompt.
-        let default_tools = if self.model.starts_with("codex") {
-            &DEFAULT_CODEX_MODEL_TOOLS
-        } else {
-            &DEFAULT_TOOLS
-        };
-        let mut tools_json = Vec::with_capacity(default_tools.len() + prompt.extra_tools.len());
-        for t in default_tools.iter() {
-            tools_json.push(serde_json::to_value(t)?);
-        }
-        tools_json.extend(
-            prompt
-                .extra_tools
-                .clone()
-                .into_iter()
-                .map(|(name, tool)| mcp_tool_to_openai_tool(name, tool)),
-        );
-
-        debug!("tools_json: {}", serde_json::to_string_pretty(&tools_json)?);
-
-        let full_instructions = prompt.get_full_instructions();
-        let payload = Payload {
+        let full_instructions = prompt.get_full_instructions(&self.model);
+        let tools_json = create_tools_json_for_responses_api(prompt, &self.model)?;
+        let reasoning = create_reasoning_param_for_request(&self.model, self.effort, self.summary);
+        let payload = ResponsesApiRequest {
             model: &self.model,
             instructions: &full_instructions,
             input: &prompt.input,
             tools: &tools_json,
             tool_choice: "auto",
             parallel_tool_calls: false,
-            reasoning: Some(Reasoning {
-                effort: "high",
-                summary: Some(Summary::Auto),
-            }),
+            reasoning,
             previous_response_id: prompt.prev_id.clone(),
             store: prompt.store,
             stream: true,
@@ -201,8 +125,7 @@ impl ModelClient {
         let base_url = self.provider.base_url.clone();
         let base_url = base_url.trim_end_matches('/');
         let url = format!("{}/responses", base_url);
-        debug!(url, "POST");
-        trace!("request payload: {}", serde_json::to_string(&payload)?);
+        trace!("POST to {url}: {}", serde_json::to_string(&payload)?);
 
         let mut attempt = 0;
         loop {
@@ -276,20 +199,6 @@ impl ModelClient {
     }
 }
 
-fn mcp_tool_to_openai_tool(
-    fully_qualified_name: String,
-    tool: mcp_types::Tool,
-) -> serde_json::Value {
-    // TODO(mbolin): Change the contract of this function to return
-    // ResponsesApiTool.
-    json!({
-        "name": fully_qualified_name,
-        "description": tool.description,
-        "parameters": tool.input_schema,
-        "type": "function",
-    })
-}
-
 #[derive(Debug, Deserialize, Serialize)]
 struct SseEvent {
     #[serde(rename = "type")]
@@ -401,6 +310,19 @@ where
                 };
             };
         }
+            "response.content_part.done"
+            | "response.created"
+            | "response.function_call_arguments.delta"
+            | "response.in_progress"
+            | "response.output_item.added"
+            | "response.output_text.delta"
+            | "response.output_text.done"
+            | "response.reasoning_summary_part.added"
+            | "response.reasoning_summary_text.delta"
+            | "response.reasoning_summary_text.done" => {
+                // Currently, we ignore these events, but we handle them
+                // separately to skip the logging message in the `other` case.
+            }
             other => debug!(other, "sse event"),
         }
     }
diff --git a/codex-rs/core/src/client_common.rs b/codex-rs/core/src/client_common.rs
index 8eb8074b1..a2633475d 100644
--- a/codex-rs/core/src/client_common.rs
+++ b/codex-rs/core/src/client_common.rs
@@ -1,5 +1,8 @@
+use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
+use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
 use crate::error::Result;
 use crate::models::ResponseItem;
+use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS;
 use futures::Stream;
 use serde::Serialize;
 use std::borrow::Cow;
@@ -22,7 +25,7 @@ pub struct Prompt {
     pub prev_id: Option<String>,
     /// Optional instructions from the user to amend to the built-in agent
     /// instructions.
-    pub instructions: Option<String>,
+    pub user_instructions: Option<String>,
     /// Whether to store response on server side (disable_response_storage = !store).
     pub store: bool,
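For orientation, roughly what a serialized `ResponsesApiRequest` body looks like on the wire. This is a sketch assembled from the struct fields above with invented values; optional fields such as `reasoning` are dropped when `None`:

```rust
let _body = serde_json::json!({
    "model": "codex-mini-latest",
    "instructions": "<base instructions + user instructions>",
    "input": [ /* serialized ResponseItems */ ],
    "tools": [ { "type": "local_shell" } ],
    "tool_choice": "auto",
    "parallel_tool_calls": false,
    "reasoning": { "effort": "medium", "summary": "auto" },
    "previous_response_id": null,
    "store": true,
    "stream": true
});
```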
@@ -33,14 +36,15 @@ pub struct Prompt {
 }
 
 impl Prompt {
-    pub(crate) fn get_full_instructions(&self) -> Cow<str> {
-        match &self.instructions {
-            Some(instructions) => {
-                let instructions = format!("{BASE_INSTRUCTIONS}\n{instructions}");
-                Cow::Owned(instructions)
-            }
-            None => Cow::Borrowed(BASE_INSTRUCTIONS),
+    pub(crate) fn get_full_instructions(&self, model: &str) -> Cow<str> {
+        let mut sections: Vec<&str> = vec![BASE_INSTRUCTIONS];
+        if let Some(ref user) = self.user_instructions {
+            sections.push(user);
         }
+        if model.starts_with("gpt-4.1") {
+            sections.push(APPLY_PATCH_TOOL_INSTRUCTIONS);
+        }
+        Cow::Owned(sections.join("\n"))
     }
 }
 
@@ -52,25 +56,59 @@ pub enum ResponseEvent {
 
 #[derive(Debug, Serialize)]
 pub(crate) struct Reasoning {
-    pub(crate) effort: &'static str,
+    pub(crate) effort: OpenAiReasoningEffort,
     #[serde(skip_serializing_if = "Option::is_none")]
-    pub(crate) summary: Option<Summary>,
+    pub(crate) summary: Option<OpenAiReasoningSummary>,
+}
+
+/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning
+#[derive(Debug, Serialize, Default, Clone, Copy)]
+#[serde(rename_all = "lowercase")]
+pub(crate) enum OpenAiReasoningEffort {
+    Low,
+    #[default]
+    Medium,
+    High,
+}
+
+impl From<ReasoningEffortConfig> for Option<OpenAiReasoningEffort> {
+    fn from(effort: ReasoningEffortConfig) -> Self {
+        match effort {
+            ReasoningEffortConfig::Low => Some(OpenAiReasoningEffort::Low),
+            ReasoningEffortConfig::Medium => Some(OpenAiReasoningEffort::Medium),
+            ReasoningEffortConfig::High => Some(OpenAiReasoningEffort::High),
+            ReasoningEffortConfig::None => None,
+        }
+    }
 }
 
 /// A summary of the reasoning performed by the model. This can be useful for
 /// debugging and understanding the model's reasoning process.
-#[derive(Debug, Serialize)]
+/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries
+#[derive(Debug, Serialize, Default, Clone, Copy)]
 #[serde(rename_all = "lowercase")]
-pub(crate) enum Summary {
+pub(crate) enum OpenAiReasoningSummary {
+    #[default]
     Auto,
-    #[allow(dead_code)] // Will go away once this is configurable.
     Concise,
-    #[allow(dead_code)] // Will go away once this is configurable.
     Detailed,
 }
 
+impl From<ReasoningSummaryConfig> for Option<OpenAiReasoningSummary> {
+    fn from(summary: ReasoningSummaryConfig) -> Self {
+        match summary {
+            ReasoningSummaryConfig::Auto => Some(OpenAiReasoningSummary::Auto),
+            ReasoningSummaryConfig::Concise => Some(OpenAiReasoningSummary::Concise),
+            ReasoningSummaryConfig::Detailed => Some(OpenAiReasoningSummary::Detailed),
+            ReasoningSummaryConfig::None => None,
+        }
+    }
+}
+
+/// Request object that is serialized as JSON and POST'ed when using the
+/// Responses API.
 #[derive(Debug, Serialize)]
-pub(crate) struct Payload<'a> {
+pub(crate) struct ResponsesApiRequest<'a> {
     pub(crate) model: &'a str,
     pub(crate) instructions: &'a str,
     // TODO(mbolin): ResponseItem::Other should not be serialized. Currently,
@@ -88,6 +126,40 @@ pub(crate) struct Payload<'a> {
     pub(crate) stream: bool,
 }
 
+pub(crate) fn create_reasoning_param_for_request(
+    model: &str,
+    effort: ReasoningEffortConfig,
+    summary: ReasoningSummaryConfig,
+) -> Option<Reasoning> {
+    let effort: Option<OpenAiReasoningEffort> = effort.into();
+    let effort = effort?;
+
+    if model_supports_reasoning_summaries(model) {
+        Some(Reasoning {
+            effort,
+            summary: summary.into(),
+        })
+    } else {
+        None
+    }
+}
+
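A sketch of the resulting behavior, written as illustrative assertions against the helpers above (the prefix check, `model_supports_reasoning_summaries`, is defined just below):

```rust
// effort = "none" drops the `reasoning` field from the request entirely.
assert!(create_reasoning_param_for_request(
    "o3", ReasoningEffortConfig::None, ReasoningSummaryConfig::Auto,
).is_none());

// Models failing the prefix check (e.g. gpt-4.1) never get a reasoning param.
assert!(create_reasoning_param_for_request(
    "gpt-4.1", ReasoningEffortConfig::High, ReasoningSummaryConfig::Auto,
).is_none());

// An "o"/"codex" model with a real effort yields Some(Reasoning { .. }).
assert!(create_reasoning_param_for_request(
    "codex-mini-latest", ReasoningEffortConfig::Medium, ReasoningSummaryConfig::Auto,
).is_some());
```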
+pub fn model_supports_reasoning_summaries(model: &str) -> bool {
+    // Currently, we hardcode this rule to decide whether to enable reasoning.
+    // We expect reasoning to apply only to OpenAI models, but we do not want
+    // users to have to mess with their config to disable reasoning for models
+    // that do not support it, such as `gpt-4.1`.
+    //
+    // Though if a user is using Codex with non-OpenAI models that, say, happen
+    // to start with "o", then they can set `model_reasoning_effort = "none"` in
+    // config.toml to disable reasoning.
+    //
+    // Ultimately, this should also be configurable in config.toml, but we
+    // need to have defaults that "just work." Perhaps we could have a
+    // "reasoning models pattern" as part of ModelProviderInfo?
+    model.starts_with("o") || model.starts_with("codex")
+}
+
 pub(crate) struct ResponseStream {
     pub(crate) rx_event: mpsc::Receiver<Result<ResponseEvent>>,
 }
diff --git a/codex-rs/core/src/codex.rs b/codex-rs/core/src/codex.rs
index 2699a9ce7..2837dd032 100644
--- a/codex-rs/core/src/codex.rs
+++ b/codex-rs/core/src/codex.rs
@@ -20,6 +20,7 @@ use codex_apply_patch::MaybeApplyPatchVerified;
 use codex_apply_patch::maybe_parse_apply_patch_verified;
 use codex_apply_patch::print_summary;
 use futures::prelude::*;
+use mcp_types::CallToolResult;
 use serde::Serialize;
 use serde_json;
 use tokio::sync::Notify;
@@ -58,7 +59,7 @@ use crate::models::ReasoningItemReasoningSummary;
 use crate::models::ResponseInputItem;
 use crate::models::ResponseItem;
 use crate::models::ShellToolCallParams;
-use crate::project_doc::create_full_instructions;
+use crate::project_doc::get_user_instructions;
 use crate::protocol::AgentMessageEvent;
 use crate::protocol::AgentReasoningEvent;
 use crate::protocol::ApplyPatchApprovalRequestEvent;
@@ -103,10 +104,12 @@ impl Codex {
         let (tx_sub, rx_sub) = async_channel::bounded(64);
         let (tx_event, rx_event) = async_channel::bounded(64);
 
-        let instructions = create_full_instructions(&config).await;
+        let instructions = get_user_instructions(&config).await;
         let configure_session = Op::ConfigureSession {
             provider: config.model_provider.clone(),
             model: config.model.clone(),
+            model_reasoning_effort: config.model_reasoning_effort,
+            model_reasoning_summary: config.model_reasoning_summary,
             instructions,
             approval_policy: config.approval_policy,
             sandbox_policy: config.sandbox_policy.clone(),
@@ -295,6 +298,17 @@ impl Session {
         state.approved_commands.insert(cmd);
     }
 
+    /// Records items to both the rollout and the chat completions/ZDR
+    /// transcript, if enabled.
+    async fn record_conversation_items(&self, items: &[ResponseItem]) {
+        debug!("Recording items for conversation: {items:?}");
+        self.record_rollout_items(items).await;
+
+        if let Some(transcript) = self.state.lock().unwrap().zdr_transcript.as_mut() {
+            transcript.record_items(items);
+        }
+    }
+
     /// Append the given items to the session's rollout transcript (if enabled)
     /// and persist them to disk.
     async fn record_rollout_items(&self, items: &[ResponseItem]) {
@@ -388,7 +402,7 @@ impl Session {
         tool: &str,
         arguments: Option<serde_json::Value>,
         timeout: Option<Duration>,
-    ) -> anyhow::Result<mcp_types::CallToolResult> {
+    ) -> anyhow::Result<CallToolResult> {
         self.mcp_connection_manager
             .call_tool(server, tool, arguments, timeout)
             .await
@@ -542,6 +556,8 @@ async fn submission_loop(
             Op::ConfigureSession {
                 provider,
                 model,
+                model_reasoning_effort,
+                model_reasoning_summary,
                 instructions,
                 approval_policy,
                 sandbox_policy,
@@ -563,7 +579,12 @@ async fn submission_loop(
                     return;
                 }
 
-                let client = ModelClient::new(model.clone(), provider.clone());
+                let client = ModelClient::new(
+                    model.clone(),
+                    provider.clone(),
+                    model_reasoning_effort,
+                    model_reasoning_summary,
+                );
 
                 // abort any current running session and clone its state
                 let retain_zdr_transcript =
@@ -760,6 +781,19 @@ async fn submission_loop(
     debug!("Agent loop exited");
 }
 
+/// Takes a user message as input and runs a loop where, at each turn, the model
+/// replies with either:
+///
+/// - requested function calls
+/// - an assistant message
+///
+/// While it is possible for the model to return multiple of these items in a
+/// single turn, in practice, we generally see one item per turn:
+///
+/// - If the model requests a function call, we execute it and send the output
+///   back to the model in the next turn.
+/// - If the model sends only an assistant message, we record it in the
+///   conversation history and consider the task complete.
 async fn run_task(sess: Arc<Session>, sub_id: String, input: Vec<InputItem>) {
     if input.is_empty() {
         return;
@@ -772,10 +806,14 @@ async fn run_task(sess: Arc<Session>, sub_id: String, input: Vec<InputItem>) {
         return;
     }
 
-    let mut pending_response_input: Vec<ResponseInputItem> = vec![ResponseInputItem::from(input)];
+    let initial_input_for_turn = ResponseInputItem::from(input);
+    sess.record_conversation_items(&[initial_input_for_turn.clone().into()])
+        .await;
+
+    let mut input_for_next_turn: Vec<ResponseInputItem> = vec![initial_input_for_turn];
     let last_agent_message: Option<String>;
     loop {
-        let mut net_new_turn_input = pending_response_input
+        let mut net_new_turn_input = input_for_next_turn
             .drain(..)
             .map(ResponseItem::from)
             .collect::<Vec<ResponseItem>>();
@@ -783,11 +821,12 @@ async fn run_task(sess: Arc<Session>, sub_id: String, input: Vec<InputItem>) {
         // Note that pending_input would be something like a message the user
         // submitted through the UI while the model was running. Though the UI
         // may support this, the model might not.
-        let pending_input = sess.get_pending_input().into_iter().map(ResponseItem::from);
-        net_new_turn_input.extend(pending_input);
-
-        // Persist only the net-new items of this turn to the rollout.
-        sess.record_rollout_items(&net_new_turn_input).await;
+        let pending_input = sess
+            .get_pending_input()
+            .into_iter()
+            .map(ResponseItem::from)
+            .collect::<Vec<ResponseItem>>();
+        sess.record_conversation_items(&pending_input).await;
 
         // Construct the input that we will send to the model. When using the
         // Chat completions API (or ZDR clients), the model needs the full
@@ -796,20 +835,24 @@ async fn run_task(sess: Arc<Session>, sub_id: String, input: Vec<InputItem>) {
         // represents an append-only log without duplicates.
         let turn_input: Vec<ResponseItem> =
             if let Some(transcript) = sess.state.lock().unwrap().zdr_transcript.as_mut() {
-                // If we are using Chat/ZDR, we need to send the transcript with every turn.
-
-                // 1. Build up the conversation history for the next turn.
-                let full_transcript = [transcript.contents(), net_new_turn_input.clone()].concat();
-
-                // 2. Update the in-memory transcript so that future turns
-                //    include these items as part of the history.
-                transcript.record_items(&net_new_turn_input);
-
-                // Note that `transcript.record_items()` does some filtering
-                // such that `full_transcript` may include items that were
-                // excluded from `transcript`.
-                full_transcript
+                // If we are using Chat/ZDR, we need to send the transcript with
+                // every turn. By induction, `transcript` already contains:
+                // - The `input` that kicked off this task.
+                // - Each `ResponseItem` that was recorded in the previous turn.
+                // - Each response to a `ResponseItem` (in practice, the only
+                //   response type we seem to have is `FunctionCallOutput`).
+                //
+                // The only thing the `transcript` does not contain is the
+                // `pending_input` that was injected while the model was
+                // running. We need to add that to the conversation history
+                // so that the model can see it in the next turn.
                [transcript.contents(), pending_input].concat()
             } else {
+                // In practice, net_new_turn_input should contain only:
+                // - User messages
+                // - Outputs for function calls requested by the model
+                net_new_turn_input.extend(pending_input);
+
                 // Responses API path – we can just send the new items and
                 // record the same.
                 net_new_turn_input
@@ -830,29 +873,86 @@ async fn run_task(sess: Arc<Session>, sub_id: String, input: Vec<InputItem>) {
             .collect();
         match run_turn(&sess, sub_id.clone(), turn_input).await {
             Ok(turn_output) => {
-                let (items, responses): (Vec<_>, Vec<_>) = turn_output
-                    .into_iter()
-                    .map(|p| (p.item, p.response))
-                    .unzip();
-                let responses = responses
-                    .into_iter()
-                    .flatten()
-                    .collect::<Vec<ResponseInputItem>>();
+                let mut items_to_record_in_conversation_history = Vec::<ResponseItem>::new();
+                let mut responses = Vec::<ResponseInputItem>::new();
+                for processed_response_item in turn_output {
+                    let ProcessedResponseItem { item, response } = processed_response_item;
+                    match (&item, &response) {
+                        (ResponseItem::Message { role, .. }, None) if role == "assistant" => {
+                            // If the model returned a message, we need to record it.
+                            items_to_record_in_conversation_history.push(item);
+                        }
+                        (
+                            ResponseItem::LocalShellCall { .. },
+                            Some(ResponseInputItem::FunctionCallOutput { call_id, output }),
+                        ) => {
+                            items_to_record_in_conversation_history.push(item);
+                            items_to_record_in_conversation_history.push(
+                                ResponseItem::FunctionCallOutput {
+                                    call_id: call_id.clone(),
+                                    output: output.clone(),
+                                },
+                            );
+                        }
+                        (
+                            ResponseItem::FunctionCall { .. },
+                            Some(ResponseInputItem::FunctionCallOutput { call_id, output }),
+                        ) => {
+                            items_to_record_in_conversation_history.push(item);
+                            items_to_record_in_conversation_history.push(
+                                ResponseItem::FunctionCallOutput {
+                                    call_id: call_id.clone(),
+                                    output: output.clone(),
+                                },
+                            );
+                        }
+                        (
+                            ResponseItem::FunctionCall { .. },
+                            Some(ResponseInputItem::McpToolCallOutput { call_id, result }),
+                        ) => {
+                            items_to_record_in_conversation_history.push(item);
+                            let (content, success): (String, Option<bool>) = match result {
+                                Ok(CallToolResult { content, is_error }) => {
+                                    match serde_json::to_string(content) {
+                                        Ok(content) => (content, *is_error),
+                                        Err(e) => {
+                                            warn!("Failed to serialize MCP tool call output: {e}");
+                                            (e.to_string(), Some(true))
+                                        }
+                                    }
+                                }
+                                Err(e) => (e.clone(), Some(true)),
+                            };
+                            items_to_record_in_conversation_history.push(
+                                ResponseItem::FunctionCallOutput {
+                                    call_id: call_id.clone(),
+                                    output: FunctionCallOutputPayload { content, success },
+                                },
+                            );
+                        }
+                        (ResponseItem::Reasoning { .. }, None) => {
+                            // Omit from conversation history.
+                        }
+                        _ => {
+                            warn!("Unexpected response item: {item:?} with response: {response:?}");
+                        }
+                    };
+                    if let Some(response) = response {
+                        responses.push(response);
+                    }
+                }
 
                 // Only attempt to take the lock if there is something to record.
-                if !items.is_empty() {
-                    // First persist model-generated output to the rollout file – this only borrows.
-                    sess.record_rollout_items(&items).await;
-
-                    // For ZDR we also need to keep a transcript clone.
-                    if let Some(transcript) = sess.state.lock().unwrap().zdr_transcript.as_mut() {
-                        transcript.record_items(&items);
-                    }
+                if !items_to_record_in_conversation_history.is_empty() {
+                    sess.record_conversation_items(&items_to_record_in_conversation_history)
+                        .await;
                 }
 
                 if responses.is_empty() {
                     debug!("Turn completed");
-                    last_agent_message = get_last_assistant_message_from_turn(&items);
+                    last_agent_message = get_last_assistant_message_from_turn(
+                        &items_to_record_in_conversation_history,
+                    );
                     sess.maybe_notify(UserNotification::AgentTurnComplete {
                         turn_id: sub_id.clone(),
                         input_messages: turn_input_messages,
@@ -861,7 +961,7 @@ async fn run_task(sess: Arc<Session>, sub_id: String, input: Vec<InputItem>) {
                     break;
                 }
 
-                pending_response_input = responses;
+                input_for_next_turn = responses;
             }
             Err(e) => {
                 info!("Turn error: {e:#}");
@@ -890,9 +990,8 @@ async fn run_turn(
     input: Vec<ResponseItem>,
 ) -> CodexResult<Vec<ProcessedResponseItem>> {
     // Decide whether to use server-side storage (previous_response_id) or disable it
-    let (prev_id, store, is_first_turn) = {
+    let (prev_id, store) = {
         let state = sess.state.lock().unwrap();
-        let is_first_turn = state.previous_response_id.is_none();
         let store = state.zdr_transcript.is_none();
         let prev_id = if store {
             state.previous_response_id.clone()
@@ -901,20 +1000,14 @@ async fn run_turn(
             // back, but trying to use it results in a 400.
             None
         };
-        (prev_id, store, is_first_turn)
-    };
-
-    let instructions = if is_first_turn {
-        sess.instructions.clone()
-    } else {
-        None
+        (prev_id, store)
     };
 
     let extra_tools = sess.mcp_connection_manager.list_all_tools();
     let prompt = Prompt {
         input,
         prev_id,
-        instructions,
+        user_instructions: sess.instructions.clone(),
         store,
         extra_tools,
     };
diff --git a/codex-rs/core/src/config.rs b/codex-rs/core/src/config.rs
index d643d0066..74798129b 100644
--- a/codex-rs/core/src/config.rs
+++ b/codex-rs/core/src/config.rs
@@ -1,6 +1,8 @@
 use crate::config_profile::ConfigProfile;
 use crate::config_types::History;
 use crate::config_types::McpServerConfig;
+use crate::config_types::ReasoningEffort;
+use crate::config_types::ReasoningSummary;
 use crate::config_types::ShellEnvironmentPolicy;
 use crate::config_types::ShellEnvironmentPolicyToml;
 use crate::config_types::Tui;
@@ -16,6 +18,7 @@ use serde::Deserialize;
 use std::collections::HashMap;
 use std::path::Path;
 use std::path::PathBuf;
+use toml::Value as TomlValue;
 
 /// Maximum number of bytes of the documentation that will be embedded. Larger
 /// files are *silently truncated* to this size so we do not take up too much of
@@ -41,6 +44,11 @@ pub struct Config {
 
     pub shell_environment_policy: ShellEnvironmentPolicy,
 
+    /// When `true`, `AgentReasoning` events emitted by the backend will be
+    /// suppressed from the frontend output. This can reduce visual noise when
+    /// users are only interested in the final agent responses.
+    pub hide_agent_reasoning: bool,
+
     /// Disable server-side response storage (sends the full conversation
     /// context with every request). Currently necessary for OpenAI customers
     /// who have opted into Zero Data Retention (ZDR).
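In sketch form, how the new Config knobs introduced here flow through the pieces touched by this diff:

```rust
// config.hide_agent_reasoning    -> exec's EventProcessor (AgentReasoning
//                                   events are skipped when true)
// config.model_reasoning_effort  -> Op::ConfigureSession -> ModelClient
// config.model_reasoning_summary -> Op::ConfigureSession -> ModelClient
//                                   -> create_reasoning_param_for_request
```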
@@ -106,6 +114,116 @@ pub struct Config {
     ///
     /// When this program is invoked, arg0 will be set to `codex-linux-sandbox`.
     pub codex_linux_sandbox_exe: Option<PathBuf>,
+
+    /// If not "none", the value to use for `reasoning.effort` when making a
+    /// request using the Responses API.
+    pub model_reasoning_effort: ReasoningEffort,
+
+    /// If not "none", the value to use for `reasoning.summary` when making a
+    /// request using the Responses API.
+    pub model_reasoning_summary: ReasoningSummary,
+}
+
+impl Config {
+    /// Load configuration with *generic* CLI overrides (`-c key=value`) applied
+    /// **in between** the values parsed from `config.toml` and the
+    /// strongly-typed overrides specified via [`ConfigOverrides`].
+    ///
+    /// The precedence order is therefore: `config.toml` < `-c` overrides <
+    /// `ConfigOverrides`.
+    pub fn load_with_cli_overrides(
+        cli_overrides: Vec<(String, TomlValue)>,
+        overrides: ConfigOverrides,
+    ) -> std::io::Result<Self> {
+        // Resolve the directory that stores Codex state (e.g. ~/.codex or the
+        // value of $CODEX_HOME) so we can embed it into the resulting
+        // `Config` instance.
+        let codex_home = find_codex_home()?;
+
+        // Step 1: parse `config.toml` into a generic TOML value.
+        let mut root_value = load_config_as_toml(&codex_home)?;
+
+        // Step 2: apply the `-c` overrides.
+        for (path, value) in cli_overrides.into_iter() {
+            apply_toml_override(&mut root_value, &path, value);
+        }
+
+        // Step 3: deserialize into `ConfigToml` so that Serde can enforce the
+        // correct types.
+        let cfg: ConfigToml = root_value.try_into().map_err(|e| {
+            tracing::error!("Failed to deserialize overridden config: {e}");
+            std::io::Error::new(std::io::ErrorKind::InvalidData, e)
+        })?;
+
+        // Step 4: merge with the strongly-typed overrides.
+        Self::load_from_base_config_with_overrides(cfg, overrides, codex_home)
+    }
+}
+
+/// Read `CODEX_HOME/config.toml` and return it as a generic TOML value. Returns
+/// an empty TOML table when the file does not exist.
+fn load_config_as_toml(codex_home: &Path) -> std::io::Result<TomlValue> {
+    let config_path = codex_home.join("config.toml");
+    match std::fs::read_to_string(&config_path) {
+        Ok(contents) => match toml::from_str::<TomlValue>(&contents) {
+            Ok(val) => Ok(val),
+            Err(e) => {
+                tracing::error!("Failed to parse config.toml: {e}");
+                Err(std::io::Error::new(std::io::ErrorKind::InvalidData, e))
+            }
+        },
+        Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
+            tracing::info!("config.toml not found, using defaults");
+            Ok(TomlValue::Table(Default::default()))
+        }
+        Err(e) => {
+            tracing::error!("Failed to read config.toml: {e}");
+            Err(e)
+        }
+    }
+}
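A sketch of the dotted-path behavior that `apply_toml_override`, defined next, implements (the key used here is hypothetical):

```rust
let mut root = TomlValue::Table(Default::default());
apply_toml_override(
    &mut root,
    "mcp_servers.docs.command",
    TomlValue::String("npx".to_string()),
);
// root now holds: { mcp_servers: { docs: { command: "npx" } } }
```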
+
+/// Apply a single dotted-path override onto a TOML value.
+fn apply_toml_override(root: &mut TomlValue, path: &str, value: TomlValue) {
+    use toml::value::Table;
+
+    let segments: Vec<&str> = path.split('.').collect();
+    let mut current = root;
+
+    for (idx, segment) in segments.iter().enumerate() {
+        let is_last = idx == segments.len() - 1;
+
+        if is_last {
+            match current {
+                TomlValue::Table(table) => {
+                    table.insert(segment.to_string(), value);
+                }
+                _ => {
+                    let mut table = Table::new();
+                    table.insert(segment.to_string(), value);
+                    *current = TomlValue::Table(table);
+                }
+            }
+            return;
+        }
+
+        // Traverse or create intermediate object.
+        match current {
+            TomlValue::Table(table) => {
+                current = table
+                    .entry(segment.to_string())
+                    .or_insert_with(|| TomlValue::Table(Table::new()));
+            }
+            _ => {
+                *current = TomlValue::Table(Table::new());
+                if let TomlValue::Table(tbl) = current {
+                    current = tbl
+                        .entry(segment.to_string())
+                        .or_insert_with(|| TomlValue::Table(Table::new()));
+                }
+            }
+        }
+    }
+}
 
 /// Base config deserialized from ~/.codex/config.toml.
@@ -169,29 +287,13 @@ pub struct ConfigToml {
 
     /// Collection of settings that are specific to the TUI.
     pub tui: Option<Tui>,
-}
 
-impl ConfigToml {
-    /// Attempt to parse the file at `~/.codex/config.toml`. If it does not
-    /// exist, return a default config. Though if it exists and cannot be
-    /// parsed, report that to the user and force them to fix it.
-    fn load_from_toml(codex_home: &Path) -> std::io::Result<Self> {
-        let config_toml_path = codex_home.join("config.toml");
-        match std::fs::read_to_string(&config_toml_path) {
-            Ok(contents) => toml::from_str::<Self>(&contents).map_err(|e| {
-                tracing::error!("Failed to parse config.toml: {e}");
-                std::io::Error::new(std::io::ErrorKind::InvalidData, e)
-            }),
-            Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
-                tracing::info!("config.toml not found, using defaults");
-                Ok(Self::default())
-            }
-            Err(e) => {
-                tracing::error!("Failed to read config.toml: {e}");
-                Err(e)
-            }
-        }
-    }
+    /// When set to `true`, `AgentReasoning` events will be hidden from the
+    /// UI/output. Defaults to `false`.
+    pub hide_agent_reasoning: Option<bool>,
+
+    pub model_reasoning_effort: Option<ReasoningEffort>,
+    pub model_reasoning_summary: Option<ReasoningSummary>,
 }
 
 fn deserialize_sandbox_permissions<'de, D>(
@@ -227,28 +329,12 @@ pub struct ConfigOverrides {
     pub cwd: Option<PathBuf>,
     pub approval_policy: Option<AskForApproval>,
     pub sandbox_policy: Option<SandboxPolicy>,
-    pub disable_response_storage: Option<bool>,
     pub model_provider: Option<String>,
     pub config_profile: Option<String>,
     pub codex_linux_sandbox_exe: Option<PathBuf>,
 }
 
 impl Config {
-    /// Load configuration, optionally applying overrides (CLI flags). Merges
-    /// ~/.codex/config.toml, ~/.codex/instructions.md, embedded defaults, and
-    /// any values provided in `overrides` (highest precedence).
-    pub fn load_with_overrides(overrides: ConfigOverrides) -> std::io::Result<Self> {
-        // Resolve the directory that stores Codex state (e.g. ~/.codex or the
-        // value of $CODEX_HOME) so we can embed it into the resulting
-        // `Config` instance.
-        let codex_home = find_codex_home()?;
-
-        let cfg: ConfigToml = ConfigToml::load_from_toml(&codex_home)?;
-        tracing::warn!("Config parsed from config.toml: {cfg:?}");
-
-        Self::load_from_base_config_with_overrides(cfg, overrides, codex_home)
-    }
-
-    /// Meant to be used exclusively for tests: `load_with_overrides()` should
     /// be used in all other cases.
pub fn load_from_base_config_with_overrides( @@ -264,7 +350,6 @@ impl Config { cwd, approval_policy, sandbox_policy, - disable_response_storage, model_provider, config_profile: config_profile_key, codex_linux_sandbox_exe, @@ -356,8 +441,8 @@ impl Config { .unwrap_or_else(AskForApproval::default), sandbox_policy, shell_environment_policy, - disable_response_storage: disable_response_storage - .or(config_profile.disable_response_storage) + disable_response_storage: config_profile + .disable_response_storage .or(cfg.disable_response_storage) .unwrap_or(false), notify: cfg.notify, @@ -370,6 +455,10 @@ impl Config { file_opener: cfg.file_opener.unwrap_or(UriBasedFileOpener::VsCode), tui: cfg.tui.unwrap_or_default(), codex_linux_sandbox_exe, + + hide_agent_reasoning: cfg.hide_agent_reasoning.unwrap_or(false), + model_reasoning_effort: cfg.model_reasoning_effort.unwrap_or_default(), + model_reasoning_summary: cfg.model_reasoning_summary.unwrap_or_default(), }; Ok(config) } @@ -711,6 +800,9 @@ disable_response_storage = true file_opener: UriBasedFileOpener::VsCode, tui: Tui::default(), codex_linux_sandbox_exe: None, + hide_agent_reasoning: false, + model_reasoning_effort: ReasoningEffort::default(), + model_reasoning_summary: ReasoningSummary::default(), }, o3_profile_config ); @@ -750,6 +842,9 @@ disable_response_storage = true file_opener: UriBasedFileOpener::VsCode, tui: Tui::default(), codex_linux_sandbox_exe: None, + hide_agent_reasoning: false, + model_reasoning_effort: ReasoningEffort::default(), + model_reasoning_summary: ReasoningSummary::default(), }; assert_eq!(expected_gpt3_profile_config, gpt3_profile_config); @@ -804,6 +899,9 @@ disable_response_storage = true file_opener: UriBasedFileOpener::VsCode, tui: Tui::default(), codex_linux_sandbox_exe: None, + hide_agent_reasoning: false, + model_reasoning_effort: ReasoningEffort::default(), + model_reasoning_summary: ReasoningSummary::default(), }; assert_eq!(expected_zdr_profile_config, zdr_profile_config); diff --git a/codex-rs/core/src/config_types.rs b/codex-rs/core/src/config_types.rs index 6696f76f0..a7152d146 100644 --- a/codex-rs/core/src/config_types.rs +++ b/codex-rs/core/src/config_types.rs @@ -4,9 +4,11 @@ // definitions that do not contain business logic. use std::collections::HashMap; +use strum_macros::Display; use wildmatch::WildMatchPattern; use serde::Deserialize; +use serde::Serialize; #[derive(Deserialize, Debug, Clone, PartialEq)] pub struct McpServerConfig { @@ -89,7 +91,7 @@ pub struct Tui { } #[derive(Deserialize, Debug, Clone, PartialEq, Default)] - +#[serde(rename_all = "kebab-case")] pub enum ShellEnvironmentPolicyInherit { /// "Core" environment variables for the platform. On UNIX, this would /// include HOME, LOGNAME, PATH, SHELL, and USER, among others. @@ -175,3 +177,31 @@ impl From for ShellEnvironmentPolicy { } } } + +/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning +#[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display)] +#[serde(rename_all = "lowercase")] +#[strum(serialize_all = "lowercase")] +pub enum ReasoningEffort { + Low, + #[default] + Medium, + High, + /// Option to disable reasoning. + None, +} + +/// A summary of the reasoning performed by the model. This can be useful for +/// debugging and understanding the model's reasoning process. 
+/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries +#[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display)] +#[serde(rename_all = "lowercase")] +#[strum(serialize_all = "lowercase")] +pub enum ReasoningSummary { + #[default] + Auto, + Concise, + Detailed, + /// Option to disable reasoning summaries. + None, +} diff --git a/codex-rs/core/src/flags.rs b/codex-rs/core/src/flags.rs index e8cc973c9..c21ef6702 100644 --- a/codex-rs/core/src/flags.rs +++ b/codex-rs/core/src/flags.rs @@ -3,7 +3,7 @@ use std::time::Duration; use env_flags::env_flags; env_flags! { - pub OPENAI_DEFAULT_MODEL: &str = "o4-mini"; + pub OPENAI_DEFAULT_MODEL: &str = "codex-mini-latest"; pub OPENAI_API_BASE: &str = "https://api.openai.com/v1"; /// Fallback when the provider-specific key is not set. diff --git a/codex-rs/core/src/lib.rs b/codex-rs/core/src/lib.rs index 8398ff765..16cf19058 100644 --- a/codex-rs/core/src/lib.rs +++ b/codex-rs/core/src/lib.rs @@ -27,9 +27,13 @@ mod model_provider_info; pub use model_provider_info::ModelProviderInfo; pub use model_provider_info::WireApi; mod models; +pub mod openai_api_key; +mod openai_tools; mod project_doc; pub mod protocol; mod rollout; mod safety; mod user_notification; pub mod util; + +pub use client_common::model_supports_reasoning_summaries; diff --git a/codex-rs/core/src/mcp_tool_call.rs b/codex-rs/core/src/mcp_tool_call.rs index 4da5b2b77..61a51a0e7 100644 --- a/codex-rs/core/src/mcp_tool_call.rs +++ b/codex-rs/core/src/mcp_tool_call.rs @@ -50,51 +50,18 @@ pub(crate) async fn handle_mcp_tool_call( notify_mcp_tool_call_event(sess, sub_id, tool_call_begin_event).await; // Perform the tool call. - let (tool_call_end_event, tool_call_err) = match sess + let result = sess .call_tool(&server, &tool_name, arguments_value, timeout) .await - { - Ok(result) => ( - EventMsg::McpToolCallEnd(McpToolCallEndEvent { - call_id, - success: !result.is_error.unwrap_or(false), - result: Some(result), - }), - None, - ), - Err(e) => ( - EventMsg::McpToolCallEnd(McpToolCallEndEvent { - call_id, - success: false, - result: None, - }), - Some(e), - ), - }; + .map_err(|e| format!("tool call error: {e}")); + let tool_call_end_event = EventMsg::McpToolCallEnd(McpToolCallEndEvent { + call_id: call_id.clone(), + result: result.clone(), + }); notify_mcp_tool_call_event(sess, sub_id, tool_call_end_event.clone()).await; - let EventMsg::McpToolCallEnd(McpToolCallEndEvent { - call_id, - success, - result, - }) = tool_call_end_event - else { - unimplemented!("unexpected event type"); - }; - ResponseInputItem::FunctionCallOutput { - call_id, - output: FunctionCallOutputPayload { - content: result.map_or_else( - || format!("err: {tool_call_err:?}"), - |result| { - serde_json::to_string(&result) - .unwrap_or_else(|e| format!("JSON serialization error: {e}")) - }, - ), - success: Some(success), - }, - } + ResponseInputItem::McpToolCallOutput { call_id, result } } async fn notify_mcp_tool_call_event(sess: &Session, sub_id: &str, event: EventMsg) { diff --git a/codex-rs/core/src/model_provider_info.rs b/codex-rs/core/src/model_provider_info.rs index 186e28d34..44b406c98 100644 --- a/codex-rs/core/src/model_provider_info.rs +++ b/codex-rs/core/src/model_provider_info.rs @@ -11,6 +11,7 @@ use std::collections::HashMap; use std::env::VarError; use crate::error::EnvVarError; +use crate::openai_api_key::get_openai_api_key; /// Wire protocol that the provider speaks. 
 /// Most third-party services only
 /// implement the classic OpenAI Chat Completions JSON schema, whereas OpenAI
@@ -52,20 +53,27 @@ impl ModelProviderInfo {
     /// cannot be found, returns an error.
     pub fn api_key(&self) -> crate::error::Result<Option<String>> {
         match &self.env_key {
-            Some(env_key) => std::env::var(env_key)
-                .and_then(|v| {
-                    if v.trim().is_empty() {
-                        Err(VarError::NotPresent)
-                    } else {
-                        Ok(Some(v))
-                    }
-                })
-                .map_err(|_| {
-                    crate::error::CodexErr::EnvVar(EnvVarError {
-                        var: env_key.clone(),
-                        instructions: self.env_key_instructions.clone(),
+            Some(env_key) => {
+                let env_value = if env_key == crate::openai_api_key::OPENAI_API_KEY_ENV_VAR {
+                    get_openai_api_key().map_or_else(|| Err(VarError::NotPresent), Ok)
+                } else {
+                    std::env::var(env_key)
+                };
+                env_value
+                    .and_then(|v| {
+                        if v.trim().is_empty() {
+                            Err(VarError::NotPresent)
+                        } else {
+                            Ok(Some(v))
+                        }
                     })
-                }),
+                    .map_err(|_| {
+                        crate::error::CodexErr::EnvVar(EnvVarError {
+                            var: env_key.clone(),
+                            instructions: self.env_key_instructions.clone(),
+                        })
+                    })
+            }
             None => Ok(None),
         }
     }
diff --git a/codex-rs/core/src/models.rs b/codex-rs/core/src/models.rs
index ab213fd52..ccc550e8e 100644
--- a/codex-rs/core/src/models.rs
+++ b/codex-rs/core/src/models.rs
@@ -1,6 +1,7 @@
 use std::collections::HashMap;
 
 use base64::Engine;
+use mcp_types::CallToolResult;
 use serde::Deserialize;
 use serde::Serialize;
 use serde::ser::Serializer;
@@ -18,6 +19,10 @@ pub enum ResponseInputItem {
         call_id: String,
         output: FunctionCallOutputPayload,
     },
+    McpToolCallOutput {
+        call_id: String,
+        result: Result<CallToolResult, String>,
+    },
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -77,6 +82,19 @@ impl From<ResponseInputItem> for ResponseItem {
             ResponseInputItem::FunctionCallOutput { call_id, output } => {
                 Self::FunctionCallOutput { call_id, output }
             }
+            ResponseInputItem::McpToolCallOutput { call_id, result } => Self::FunctionCallOutput {
+                call_id,
+                output: FunctionCallOutputPayload {
+                    success: Some(result.is_ok()),
+                    content: result.map_or_else(
+                        |tool_call_err| format!("err: {tool_call_err:?}"),
+                        |result| {
+                            serde_json::to_string(&result)
+                                .unwrap_or_else(|e| format!("JSON serialization error: {e}"))
+                        },
+                    ),
+                },
+            },
         }
     }
 }
diff --git a/codex-rs/core/src/openai_api_key.rs b/codex-rs/core/src/openai_api_key.rs
new file mode 100644
index 000000000..728914c0f
--- /dev/null
+++ b/codex-rs/core/src/openai_api_key.rs
@@ -0,0 +1,24 @@
+use std::env;
+use std::sync::LazyLock;
+use std::sync::RwLock;
+
+pub const OPENAI_API_KEY_ENV_VAR: &str = "OPENAI_API_KEY";
+
+static OPENAI_API_KEY: LazyLock<RwLock<Option<String>>> = LazyLock::new(|| {
+    let val = env::var(OPENAI_API_KEY_ENV_VAR)
+        .ok()
+        .and_then(|s| if s.is_empty() { None } else { Some(s) });
+    RwLock::new(val)
+});
+
+pub fn get_openai_api_key() -> Option<String> {
+    #![allow(clippy::unwrap_used)]
+    OPENAI_API_KEY.read().unwrap().clone()
+}
+
+pub fn set_openai_api_key(value: String) {
+    #![allow(clippy::unwrap_used)]
+    if !value.is_empty() {
+        *OPENAI_API_KEY.write().unwrap() = Some(value);
+    }
+}
diff --git a/codex-rs/core/src/openai_tools.rs b/codex-rs/core/src/openai_tools.rs
new file mode 100644
index 000000000..ef12a629b
--- /dev/null
+++ b/codex-rs/core/src/openai_tools.rs
@@ -0,0 +1,157 @@
+use serde::Serialize;
+use serde_json::json;
+use std::collections::BTreeMap;
+use std::sync::LazyLock;
+
+use crate::client_common::Prompt;
+
+#[derive(Debug, Clone, Serialize)]
+pub(crate) struct ResponsesApiTool {
+    name: &'static str,
+    description: &'static str,
+    strict: bool,
+    parameters: JsonSchema,
+}
+
"Tool" in the OpenAI +/// Responses API. +#[derive(Debug, Clone, Serialize)] +#[serde(tag = "type")] +pub(crate) enum OpenAiTool { + #[serde(rename = "function")] + Function(ResponsesApiTool), + #[serde(rename = "local_shell")] + LocalShell {}, +} + +/// Generic JSON‑Schema subset needed for our tool definitions +#[derive(Debug, Clone, Serialize)] +#[serde(tag = "type", rename_all = "lowercase")] +pub(crate) enum JsonSchema { + String, + Number, + Array { + items: Box, + }, + Object { + properties: BTreeMap, + required: &'static [&'static str], + #[serde(rename = "additionalProperties")] + additional_properties: bool, + }, +} + +/// Tool usage specification +static DEFAULT_TOOLS: LazyLock> = LazyLock::new(|| { + let mut properties = BTreeMap::new(); + properties.insert( + "command".to_string(), + JsonSchema::Array { + items: Box::new(JsonSchema::String), + }, + ); + properties.insert("workdir".to_string(), JsonSchema::String); + properties.insert("timeout".to_string(), JsonSchema::Number); + + vec![OpenAiTool::Function(ResponsesApiTool { + name: "shell", + description: "Runs a shell command, and returns its output.", + strict: false, + parameters: JsonSchema::Object { + properties, + required: &["command"], + additional_properties: false, + }, + })] +}); + +static DEFAULT_CODEX_MODEL_TOOLS: LazyLock> = + LazyLock::new(|| vec![OpenAiTool::LocalShell {}]); + +/// Returns JSON values that are compatible with Function Calling in the +/// Responses API: +/// https://platform.openai.com/docs/guides/function-calling?api-mode=responses +pub(crate) fn create_tools_json_for_responses_api( + prompt: &Prompt, + model: &str, +) -> crate::error::Result> { + // Assemble tool list: built-in tools + any extra tools from the prompt. + let default_tools = if model.starts_with("codex") { + &DEFAULT_CODEX_MODEL_TOOLS + } else { + &DEFAULT_TOOLS + }; + let mut tools_json = Vec::with_capacity(default_tools.len() + prompt.extra_tools.len()); + for t in default_tools.iter() { + tools_json.push(serde_json::to_value(t)?); + } + tools_json.extend( + prompt + .extra_tools + .clone() + .into_iter() + .map(|(name, tool)| mcp_tool_to_openai_tool(name, tool)), + ); + + Ok(tools_json) +} + +/// Returns JSON values that are compatible with Function Calling in the +/// Chat Completions API: +/// https://platform.openai.com/docs/guides/function-calling?api-mode=chat +pub(crate) fn create_tools_json_for_chat_completions_api( + prompt: &Prompt, + model: &str, +) -> crate::error::Result> { + // We start with the JSON for the Responses API and than rewrite it to match + // the chat completions tool call format. + let responses_api_tools_json = create_tools_json_for_responses_api(prompt, model)?; + let tools_json = responses_api_tools_json + .into_iter() + .filter_map(|mut tool| { + if tool.get("type") != Some(&serde_json::Value::String("function".to_string())) { + return None; + } + + if let Some(map) = tool.as_object_mut() { + // Remove "type" field as it is not needed in chat completions. + map.remove("type"); + Some(json!({ + "type": "function", + "function": map, + })) + } else { + None + } + }) + .collect::>(); + Ok(tools_json) +} + +fn mcp_tool_to_openai_tool( + fully_qualified_name: String, + tool: mcp_types::Tool, +) -> serde_json::Value { + let mcp_types::Tool { + description, + mut input_schema, + .. + } = tool; + + // OpenAI models mandate the "properties" field in the schema. 
+
+fn mcp_tool_to_openai_tool(
+    fully_qualified_name: String,
+    tool: mcp_types::Tool,
+) -> serde_json::Value {
+    let mcp_types::Tool {
+        description,
+        mut input_schema,
+        ..
+    } = tool;
+
+    // OpenAI models mandate the "properties" field in the schema. The Agents
+    // SDK fixed this by inserting an empty object for "properties" if it is not
+    // already present https://github.com/openai/openai-agents-python/issues/449
+    // so here we do the same.
+    if input_schema.properties.is_none() {
+        input_schema.properties = Some(serde_json::Value::Object(serde_json::Map::new()));
+    }
+
+    // TODO(mbolin): Change the contract of this function to return
+    // ResponsesApiTool.
+    json!({
+        "name": fully_qualified_name,
+        "description": description,
+        "parameters": input_schema,
+        "type": "function",
+    })
+}
diff --git a/codex-rs/core/src/project_doc.rs b/codex-rs/core/src/project_doc.rs
index 1a4e90deb..ab9d46186 100644
--- a/codex-rs/core/src/project_doc.rs
+++ b/codex-rs/core/src/project_doc.rs
@@ -25,7 +25,7 @@ const PROJECT_DOC_SEPARATOR: &str = "\n\n--- project-doc ---\n\n";
 
 /// Combines `Config::instructions` and `AGENTS.md` (if present) into a single
 /// string of instructions.
-pub(crate) async fn create_full_instructions(config: &Config) -> Option<String> {
+pub(crate) async fn get_user_instructions(config: &Config) -> Option<String> {
     match find_project_doc(config).await {
         Ok(Some(project_doc)) => match &config.instructions {
             Some(original_instructions) => Some(format!(
@@ -168,7 +168,7 @@ mod tests {
     async fn no_doc_file_returns_none() {
         let tmp = tempfile::tempdir().expect("tempdir");
 
-        let res = create_full_instructions(&make_config(&tmp, 4096, None)).await;
+        let res = get_user_instructions(&make_config(&tmp, 4096, None)).await;
         assert!(
             res.is_none(),
             "Expected None when AGENTS.md is absent and no system instructions provided"
@@ -182,7 +182,7 @@ mod tests {
         let tmp = tempfile::tempdir().expect("tempdir");
         fs::write(tmp.path().join("AGENTS.md"), "hello world").unwrap();
 
-        let res = create_full_instructions(&make_config(&tmp, 4096, None))
+        let res = get_user_instructions(&make_config(&tmp, 4096, None))
             .await
             .expect("doc expected");
 
@@ -201,7 +201,7 @@ mod tests {
         let huge = "A".repeat(LIMIT * 2); // 2 KiB
         fs::write(tmp.path().join("AGENTS.md"), &huge).unwrap();
 
-        let res = create_full_instructions(&make_config(&tmp, LIMIT, None))
+        let res = get_user_instructions(&make_config(&tmp, LIMIT, None))
             .await
             .expect("doc expected");
 
@@ -233,7 +233,7 @@ mod tests {
         let mut cfg = make_config(&repo, 4096, None);
         cfg.cwd = nested;
 
-        let res = create_full_instructions(&cfg).await.expect("doc expected");
+        let res = get_user_instructions(&cfg).await.expect("doc expected");
         assert_eq!(res, "root level doc");
     }
 
@@ -243,7 +243,7 @@ mod tests {
         let tmp = tempfile::tempdir().expect("tempdir");
         fs::write(tmp.path().join("AGENTS.md"), "something").unwrap();
 
-        let res = create_full_instructions(&make_config(&tmp, 0, None)).await;
+        let res = get_user_instructions(&make_config(&tmp, 0, None)).await;
         assert!(
             res.is_none(),
             "With limit 0 the function should return None"
@@ -259,7 +259,7 @@ mod tests {
 
         const INSTRUCTIONS: &str = "base instructions";
 
-        let res = create_full_instructions(&make_config(&tmp, 4096, Some(INSTRUCTIONS)))
+        let res = get_user_instructions(&make_config(&tmp, 4096, Some(INSTRUCTIONS)))
             .await
             .expect("should produce a combined instruction string");
 
@@ -276,7 +276,7 @@ mod tests {
 
         const INSTRUCTIONS: &str = "some instructions";
 
-        let res = create_full_instructions(&make_config(&tmp, 4096, Some(INSTRUCTIONS))).await;
+        let res = get_user_instructions(&make_config(&tmp, 4096, Some(INSTRUCTIONS))).await;
 
         assert_eq!(res, Some(INSTRUCTIONS.to_string()));
     }
diff --git a/codex-rs/core/src/protocol.rs b/codex-rs/core/src/protocol.rs
index 2a922cba6..737acc773 100644
--- a/codex-rs/core/src/protocol.rs
+++ b/codex-rs/core/src/protocol.rs
@@ -12,6 +12,8 @@ use serde::Deserialize;
 use serde::Serialize;
 use uuid::Uuid;
 
+use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
+use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
 use crate::message_history::HistoryEntry;
 use crate::model_provider_info::ModelProviderInfo;
 
@@ -37,6 +39,10 @@ pub enum Op {
         /// If not specified, server will use its default model.
         model: String,
+
+        model_reasoning_effort: ReasoningEffortConfig,
+        model_reasoning_summary: ReasoningSummaryConfig,
+
         /// Model instructions
         instructions: Option<String>,
         /// When to escalate for approval for execution
@@ -396,10 +402,17 @@ pub struct McpToolCallBeginEvent {
 pub struct McpToolCallEndEvent {
     /// Identifier for the corresponding McpToolCallBegin that finished.
     pub call_id: String,
-    /// Whether the tool call was successful. If `false`, `result` might not be present.
-    pub success: bool,
     /// Result of the tool call. Note this could be an error.
-    pub result: Option<CallToolResult>,
+    pub result: Result<CallToolResult, String>,
+}
+
+impl McpToolCallEndEvent {
+    pub fn is_success(&self) -> bool {
+        match &self.result {
+            Ok(result) => !result.is_error.unwrap_or(false),
+            Err(_) => false,
+        }
+    }
 }
 
 #[derive(Debug, Clone, Deserialize, Serialize)]
@@ -554,7 +567,7 @@ mod tests {
             id: "1234".to_string(),
             msg: EventMsg::SessionConfigured(SessionConfiguredEvent {
                 session_id,
-                model: "o4-mini".to_string(),
+                model: "codex-mini-latest".to_string(),
                 history_log_id: 0,
                 history_entry_count: 0,
             }),
         };
         let serialized = serde_json::to_string(&event).unwrap();
         assert_eq!(
             serialized,
-            r#"{"id":"1234","msg":{"type":"session_configured","session_id":"67e55044-10b1-426f-9247-bb680e5fe0c8","model":"o4-mini","history_log_id":0,"history_entry_count":0}}"#
+            r#"{"id":"1234","msg":{"type":"session_configured","session_id":"67e55044-10b1-426f-9247-bb680e5fe0c8","model":"codex-mini-latest","history_log_id":0,"history_entry_count":0}}"#
         );
     }
 }
diff --git a/codex-rs/exec/src/cli.rs b/codex-rs/exec/src/cli.rs
index 4a3d493a8..413fd23cb 100644
--- a/codex-rs/exec/src/cli.rs
+++ b/codex-rs/exec/src/cli.rs
@@ -1,5 +1,6 @@
 use clap::Parser;
 use clap::ValueEnum;
+use codex_common::CliConfigOverrides;
 use codex_common::SandboxPermissionOption;
 use std::path::PathBuf;
 
@@ -33,9 +34,8 @@ pub struct Cli {
     #[arg(long = "skip-git-repo-check", default_value_t = false)]
     pub skip_git_repo_check: bool,
 
-    /// Disable server‑side response storage (sends the full conversation context with every request)
-    #[arg(long = "disable-response-storage", default_value_t = false)]
-    pub disable_response_storage: bool,
+    #[clap(skip)]
+    pub config_overrides: CliConfigOverrides,
 
     /// Specifies color settings for use in the output.
     #[arg(long = "color", value_enum, default_value_t = Color::Auto)]
@@ -45,8 +45,10 @@ pub struct Cli {
     #[arg(long = "output-last-message")]
     pub last_message_file: Option<PathBuf>,
 
-    /// Initial instructions for the agent.
-    pub prompt: String,
+    /// Initial instructions for the agent. If not provided as an argument (or
+    /// if `-` is used), instructions are read from stdin.
+    #[arg(value_name = "PROMPT")]
+    pub prompt: Option<String>,
 }
 
 #[derive(Debug, Clone, Copy, Default, PartialEq, Eq, ValueEnum)]
diff --git a/codex-rs/exec/src/event_processor.rs b/codex-rs/exec/src/event_processor.rs
index 676b47d64..4cbbd25f0 100644
--- a/codex-rs/exec/src/event_processor.rs
+++ b/codex-rs/exec/src/event_processor.rs
@@ -1,6 +1,7 @@
-use chrono::Utc;
 use codex_common::elapsed::format_elapsed;
+use codex_core::WireApi;
 use codex_core::config::Config;
+use codex_core::model_supports_reasoning_summaries;
 use codex_core::protocol::AgentMessageEvent;
 use codex_core::protocol::BackgroundEventEvent;
 use codex_core::protocol::ErrorEvent;
@@ -37,15 +38,20 @@ pub(crate) struct EventProcessor {
     // using .style() with one of these fields. If you need a new style, add a
     // new field here.
     bold: Style,
+    italic: Style,
     dimmed: Style,
 
     magenta: Style,
     red: Style,
     green: Style,
+    cyan: Style,
+
+    /// Whether to include `AgentReasoning` events in the output.
+    show_agent_reasoning: bool,
 }
 
 impl EventProcessor {
-    pub(crate) fn create_with_ansi(with_ansi: bool) -> Self {
+    pub(crate) fn create_with_ansi(with_ansi: bool, show_agent_reasoning: bool) -> Self {
         let call_id_to_command = HashMap::new();
         let call_id_to_patch = HashMap::new();
         let call_id_to_tool_call = HashMap::new();
@@ -55,22 +61,28 @@ impl EventProcessor {
                 call_id_to_command,
                 call_id_to_patch,
                 bold: Style::new().bold(),
+                italic: Style::new().italic(),
                 dimmed: Style::new().dimmed(),
                 magenta: Style::new().magenta(),
                 red: Style::new().red(),
                 green: Style::new().green(),
+                cyan: Style::new().cyan(),
                 call_id_to_tool_call,
+                show_agent_reasoning,
             }
         } else {
             Self {
                 call_id_to_command,
                 call_id_to_patch,
                 bold: Style::new(),
+                italic: Style::new(),
                 dimmed: Style::new(),
                 magenta: Style::new(),
                 red: Style::new(),
                 green: Style::new(),
+                cyan: Style::new(),
                 call_id_to_tool_call,
+                show_agent_reasoning,
             }
         }
     }
@@ -94,59 +106,85 @@ struct PatchApplyBegin {
     auto_approved: bool,
 }
 
+// Timestamped println helper. The timestamp is styled with self.dimmed.
+#[macro_export]
 macro_rules! ts_println {
-    ($($arg:tt)*) => {{
-        let now = Utc::now();
-        let formatted = now.format("%Y-%m-%dT%H:%M:%S").to_string();
-        print!("[{}] ", formatted);
+    ($self:ident, $($arg:tt)*) => {{
+        let now = chrono::Utc::now();
+        let formatted = now.format("[%Y-%m-%dT%H:%M:%S]");
+        print!("{} ", formatted.style($self.dimmed));
         println!($($arg)*);
     }};
 }
 
-/// Print a concise summary of the effective configuration that will be used
-/// for the session. This mirrors the information shown in the TUI welcome
-/// screen.
-pub(crate) fn print_config_summary(config: &Config, with_ansi: bool) {
-    let bold = if with_ansi {
-        Style::new().bold()
-    } else {
-        Style::new()
-    };
-
-    ts_println!("OpenAI Codex (research preview)\n--------");
-
-    let entries = vec![
-        ("workdir", config.cwd.display().to_string()),
-        ("model", config.model.clone()),
-        ("provider", config.model_provider_id.clone()),
-        ("approval", format!("{:?}", config.approval_policy)),
-        ("sandbox", format!("{:?}", config.sandbox_policy)),
-    ];
-
-    for (key, value) in entries {
-        println!("{} {}", format!("{key}: ").style(bold), value);
-    }
+impl EventProcessor {
+    /// Print a concise summary of the effective configuration that will be used
+    /// for the session. This mirrors the information shown in the TUI welcome
+    /// screen.
+ pub(crate) fn print_config_summary(&mut self, config: &Config, prompt: &str) { + const VERSION: &str = env!("CARGO_PKG_VERSION"); + ts_println!( + self, + "OpenAI Codex v{} (research preview)\n--------", + VERSION + ); + + let mut entries = vec![ + ("workdir", config.cwd.display().to_string()), + ("model", config.model.clone()), + ("provider", config.model_provider_id.clone()), + ("approval", format!("{:?}", config.approval_policy)), + ("sandbox", format!("{:?}", config.sandbox_policy)), + ]; + if config.model_provider.wire_api == WireApi::Responses + && model_supports_reasoning_summaries(&config.model) + { + entries.push(( + "reasoning effort", + config.model_reasoning_effort.to_string(), + )); + entries.push(( + "reasoning summaries", + config.model_reasoning_summary.to_string(), + )); + } - println!("--------\n"); -} + for (key, value) in entries { + println!("{} {}", format!("{key}:").style(self.bold), value); + } + + println!("--------"); + + // Echo the prompt that will be sent to the agent so it is visible in the + // transcript/logs before any events come in. Note the prompt may have been + // read from stdin, so it may not be visible in the terminal otherwise. + ts_println!( + self, + "{}\n{}", + "User instructions:".style(self.bold).style(self.cyan), + prompt + ); + } -impl EventProcessor { pub(crate) fn process_event(&mut self, event: Event) { let Event { id: _, msg } = event; match msg { EventMsg::Error(ErrorEvent { message }) => { let prefix = "ERROR:".style(self.red); - ts_println!("{prefix} {message}"); + ts_println!(self, "{prefix} {message}"); } EventMsg::BackgroundEvent(BackgroundEventEvent { message }) => { - ts_println!("{}", message.style(self.dimmed)); + ts_println!(self, "{}", message.style(self.dimmed)); } EventMsg::TaskStarted | EventMsg::TaskComplete(_) => { // Ignore. } EventMsg::AgentMessage(AgentMessageEvent { message }) => { - let prefix = "Agent message:".style(self.bold); - ts_println!("{prefix} {message}"); + ts_println!( + self, + "{}\n{message}", + "codex".style(self.bold).style(self.magenta) + ); } EventMsg::ExecCommandBegin(ExecCommandBeginEvent { call_id, @@ -161,6 +199,7 @@ impl EventProcessor { }, ); ts_println!( + self, "{} {} in {}", "exec".style(self.magenta), escape_command(&command).style(self.bold), @@ -196,11 +235,11 @@ impl EventProcessor { match exit_code { 0 => { let title = format!("{call} succeeded{duration}:"); - ts_println!("{}", title.style(self.green)); + ts_println!(self, "{}", title.style(self.green)); } _ => { let title = format!("{call} exited {exit_code}{duration}:"); - ts_println!("{}", title.style(self.red)); + ts_println!(self, "{}", title.style(self.red)); } } println!("{}", truncated_output.style(self.dimmed)); @@ -237,16 +276,15 @@ impl EventProcessor { ); ts_println!( + self, "{} {}", "tool".style(self.magenta), invocation.style(self.bold), ); } - EventMsg::McpToolCallEnd(McpToolCallEndEvent { - call_id, - success, - result, - }) => { + EventMsg::McpToolCallEnd(tool_call_end_event) => { + let is_success = tool_call_end_event.is_success(); + let McpToolCallEndEvent { call_id, result } = tool_call_end_event; // Retrieve start time and invocation for duration calculation and labeling. 
                 let info = self.call_id_to_tool_call.remove(&call_id);
@@ -261,13 +299,13 @@ impl EventProcessor {
                     (String::new(), format!("tool('{call_id}')"))
                 };
 
-                let status_str = if success { "success" } else { "failed" };
-                let title_style = if success { self.green } else { self.red };
+                let status_str = if is_success { "success" } else { "failed" };
+                let title_style = if is_success { self.green } else { self.red };
                 let title = format!("{invocation} {status_str}{duration}:");
 
-                ts_println!("{}", title.style(title_style));
+                ts_println!(self, "{}", title.style(title_style));
 
-                if let Some(res) = result {
+                if let Ok(res) = result {
                     let val: serde_json::Value = res.into();
                     let pretty =
                         serde_json::to_string_pretty(&val).unwrap_or_else(|_| val.to_string());
@@ -293,6 +331,7 @@ impl EventProcessor {
                 );
 
                 ts_println!(
+                    self,
                     "{} auto_approved={}:",
                     "apply_patch".style(self.magenta),
                     auto_approved,
@@ -384,7 +423,7 @@ impl EventProcessor {
                 };
 
                 let title = format!("{label} exited {exit_code}{duration}:");
-                ts_println!("{}", title.style(title_style));
+                ts_println!(self, "{}", title.style(title_style));
                 for line in output.lines() {
                     println!("{}", line.style(self.dimmed));
                 }
@@ -396,7 +435,14 @@ impl EventProcessor {
                 // Should we exit?
             }
             EventMsg::AgentReasoning(agent_reasoning_event) => {
-                println!("thinking: {}", agent_reasoning_event.text);
+                if self.show_agent_reasoning {
+                    ts_println!(
+                        self,
+                        "{}\n{}",
+                        "thinking".style(self.italic).style(self.magenta),
+                        agent_reasoning_event.text
+                    );
+                }
             }
             EventMsg::SessionConfigured(session_configured_event) => {
                 let SessionConfiguredEvent {
@@ -407,12 +453,13 @@ impl EventProcessor {
                 } = session_configured_event;
 
                 ts_println!(
+                    self,
                     "{} {}",
                     "codex session".style(self.magenta).style(self.bold),
                     session_id.to_string().style(self.dimmed)
                 );
 
-                ts_println!("model: {}", model);
+                ts_println!(self, "model: {}", model);
                 println!();
             }
             EventMsg::GetHistoryEntryResponse(_) => {
diff --git a/codex-rs/exec/src/lib.rs b/codex-rs/exec/src/lib.rs
index dbf01f025..925e25d67 100644
--- a/codex-rs/exec/src/lib.rs
+++ b/codex-rs/exec/src/lib.rs
@@ -2,6 +2,7 @@ mod cli;
 mod event_processor;
 
 use std::io::IsTerminal;
+use std::io::Read;
 use std::path::Path;
 use std::path::PathBuf;
 use std::sync::Arc;
@@ -19,7 +20,6 @@ use codex_core::protocol::SandboxPolicy;
 use codex_core::protocol::TaskCompleteEvent;
 use codex_core::util::is_inside_git_repo;
 use event_processor::EventProcessor;
-use event_processor::print_config_summary;
 use tracing::debug;
 use tracing::error;
 use tracing::info;
@@ -34,12 +34,47 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
         sandbox,
         cwd,
         skip_git_repo_check,
-        disable_response_storage,
         color,
         last_message_file,
         prompt,
+        config_overrides,
     } = cli;
 
+    // Determine the prompt based on CLI arg and/or stdin.
+    let prompt = match prompt {
+        Some(p) if p != "-" => p,
+        // Either `-` was passed or no positional arg.
+        maybe_dash => {
+            // When no arg (None) **and** stdin is a TTY, bail out early – unless the
+            // user explicitly forced reading via `-`.
+            let force_stdin = matches!(maybe_dash.as_deref(), Some("-"));
+
+            if std::io::stdin().is_terminal() && !force_stdin {
+                eprintln!(
+                    "No prompt provided. Either specify one as an argument or pipe the prompt into stdin."
+                );
+                std::process::exit(1);
+            }
+
+            // Ensure the user knows we are waiting on stdin, as they may
+            // have gotten into this state by mistake. If so, and they are not
+            // writing to stdin, Codex will hang indefinitely, so this should
+            // help them debug in that case.
+ if !force_stdin { + eprintln!("Reading prompt from stdin..."); + } + let mut buffer = String::new(); + if let Err(e) = std::io::stdin().read_to_string(&mut buffer) { + eprintln!("Failed to read prompt from stdin: {e}"); + std::process::exit(1); + } else if buffer.trim().is_empty() { + eprintln!("No prompt provided via stdin."); + std::process::exit(1); + } + buffer + } + }; + let (stdout_with_ansi, stderr_with_ansi) = match color { cli::Color::Always => (true, true), cli::Color::Never => (false, false), @@ -63,18 +98,25 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option) -> any // the user for approval. approval_policy: Some(AskForApproval::Never), sandbox_policy, - disable_response_storage: if disable_response_storage { - Some(true) - } else { - None - }, cwd: cwd.map(|p| p.canonicalize().unwrap_or(p)), model_provider: None, codex_linux_sandbox_exe, }; - let config = Config::load_with_overrides(overrides)?; - // Print the effective configuration so users can see what Codex is using. - print_config_summary(&config, stdout_with_ansi); + // Parse `-c` overrides. + let cli_kv_overrides = match config_overrides.parse_overrides() { + Ok(v) => v, + Err(e) => { + eprintln!("Error parsing -c overrides: {e}"); + std::process::exit(1); + } + }; + + let config = Config::load_with_cli_overrides(cli_kv_overrides, overrides)?; + let mut event_processor = + EventProcessor::create_with_ansi(stdout_with_ansi, !config.hide_agent_reasoning); + // Print the effective configuration and prompt so users can see what Codex + // is using. + event_processor.print_config_summary(&config, &prompt); if !skip_git_repo_check && !is_inside_git_repo(&config) { eprintln!("Not inside a Git repo and --skip-git-repo-check was not specified."); @@ -164,7 +206,6 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option) -> any info!("Sent prompt with event ID: {initial_prompt_task_id}"); // Run the loop until the task is complete. - let mut event_processor = EventProcessor::create_with_ansi(stdout_with_ansi); while let Some(event) = rx.recv().await { let (is_last_event, last_assistant_message) = match &event.msg { EventMsg::TaskComplete(TaskCompleteEvent { last_agent_message }) => { diff --git a/codex-rs/exec/src/main.rs b/codex-rs/exec/src/main.rs index 17aa5377d..3a8e1f941 100644 --- a/codex-rs/exec/src/main.rs +++ b/codex-rs/exec/src/main.rs @@ -10,13 +10,30 @@ //! This allows us to ship a completely separate set of functionality as part //! of the `codex-exec` binary. use clap::Parser; +use codex_common::CliConfigOverrides; use codex_exec::Cli; use codex_exec::run_main; +#[derive(Parser, Debug)] +struct TopCli { + #[clap(flatten)] + config_overrides: CliConfigOverrides, + + #[clap(flatten)] + inner: Cli, +} + fn main() -> anyhow::Result<()> { codex_linux_sandbox::run_with_sandbox(|codex_linux_sandbox_exe| async move { - let cli = Cli::parse(); - run_main(cli, codex_linux_sandbox_exe).await?; + let top_cli = TopCli::parse(); + // Merge root-level overrides into inner CLI struct so downstream logic remains unchanged. 
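+        // The splice at index 0 keeps ordering stable: root-level `-c`
+        // overrides land ahead of any captured by the subcommand, e.g.
+        // root ["a=1"] + inner ["b=2"] merge to ["a=1", "b=2"].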
+ let mut inner = top_cli.inner; + inner + .config_overrides + .raw_overrides + .splice(0..0, top_cli.config_overrides.raw_overrides); + + run_main(inner, codex_linux_sandbox_exe).await?; Ok(()) }) } diff --git a/codex-rs/execpolicy/Cargo.toml b/codex-rs/execpolicy/Cargo.toml index 9d9188c5b..833df7ea3 100644 --- a/codex-rs/execpolicy/Cargo.toml +++ b/codex-rs/execpolicy/Cargo.toml @@ -24,7 +24,7 @@ env_logger = "0.11.5" log = "0.4" multimap = "0.10.0" path-absolutize = "3.1.1" -regex = "1.11.1" +regex-lite = "0.1" serde = { version = "1.0.194", features = ["derive"] } serde_json = "1.0.110" serde_with = { version = "3", features = ["macros"] } diff --git a/codex-rs/execpolicy/src/policy.rs b/codex-rs/execpolicy/src/policy.rs index 5dd135508..d1fe4ea89 100644 --- a/codex-rs/execpolicy/src/policy.rs +++ b/codex-rs/execpolicy/src/policy.rs @@ -1,6 +1,6 @@ use multimap::MultiMap; -use regex::Error as RegexError; -use regex::Regex; +use regex_lite::Error as RegexError; +use regex_lite::Regex; use crate::ExecCall; use crate::Forbidden; @@ -29,7 +29,7 @@ impl Policy { } else { let escaped_substrings = forbidden_substrings .iter() - .map(|s| regex::escape(s)) + .map(|s| regex_lite::escape(s)) .collect::>() .join("|"); Some(Regex::new(&format!("({escaped_substrings})"))?) diff --git a/codex-rs/execpolicy/src/policy_parser.rs b/codex-rs/execpolicy/src/policy_parser.rs index 92ed0bdc7..0290619d0 100644 --- a/codex-rs/execpolicy/src/policy_parser.rs +++ b/codex-rs/execpolicy/src/policy_parser.rs @@ -7,7 +7,7 @@ use crate::arg_matcher::ArgMatcher; use crate::opt::OptMeta; use log::info; use multimap::MultiMap; -use regex::Regex; +use regex_lite::Regex; use starlark::any::ProvidesStaticType; use starlark::environment::GlobalsBuilder; use starlark::environment::LibraryExtension; @@ -73,7 +73,7 @@ impl PolicyParser { #[derive(Debug)] pub struct ForbiddenProgramRegex { - pub regex: regex::Regex, + pub regex: regex_lite::Regex, pub reason: String, } @@ -93,7 +93,7 @@ impl PolicyBuilder { } } - fn build(self) -> Result { + fn build(self) -> Result { let programs = self.programs.into_inner(); let forbidden_program_regexes = self.forbidden_program_regexes.into_inner(); let forbidden_substrings = self.forbidden_substrings.into_inner(); @@ -207,7 +207,7 @@ fn policy_builtins(builder: &mut GlobalsBuilder) { .unwrap() .downcast_ref::() .unwrap(); - let compiled_regex = regex::Regex::new(®ex)?; + let compiled_regex = regex_lite::Regex::new(®ex)?; policy_builder.add_forbidden_program_regex(compiled_regex, reason); Ok(NoneType) } diff --git a/codex-rs/justfile b/codex-rs/justfile index 61339a232..c09465a48 100644 --- a/codex-rs/justfile +++ b/codex-rs/justfile @@ -1,15 +1,21 @@ +set positional-arguments + # Display help help: just -l -# Install the `codex-tui` binary -install: - cargo install --path tui +# `codex` +codex *args: + cargo run --bin codex -- "$@" + +# `codex exec` +exec *args: + cargo run --bin codex -- exec "$@" -# Run the TUI app +# `codex tui` tui *args: - cargo run --bin codex -- tui {{args}} + cargo run --bin codex -- tui "$@" -# Run the Proto app -proto *args: - cargo run --bin codex -- proto {{args}} +# format code +fmt: + cargo fmt -- --config imports_granularity=Item diff --git a/codex-rs/linux-sandbox/tests/landlock.rs b/codex-rs/linux-sandbox/tests/landlock.rs index 95ca11a29..17bdd9d80 100644 --- a/codex-rs/linux-sandbox/tests/landlock.rs +++ b/codex-rs/linux-sandbox/tests/landlock.rs @@ -15,6 +15,23 @@ use std::sync::Arc; use tempfile::NamedTempFile; use tokio::sync::Notify; +// At least on 
GitHub CI, the arm64 tests appear to need longer timeouts. + +#[cfg(not(target_arch = "aarch64"))] +const SHORT_TIMEOUT_MS: u64 = 200; +#[cfg(target_arch = "aarch64")] +const SHORT_TIMEOUT_MS: u64 = 5_000; + +#[cfg(not(target_arch = "aarch64"))] +const LONG_TIMEOUT_MS: u64 = 1_000; +#[cfg(target_arch = "aarch64")] +const LONG_TIMEOUT_MS: u64 = 5_000; + +#[cfg(not(target_arch = "aarch64"))] +const NETWORK_TIMEOUT_MS: u64 = 2_000; +#[cfg(target_arch = "aarch64")] +const NETWORK_TIMEOUT_MS: u64 = 10_000; + fn create_env_from_core_vars() -> HashMap { let policy = ShellEnvironmentPolicy::default(); create_env(&policy) @@ -52,7 +69,7 @@ async fn run_cmd(cmd: &[&str], writable_roots: &[PathBuf], timeout_ms: u64) { #[tokio::test] async fn test_root_read() { - run_cmd(&["ls", "-l", "/bin"], &[], 200).await; + run_cmd(&["ls", "-l", "/bin"], &[], SHORT_TIMEOUT_MS).await; } #[tokio::test] @@ -63,7 +80,7 @@ async fn test_root_write() { run_cmd( &["bash", "-lc", &format!("echo blah > {}", tmpfile_path)], &[], - 200, + SHORT_TIMEOUT_MS, ) .await; } @@ -75,7 +92,7 @@ async fn test_dev_null_write() { &[], // We have seen timeouts when running this test in CI on GitHub, // so we are using a generous timeout until we can diagnose further. - 1_000, + LONG_TIMEOUT_MS, ) .await; } @@ -93,7 +110,7 @@ async fn test_writable_root() { &[tmpdir.path().to_path_buf()], // We have seen timeouts when running this test in CI on GitHub, // so we are using a generous timeout until we can diagnose further. - 1_000, + LONG_TIMEOUT_MS, ) .await; } @@ -115,7 +132,7 @@ async fn assert_network_blocked(cmd: &[&str]) { cwd, // Give the tool a generous 2-second timeout so even slow DNS timeouts // do not stall the suite. - timeout_ms: Some(2_000), + timeout_ms: Some(NETWORK_TIMEOUT_MS), env: create_env_from_core_vars(), }; diff --git a/codex-rs/login/Cargo.toml b/codex-rs/login/Cargo.toml new file mode 100644 index 000000000..e6eba6fd4 --- /dev/null +++ b/codex-rs/login/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "codex-login" +version = { workspace = true } +edition = "2024" + +[lints] +workspace = true + +[dependencies] +chrono = { version = "0.4", features = ["serde"] } +reqwest = { version = "0.12", features = ["json"] } +serde = { version = "1", features = ["derive"] } +serde_json = "1" +tokio = { version = "1", features = [ + "io-std", + "macros", + "process", + "rt-multi-thread", + "signal", +] } diff --git a/codex-rs/login/src/lib.rs b/codex-rs/login/src/lib.rs new file mode 100644 index 000000000..390af74ac --- /dev/null +++ b/codex-rs/login/src/lib.rs @@ -0,0 +1,168 @@ +use chrono::DateTime; +use chrono::Utc; +use serde::Deserialize; +use serde::Serialize; +use std::fs::OpenOptions; +use std::io::Read; +use std::io::Write; +#[cfg(unix)] +use std::os::unix::fs::OpenOptionsExt; +use std::path::Path; +use std::process::Stdio; +use tokio::process::Command; + +const SOURCE_FOR_PYTHON_SERVER: &str = include_str!("./login_with_chatgpt.py"); + +const CLIENT_ID: &str = "app_EMoamEEZ73f0CkXaXp7hrann"; + +/// Run `python3 -c {{SOURCE_FOR_PYTHON_SERVER}}` with the CODEX_HOME +/// environment variable set to the provided `codex_home` path. If the +/// subprocess exits 0, read the OPENAI_API_KEY property out of +/// CODEX_HOME/auth.json and return Ok(OPENAI_API_KEY). Otherwise, return Err +/// with any information from the subprocess. +/// +/// If `capture_output` is true, the subprocess's output will be captured and +/// recorded in memory. Otherwise, the subprocess's output will be sent to the +/// current process's stdout/stderr. 
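+///
+/// Illustrative call (assuming the crate is consumed as `codex_login`):
+///
+/// ```no_run
+/// # async fn demo() -> std::io::Result<()> {
+/// let codex_home = std::path::PathBuf::from("/tmp/codex_home");
+/// let api_key = codex_login::login_with_chatgpt(&codex_home, false).await?;
+/// println!("obtained an API key ({} chars)", api_key.len());
+/// # Ok(())
+/// # }
+/// ```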
+pub async fn login_with_chatgpt( + codex_home: &Path, + capture_output: bool, +) -> std::io::Result { + let child = Command::new("python3") + .arg("-c") + .arg(SOURCE_FOR_PYTHON_SERVER) + .env("CODEX_HOME", codex_home) + .stdin(Stdio::null()) + .stdout(if capture_output { + Stdio::piped() + } else { + Stdio::inherit() + }) + .stderr(if capture_output { + Stdio::piped() + } else { + Stdio::inherit() + }) + .spawn()?; + + let output = child.wait_with_output().await?; + if output.status.success() { + try_read_openai_api_key(codex_home).await + } else { + let stderr = String::from_utf8_lossy(&output.stderr); + Err(std::io::Error::other(format!( + "login_with_chatgpt subprocess failed: {stderr}" + ))) + } +} + +/// Attempt to read the `OPENAI_API_KEY` from the `auth.json` file in the given +/// `CODEX_HOME` directory, refreshing it, if necessary. +pub async fn try_read_openai_api_key(codex_home: &Path) -> std::io::Result { + let auth_path = codex_home.join("auth.json"); + let mut file = std::fs::File::open(&auth_path)?; + let mut contents = String::new(); + file.read_to_string(&mut contents)?; + let auth_dot_json: AuthDotJson = serde_json::from_str(&contents)?; + + if is_expired(&auth_dot_json) { + let refresh_response = try_refresh_token(&auth_dot_json).await?; + let mut auth_dot_json = auth_dot_json; + auth_dot_json.tokens.id_token = refresh_response.id_token; + if let Some(refresh_token) = refresh_response.refresh_token { + auth_dot_json.tokens.refresh_token = refresh_token; + } + auth_dot_json.last_refresh = Utc::now(); + + let mut options = OpenOptions::new(); + options.truncate(true).write(true).create(true); + #[cfg(unix)] + { + options.mode(0o600); + } + + let json_data = serde_json::to_string(&auth_dot_json)?; + { + let mut file = options.open(&auth_path)?; + file.write_all(json_data.as_bytes())?; + file.flush()?; + } + + Ok(auth_dot_json.openai_api_key) + } else { + Ok(auth_dot_json.openai_api_key) + } +} + +fn is_expired(auth_dot_json: &AuthDotJson) -> bool { + let last_refresh = auth_dot_json.last_refresh; + last_refresh < Utc::now() - chrono::Duration::days(28) +} + +async fn try_refresh_token(auth_dot_json: &AuthDotJson) -> std::io::Result { + let refresh_request = RefreshRequest { + client_id: CLIENT_ID, + grant_type: "refresh_token", + refresh_token: auth_dot_json.tokens.refresh_token.clone(), + scope: "openid profile email", + }; + + let client = reqwest::Client::new(); + let response = client + .post("https://auth.openai.com/oauth/token") + .header("Content-Type", "application/json") + .json(&refresh_request) + .send() + .await + .map_err(std::io::Error::other)?; + + if response.status().is_success() { + let refresh_response = response + .json::() + .await + .map_err(std::io::Error::other)?; + Ok(refresh_response) + } else { + Err(std::io::Error::other(format!( + "Failed to refresh token: {}", + response.status() + ))) + } +} + +#[derive(Serialize)] +struct RefreshRequest { + client_id: &'static str, + grant_type: &'static str, + refresh_token: String, + scope: &'static str, +} + +#[derive(Deserialize)] +struct RefreshResponse { + id_token: String, + refresh_token: Option, +} + +/// Expected structure for $CODEX_HOME/auth.json. +#[derive(Deserialize, Serialize)] +struct AuthDotJson { + #[serde(rename = "OPENAI_API_KEY")] + openai_api_key: String, + + tokens: TokenData, + + last_refresh: DateTime, +} + +#[derive(Deserialize, Serialize)] +struct TokenData { + /// This is a JWT. + id_token: String, + + /// This is a JWT. 
+ #[allow(dead_code)] + access_token: String, + + refresh_token: String, +} diff --git a/codex-rs/login/src/login_with_chatgpt.py b/codex-rs/login/src/login_with_chatgpt.py new file mode 100644 index 000000000..dc058f642 --- /dev/null +++ b/codex-rs/login/src/login_with_chatgpt.py @@ -0,0 +1,838 @@ +"""Script that spawns a local webserver for retrieving an OpenAI API key. + +- Listens on 127.0.0.1:1455 +- Opens http://localhost:1455/auth/callback in the browser +- If the user successfully navigates the auth flow, + $CODEX_HOME/auth.json will be written with the API key. +- User will be redirected to http://localhost:1455/success upon success. + +The script should exit with a non-zero code if the user fails to navigate the +auth flow. + +To test this script locally without overwriting your existing auth.json file: + +``` +rm -rf /tmp/codex_home && mkdir /tmp/codex_home +CODEX_HOME=/tmp/codex_home python3 codex-rs/login/src/login_with_chatgpt.py +``` +""" + +from __future__ import annotations + +import argparse +import base64 +import datetime +import errno +import hashlib +import http.server +import json +import os +import secrets +import sys +import threading +import time +import urllib.parse +import urllib.request +import webbrowser +from dataclasses import dataclass +from typing import Any, Dict # for type hints + +# Required port for OAuth client. +REQUIRED_PORT = 1455 +URL_BASE = f"http://localhost:{REQUIRED_PORT}" +DEFAULT_ISSUER = "https://auth.openai.com" +DEFAULT_CLIENT_ID = "app_EMoamEEZ73f0CkXaXp7hrann" + +EXIT_CODE_WHEN_ADDRESS_ALREADY_IN_USE = 13 + + +@dataclass +class TokenData: + id_token: str + access_token: str + refresh_token: str + + +@dataclass +class AuthBundle: + """Aggregates authentication data produced after successful OAuth flow.""" + + api_key: str + token_data: TokenData + last_refresh: str + + +def main() -> None: + parser = argparse.ArgumentParser(description="Retrieve API key via local HTTP flow") + parser.add_argument( + "--no-browser", + action="store_true", + help="Do not automatically open the browser", + ) + parser.add_argument("--verbose", action="store_true", help="Enable request logging") + args = parser.parse_args() + + codex_home = os.environ.get("CODEX_HOME") + if not codex_home: + eprint("ERROR: CODEX_HOME environment variable is not set") + sys.exit(1) + + # Spawn server. + try: + httpd = _ApiKeyHTTPServer( + ("127.0.0.1", REQUIRED_PORT), + _ApiKeyHTTPHandler, + codex_home=codex_home, + verbose=args.verbose, + ) + except OSError as e: + eprint(f"ERROR: {e}") + if e.errno == errno.EADDRINUSE: + # Caller might want to handle this case specially. + sys.exit(EXIT_CODE_WHEN_ADDRESS_ALREADY_IN_USE) + else: + sys.exit(1) + + auth_url = httpd.auth_url() + + with httpd: + eprint(f"Starting local login server on {URL_BASE}") + if not args.no_browser: + try: + webbrowser.open(auth_url, new=1, autoraise=True) + except Exception as e: + eprint(f"Failed to open browser: {e}") + + eprint( + f"If your browser did not open, navigate to this URL to authenticate:\n\n{auth_url}" + ) + + # Run the server in the main thread until `shutdown()` is called by the + # request handler. + try: + httpd.serve_forever() + except KeyboardInterrupt: + eprint("\nKeyboard interrupt received, exiting.") + + # Server has been shut down by the request handler. Exit with the code + # it set (0 on success, non-zero on failure). 
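+    # Concretely: exit_code starts at 1 and is only set to 0 once the
+    # callback handler has persisted auth.json, so an abandoned or failed
+    # flow still exits non-zero. (A port-in-use failure exited earlier
+    # with code 13.)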
+ sys.exit(httpd.exit_code) + + +class _ApiKeyHTTPHandler(http.server.BaseHTTPRequestHandler): + """A minimal request handler that captures an *api key* from query/post.""" + + # We store the result in the server instance itself. + server: "_ApiKeyHTTPServer" # type: ignore[override] - helpful annotation + + def do_GET(self) -> None: # noqa: N802 – required by BaseHTTPRequestHandler + path = urllib.parse.urlparse(self.path).path + + if path == "/success": + # Serve confirmation page then gracefully shut down the server so + # the main thread can exit with the previously captured exit code. + self._send_html(LOGIN_SUCCESS_HTML) + + # Ensure the data is flushed to the client before we stop. + try: + self.wfile.flush() + except Exception as e: + eprint(f"Failed to flush response: {e}") + + self.request_shutdown() + elif path == "/auth/callback": + query = urllib.parse.urlparse(self.path).query + params = urllib.parse.parse_qs(query) + + # Validate state ------------------------------------------------- + if params.get("state", [None])[0] != self.server.state: + self.send_error(400, "State parameter mismatch") + return + + # Standard OAuth flow ----------------------------------------- + code = params.get("code", [None])[0] + if not code: + self.send_error(400, "Missing authorization code") + return + + try: + auth_bundle, success_url = self._exchange_code_for_api_key(code) + except Exception as exc: # noqa: BLE001 – propagate to client + self.send_error(500, f"Token exchange failed: {exc}") + return + + # Persist API key along with additional token metadata. + if _write_auth_file( + auth=auth_bundle, + codex_home=self.server.codex_home, + ): + self.server.exit_code = 0 + self._send_redirect(success_url) + else: + self.send_error(500, "Unable to persist auth file") + else: + self.send_error(404, "Endpoint not supported") + + def do_POST(self) -> None: # noqa: N802 – required by BaseHTTPRequestHandler + self.send_error(404, "Endpoint not supported") + + def send_error(self, code, message=None, explain=None) -> None: + """Send an error response and stop the server. + + We avoid calling `sys.exit()` directly from the request-handling thread + so that the response has a chance to be written to the socket. Instead + we shut the server down; the main thread will then exit with the + appropriate status code. + """ + super().send_error(code, message, explain) + try: + self.wfile.flush() + except Exception as e: + eprint(f"Failed to flush response: {e}") + + self.request_shutdown() + + def _send_redirect(self, url: str) -> None: + self.send_response(302) + self.send_header("Location", url) + self.end_headers() + + def _send_html(self, body: str) -> None: + encoded = body.encode() + self.send_response(200) + self.send_header("Content-Type", "text/html; charset=utf-8") + self.send_header("Content-Length", str(len(encoded))) + self.end_headers() + self.wfile.write(encoded) + + # Silence logging for cleanliness unless --verbose flag is used. + def log_message(self, fmt: str, *args): # type: ignore[override] + if getattr(self.server, "verbose", False): # type: ignore[attr-defined] + super().log_message(fmt, *args) + + def _exchange_code_for_api_key(self, code: str) -> tuple[AuthBundle, str]: + """Perform token + token-exchange to obtain an OpenAI API key. + + Returns (AuthBundle, success_url). + """ + + token_endpoint = f"{self.server.issuer}/oauth/token" + + # 1. 
Authorization-code -> (id_token, access_token, refresh_token) + data = urllib.parse.urlencode( + { + "grant_type": "authorization_code", + "code": code, + "redirect_uri": self.server.redirect_uri, + "client_id": self.server.client_id, + "code_verifier": self.server.pkce.code_verifier, + } + ).encode() + + token_data: TokenData + + with urllib.request.urlopen( + urllib.request.Request( + token_endpoint, + data=data, + method="POST", + headers={"Content-Type": "application/x-www-form-urlencoded"}, + ) + ) as resp: + payload = json.loads(resp.read().decode()) + token_data = TokenData( + id_token=payload["id_token"], + access_token=payload["access_token"], + refresh_token=payload["refresh_token"], + ) + + id_token_parts = token_data.id_token.split(".") + if len(id_token_parts) != 3: + raise ValueError("Invalid ID token") + access_token_parts = token_data.access_token.split(".") + if len(access_token_parts) != 3: + raise ValueError("Invalid access token") + + id_token_claims = _decode_jwt_segment(id_token_parts[1]) + access_token_claims = _decode_jwt_segment(access_token_parts[1]) + + token_claims = id_token_claims.get("https://api.openai.com/auth", {}) + access_claims = access_token_claims.get("https://api.openai.com/auth", {}) + + org_id = token_claims.get("organization_id") + if not org_id: + raise ValueError("Missing organization in id_token claims") + + project_id = token_claims.get("project_id") + if not project_id: + raise ValueError("Missing project in id_token claims") + + random_id = secrets.token_hex(6) + + # 2. Token exchange to obtain API key + today = datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%d") + exchange_data = urllib.parse.urlencode( + { + "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange", + "client_id": self.server.client_id, + "requested_token": "openai-api-key", + "subject_token": token_data.id_token, + "subject_token_type": "urn:ietf:params:oauth:token-type:id_token", + "name": f"Codex CLI [auto-generated] ({today}) [{random_id}]", + } + ).encode() + + exchanged_access_token: str + with urllib.request.urlopen( + urllib.request.Request( + token_endpoint, + data=exchange_data, + method="POST", + headers={"Content-Type": "application/x-www-form-urlencoded"}, + ) + ) as resp: + exchange_payload = json.loads(resp.read().decode()) + exchanged_access_token = exchange_payload["access_token"] + + # Determine whether the organization still requires additional + # setup (e.g., adding a payment method) based on the ID-token + # claim provided by the auth service. + completed_onboarding = token_claims.get("completed_platform_onboarding") == True + chatgpt_plan_type = access_claims.get("chatgpt_plan_type") + is_org_owner = token_claims.get("is_org_owner") == True + needs_setup = not completed_onboarding and is_org_owner + + # Build the success URL on the same host/port as the callback and + # include the required query parameters for the front-end page. + success_url_query = { + "id_token": token_data.id_token, + "needs_setup": "true" if needs_setup else "false", + "org_id": org_id, + "project_id": project_id, + "plan_type": chatgpt_plan_type, + "platform_url": ( + "https://platform.openai.com" + if self.server.issuer == "https://auth.openai.com" + else "https://platform.api.openai.org" + ), + } + success_url = f"{URL_BASE}/success?{urllib.parse.urlencode(success_url_query)}" + + # Attempt to redeem complimentary API credits for eligible ChatGPT + # Plus / Pro subscribers. Any errors are logged but do not interrupt + # the login flow. 
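+        # Eligibility is enforced inside maybe_redeem_credits: a "plus" or
+        # "pro" chatgpt_plan_type, a subscription active for more than
+        # 7 days, and (for org owners) completed platform onboarding.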
+ + try: + maybe_redeem_credits( + issuer=self.server.issuer, + client_id=self.server.client_id, + id_token=token_data.id_token, + refresh_token=token_data.refresh_token, + codex_home=self.server.codex_home, + ) + except Exception as exc: # pragma: no cover – best-effort only + eprint(f"Unable to redeem ChatGPT subscriber API credits: {exc}") + + # Persist refresh_token/id_token for future use (redeem credits etc.) + last_refresh_str = ( + datetime.datetime.now(datetime.timezone.utc) + .isoformat() + .replace("+00:00", "Z") + ) + + auth_bundle = AuthBundle( + api_key=exchanged_access_token, + token_data=token_data, + last_refresh=last_refresh_str, + ) + + return (auth_bundle, success_url) + + def request_shutdown(self) -> None: + # shutdown() must be invoked from another thread to avoid + # deadlocking the serve_forever() loop, which is running in this + # same thread. A short-lived helper thread does the trick. + threading.Thread(target=self.server.shutdown, daemon=True).start() + + +def _write_auth_file(*, auth: AuthBundle, codex_home: str) -> bool: + """Persist *api_key* to $CODEX_HOME/auth.json. + + Returns True on success, False otherwise. Any error is printed to + *stderr* so that the Rust layer can surface the problem. + """ + if not os.path.isdir(codex_home): + try: + os.makedirs(codex_home, exist_ok=True) + except Exception as exc: # pragma: no cover – unlikely + eprint(f"ERROR: unable to create CODEX_HOME directory: {exc}") + return False + + auth_path = os.path.join(codex_home, "auth.json") + auth_json_contents = { + "OPENAI_API_KEY": auth.api_key, + "tokens": { + "id_token": auth.token_data.id_token, + "access_token": auth.token_data.access_token, + "refresh_token": auth.token_data.refresh_token, + }, + "last_refresh": auth.last_refresh, + } + try: + with open(auth_path, "w", encoding="utf-8") as fp: + if hasattr(os, "fchmod"): # POSIX-safe + os.fchmod(fp.fileno(), 0o600) + json.dump(auth_json_contents, fp, indent=2) + except Exception as exc: # pragma: no cover – permissions/filesystem + eprint(f"ERROR: unable to write auth file: {exc}") + return False + + return True + + +@dataclass +class PkceCodes: + code_verifier: str + code_challenge: str + + +class _ApiKeyHTTPServer(http.server.HTTPServer): + """HTTPServer with shutdown helper & self-contained OAuth configuration.""" + + def __init__( + self, + server_address: tuple[str, int], + request_handler_class: type[http.server.BaseHTTPRequestHandler], + *, + codex_home: str, + verbose: bool = False, + ) -> None: + super().__init__(server_address, request_handler_class, bind_and_activate=True) + + self.exit_code = 1 + self.codex_home = codex_home + self.verbose: bool = verbose + + self.issuer: str = DEFAULT_ISSUER + self.client_id: str = DEFAULT_CLIENT_ID + port = server_address[1] + self.redirect_uri: str = f"http://localhost:{port}/auth/callback" + self.pkce: PkceCodes = _generate_pkce() + self.state: str = secrets.token_hex(32) + + def auth_url(self) -> str: + """Return fully-formed OpenID authorization URL.""" + params = { + "response_type": "code", + "client_id": self.client_id, + "redirect_uri": self.redirect_uri, + "scope": "openid profile email offline_access", + "code_challenge": self.pkce.code_challenge, + "code_challenge_method": "S256", + "id_token_add_organizations": "true", + "state": self.state, + } + return f"{self.issuer}/oauth/authorize?" 
+ urllib.parse.urlencode(params) + + +def maybe_redeem_credits( + *, + issuer: str, + client_id: str, + id_token: str | None, + refresh_token: str, + codex_home: str, +) -> None: + """Attempt to redeem complimentary API credits for ChatGPT subscribers. + + The operation is best-effort: any error results in a warning being printed + and the function returning early without raising. + """ + id_claims: Dict[str, Any] | None = parse_id_token_claims(id_token or "") + + # Refresh expired ID token, if possible + token_expired = True + if id_claims and isinstance(id_claims.get("exp"), int): + token_expired = _current_timestamp_ms() >= int(id_claims["exp"]) * 1000 + + if token_expired: + eprint("Refreshing credentials...") + new_refresh_token: str | None = None + new_id_token: str | None = None + + try: + payload = json.dumps( + { + "client_id": client_id, + "grant_type": "refresh_token", + "refresh_token": refresh_token, + "scope": "openid profile email", + } + ).encode() + + req = urllib.request.Request( + url="https://auth.openai.com/oauth/token", + data=payload, + method="POST", + headers={"Content-Type": "application/json"}, + ) + + with urllib.request.urlopen(req) as resp: + refresh_data = json.loads(resp.read().decode()) + new_id_token = refresh_data.get("id_token") + new_id_claims = parse_id_token_claims(new_id_token or "") + new_refresh_token = refresh_data.get("refresh_token") + except Exception as err: + eprint("Unable to refresh ID token via token-exchange:", err) + return + + if not new_id_token or not new_refresh_token: + return + + # Update auth.json with new tokens. + try: + auth_dir = codex_home + auth_path = os.path.join(auth_dir, "auth.json") + with open(auth_path, "r", encoding="utf-8") as fp: + existing = json.load(fp) + + tokens = existing.setdefault("tokens", {}) + tokens["id_token"] = new_id_token + # Note this does not touch the access_token? + tokens["refresh_token"] = new_refresh_token + tokens["last_refresh"] = ( + datetime.datetime.now(datetime.timezone.utc) + .isoformat() + .replace("+00:00", "Z") + ) + + with open(auth_path, "w", encoding="utf-8") as fp: + if hasattr(os, "fchmod"): + os.fchmod(fp.fileno(), 0o600) + json.dump(existing, fp, indent=2) + except Exception as err: + eprint("Unable to update refresh token in auth file:", err) + + if not new_id_claims: + # Still couldn't parse claims. + return + + id_token = new_id_token + id_claims = new_id_claims + + # Done refreshing credentials: now try to redeem credits. + if not id_token: + eprint("No ID token available, cannot redeem credits.") + return + + auth_claims = id_claims.get("https://api.openai.com/auth", {}) + + # Subscription eligibility check (Plus or Pro, >7 days active) + sub_start_str = auth_claims.get("chatgpt_subscription_active_start") + if isinstance(sub_start_str, str): + try: + sub_start_ts = datetime.datetime.fromisoformat(sub_start_str.rstrip("Z")) + if datetime.datetime.now( + datetime.timezone.utc + ) - sub_start_ts < datetime.timedelta(days=7): + eprint( + "Sorry, your subscription must be active for more than 7 days to redeem credits." 
+ ) + return + except ValueError: + # Malformed; ignore + pass + + completed_onboarding = bool(auth_claims.get("completed_platform_onboarding")) + is_org_owner = bool(auth_claims.get("is_org_owner")) + needs_setup = not completed_onboarding and is_org_owner + plan_type = auth_claims.get("chatgpt_plan_type") + + if needs_setup or plan_type not in {"plus", "pro"}: + eprint("Only users with Plus or Pro subscriptions can redeem free API credits.") + return + + api_host = ( + "https://api.openai.com" + if issuer == "https://auth.openai.com" + else "https://api.openai.org" + ) + + try: + redeem_payload = json.dumps({"id_token": id_token}).encode() + req = urllib.request.Request( + url=f"{api_host}/v1/billing/redeem_credits", + data=redeem_payload, + method="POST", + headers={"Content-Type": "application/json"}, + ) + + with urllib.request.urlopen(req) as resp: + redeem_data = json.loads(resp.read().decode()) + + granted = redeem_data.get("granted_chatgpt_subscriber_api_credits", 0) + if granted and granted > 0: + eprint( + f"""Thanks for being a ChatGPT {'Plus' if plan_type=='plus' else 'Pro'} subscriber! +If you haven't already redeemed, you should receive {'$5' if plan_type=='plus' else '$50'} in API credits. + +Credits: https://platform.openai.com/settings/organization/billing/credit-grants +More info: https://help.openai.com/en/articles/11381614""", + ) + else: + eprint( + f"""It looks like no credits were granted: + +{json.dumps(redeem_data, indent=2)} + +Credits: https://platform.openai.com/settings/organization/billing/credit-grants +More info: https://help.openai.com/en/articles/11381614""" + ) + except Exception as err: + eprint("Credit redemption request failed:", err) + + +def _generate_pkce() -> PkceCodes: + """Generate PKCE *code_verifier* and *code_challenge* (S256).""" + code_verifier = secrets.token_hex(64) + digest = hashlib.sha256(code_verifier.encode()).digest() + code_challenge = base64.urlsafe_b64encode(digest).rstrip(b"=").decode() + return PkceCodes(code_verifier, code_challenge) + + +def eprint(*args, **kwargs) -> None: + print(*args, file=sys.stderr, **kwargs) + + +# Parse ID-token claims (if provided) +# +# interface IDTokenClaims { +# "exp": number; // specifically, an int +# "https://api.openai.com/auth": { +# organization_id: string; +# project_id: string; +# completed_platform_onboarding: boolean; +# is_org_owner: boolean; +# chatgpt_subscription_active_start: string; +# chatgpt_subscription_active_until: string; +# chatgpt_plan_type: string; +# }; +# } +def parse_id_token_claims(id_token: str) -> Dict[str, Any] | None: + if id_token: + parts = id_token.split(".") + if len(parts) == 3: + return _decode_jwt_segment(parts[1]) + return None + + +def _decode_jwt_segment(segment: str) -> Dict[str, Any]: + """Return the decoded JSON payload from a JWT segment. + + Adds required padding for urlsafe_b64decode. + """ + padded = segment + "=" * (-len(segment) % 4) + try: + data = base64.urlsafe_b64decode(padded.encode()) + return json.loads(data.decode()) + except Exception: + return {} + + +def _current_timestamp_ms() -> int: + return int(time.time() * 1000) + + +LOGIN_SUCCESS_HTML = """ + + + + Sign into Codex CLI + + + + +
+  <body>
+    <h1>Signed in to Codex CLI</h1>
+  </body>
+</html>
+ + +""" + +# Unconditionally call `main()` instead of gating it behind +# `if __name__ == "__main__"` because this script is either: +# +# - invoked as a string passed to `python3 -c` +# - run via `python3 login_with_chatgpt.py` for testing as part of local +# development +main() diff --git a/codex-rs/mcp-client/src/main.rs b/codex-rs/mcp-client/src/main.rs index af4b05098..518383d1e 100644 --- a/codex-rs/mcp-client/src/main.rs +++ b/codex-rs/mcp-client/src/main.rs @@ -20,9 +20,22 @@ use mcp_types::Implementation; use mcp_types::InitializeRequestParams; use mcp_types::ListToolsRequestParams; use mcp_types::MCP_SCHEMA_VERSION; +use tracing_subscriber::EnvFilter; #[tokio::main] async fn main() -> Result<()> { + let default_level = "debug"; + let _ = tracing_subscriber::fmt() + // Fallback to the `default_level` log filter if the environment + // variable is not set _or_ contains an invalid value + .with_env_filter( + EnvFilter::try_from_default_env() + .or_else(|_| EnvFilter::try_new(default_level)) + .unwrap_or_else(|_| EnvFilter::new(default_level)), + ) + .with_writer(std::io::stderr) + .try_init(); + // Collect command-line arguments excluding the program name itself. let mut args: Vec = std::env::args().skip(1).collect(); diff --git a/codex-rs/mcp-server/Cargo.toml b/codex-rs/mcp-server/Cargo.toml index 968222c94..c3f111581 100644 --- a/codex-rs/mcp-server/Cargo.toml +++ b/codex-rs/mcp-server/Cargo.toml @@ -22,6 +22,7 @@ mcp-types = { path = "../mcp-types" } schemars = "0.8.22" serde = { version = "1", features = ["derive"] } serde_json = "1" +toml = "0.8" tracing = { version = "0.1.41", features = ["log"] } tracing-subscriber = { version = "0.3", features = ["fmt", "env-filter"] } tokio = { version = "1", features = [ diff --git a/codex-rs/mcp-server/src/codex_tool_config.rs b/codex-rs/mcp-server/src/codex_tool_config.rs index d04a5c80b..03e723444 100644 --- a/codex-rs/mcp-server/src/codex_tool_config.rs +++ b/codex-rs/mcp-server/src/codex_tool_config.rs @@ -1,15 +1,16 @@ //! Configuration object accepted by the `codex` MCP tool-call. -use std::path::PathBuf; - +use codex_core::protocol::AskForApproval; +use codex_core::protocol::SandboxPolicy; use mcp_types::Tool; use mcp_types::ToolInputSchema; use schemars::JsonSchema; use schemars::r#gen::SchemaSettings; use serde::Deserialize; +use std::collections::HashMap; +use std::path::PathBuf; -use codex_core::protocol::AskForApproval; -use codex_core::protocol::SandboxPolicy; +use crate::json_to_toml::json_to_toml; /// Client-supplied configuration for a `codex` tool-call. #[derive(Debug, Clone, Deserialize, JsonSchema)] @@ -41,12 +42,10 @@ pub(crate) struct CodexToolCallParam { #[serde(default, skip_serializing_if = "Option::is_none")] pub sandbox_permissions: Option>, - /// Disable server-side response storage. + /// Individual config settings that will override what is in + /// CODEX_HOME/config.toml. #[serde(default, skip_serializing_if = "Option::is_none")] - pub disable_response_storage: Option, - // Custom system instructions. 
- // #[serde(default, skip_serializing_if = "Option::is_none")] - // pub instructions: Option, + pub config: Option>, } // Create custom enums for use with `CodexToolCallApprovalPolicy` where we @@ -155,7 +154,7 @@ impl CodexToolCallParam { cwd, approval_policy, sandbox_permissions, - disable_response_storage, + config: cli_overrides, } = self; let sandbox_policy = sandbox_permissions.map(|perms| { SandboxPolicy::from(perms.into_iter().map(Into::into).collect::>()) @@ -168,12 +167,17 @@ impl CodexToolCallParam { cwd: cwd.map(PathBuf::from), approval_policy: approval_policy.map(Into::into), sandbox_policy, - disable_response_storage, model_provider: None, codex_linux_sandbox_exe, }; - let cfg = codex_core::config::Config::load_with_overrides(overrides)?; + let cli_overrides = cli_overrides + .unwrap_or_default() + .into_iter() + .map(|(k, v)| (k, json_to_toml(v))) + .collect(); + + let cfg = codex_core::config::Config::load_with_cli_overrides(cli_overrides, overrides)?; Ok((prompt, cfg)) } @@ -216,14 +220,15 @@ mod tests { ], "type": "string" }, + "config": { + "description": "Individual config settings that will override what is in CODEX_HOME/config.toml.", + "additionalProperties": true, + "type": "object" + }, "cwd": { "description": "Working directory for the session. If relative, it is resolved against the server process's current working directory.", "type": "string" }, - "disable-response-storage": { - "description": "Disable server-side response storage.", - "type": "boolean" - }, "model": { "description": "Optional override for the model name (e.g. \"o3\", \"o4-mini\")", "type": "string" diff --git a/codex-rs/mcp-server/src/json_to_toml.rs b/codex-rs/mcp-server/src/json_to_toml.rs new file mode 100644 index 000000000..ae33382a1 --- /dev/null +++ b/codex-rs/mcp-server/src/json_to_toml.rs @@ -0,0 +1,84 @@ +use serde_json::Value as JsonValue; +use toml::Value as TomlValue; + +/// Convert a `serde_json::Value` into a semantically equivalent `toml::Value`. 
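+///
+/// For example, `json!({"a": [1, true]})` maps to the TOML table
+/// `a = [1, true]`. TOML has no null, so `JsonValue::Null` is mapped to an
+/// empty string rather than dropped.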
+pub(crate) fn json_to_toml(v: JsonValue) -> TomlValue { + match v { + JsonValue::Null => TomlValue::String(String::new()), + JsonValue::Bool(b) => TomlValue::Boolean(b), + JsonValue::Number(n) => { + if let Some(i) = n.as_i64() { + TomlValue::Integer(i) + } else if let Some(f) = n.as_f64() { + TomlValue::Float(f) + } else { + TomlValue::String(n.to_string()) + } + } + JsonValue::String(s) => TomlValue::String(s), + JsonValue::Array(arr) => TomlValue::Array(arr.into_iter().map(json_to_toml).collect()), + JsonValue::Object(map) => { + let tbl = map + .into_iter() + .map(|(k, v)| (k, json_to_toml(v))) + .collect::(); + TomlValue::Table(tbl) + } + } +} + +#[cfg(test)] +#[allow(clippy::unwrap_used)] +mod tests { + use super::*; + use pretty_assertions::assert_eq; + use serde_json::json; + + #[test] + fn json_number_to_toml() { + let json_value = json!(123); + assert_eq!(TomlValue::Integer(123), json_to_toml(json_value)); + } + + #[test] + fn json_array_to_toml() { + let json_value = json!([true, 1]); + assert_eq!( + TomlValue::Array(vec![TomlValue::Boolean(true), TomlValue::Integer(1)]), + json_to_toml(json_value) + ); + } + + #[test] + fn json_bool_to_toml() { + let json_value = json!(false); + assert_eq!(TomlValue::Boolean(false), json_to_toml(json_value)); + } + + #[test] + fn json_float_to_toml() { + let json_value = json!(1.25); + assert_eq!(TomlValue::Float(1.25), json_to_toml(json_value)); + } + + #[test] + fn json_null_to_toml() { + let json_value = serde_json::Value::Null; + assert_eq!(TomlValue::String(String::new()), json_to_toml(json_value)); + } + + #[test] + fn json_object_nested() { + let json_value = json!({ "outer": { "inner": 2 } }); + let expected = { + let mut inner = toml::value::Table::new(); + inner.insert("inner".into(), TomlValue::Integer(2)); + + let mut outer = toml::value::Table::new(); + outer.insert("outer".into(), TomlValue::Table(inner)); + TomlValue::Table(outer) + }; + + assert_eq!(json_to_toml(json_value), expected); + } +} diff --git a/codex-rs/mcp-server/src/lib.rs b/codex-rs/mcp-server/src/lib.rs index 0f29eb782..b2a7797fe 100644 --- a/codex-rs/mcp-server/src/lib.rs +++ b/codex-rs/mcp-server/src/lib.rs @@ -16,6 +16,7 @@ use tracing::info; mod codex_tool_config; mod codex_tool_runner; +mod json_to_toml; mod message_processor; use crate::message_processor::MessageProcessor; diff --git a/codex-rs/mcp-types/src/lib.rs b/codex-rs/mcp-types/src/lib.rs index afd6f4ad6..0ed518535 100644 --- a/codex-rs/mcp-types/src/lib.rs +++ b/codex-rs/mcp-types/src/lib.rs @@ -1144,6 +1144,7 @@ pub enum ServerRequest { #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] #[serde(untagged)] +#[allow(clippy::large_enum_variant)] pub enum ServerResult { Result(Result), InitializeResult(InitializeResult), diff --git a/codex-rs/tui/Cargo.toml b/codex-rs/tui/Cargo.toml index c7a8361fa..2d7840e66 100644 --- a/codex-rs/tui/Cargo.toml +++ b/codex-rs/tui/Cargo.toml @@ -16,13 +16,16 @@ workspace = true [dependencies] anyhow = "1" +base64 = "0.22.1" clap = { version = "4", features = ["derive"] } codex-ansi-escape = { path = "../ansi-escape" } codex-core = { path = "../core" } codex-common = { path = "../common", features = ["cli", "elapsed"] } codex-linux-sandbox = { path = "../linux-sandbox" } +codex-login = { path = "../login" } color-eyre = "0.6.3" crossterm = { version = "0.28.1", features = ["bracketed-paste"] } +image = { version = "^0.25.6", default-features = false, features = ["jpeg"] } lazy_static = "1" mcp-types = { path = "../mcp-types" } path-clean = "1.0.1" @@ -30,8 
+33,9 @@ ratatui = { version = "0.29.0", features = [ "unstable-widget-ref", "unstable-rendered-line-info", ] } -regex = "1" -serde_json = "1" +ratatui-image = "8.0.0" +regex-lite = "0.1" +serde_json = { version = "1", features = ["preserve_order"] } shlex = "1.3.0" strum = "0.27.1" strum_macros = "0.27.1" @@ -48,6 +52,7 @@ tracing-subscriber = { version = "0.3.19", features = ["env-filter"] } tui-input = "0.11.1" tui-markdown = "0.3.3" tui-textarea = "0.7.0" +unicode-segmentation = "1.12.0" uuid = "1" [dev-dependencies] diff --git a/codex-rs/tui/src/app.rs b/codex-rs/tui/src/app.rs index 7d518c23c..ff61b5c94 100644 --- a/codex-rs/tui/src/app.rs +++ b/codex-rs/tui/src/app.rs @@ -3,11 +3,11 @@ use crate::app_event_sender::AppEventSender; use crate::chatwidget::ChatWidget; use crate::git_warning_screen::GitWarningOutcome; use crate::git_warning_screen::GitWarningScreen; +use crate::login_screen::LoginScreen; use crate::mouse_capture::MouseCapture; use crate::scroll_event_helper::ScrollEventHelper; use crate::slash_command::SlashCommand; use crate::tui; -// used by ChatWidgetArgs use codex_core::config::Config; use codex_core::protocol::Event; use codex_core::protocol::Op; @@ -29,6 +29,8 @@ enum AppState<'a> { /// `AppState`. widget: Box>, }, + /// The login screen for the OpenAI provider. + Login { screen: LoginScreen }, /// The start-up warning that recommends running codex inside a Git repo. GitWarning { screen: GitWarningScreen }, } @@ -38,6 +40,9 @@ pub(crate) struct App<'a> { app_event_rx: Receiver, app_state: AppState<'a>, + /// Config is stored here so we can recreate ChatWidgets as needed. + config: Config, + /// Stored parameters needed to instantiate the ChatWidget later, e.g., /// after dismissing the Git-repo warning. chat_args: Option, @@ -56,6 +61,7 @@ impl<'a> App<'a> { pub(crate) fn new( config: Config, initial_prompt: Option, + show_login_screen: bool, show_git_warning: bool, initial_images: Vec, ) -> Self { @@ -113,20 +119,35 @@ impl<'a> App<'a> { }); } - let (app_state, chat_args) = if show_git_warning { + let (app_state, chat_args) = if show_login_screen { + ( + AppState::Login { + screen: LoginScreen::new(app_event_tx.clone(), config.codex_home.clone()), + }, + Some(ChatWidgetArgs { + config: config.clone(), + initial_prompt, + initial_images, + }), + ) + } else if show_git_warning { ( AppState::GitWarning { screen: GitWarningScreen::new(), }, Some(ChatWidgetArgs { - config, + config: config.clone(), initial_prompt, initial_images, }), ) } else { - let chat_widget = - ChatWidget::new(config, app_event_tx.clone(), initial_prompt, initial_images); + let chat_widget = ChatWidget::new( + config.clone(), + app_event_tx.clone(), + initial_prompt, + initial_images, + ); ( AppState::Chat { widget: Box::new(chat_widget), @@ -139,6 +160,7 @@ impl<'a> App<'a> { app_event_tx, app_event_rx, app_state, + config, chat_args, } } @@ -175,7 +197,7 @@ impl<'a> App<'a> { AppState::Chat { widget } => { widget.submit_op(Op::Interrupt); } - AppState::GitWarning { .. } => { + AppState::Login { .. } | AppState::GitWarning { .. } => { // No-op. } } @@ -203,17 +225,23 @@ impl<'a> App<'a> { } AppEvent::CodexOp(op) => match &mut self.app_state { AppState::Chat { widget } => widget.submit_op(op), - AppState::GitWarning { .. } => {} + AppState::Login { .. } | AppState::GitWarning { .. } => {} }, AppEvent::LatestLog(line) => match &mut self.app_state { AppState::Chat { widget } => widget.update_latest_log(line), - AppState::GitWarning { .. } => {} + AppState::Login { .. } | AppState::GitWarning { .. 
} => {} }, AppEvent::DispatchCommand(command) => match command { - SlashCommand::Clear => match &mut self.app_state { - AppState::Chat { widget } => widget.clear_conversation_history(), - AppState::GitWarning { .. } => {} - }, + SlashCommand::New => { + let new_widget = Box::new(ChatWidget::new( + self.config.clone(), + self.app_event_tx.clone(), + None, + Vec::new(), + )); + self.app_state = AppState::Chat { widget: new_widget }; + self.app_event_tx.send(AppEvent::Redraw); + } SlashCommand::ToggleMouseMode => { if let Err(e) = mouse_capture.toggle() { tracing::error!("Failed to toggle mouse mode: {e}"); @@ -235,6 +263,9 @@ impl<'a> App<'a> { AppState::Chat { widget } => { terminal.draw(|frame| frame.render_widget_ref(&**widget, frame.area()))?; } + AppState::Login { screen } => { + terminal.draw(|frame| frame.render_widget_ref(&*screen, frame.area()))?; + } AppState::GitWarning { screen } => { terminal.draw(|frame| frame.render_widget_ref(&*screen, frame.area()))?; } @@ -249,6 +280,7 @@ impl<'a> App<'a> { AppState::Chat { widget } => { widget.handle_key_event(key_event); } + AppState::Login { screen } => screen.handle_key_event(key_event), AppState::GitWarning { screen } => match screen.handle_key_event(key_event) { GitWarningOutcome::Continue => { // User accepted – switch to chat view. @@ -279,14 +311,14 @@ impl<'a> App<'a> { fn dispatch_scroll_event(&mut self, scroll_delta: i32) { match &mut self.app_state { AppState::Chat { widget } => widget.handle_scroll_delta(scroll_delta), - AppState::GitWarning { .. } => {} + AppState::Login { .. } | AppState::GitWarning { .. } => {} } } fn dispatch_codex_event(&mut self, event: Event) { match &mut self.app_state { AppState::Chat { widget } => widget.handle_codex_event(event), - AppState::GitWarning { .. } => {} + AppState::Login { .. } | AppState::GitWarning { .. 
} => {} } } } diff --git a/codex-rs/tui/src/bottom_pane/command_popup.rs b/codex-rs/tui/src/bottom_pane/command_popup.rs index 505a4bc69..0dcb98865 100644 --- a/codex-rs/tui/src/bottom_pane/command_popup.rs +++ b/codex-rs/tui/src/bottom_pane/command_popup.rs @@ -4,6 +4,7 @@ use ratatui::buffer::Buffer; use ratatui::layout::Rect; use ratatui::style::Color; use ratatui::style::Style; +use ratatui::style::Stylize; use ratatui::widgets::Block; use ratatui::widgets::BorderType; use ratatui::widgets::Borders; @@ -147,8 +148,6 @@ impl CommandPopup { impl WidgetRef for CommandPopup { fn render_ref(&self, area: Rect, buf: &mut Buffer) { - let style = Style::default().bg(Color::Blue).fg(Color::White); - let matches = self.filtered_commands(); let mut rows: Vec = Vec::new(); @@ -157,21 +156,25 @@ impl WidgetRef for CommandPopup { if visible_matches.is_empty() { rows.push(Row::new(vec![ - Cell::from("").style(style), - Cell::from("No matching commands").style(style.add_modifier(Modifier::ITALIC)), + Cell::from(""), + Cell::from("No matching commands").add_modifier(Modifier::ITALIC), ])); } else { + let default_style = Style::default(); + let command_style = Style::default().fg(Color::LightBlue); for (idx, cmd) in visible_matches.iter().enumerate() { - let highlight = Style::default().bg(Color::White).fg(Color::Blue); - let cmd_style = if Some(idx) == self.selected_idx { - highlight + let (cmd_style, desc_style) = if Some(idx) == self.selected_idx { + ( + command_style.bg(Color::DarkGray), + default_style.bg(Color::DarkGray), + ) } else { - style + (command_style, default_style) }; rows.push(Row::new(vec![ - Cell::from(cmd.command().to_string()).style(cmd_style), - Cell::from(cmd.description().to_string()).style(style), + Cell::from(format!("/{}", cmd.command())).style(cmd_style), + Cell::from(cmd.description().to_string()).style(desc_style), ])); } } @@ -182,13 +185,11 @@ impl WidgetRef for CommandPopup { rows, [Constraint::Length(FIRST_COLUMN_WIDTH), Constraint::Min(10)], ) - .style(style) - .column_spacing(1) + .column_spacing(0) .block( Block::default() .borders(Borders::ALL) - .border_type(BorderType::Rounded) - .style(style), + .border_type(BorderType::Rounded), ); table.render(area, buf); diff --git a/codex-rs/tui/src/cell_widget.rs b/codex-rs/tui/src/cell_widget.rs new file mode 100644 index 000000000..8acdc0553 --- /dev/null +++ b/codex-rs/tui/src/cell_widget.rs @@ -0,0 +1,20 @@ +use ratatui::prelude::*; + +/// Trait implemented by every type that can live inside the conversation +/// history list. It provides two primitives that the parent scroll-view +/// needs: how *tall* the widget is at a given width and how to render an +/// arbitrary contiguous *window* of that widget. +/// +/// The `first_visible_line` argument to [`render_window`] allows partial +/// rendering when the top of the widget is scrolled off-screen. The caller +/// guarantees that `first_visible_line + area.height as usize` never exceeds +/// the total height previously returned by [`height`]. +pub(crate) trait CellWidget { + /// Total height measured in wrapped terminal lines when drawn with the + /// given *content* width (no scrollbar column included). + fn height(&self, width: u16) -> usize; + + /// Render a *window* that starts `first_visible_line` lines below the top + /// of the widget. The window’s size is given by `area`. 
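+    ///
+    /// For example, a cell measuring 10 lines at the current width whose top
+    /// three lines are scrolled off-screen is drawn with
+    /// `first_visible_line == 3` and an `area` at most seven rows tall.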
+ fn render_window(&self, first_visible_line: usize, area: Rect, buf: &mut Buffer); +} diff --git a/codex-rs/tui/src/chatwidget.rs b/codex-rs/tui/src/chatwidget.rs index 189f39944..bd5197c73 100644 --- a/codex-rs/tui/src/chatwidget.rs +++ b/codex-rs/tui/src/chatwidget.rs @@ -207,11 +207,6 @@ impl ChatWidget<'_> { self.conversation_history.scroll_to_bottom(); } - pub(crate) fn clear_conversation_history(&mut self) { - self.conversation_history.clear(); - self.request_redraw(); - } - pub(crate) fn handle_codex_event(&mut self, event: Event) { let Event { id, msg } = event; match msg { @@ -239,9 +234,11 @@ impl ChatWidget<'_> { self.request_redraw(); } EventMsg::AgentReasoning(AgentReasoningEvent { text }) => { - self.conversation_history - .add_agent_reasoning(&self.config, text); - self.request_redraw(); + if !self.config.hide_agent_reasoning { + self.conversation_history + .add_agent_reasoning(&self.config, text); + self.request_redraw(); + } } EventMsg::TaskStarted => { self.bottom_pane.set_task_running(true); @@ -343,11 +340,9 @@ impl ChatWidget<'_> { .add_active_mcp_tool_call(call_id, server, tool, arguments); self.request_redraw(); } - EventMsg::McpToolCallEnd(McpToolCallEndEvent { - call_id, - success, - result, - }) => { + EventMsg::McpToolCallEnd(mcp_tool_call_end_event) => { + let success = mcp_tool_call_end_event.is_success(); + let McpToolCallEndEvent { call_id, result } = mcp_tool_call_end_event; self.conversation_history .record_completed_mcp_tool_call(call_id, success, result); self.request_redraw(); diff --git a/codex-rs/tui/src/citation_regex.rs b/codex-rs/tui/src/citation_regex.rs index 7cda1ef11..e5355ec2b 100644 --- a/codex-rs/tui/src/citation_regex.rs +++ b/codex-rs/tui/src/citation_regex.rs @@ -1,6 +1,6 @@ #![allow(clippy::expect_used)] -use regex::Regex; +use regex_lite::Regex; // This is defined in its own file so we can limit the scope of // `allow(clippy::expect_used)` because we cannot scope it to the `lazy_static!` diff --git a/codex-rs/tui/src/cli.rs b/codex-rs/tui/src/cli.rs index f077d2674..4abd68414 100644 --- a/codex-rs/tui/src/cli.rs +++ b/codex-rs/tui/src/cli.rs @@ -1,5 +1,6 @@ use clap::Parser; use codex_common::ApprovalModeCliArg; +use codex_common::CliConfigOverrides; use codex_common::SandboxPermissionOption; use std::path::PathBuf; @@ -40,7 +41,6 @@ pub struct Cli { #[arg(long = "skip-git-repo-check", default_value_t = false)] pub skip_git_repo_check: bool, - /// Disable server‑side response storage (sends the full conversation context with every request) - #[arg(long = "disable-response-storage", default_value_t = false)] - pub disable_response_storage: bool, + #[clap(skip)] + pub config_overrides: CliConfigOverrides, } diff --git a/codex-rs/tui/src/conversation_history_widget.rs b/codex-rs/tui/src/conversation_history_widget.rs index 83d5ebc49..714ac074a 100644 --- a/codex-rs/tui/src/conversation_history_widget.rs +++ b/codex-rs/tui/src/conversation_history_widget.rs @@ -1,3 +1,4 @@ +use crate::cell_widget::CellWidget; use crate::history_cell::CommandOutput; use crate::history_cell::HistoryCell; use crate::history_cell::PatchEventType; @@ -236,11 +237,7 @@ impl ConversationHistoryWidget { fn add_to_history(&mut self, cell: HistoryCell) { let width = self.cached_width.get(); - let count = if width > 0 { - wrapped_line_count_for_cell(&cell, width) - } else { - 0 - }; + let count = if width > 0 { cell.height(width) } else { 0 }; self.entries.push(Entry { cell, @@ -248,12 +245,6 @@ impl ConversationHistoryWidget { }); } - /// Remove all history entries 
and reset scrolling. - pub fn clear(&mut self) { - self.entries.clear(); - self.scroll_position = usize::MAX; - } - pub fn record_completed_exec_command( &mut self, call_id: String, @@ -284,9 +275,7 @@ impl ConversationHistoryWidget { // Update cached line count. if width > 0 { - entry - .line_count - .set(wrapped_line_count_for_cell(cell, width)); + entry.line_count.set(cell.height(width)); } break; } @@ -298,20 +287,12 @@ impl ConversationHistoryWidget { &mut self, call_id: String, success: bool, - result: Option, + result: Result, ) { - // Convert result into serde_json::Value early so we don't have to - // worry about lifetimes inside the match arm. - let result_val = result.map(|r| { - serde_json::to_value(r) - .unwrap_or_else(|_| serde_json::Value::String("".into())) - }); - let width = self.cached_width.get(); for entry in self.entries.iter_mut() { if let HistoryCell::ActiveMcpToolCall { call_id: history_id, - fq_tool_name, invocation, start, .. @@ -319,18 +300,16 @@ impl ConversationHistoryWidget { { if &call_id == history_id { let completed = HistoryCell::new_completed_mcp_tool_call( - fq_tool_name.clone(), + width, invocation.clone(), *start, success, - result_val, + result, ); entry.cell = completed; if width > 0 { - entry - .line_count - .set(wrapped_line_count_for_cell(&entry.cell, width)); + entry.line_count.set(entry.cell.height(width)); } break; @@ -378,7 +357,7 @@ impl WidgetRef for ConversationHistoryWidget { let mut num_lines: usize = 0; for entry in &self.entries { - let count = wrapped_line_count_for_cell(&entry.cell, effective_width); + let count = entry.cell.height(effective_width); num_lines += count; entry.line_count.set(count); } @@ -398,78 +377,68 @@ impl WidgetRef for ConversationHistoryWidget { }; // ------------------------------------------------------------------ - // Build a *window* into the history so we only clone the `Line`s that - // may actually be visible in this frame. We still hand the slice off - // to a `Paragraph` with an additional scroll offset to avoid slicing - // inside a wrapped line (we don’t have per-subline granularity). + // Render order: + // 1. Clear full widget area (avoid artifacts from prior frame). + // 2. Draw the surrounding Block (border and title). + // 3. Render *each* visible HistoryCell into its own sub-Rect while + // respecting partial visibility at the top and bottom. + // 4. Draw the scrollbar track / thumb in the reserved column. // ------------------------------------------------------------------ - // Find the first entry that intersects the current scroll position. - let mut cumulative = 0usize; - let mut first_idx = 0usize; - for (idx, entry) in self.entries.iter().enumerate() { - let next = cumulative + entry.line_count.get(); - if next > scroll_pos { - first_idx = idx; - break; - } - cumulative = next; - } + // Clear entire widget area first. + Clear.render(area, buf); - let offset_into_first = scroll_pos - cumulative; + // Draw border + title. + block.render(area, buf); + + // ------------------------------------------------------------------ + // Calculate which cells are visible for the current scroll position + // and paint them one by one. + // ------------------------------------------------------------------ - // Collect enough raw lines from `first_idx` onward to cover the - // viewport. We may fetch *slightly* more than necessary (whole cells) - // but never the entire history. 
@@ -378,7 +357,7 @@ impl WidgetRef for ConversationHistoryWidget {
         let mut num_lines: usize = 0;
 
         for entry in &self.entries {
-            let count = wrapped_line_count_for_cell(&entry.cell, effective_width);
+            let count = entry.cell.height(effective_width);
             num_lines += count;
             entry.line_count.set(count);
         }
@@ -398,78 +377,68 @@ impl WidgetRef for ConversationHistoryWidget {
         };
 
         // ------------------------------------------------------------------
-        // Build a *window* into the history so we only clone the `Line`s that
-        // may actually be visible in this frame. We still hand the slice off
-        // to a `Paragraph` with an additional scroll offset to avoid slicing
-        // inside a wrapped line (we don’t have per-subline granularity).
+        // Render order:
+        //   1. Clear full widget area (avoid artifacts from prior frame).
+        //   2. Draw the surrounding Block (border and title).
+        //   3. Render *each* visible HistoryCell into its own sub-Rect while
+        //      respecting partial visibility at the top and bottom.
+        //   4. Draw the scrollbar track / thumb in the reserved column.
         // ------------------------------------------------------------------
 
-        // Find the first entry that intersects the current scroll position.
-        let mut cumulative = 0usize;
-        let mut first_idx = 0usize;
-        for (idx, entry) in self.entries.iter().enumerate() {
-            let next = cumulative + entry.line_count.get();
-            if next > scroll_pos {
-                first_idx = idx;
-                break;
-            }
-            cumulative = next;
-        }
+        // Clear entire widget area first.
+        Clear.render(area, buf);
 
-        let offset_into_first = scroll_pos - cumulative;
+        // Draw border + title.
+        block.render(area, buf);
+
+        // ------------------------------------------------------------------
+        // Calculate which cells are visible for the current scroll position
+        // and paint them one by one.
+        // ------------------------------------------------------------------
 
-        // Collect enough raw lines from `first_idx` onward to cover the
-        // viewport. We may fetch *slightly* more than necessary (whole cells)
-        // but never the entire history.
-        let mut collected_wrapped = 0usize;
-        let mut visible_lines: Vec<Line<'static>> = Vec::new();
+        let mut y_cursor = inner.y; // first line inside viewport
+        let mut remaining_height = inner.height as usize;
+        let mut lines_to_skip = scroll_pos; // number of wrapped lines to skip (above viewport)
 
-        for entry in &self.entries[first_idx..] {
-            visible_lines.extend(entry.cell.lines().iter().cloned());
-            collected_wrapped += entry.line_count.get();
-            if collected_wrapped >= offset_into_first + viewport_height {
-                break;
+        for entry in &self.entries {
+            let cell_height = entry.line_count.get();
+
+            // Completely above viewport? Skip whole cell.
+            if lines_to_skip >= cell_height {
+                lines_to_skip -= cell_height;
+                continue;
             }
-        }
+            // Determine how much of this cell is visible.
+            let visible_height = (cell_height - lines_to_skip).min(remaining_height);
 
-        // Build the Paragraph with wrapping enabled so long lines are not
-        // clipped. Apply vertical scroll so that `offset_into_first` wrapped
-        // lines are hidden at the top.
-        // ------------------------------------------------------------------
-        // Render order:
-        //   1. Clear the whole widget area so we do not leave behind any glyphs
-        //      from the previous frame.
-        //   2. Draw the surrounding Block (border and title).
-        //   3. Draw the Paragraph inside the Block, **leaving the right-most
-        //      column free** for the scrollbar.
-        //   4. Finally draw the scrollbar (if needed).
-        // ------------------------------------------------------------------
+            if visible_height == 0 {
+                break; // no space left
+            }
 
-        // Clear the widget area to avoid visual artifacts from previous frames.
-        Clear.render(area, buf);
+            let cell_rect = Rect {
+                x: inner.x,
+                y: y_cursor,
+                width: effective_width,
+                height: visible_height as u16,
+            };
 
-        // Draw the outer border and title first so the Paragraph does not
-        // overwrite it.
-        block.render(area, buf);
+            entry.cell.render_window(lines_to_skip, cell_rect, buf);
 
-        // Area available for text after accounting for the scrollbar.
-        let text_area = Rect {
-            x: inner.x,
-            y: inner.y,
-            width: effective_width,
-            height: inner.height,
-        };
+            // Advance cursor inside viewport.
+            y_cursor += visible_height as u16;
+            remaining_height -= visible_height;
 
-        let paragraph = Paragraph::new(visible_lines)
-            .wrap(wrap_cfg())
-            .scroll((offset_into_first as u16, 0));
+            // After the first (possibly partially skipped) cell, we no longer
+            // need to skip lines at the top.
+            lines_to_skip = 0;
 
-        paragraph.render(text_area, buf);
+            if remaining_height == 0 {
+                break; // viewport filled
+            }
+        }
 
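The windowing arithmetic in this loop is the subtle part of the rewrite: only the first visible cell may be clipped at the top, and only the last at the bottom. The same logic restated as a pure function with a worked example (the helper name and shape are illustrative; the widget keeps this state inline):

```rust
/// Given each cell's wrapped height, the scroll offset, and the viewport
/// height, yield `(cell_index, first_visible_line, visible_height)` for every
/// cell intersecting the viewport.
fn visible_cells(heights: &[usize], scroll_pos: usize, viewport: usize) -> Vec<(usize, usize, usize)> {
    let mut lines_to_skip = scroll_pos;
    let mut remaining = viewport;
    let mut out = Vec::new();
    for (idx, &h) in heights.iter().enumerate() {
        // Cell lies entirely above the viewport: consume the skip budget.
        if lines_to_skip >= h {
            lines_to_skip -= h;
            continue;
        }
        // Clip at the top (first visible cell) and at the bottom (viewport end).
        let visible = (h - lines_to_skip).min(remaining);
        if visible == 0 {
            break;
        }
        out.push((idx, lines_to_skip, visible));
        remaining -= visible;
        lines_to_skip = 0; // only the first visible cell is top-clipped
        if remaining == 0 {
            break;
        }
    }
    out
}

fn main() {
    // Cells of height 4, 2 and 5, scrolled down 5 lines, in a 4-line viewport:
    // the 4-line cell is skipped entirely, the 2-line cell shows only its last
    // line, and the 5-line cell fills the remaining 3 rows.
    assert_eq!(visible_cells(&[4, 2, 5], 5, 4), vec![(1, 1, 1), (2, 0, 3)]);
}
```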
-        // Always render a scrollbar *track* so that the reserved column is
-        // visually filled, even when the content fits within the viewport.
-        // We only draw the *thumb* when the content actually overflows.
+        // Always render a scrollbar *track* so the reserved column is filled.
         let overflow = num_lines.saturating_sub(viewport_height);
 
         let mut scroll_state = ScrollbarState::default()
@@ -521,15 +490,6 @@ impl WidgetRef for ConversationHistoryWidget {
 /// Common [`Wrap`] configuration used for both measurement and rendering so
 /// they stay in sync.
 #[inline]
-const fn wrap_cfg() -> ratatui::widgets::Wrap {
+pub(crate) const fn wrap_cfg() -> ratatui::widgets::Wrap {
     ratatui::widgets::Wrap { trim: false }
 }
-
-/// Returns the wrapped line count for `cell` at the given `width` using the
-/// same wrapping rules that `ConversationHistoryWidget` uses during
-/// rendering.
-fn wrapped_line_count_for_cell(cell: &HistoryCell, width: u16) -> usize {
-    Paragraph::new(cell.lines().clone())
-        .wrap(wrap_cfg())
-        .line_count(width)
-}
diff --git a/codex-rs/tui/src/history_cell.rs b/codex-rs/tui/src/history_cell.rs
index fab943272..481576b5b 100644
--- a/codex-rs/tui/src/history_cell.rs
+++ b/codex-rs/tui/src/history_cell.rs
@@ -1,21 +1,36 @@
+use crate::cell_widget::CellWidget;
+use crate::exec_command::escape_command;
+use crate::markdown::append_markdown;
+use crate::text_block::TextBlock;
+use crate::text_formatting::format_and_truncate_tool_result;
+use base64::Engine;
 use codex_ansi_escape::ansi_escape_line;
 use codex_common::elapsed::format_duration;
+use codex_core::WireApi;
 use codex_core::config::Config;
+use codex_core::model_supports_reasoning_summaries;
 use codex_core::protocol::FileChange;
 use codex_core::protocol::SessionConfiguredEvent;
+use image::DynamicImage;
+use image::GenericImageView;
+use image::ImageReader;
+use lazy_static::lazy_static;
+use mcp_types::EmbeddedResourceResource;
 use ratatui::prelude::*;
 use ratatui::style::Color;
 use ratatui::style::Modifier;
 use ratatui::style::Style;
 use ratatui::text::Line as RtLine;
 use ratatui::text::Span as RtSpan;
+use ratatui_image::Image as TuiImage;
+use ratatui_image::Resize as ImgResize;
+use ratatui_image::picker::ProtocolType;
 use std::collections::HashMap;
+use std::io::Cursor;
 use std::path::PathBuf;
 use std::time::Duration;
 use std::time::Instant;
-
-use crate::exec_command::escape_command;
-use crate::markdown::append_markdown;
+use tracing::error;
 
 pub(crate) struct CommandOutput {
     pub(crate) exit_code: i32,
@@ -34,16 +49,16 @@ pub(crate) enum PatchEventType {
 /// scrollable list.
 pub(crate) enum HistoryCell {
     /// Welcome message.
-    WelcomeMessage { lines: Vec<Line<'static>> },
+    WelcomeMessage { view: TextBlock },
 
     /// Message from the user.
-    UserPrompt { lines: Vec<Line<'static>> },
+    UserPrompt { view: TextBlock },
 
     /// Message from the agent.
-    AgentMessage { lines: Vec<Line<'static>> },
+    AgentMessage { view: TextBlock },
 
     /// Reasoning event from the agent.
-    AgentReasoning { lines: Vec<Line<'static>> },
+    AgentReasoning { view: TextBlock },
 
     /// An exec tool call that has not finished yet.
     ActiveExecCommand {
@@ -51,45 +66,53 @@ pub(crate) enum HistoryCell {
         /// The shell command, escaped and formatted.
         command: String,
         start: Instant,
-        lines: Vec<Line<'static>>,
+        view: TextBlock,
     },
 
     /// Completed exec tool call.
-    CompletedExecCommand { lines: Vec<Line<'static>> },
+    CompletedExecCommand { view: TextBlock },
 
     /// An MCP tool call that has not finished yet.
     ActiveMcpToolCall {
         call_id: String,
-        /// `server.tool` fully-qualified name so we can show a concise label
-        fq_tool_name: String,
-        /// Formatted invocation that mirrors the `$ cmd ...` style of exec
-        /// commands. We keep this around so the completed state can reuse the
-        /// exact same text without re-formatting.
-        invocation: String,
+        /// Formatted line that shows the command name and arguments
+        invocation: Line<'static>,
         start: Instant,
-        lines: Vec<Line<'static>>,
+        view: TextBlock,
    },
 
-    /// Completed MCP tool call.
-    CompletedMcpToolCall { lines: Vec<Line<'static>> },
+    /// Completed MCP tool call where we show the result serialized as JSON.
+    CompletedMcpToolCall { view: TextBlock },
+
+    /// Completed MCP tool call where the result is an image.
+    /// Admittedly, [mcp_types::CallToolResult] can have multiple content types,
+    /// which could be a mix of text and images, so we need to tighten this up.
+    // NOTE: For image output we keep the *original* image around and lazily
+    // compute a resized copy that fits the available cell width. Caching the
+    // resized version avoids doing the potentially expensive rescale twice
+    // because the scroll-view first calls `height()` for layouting and then
+    // `render_window()` for painting.
+    CompletedMcpToolCallWithImageOutput {
+        image: DynamicImage,
+        /// Cached data derived from the current terminal width. The cache is
+        /// invalidated whenever the width changes (e.g. when the user
+        /// resizes the window).
+        render_cache: std::cell::RefCell<Option<(u16, DynamicImage)>>,
+    },
 
-    /// Background event
-    BackgroundEvent { lines: Vec<Line<'static>> },
+    /// Background event.
+    BackgroundEvent { view: TextBlock },
 
     /// Error event from the backend.
-    ErrorEvent { lines: Vec<Line<'static>> },
+    ErrorEvent { view: TextBlock },
 
-    /// Info describing the newly‑initialized session.
-    SessionInfo { lines: Vec<Line<'static>> },
+    /// Info describing the newly-initialized session.
+    SessionInfo { view: TextBlock },
 
     /// A pending code patch that is awaiting user approval. Mirrors the
     /// behaviour of `ActiveExecCommand` so the user sees *what* patch the
     /// model wants to apply before being prompted to approve or deny it.
-    PendingPatch {
-        /// Identifier so that a future `PatchApplyEnd` can update the entry
-        /// with the final status (not yet implemented).
-        lines: Vec<Line<'static>>,
-    },
+    PendingPatch { view: TextBlock },
 }
 
 const TOOL_CALL_MAX_LINES: usize = 5;
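The `render_cache` field above is a single-slot memo keyed by terminal width: `height()` and `render_window()` run back-to-back with the same width each frame, so the expensive rescale should happen once rather than twice. The pattern in isolation, with illustrative names and a string standing in for the resized image:

```rust
use std::cell::RefCell;

/// Single-slot cache keyed by width, in the spirit of `render_cache` above.
/// Illustrative only; the real cell stores its cached data inline.
struct WidthCache<T> {
    slot: RefCell<Option<(u16, T)>>,
}

impl<T: Clone> WidthCache<T> {
    fn new() -> Self {
        Self { slot: RefCell::new(None) }
    }

    /// Return the value for `width`, recomputing only when the width changes.
    fn get_or_compute(&self, width: u16, compute: impl FnOnce() -> T) -> T {
        let mut slot = self.slot.borrow_mut();
        let hit = matches!(&*slot, Some((w, _)) if *w == width);
        if !hit {
            // Cache miss: first use, or the terminal was resized.
            *slot = Some((width, compute()));
        }
        slot.as_ref().map(|(_, v)| v.clone()).expect("slot was just filled")
    }
}

fn main() {
    let cache = WidthCache::new();
    // The first call computes; the second call at the same width must not.
    let a = cache.get_or_compute(80, || "resized for 80 cols".to_string());
    let b = cache.get_or_compute(80, || unreachable!("expected a cache hit"));
    assert_eq!(a, b);
}
```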
@@ -107,10 +130,13 @@ impl HistoryCell {
             history_entry_count: _,
         } = event;
         if is_first_event {
+            const VERSION: &str = env!("CARGO_PKG_VERSION");
+
             let mut lines: Vec<Line<'static>> = vec![
                 Line::from(vec![
                     "OpenAI ".into(),
                     "Codex".bold(),
+                    format!(" v{}", VERSION).into(),
                     " (research preview)".dim(),
                 ]),
                 Line::from(""),
@@ -121,20 +147,36 @@
                 ]),
             ];
 
-            let entries = vec![
+            let mut entries = vec![
                 ("workdir", config.cwd.display().to_string()),
                 ("model", config.model.clone()),
                 ("provider", config.model_provider_id.clone()),
                 ("approval", format!("{:?}", config.approval_policy)),
                 ("sandbox", format!("{:?}", config.sandbox_policy)),
             ];
+            if config.model_provider.wire_api == WireApi::Responses
+                && model_supports_reasoning_summaries(&config.model)
+            {
+                entries.push((
+                    "reasoning effort",
+                    config.model_reasoning_effort.to_string(),
+                ));
+                entries.push((
+                    "reasoning summaries",
+                    config.model_reasoning_summary.to_string(),
+                ));
+            }
             for (key, value) in entries {
                 lines.push(Line::from(vec![format!("{key}: ").bold(), value.into()]));
             }
             lines.push(Line::from(""));
 
-            HistoryCell::WelcomeMessage { lines }
+            HistoryCell::WelcomeMessage {
+                view: TextBlock::new(lines),
+            }
         } else if config.model == model {
-            HistoryCell::SessionInfo { lines: vec![] }
+            HistoryCell::SessionInfo {
+                view: TextBlock::new(Vec::new()),
+            }
         } else {
             let lines = vec![
                 Line::from("model changed:".magenta().bold()),
@@ -142,7 +184,9 @@
                 Line::from(format!("used: {}", model)),
                 Line::from(""),
             ];
-            HistoryCell::SessionInfo { lines }
+            HistoryCell::SessionInfo {
+                view: TextBlock::new(lines),
+            }
         }
     }
 
@@ -152,7 +196,9 @@
         lines.extend(message.lines().map(|l| Line::from(l.to_string())));
         lines.push(Line::from(""));
 
-        HistoryCell::UserPrompt { lines }
+        HistoryCell::UserPrompt {
+            view: TextBlock::new(lines),
+        }
     }
 
     pub(crate) fn new_agent_message(config: &Config, message: String) -> Self {
@@ -161,7 +207,9 @@
         append_markdown(&message, &mut lines, config);
         lines.push(Line::from(""));
 
-        HistoryCell::AgentMessage { lines }
+        HistoryCell::AgentMessage {
+            view: TextBlock::new(lines),
+        }
     }
 
     pub(crate) fn new_agent_reasoning(config: &Config, text: String) -> Self {
@@ -170,7 +218,9 @@
         append_markdown(&text, &mut lines, config);
         lines.push(Line::from(""));
 
-        HistoryCell::AgentReasoning { lines }
+        HistoryCell::AgentReasoning {
+            view: TextBlock::new(lines),
+        }
     }
 
     pub(crate) fn new_active_exec_command(call_id: String, command: Vec<String>) -> Self {
@@ -187,7 +237,7 @@
             call_id,
             command: command_escaped,
             start,
-            lines,
+            view: TextBlock::new(lines),
         }
     }
 
@@ -226,7 +276,9 @@
         }
         lines.push(Line::from(""));
 
-        HistoryCell::CompletedExecCommand { lines }
+        HistoryCell::CompletedExecCommand {
+            view: TextBlock::new(lines),
+        }
     }
 
     pub(crate) fn new_active_mcp_tool_call(
@@ -235,8 +287,6 @@
         tool: String,
         arguments: Option<serde_json::Value>,
     ) -> Self {
-        let fq_tool_name = format!("{server}.{tool}");
-
         // Format the arguments as compact JSON so they roughly fit on one
         // line. If there are no arguments we keep it empty so the invocation
        // mirrors a function-style call.
@@ -248,63 +298,155 @@
             })
             .unwrap_or_default();
 
-        let invocation = if args_str.is_empty() {
-            format!("{fq_tool_name}()")
-        } else {
-            format!("{fq_tool_name}({args_str})")
-        };
+        let invocation_spans = vec![
+            Span::styled(server, Style::default().fg(Color::Blue)),
+            Span::raw("."),
+            Span::styled(tool, Style::default().fg(Color::Blue)),
+            Span::raw("("),
+            Span::styled(args_str, Style::default().fg(Color::Gray)),
+            Span::raw(")"),
+        ];
+        let invocation = Line::from(invocation_spans);
 
         let start = Instant::now();
         let title_line = Line::from(vec!["tool".magenta(), " running...".dim()]);
-        let lines: Vec<Line<'static>> = vec![
-            title_line,
-            Line::from(format!("$ {invocation}")),
-            Line::from(""),
-        ];
+        let lines: Vec<Line<'static>> = vec![title_line, invocation.clone(), Line::from("")];
 
         HistoryCell::ActiveMcpToolCall {
             call_id,
-            fq_tool_name,
             invocation,
             start,
-            lines,
+            view: TextBlock::new(lines),
+        }
+    }
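The helper that follows decodes an MCP image payload in three fallible steps: base64 decode, format sniffing, image decode. The same chain as a standalone function (illustrative; the real helper below also logs each failure via `tracing::error!` and wraps the result in a `HistoryCell`):

```rust
use base64::Engine;
use image::{DynamicImage, ImageReader};
use std::io::Cursor;

/// base64 payload -> raw bytes -> format sniffing -> decoded image.
fn decode_base64_image(data: &str) -> Option<DynamicImage> {
    let raw = base64::engine::general_purpose::STANDARD.decode(data).ok()?;
    // `with_guessed_format` sniffs magic bytes, so the MCP server does not
    // have to declare whether the payload is PNG, JPEG, etc.
    let reader = ImageReader::new(Cursor::new(raw)).with_guessed_format().ok()?;
    reader.decode().ok()
}
```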
+
+    /// If the first content is an image, return a new cell with the image.
+    /// TODO(rgwood-dd): Handle images properly even if they're not the first result.
+    fn try_new_completed_mcp_tool_call_with_image_output(
+        result: &Result<mcp_types::CallToolResult, String>,
+    ) -> Option<Self> {
+        match result {
+            Ok(mcp_types::CallToolResult { content, .. }) => {
+                if let Some(mcp_types::CallToolResultContent::ImageContent(image)) = content.first()
+                {
+                    let raw_data =
+                        match base64::engine::general_purpose::STANDARD.decode(&image.data) {
+                            Ok(data) => data,
+                            Err(e) => {
+                                error!("Failed to decode image data: {e}");
+                                return None;
+                            }
+                        };
+                    let reader = match ImageReader::new(Cursor::new(raw_data)).with_guessed_format()
+                    {
+                        Ok(reader) => reader,
+                        Err(e) => {
+                            error!("Failed to guess image format: {e}");
+                            return None;
+                        }
+                    };
+
+                    let image = match reader.decode() {
+                        Ok(image) => image,
+                        Err(e) => {
+                            error!("Image decoding failed: {e}");
+                            return None;
+                        }
+                    };
+
+                    Some(HistoryCell::CompletedMcpToolCallWithImageOutput {
+                        image,
+                        render_cache: std::cell::RefCell::new(None),
+                    })
+                } else {
+                    None
+                }
+            }
+            _ => None,
+        }
+    }
 
     pub(crate) fn new_completed_mcp_tool_call(
-        fq_tool_name: String,
-        invocation: String,
+        num_cols: u16,
+        invocation: Line<'static>,
         start: Instant,
         success: bool,
-        result: Option<mcp_types::CallToolResult>,
+        result: Result<mcp_types::CallToolResult, String>,
     ) -> Self {
+        if let Some(cell) = Self::try_new_completed_mcp_tool_call_with_image_output(&result) {
+            return cell;
+        }
+
         let duration = format_duration(start.elapsed());
         let status_str = if success { "success" } else { "failed" };
         let title_line = Line::from(vec![
             "tool".magenta(),
-            format!(" {fq_tool_name} ({status_str}, duration: {})", duration).dim(),
+            " ".into(),
+            if success {
+                status_str.green()
+            } else {
+                status_str.red()
+            },
+            format!(", duration: {duration}").gray(),
         ]);
 
         let mut lines: Vec<Line<'static>> = Vec::new();
         lines.push(title_line);
-        lines.push(Line::from(format!("$ {invocation}")));
-
-        if let Some(res_val) = result {
-            let json_pretty =
-                serde_json::to_string_pretty(&res_val).unwrap_or_else(|_| res_val.to_string());
-            let mut iter = json_pretty.lines();
-            for raw in iter.by_ref().take(TOOL_CALL_MAX_LINES) {
-                lines.push(Line::from(raw.to_string()).dim());
+        lines.push(invocation);
+
+        match result {
+            Ok(mcp_types::CallToolResult { content, .. }) => {
+                if !content.is_empty() {
+                    lines.push(Line::from(""));
+
+                    for tool_call_result in content {
+                        let line_text = match tool_call_result {
+                            mcp_types::CallToolResultContent::TextContent(text) => {
+                                format_and_truncate_tool_result(
+                                    &text.text,
+                                    TOOL_CALL_MAX_LINES,
+                                    num_cols as usize,
+                                )
+                            }
+                            mcp_types::CallToolResultContent::ImageContent(_) => {
+                                // TODO show images even if they're not the first result, will require a refactor of `CompletedMcpToolCall`
+                                "".to_string()
+                            }
+                            mcp_types::CallToolResultContent::AudioContent(_) => {
+                                "