diff --git a/.agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh b/.agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh index 0399ec3f..ebb1e45f 100755 --- a/.agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh +++ b/.agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh @@ -14,10 +14,10 @@ Runs the required pre-delivery checks from DEVELOPMENT.md: - bash scripts/ci/third-party-artifacts-audit.sh --strict - bash scripts/ci/completion-asset-audit.sh --strict - bash scripts/ci/completion-flag-parity-audit.sh --strict + - zsh -f tests/zsh/completion.test.zsh - cargo fmt --all -- --check - cargo clippy --all-targets --all-features -- -D warnings - cargo test --workspace - - zsh -f tests/zsh/completion.test.zsh Modes: (default) @@ -33,8 +33,6 @@ Environment: NILS_CLI_TEST_RUNNER=nextest Run `cargo nextest run --profile ci --workspace` and `cargo test --workspace --doc` instead of `cargo test --workspace`. - NILS_CLI_COVERAGE_DIR=target/coverage - Coverage output directory to create before checks. 
Exit codes: 0 all checks passed @@ -122,9 +120,9 @@ fi run bash scripts/ci/test-stale-audit.sh --strict run bash scripts/ci/third-party-artifacts-audit.sh --strict -coverage_dir="${NILS_CLI_COVERAGE_DIR:-target/coverage}" -run mkdir -p "$coverage_dir" run bash scripts/ci/completion-asset-audit.sh --strict +run bash scripts/ci/completion-flag-parity-audit.sh --strict +run zsh -f tests/zsh/completion.test.zsh run cargo fmt --all -- --check run cargo clippy --all-targets --all-features -- -D warnings if [[ "$test_runner" == "nextest" ]]; then @@ -133,7 +131,5 @@ if [[ "$test_runner" == "nextest" ]]; then else run cargo test --workspace fi -run bash scripts/ci/completion-flag-parity-audit.sh --strict -run zsh -f tests/zsh/completion.test.zsh echo "ok: all nils-cli checks passed" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 97ccde23..1dfc8b8a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -39,16 +39,10 @@ jobs: with: tool: cargo-nextest - - name: Third-party artifact audit - run: bash scripts/ci/third-party-artifacts-audit.sh --strict - - - name: Completion asset audit - run: bash scripts/ci/completion-asset-audit.sh --strict - - - name: Nils CLI checks (includes docs-placement-audit) + - name: Nils CLI checks (includes third-party-artifacts-audit, Completion asset audit, docs-hygiene-audit, test-stale-audit) env: NILS_CLI_TEST_RUNNER: nextest - run: xvfb-run -a ./.agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh + run: bash scripts/ci/nils-cli-checks-entrypoint.sh --xvfb - name: Publish JUnit report if: always() && hashFiles('target/nextest/ci/junit.xml') != '' @@ -104,16 +98,10 @@ jobs: with: tool: cargo-nextest - - name: Third-party artifact audit - run: /opt/homebrew/bin/bash scripts/ci/third-party-artifacts-audit.sh --strict - - - name: Completion asset audit - run: /opt/homebrew/bin/bash scripts/ci/completion-asset-audit.sh --strict - - - name: Nils CLI checks (includes 
docs-placement-audit) + - name: Nils CLI checks (includes third-party-artifacts-audit, Completion asset audit, docs-hygiene-audit, test-stale-audit) env: NILS_CLI_TEST_RUNNER: nextest - run: /opt/homebrew/bin/bash ./.agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh + run: /opt/homebrew/bin/bash scripts/ci/nils-cli-checks-entrypoint.sh - name: Publish JUnit report if: always() && hashFiles('target/nextest/ci/junit.xml') != '' diff --git a/BINARY_DEPENDENCIES.md b/BINARY_DEPENDENCIES.md index 101cee25..4ec1d782 100644 --- a/BINARY_DEPENDENCIES.md +++ b/BINARY_DEPENDENCIES.md @@ -76,6 +76,8 @@ These are repository scripts (not third-party packages): - Supporting utilities: - `scripts/generate-third-party-artifacts.sh` - `scripts/workspace-bins.sh` + - `scripts/ci/docs-placement-audit.sh` + - `scripts/ci/docs-hygiene-audit.sh` - `scripts/ci/coverage-summary.sh` - `scripts/ci/coverage-badge.sh` diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index a15c5807..051b82d8 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -44,6 +44,8 @@ ## Documentation placement - Canonical policy: `docs/specs/crate-docs-placement-policy.md`. +- Workspace retention inventory: `docs/specs/workspace-doc-retention-matrix-v1.md`. +- Completion obligations: `docs/specs/completion-coverage-matrix-v1.md`. - Stale-test lifecycle and reviewer checklist: `docs/runbooks/test-cleanup-governance.md`. - When Markdown files change, run: `bash scripts/ci/docs-placement-audit.sh --strict`. - Run generic Markdown lint checks: `bash scripts/ci/markdownlint-audit.sh --strict`. 
@@ -75,7 +77,7 @@ - Or run the single entrypoint for required checks: `./.agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh` (includes docs/completion/stale-test/third-party-artifact audits - plus fmt/clippy/tests; it pre-creates `target/coverage`, but still run coverage commands above) + plus fmt/clippy/tests; coverage commands above are still required explicitly) - Docs-only fast path: if every changed file is documentation-only (`*.md`, `docs/**`, `crates/*/docs/**`, plus root docs like `README.md`, `DEVELOPMENT.md`), run: - `./.agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh --docs-only` diff --git a/README.md b/README.md index 64061a4f..e2c6efba 100644 --- a/README.md +++ b/README.md @@ -52,7 +52,7 @@ Each crate is either a standalone CLI binary or a shared library used across the - [crates/agent-docs](crates/agent-docs): Deterministic policy-document resolver for Codex/agent workflows (`resolve`, `contexts`, `add`, `baseline`). -- [crates/codex-cli](crates/codex-cli): Provider-specific CLI for OpenAI/Codex workflows (auth, diagnostics, execution wrappers, Starship), +- [crates/codex-cli](crates/codex-cli): Provider-specific CLI for OpenAI/Codex workflows (auth, diagnostics, execution flows, Starship), with adapters over `nils-common::provider_runtime`. - [crates/gemini-cli](crates/gemini-cli): Provider-specific CLI lane for Gemini workflows, with adapters over `nils-common::provider_runtime`. @@ -75,12 +75,21 @@ Contributors should treat `nils-common` as the shared helper boundary for cross- Detailed scope, API examples, migration conventions, and non-goals are documented in [crates/nils-common/README.md](crates/nils-common/README.md). +Sprint-2 boundary freeze and extraction lane ownership are tracked in +[docs/specs/workspace-shared-crate-boundary-v1.md](docs/specs/workspace-shared-crate-boundary-v1.md). 
+ +Workspace doc retention scope and delete/keep decisions are tracked in +[docs/specs/workspace-doc-retention-matrix-v1.md](docs/specs/workspace-doc-retention-matrix-v1.md). + ## Shell wrappers and completions Canonical completion architecture and contributor validation live in [docs/runbooks/cli-completion-development-standard.md](docs/runbooks/cli-completion-development-standard.md). Use [DEVELOPMENT.md](DEVELOPMENT.md) for required delivery checks. +Completion obligation coverage is tracked in +[docs/specs/completion-coverage-matrix-v1.md](docs/specs/completion-coverage-matrix-v1.md). + Assets: - [completions/zsh/](completions/zsh/): zsh completions (plus `aliases.zsh`) diff --git a/crates/api-gql/tests/history.rs b/crates/api-gql/tests/history.rs index a5e5dac1..7f2ddc24 100644 --- a/crates/api-gql/tests/history.rs +++ b/crates/api-gql/tests/history.rs @@ -17,8 +17,7 @@ fn run_api_gql(cwd: &Path, args: &[&str]) -> CmdOutput { } fn run_api_gql_with(cwd: &Path, args: &[&str], envs: &[(&str, &str)]) -> CmdOutput { - let mut options = CmdOptions::default().with_cwd(cwd); - for key in [ + let mut options = CmdOptions::default().with_cwd(cwd).with_env_remove_many(&[ "GQL_HISTORY_ENABLED", "GQL_HISTORY_FILE", "GQL_HISTORY_LOG_URL_ENABLED", @@ -29,9 +28,7 @@ fn run_api_gql_with(cwd: &Path, args: &[&str], envs: &[(&str, &str)]) -> CmdOutp "ACCESS_TOKEN", "SERVICE_TOKEN", "GQL_SCHEMA_FILE", - ] { - options = options.with_env_remove(key); - } + ]); for (k, v) in envs { options = options.with_env(k, v); } diff --git a/crates/api-grpc/tests/integration.rs b/crates/api-grpc/tests/integration.rs index fe654947..a9364b67 100644 --- a/crates/api-grpc/tests/integration.rs +++ b/crates/api-grpc/tests/integration.rs @@ -11,29 +11,28 @@ fn api_grpc_bin() -> PathBuf { } fn run_api_grpc(cwd: &Path, args: &[&str], envs: &[(&str, &str)]) -> CmdOutput { - let mut options = CmdOptions::default().with_cwd(cwd); - for key in [ - "GRPCURL_BIN", - "GRPC_URL", - "GRPC_ENV_DEFAULT", - 
"GRPC_TOKEN_NAME", - "GRPC_HISTORY_ENABLED", - "GRPC_HISTORY_FILE", - "GRPC_HISTORY_LOG_URL_ENABLED", - "GRPC_JWT_VALIDATE_ENABLED", - "ACCESS_TOKEN", - "SERVICE_TOKEN", - "HTTP_PROXY", - "http_proxy", - "HTTPS_PROXY", - "https_proxy", - "ALL_PROXY", - "all_proxy", - ] { - options = options.with_env_remove(key); - } - options = options.with_env("NO_PROXY", "127.0.0.1,localhost"); - options = options.with_env("no_proxy", "127.0.0.1,localhost"); + let mut options = CmdOptions::default() + .with_cwd(cwd) + .with_env_remove_many(&[ + "GRPCURL_BIN", + "GRPC_URL", + "GRPC_ENV_DEFAULT", + "GRPC_TOKEN_NAME", + "GRPC_HISTORY_ENABLED", + "GRPC_HISTORY_FILE", + "GRPC_HISTORY_LOG_URL_ENABLED", + "GRPC_JWT_VALIDATE_ENABLED", + "ACCESS_TOKEN", + "SERVICE_TOKEN", + "HTTP_PROXY", + "http_proxy", + "HTTPS_PROXY", + "https_proxy", + "ALL_PROXY", + "all_proxy", + ]) + .with_env("NO_PROXY", "127.0.0.1,localhost") + .with_env("no_proxy", "127.0.0.1,localhost"); for (k, v) in envs { options = options.with_env(k, v); diff --git a/crates/api-rest/tests/history.rs b/crates/api-rest/tests/history.rs index 44aaf916..8d0912f5 100644 --- a/crates/api-rest/tests/history.rs +++ b/crates/api-rest/tests/history.rs @@ -13,28 +13,27 @@ fn api_rest_bin() -> std::path::PathBuf { } fn run_api_rest(cwd: &Path, args: &[&str], envs: &[(&str, &str)]) -> CmdOutput { - let mut options = CmdOptions::default().with_cwd(cwd); - for key in [ - "REST_URL", - "REST_TOKEN_NAME", - "REST_HISTORY_ENABLED", - "REST_HISTORY_FILE", - "REST_HISTORY_LOG_URL_ENABLED", - "REST_ENV_DEFAULT", - "REST_JWT_VALIDATE_ENABLED", - "ACCESS_TOKEN", - "SERVICE_TOKEN", - "HTTP_PROXY", - "http_proxy", - "HTTPS_PROXY", - "https_proxy", - "ALL_PROXY", - "all_proxy", - ] { - options = options.with_env_remove(key); - } - options = options.with_env("NO_PROXY", "127.0.0.1,localhost"); - options = options.with_env("no_proxy", "127.0.0.1,localhost"); + let mut options = CmdOptions::default() + .with_cwd(cwd) + .with_env_remove_many(&[ + 
"REST_URL", + "REST_TOKEN_NAME", + "REST_HISTORY_ENABLED", + "REST_HISTORY_FILE", + "REST_HISTORY_LOG_URL_ENABLED", + "REST_ENV_DEFAULT", + "REST_JWT_VALIDATE_ENABLED", + "ACCESS_TOKEN", + "SERVICE_TOKEN", + "HTTP_PROXY", + "http_proxy", + "HTTPS_PROXY", + "https_proxy", + "ALL_PROXY", + "all_proxy", + ]) + .with_env("NO_PROXY", "127.0.0.1,localhost") + .with_env("no_proxy", "127.0.0.1,localhost"); for (k, v) in envs { options = options.with_env(k, v); diff --git a/crates/api-test/tests/e2e.rs b/crates/api-test/tests/e2e.rs index f869e859..b5b7bcc7 100644 --- a/crates/api-test/tests/e2e.rs +++ b/crates/api-test/tests/e2e.rs @@ -19,15 +19,12 @@ fn run_api_test(cwd: &Path, args: &[&str]) -> CmdOutput { } fn run_api_test_with_env(cwd: &Path, args: &[&str], env: &[(&str, &str)]) -> CmdOutput { - let mut options = CmdOptions::default().with_cwd(cwd); - for key in [ + let mut options = CmdOptions::default().with_cwd(cwd).with_env_remove_many(&[ "ACCESS_TOKEN", "SERVICE_TOKEN", "REST_TOKEN_NAME", "GQL_JWT_NAME", - ] { - options = options.with_env_remove(key); - } + ]); for (k, v) in env { options = options.with_env(k, v); } diff --git a/crates/api-testing-core/src/graphql/schema_file.rs b/crates/api-testing-core/src/graphql/schema_file.rs index 270c8ecf..1759b7d5 100644 --- a/crates/api-testing-core/src/graphql/schema_file.rs +++ b/crates/api-testing-core/src/graphql/schema_file.rs @@ -1,6 +1,7 @@ use std::path::{Path, PathBuf}; use crate::{Result, env_file}; +use nils_common::env as shared_env; const FALLBACK_CANDIDATES: &[&str] = &[ "schema.gql", @@ -29,12 +30,7 @@ pub fn resolve_schema_path(setup_dir: &Path, schema_file_arg: Option<&str>) -> R .map(str::trim) .filter(|s| !s.is_empty()) .map(|s| s.to_string()) - .or_else(|| { - std::env::var("GQL_SCHEMA_FILE").ok().and_then(|s| { - let s = s.trim().to_string(); - (!s.is_empty()).then_some(s) - }) - }) + .or_else(|| shared_env::env_non_empty("GQL_SCHEMA_FILE")) .or_else(|| { let schema_local = 
setup_dir.join("schema.local.env"); let schema_env = setup_dir.join("schema.env"); @@ -69,16 +65,10 @@ pub fn resolve_schema_path(setup_dir: &Path, schema_file_arg: Option<&str>) -> R #[cfg(test)] mod tests { use super::*; + use nils_test_support::{EnvGuard, GlobalStateLock}; use pretty_assertions::assert_eq; use tempfile::TempDir; - use std::sync::{Mutex, OnceLock}; - - fn env_lock() -> &'static Mutex<()> { - static LOCK: OnceLock> = OnceLock::new(); - LOCK.get_or_init(|| Mutex::new(())) - } - fn write_file(path: &Path, contents: &str) { std::fs::create_dir_all(path.parent().expect("parent")).expect("mkdir"); std::fs::write(path, contents).expect("write"); @@ -101,10 +91,8 @@ mod tests { #[test] fn schema_file_env_var_is_used_when_no_arg() { - let _guard = env_lock().lock().expect("lock env"); - let old = std::env::var("GQL_SCHEMA_FILE").ok(); - // SAFETY: tests mutate process env while guarded by env_lock. - unsafe { std::env::set_var("GQL_SCHEMA_FILE", " schema.gql ") }; + let lock = GlobalStateLock::new(); + let _guard = EnvGuard::set(&lock, "GQL_SCHEMA_FILE", " schema.gql "); let tmp = TempDir::new().expect("tmp"); let setup_dir = std::fs::canonicalize(tmp.path()).expect("setup abs"); @@ -113,25 +101,12 @@ mod tests { let got = resolve_schema_path(&setup_dir, None).expect("path"); let expected = std::fs::canonicalize(setup_dir.join("schema.gql")).expect("abs"); assert_eq!(got, expected); - - match old { - Some(v) => { - // SAFETY: tests restore process env while guarded by env_lock. - unsafe { std::env::set_var("GQL_SCHEMA_FILE", v) }; - } - None => { - // SAFETY: tests restore process env while guarded by env_lock. - unsafe { std::env::remove_var("GQL_SCHEMA_FILE") }; - } - } } #[test] fn schema_file_schema_env_is_used_when_no_arg_or_env() { - let _guard = env_lock().lock().expect("lock env"); - let old = std::env::var("GQL_SCHEMA_FILE").ok(); - // SAFETY: tests mutate process env while guarded by env_lock. 
- unsafe { std::env::remove_var("GQL_SCHEMA_FILE") }; + let lock = GlobalStateLock::new(); + let _guard = EnvGuard::remove(&lock, "GQL_SCHEMA_FILE"); let tmp = TempDir::new().expect("tmp"); let setup_dir = std::fs::canonicalize(tmp.path()).expect("setup abs"); @@ -149,25 +124,12 @@ mod tests { let expected = std::fs::canonicalize(setup_dir.join("schemas/schema.graphql")).expect("abs"); assert_eq!(got, expected); - - match old { - Some(v) => { - // SAFETY: tests restore process env while guarded by env_lock. - unsafe { std::env::set_var("GQL_SCHEMA_FILE", v) }; - } - None => { - // SAFETY: tests restore process env while guarded by env_lock. - unsafe { std::env::remove_var("GQL_SCHEMA_FILE") }; - } - } } #[test] fn schema_file_falls_back_to_candidate_filenames() { - let _guard = env_lock().lock().expect("lock env"); - let old = std::env::var("GQL_SCHEMA_FILE").ok(); - // SAFETY: tests mutate process env while guarded by env_lock. - unsafe { std::env::remove_var("GQL_SCHEMA_FILE") }; + let lock = GlobalStateLock::new(); + let _guard = EnvGuard::remove(&lock, "GQL_SCHEMA_FILE"); let tmp = TempDir::new().expect("tmp"); let setup_dir = std::fs::canonicalize(tmp.path()).expect("setup abs"); @@ -177,25 +139,12 @@ mod tests { let got = resolve_schema_path(&setup_dir, None).expect("path"); let expected = std::fs::canonicalize(setup_dir.join("schema.gql")).expect("abs"); assert_eq!(got, expected); - - match old { - Some(v) => { - // SAFETY: tests restore process env while guarded by env_lock. - unsafe { std::env::set_var("GQL_SCHEMA_FILE", v) }; - } - None => { - // SAFETY: tests restore process env while guarded by env_lock. - unsafe { std::env::remove_var("GQL_SCHEMA_FILE") }; - } - } } #[test] fn schema_file_errors_when_not_configured() { - let _guard = env_lock().lock().expect("lock env"); - let old = std::env::var("GQL_SCHEMA_FILE").ok(); - // SAFETY: tests mutate process env while guarded by env_lock. 
- unsafe { std::env::remove_var("GQL_SCHEMA_FILE") }; + let lock = GlobalStateLock::new(); + let _guard = EnvGuard::remove(&lock, "GQL_SCHEMA_FILE"); let tmp = TempDir::new().expect("tmp"); let setup_dir = std::fs::canonicalize(tmp.path()).expect("setup abs"); @@ -205,16 +154,5 @@ mod tests { err.to_string() .contains("Schema file not configured. Set GQL_SCHEMA_FILE") ); - - match old { - Some(v) => { - // SAFETY: tests restore process env while guarded by env_lock. - unsafe { std::env::set_var("GQL_SCHEMA_FILE", v) }; - } - None => { - // SAFETY: tests restore process env while guarded by env_lock. - unsafe { std::env::remove_var("GQL_SCHEMA_FILE") }; - } - } } } diff --git a/crates/api-testing-core/src/grpc/runner.rs b/crates/api-testing-core/src/grpc/runner.rs index 2e1e8324..acf3b478 100644 --- a/crates/api-testing-core/src/grpc/runner.rs +++ b/crates/api-testing-core/src/grpc/runner.rs @@ -2,6 +2,7 @@ use std::path::{Path, PathBuf}; use std::process::Command; use anyhow::Context; +use nils_common::env as shared_env; use crate::Result; use crate::grpc::schema::GrpcRequestFile; @@ -43,11 +44,8 @@ pub fn execute_grpc_request( anyhow::bail!("gRPC target URL/endpoint is empty"); } - let grpcurl_bin = std::env::var("GRPCURL_BIN") - .ok() - .map(|s| s.trim().to_string()) - .filter(|s| !s.is_empty()) - .unwrap_or_else(|| "grpcurl".to_string()); + let grpcurl_bin = + shared_env::env_non_empty("GRPCURL_BIN").unwrap_or_else(|| "grpcurl".to_string()); let mut cmd = Command::new(&grpcurl_bin); cmd.arg("-format").arg("json"); @@ -113,6 +111,7 @@ pub fn execute_grpc_request( mod tests { use super::*; use crate::grpc::schema::GrpcRequestFile; + use nils_test_support::{EnvGuard, GlobalStateLock}; use pretty_assertions::assert_eq; use tempfile::TempDir; @@ -141,11 +140,10 @@ mod tests { .unwrap(); let req = GrpcRequestFile::load(&req_path).unwrap(); - // SAFETY: test-only process env mutation in isolated test process. 
- unsafe { std::env::set_var("GRPCURL_BIN", &script) }; + let lock = GlobalStateLock::new(); + let script_str = script.to_string_lossy(); + let _grpcurl_bin = EnvGuard::set(&lock, "GRPCURL_BIN", script_str.as_ref()); let executed = execute_grpc_request(&req, "127.0.0.1:50051", None).unwrap(); - // SAFETY: test-only process env mutation in isolated test process. - unsafe { std::env::remove_var("GRPCURL_BIN") }; assert_eq!(executed.grpc_status, 0); assert_eq!( @@ -178,11 +176,10 @@ mod tests { .unwrap(); let req = GrpcRequestFile::load(&req_path).unwrap(); - // SAFETY: test-only process env mutation in isolated test process. - unsafe { std::env::set_var("GRPCURL_BIN", &script) }; + let lock = GlobalStateLock::new(); + let script_str = script.to_string_lossy(); + let _grpcurl_bin = EnvGuard::set(&lock, "GRPCURL_BIN", script_str.as_ref()); let err = execute_grpc_request(&req, "127.0.0.1:50051", None).unwrap_err(); - // SAFETY: test-only process env mutation in isolated test process. - unsafe { std::env::remove_var("GRPCURL_BIN") }; let msg = format!("{err:#}"); assert!(msg.contains("grpcurl exit=7")); diff --git a/crates/api-testing-core/src/suite/resolve.rs b/crates/api-testing-core/src/suite/resolve.rs index 26b25d40..a7fd483c 100644 --- a/crates/api-testing-core/src/suite/resolve.rs +++ b/crates/api-testing-core/src/suite/resolve.rs @@ -1,6 +1,7 @@ use std::path::{Path, PathBuf}; use anyhow::Context; +use nils_common::env as shared_env; use crate::{Result, cli_util}; @@ -240,9 +241,8 @@ pub fn resolve_suite_selection( anyhow::bail!("Missing suite (use --suite or --suite-file)"); } - let suites_dir_override = std::env::var("API_TEST_SUITES_DIR") - .ok() - .and_then(|s| cli_util::trim_non_empty(&s)); + let suites_dir_override = + shared_env::env_non_empty("API_TEST_SUITES_DIR").and_then(|s| cli_util::trim_non_empty(&s)); let candidate = if let Some(dir) = suites_dir_override { let abs = resolve_path_from_repo_root(repo_root, &dir); @@ -283,6 +283,7 @@ pub fn 
write_file(path: &Path, contents: &[u8]) -> Result<()> { #[cfg(test)] mod tests { use super::*; + use nils_test_support::{EnvGuard, GlobalStateLock}; use tempfile::TempDir; @@ -479,8 +480,8 @@ mod tests { #[test] fn suite_resolve_resolves_suite_name_under_tests_dir() { - // SAFETY: tests mutate process env in isolated test scope. - unsafe { std::env::remove_var("API_TEST_SUITES_DIR") }; + let lock = GlobalStateLock::new(); + let _guard = EnvGuard::remove(&lock, "API_TEST_SUITES_DIR"); let tmp = TempDir::new().unwrap(); let root = tmp.path(); @@ -501,8 +502,8 @@ mod tests { #[test] fn suite_resolve_resolves_suite_name_under_setup_dir() { - // SAFETY: tests mutate process env in isolated test scope. - unsafe { std::env::remove_var("API_TEST_SUITES_DIR") }; + let lock = GlobalStateLock::new(); + let _guard = EnvGuard::remove(&lock, "API_TEST_SUITES_DIR"); let tmp = TempDir::new().unwrap(); let root = tmp.path(); diff --git a/crates/api-websocket/tests/json_contract.rs b/crates/api-websocket/tests/json_contract.rs index b3b3f171..88646999 100644 --- a/crates/api-websocket/tests/json_contract.rs +++ b/crates/api-websocket/tests/json_contract.rs @@ -14,8 +14,7 @@ fn api_websocket_bin() -> PathBuf { } fn run_api_websocket(cwd: &Path, args: &[&str]) -> CmdOutput { - let mut options = CmdOptions::default().with_cwd(cwd); - for key in [ + let options = CmdOptions::default().with_cwd(cwd).with_env_remove_many(&[ "WS_URL", "WS_ENV_DEFAULT", "WS_TOKEN_NAME", @@ -25,9 +24,7 @@ fn run_api_websocket(cwd: &Path, args: &[&str]) -> CmdOutput { "WS_JWT_VALIDATE_ENABLED", "ACCESS_TOKEN", "SERVICE_TOKEN", - ] { - options = options.with_env_remove(key); - } + ]); run_with(&api_websocket_bin(), args, &options) } diff --git a/crates/codex-cli/docs/README.md b/crates/codex-cli/docs/README.md index dc634786..ef836c18 100644 --- a/crates/codex-cli/docs/README.md +++ b/crates/codex-cli/docs/README.md @@ -10,7 +10,7 @@ ## Runbooks -- [json-consumers.md](runbooks/json-consumers.md) 
(`docs/runbooks/json-consumers.md`) +- [json-consumers.md](runbooks/json-consumers.md) ## Reports diff --git a/crates/codex-cli/src/auth/auto_refresh.rs b/crates/codex-cli/src/auth/auto_refresh.rs index 90da8cc0..3d125322 100644 --- a/crates/codex-cli/src/auth/auto_refresh.rs +++ b/crates/codex-cli/src/auth/auto_refresh.rs @@ -4,8 +4,8 @@ use std::path::{Path, PathBuf}; use crate::auth; use crate::auth::output::{self, AuthAutoRefreshResult, AuthAutoRefreshTargetResult}; -use crate::fs; use crate::paths; +use nils_common::fs; pub fn run() -> Result { run_with_json(false) diff --git a/crates/codex-cli/src/auth/current.rs b/crates/codex-cli/src/auth/current.rs index 5e61c41f..b2d9f001 100644 --- a/crates/codex-cli/src/auth/current.rs +++ b/crates/codex-cli/src/auth/current.rs @@ -5,8 +5,8 @@ use std::path::Path; use crate::auth; use crate::auth::output::{self, AuthCurrentResult}; -use crate::fs; use crate::paths; +use nils_common::fs; pub fn run() -> Result { run_with_json(false) diff --git a/crates/codex-cli/src/auth/refresh.rs b/crates/codex-cli/src/auth/refresh.rs index 82183b6f..7cc132f4 100644 --- a/crates/codex-cli/src/auth/refresh.rs +++ b/crates/codex-cli/src/auth/refresh.rs @@ -1,12 +1,12 @@ use anyhow::Result; use chrono::Utc; +use nils_common::fs; use reqwest::blocking::Client; use serde_json::{Map, Value}; use std::path::{Path, PathBuf}; use std::time::Duration; use crate::auth::output::{self, AuthRefreshResult}; -use crate::fs; use crate::json; use crate::paths; @@ -234,12 +234,7 @@ fn run_with_mode(args: &[String], output_mode: RefreshOutputMode) -> Result let output = serde_json::to_vec(&merged)?; fs::write_atomic(&target_file, &output, fs::SECRET_FILE_MODE)?; - let cache_dir = match paths::resolve_secret_cache_dir() { - Some(dir) => dir, - None => PathBuf::new(), - }; - let timestamp_path = cache_dir.join(format!("{}.timestamp", file_name(&target_file))); - if !cache_dir.as_os_str().is_empty() { + if let Some(timestamp_path) = 
paths::resolve_secret_timestamp_path(&target_file) { fs::write_timestamp(×tamp_path, Some(&now_iso))?; } @@ -378,6 +373,7 @@ fn env_timeout(key: &str, default: u64) -> u64 { .unwrap_or(default) } +#[cfg(test)] fn file_name(path: &Path) -> String { path.file_name() .and_then(|name| name.to_str()) diff --git a/crates/codex-cli/src/auth/remove.rs b/crates/codex-cli/src/auth/remove.rs index 0bc6a492..e92fc4ff 100644 --- a/crates/codex-cli/src/auth/remove.rs +++ b/crates/codex-cli/src/auth/remove.rs @@ -5,8 +5,8 @@ use std::path::Path; use crate::auth; use crate::auth::output::{self, AuthRemoveResult}; -use crate::fs; use crate::paths; +use nils_common::fs; pub fn run(target: &str, yes: bool) -> Result { run_with_json(target, yes, false) @@ -188,14 +188,9 @@ fn confirm_remove(target: &Path) -> Result { } fn remove_target_timestamp(target_file: &Path) { - let Some(cache_dir) = paths::resolve_secret_cache_dir() else { + let Some(timestamp_file) = paths::resolve_secret_timestamp_path(target_file) else { return; }; - let file_name = target_file - .file_name() - .and_then(|v| v.to_str()) - .unwrap_or("auth.json"); - let timestamp_file = cache_dir.join(format!("{file_name}.timestamp")); let _ = fs::write_timestamp(×tamp_file, None); } diff --git a/crates/codex-cli/src/auth/save.rs b/crates/codex-cli/src/auth/save.rs index 502c5a85..bf8652d2 100644 --- a/crates/codex-cli/src/auth/save.rs +++ b/crates/codex-cli/src/auth/save.rs @@ -5,8 +5,8 @@ use std::path::Path; use crate::auth; use crate::auth::output::{self, AuthSaveResult}; -use crate::fs; use crate::paths; +use nils_common::fs; pub fn run(target: &str, yes: bool) -> Result { run_with_json(target, yes, false) @@ -247,18 +247,12 @@ fn confirm_overwrite(target: &Path) -> Result { } fn write_target_timestamp(target_file: &Path, auth_file: &Path) -> Result<()> { - let cache_dir = match paths::resolve_secret_cache_dir() { - Some(dir) => dir, - None => return Ok(()), + let Some(timestamp_file) = 
paths::resolve_secret_timestamp_path(target_file) else { + return Ok(()); }; - - let file_name = target_file - .file_name() - .and_then(|v| v.to_str()) - .unwrap_or("auth.json"); - let timestamp_file = cache_dir.join(format!("{file_name}.timestamp")); let iso = auth::last_refresh_from_auth_file(auth_file).unwrap_or(None); - fs::write_timestamp(×tamp_file, iso.as_deref()) + fs::write_timestamp(×tamp_file, iso.as_deref())?; + Ok(()) } #[cfg(test)] diff --git a/crates/codex-cli/src/auth/sync.rs b/crates/codex-cli/src/auth/sync.rs index b4e7d277..43407a58 100644 --- a/crates/codex-cli/src/auth/sync.rs +++ b/crates/codex-cli/src/auth/sync.rs @@ -1,11 +1,13 @@ use anyhow::Result; +use nils_common::fs; +use nils_common::provider_runtime::persistence::{ + SyncSecretsError, TimestampPolicy, sync_auth_to_matching_secrets, +}; use serde_json::json; -use std::path::{Path, PathBuf}; -use crate::auth; use crate::auth::output::{self, AuthSyncResult}; -use crate::fs; use crate::paths; +use crate::provider_profile::CODEX_PROVIDER_PROFILE; pub fn run() -> Result { run_with_json(false) @@ -31,7 +33,33 @@ pub fn run_with_json(output_json: bool) -> Result { } }; - if !auth_file.is_file() { + let sync_result = match sync_auth_to_matching_secrets( + &CODEX_PROVIDER_PROFILE, + &auth_file, + fs::SECRET_FILE_MODE, + TimestampPolicy::Strict, + ) { + Ok(result) => result, + Err(SyncSecretsError::HashAuthFile { path, .. }) + | Err(SyncSecretsError::HashSecretFile { path, .. 
}) => { + if output_json { + output::emit_error( + "auth sync", + "hash-failed", + format!("failed to hash {}", path.display()), + Some(json!({ + "path": path.display().to_string(), + })), + )?; + } else { + eprintln!("codex: failed to hash {}", path.display()); + } + return Ok(1); + } + Err(err) => return Err(err.into()), + }; + + if !sync_result.auth_file_present { if output_json { output::emit_result( "auth sync", @@ -47,106 +75,30 @@ pub fn run_with_json(output_json: bool) -> Result { return Ok(0); } - let auth_key = match auth::identity_key_from_auth_file(&auth_file) { - Ok(Some(key)) => key, - _ => { - if output_json { - output::emit_result( - "auth sync", - AuthSyncResult { - auth_file: auth_file.display().to_string(), - synced: 0, - skipped: 1, - failed: 0, - updated_files: Vec::new(), - }, - )?; - } - return Ok(0); - } - }; - - let auth_last_refresh = auth::last_refresh_from_auth_file(&auth_file).unwrap_or(None); - let auth_hash = match fs::sha256_file(&auth_file) { - Ok(hash) => hash, - Err(_) => { - if output_json { - output::emit_error( - "auth sync", - "hash-failed", - format!("failed to hash {}", auth_file.display()), - Some(json!({ - "path": auth_file.display().to_string(), - })), - )?; - } else { - eprintln!("codex: failed to hash {}", auth_file.display()); - } - return Ok(1); - } - }; - - let mut synced = 0usize; - let mut skipped = 0usize; - let failed = 0usize; - let mut updated_files: Vec = Vec::new(); - - let secret_dir = paths::resolve_secret_dir(); - if let Some(secret_dir) = secret_dir - && let Ok(entries) = std::fs::read_dir(&secret_dir) - { - for entry in entries.flatten() { - let path = entry.path(); - if path.extension().and_then(|s| s.to_str()) != Some("json") { - continue; - } - let candidate_key = match auth::identity_key_from_auth_file(&path) { - Ok(Some(key)) => key, - _ => { - skipped += 1; - continue; - } - }; - if candidate_key != auth_key { - skipped += 1; - continue; - } - - let secret_hash = match fs::sha256_file(&path) { - 
Ok(hash) => hash, - Err(_) => { - if output_json { - output::emit_error( - "auth sync", - "hash-failed", - format!("failed to hash {}", path.display()), - Some(json!({ - "path": path.display().to_string(), - })), - )?; - } else { - eprintln!("codex: failed to hash {}", path.display()); - } - return Ok(1); - } - }; - if secret_hash == auth_hash { - skipped += 1; - continue; - } - - let contents = std::fs::read(&auth_file)?; - fs::write_atomic(&path, &contents, fs::SECRET_FILE_MODE)?; - - let timestamp_path = secret_timestamp_path(&path)?; - fs::write_timestamp(×tamp_path, auth_last_refresh.as_deref())?; - synced += 1; - updated_files.push(path.display().to_string()); + if !sync_result.auth_identity_present { + if output_json { + output::emit_result( + "auth sync", + AuthSyncResult { + auth_file: auth_file.display().to_string(), + synced: 0, + skipped: 1, + failed: 0, + updated_files: Vec::new(), + }, + )?; } + return Ok(0); } - let auth_timestamp = secret_timestamp_path(&auth_file)?; - fs::write_timestamp(&auth_timestamp, auth_last_refresh.as_deref())?; + let synced = sync_result.synced; + let skipped = sync_result.skipped; + let failed = 0usize; + let updated_files = sync_result + .updated_files + .into_iter() + .map(|path| path.display().to_string()) + .collect(); if output_json { output::emit_result( @@ -163,13 +115,3 @@ pub fn run_with_json(output_json: bool) -> Result { Ok(0) } - -fn secret_timestamp_path(target_file: &Path) -> Result { - let cache_dir = paths::resolve_secret_cache_dir() - .ok_or_else(|| anyhow::anyhow!("CODEX_SECRET_CACHE_DIR not resolved"))?; - let name = target_file - .file_name() - .and_then(|name| name.to_str()) - .unwrap_or("auth.json"); - Ok(cache_dir.join(format!("{name}.timestamp"))) -} diff --git a/crates/codex-cli/src/auth/use_secret.rs b/crates/codex-cli/src/auth/use_secret.rs index ce29dc05..4ee82de6 100644 --- a/crates/codex-cli/src/auth/use_secret.rs +++ b/crates/codex-cli/src/auth/use_secret.rs @@ -4,8 +4,8 @@ use 
std::path::{Path, PathBuf}; use crate::auth; use crate::auth::output::{self, AuthUseResult}; -use crate::fs; use crate::paths; +use nils_common::fs; pub fn run(target: &str) -> Result { run_with_json(target, false) @@ -216,13 +216,8 @@ fn resolve_by_email(secret_dir: &Path, target: &str) -> ResolveResult { } fn secret_timestamp_path(target_file: &Path) -> Result { - let cache_dir = paths::resolve_secret_cache_dir() - .ok_or_else(|| anyhow::anyhow!("CODEX_SECRET_CACHE_DIR not resolved"))?; - let name = target_file - .file_name() - .and_then(|name| name.to_str()) - .unwrap_or("auth.json"); - Ok(cache_dir.join(format!("{name}.timestamp"))) + paths::resolve_secret_timestamp_path(target_file) + .ok_or_else(|| anyhow::anyhow!("CODEX_SECRET_CACHE_DIR not resolved")) } fn file_name(path: &Path) -> String { diff --git a/crates/codex-cli/src/fs.rs b/crates/codex-cli/src/fs.rs deleted file mode 100644 index 9ff5f176..00000000 --- a/crates/codex-cli/src/fs.rs +++ /dev/null @@ -1,59 +0,0 @@ -use anyhow::Result; -use nils_common::fs as shared_fs; -use std::io; -use std::path::Path; - -pub const SECRET_FILE_MODE: u32 = shared_fs::SECRET_FILE_MODE; - -pub fn sha256_file(path: &Path) -> Result { - match shared_fs::sha256_file(path) { - Ok(hash) => Ok(hash), - Err(shared_fs::FileHashError::OpenFile { source, .. }) => Err(anyhow::Error::new(source) - .context(format!("failed to open for sha256: {}", path.display()))), - Err(shared_fs::FileHashError::ReadFile { source, .. 
}) => Err(source.into()), - } -} - -pub fn write_atomic(path: &Path, contents: &[u8], mode: u32) -> Result<()> { - shared_fs::write_atomic(path, contents, mode).map_err(map_atomic_write_error) -} - -pub fn write_timestamp(path: &Path, iso: Option<&str>) -> Result<()> { - match shared_fs::write_timestamp(path, iso) { - Ok(()) => Ok(()), - Err(shared_fs::TimestampError::CreateParentDir { path, source }) => { - Err(anyhow::Error::new(source) - .context(format!("failed to create dir: {}", path.display()))) - } - Err(shared_fs::TimestampError::WriteFile { path, source }) => { - Err(anyhow::Error::new(source) - .context(format!("failed to write timestamp: {}", path.display()))) - } - Err(shared_fs::TimestampError::RemoveFile { .. }) => Ok(()), - } -} - -fn map_atomic_write_error(err: shared_fs::AtomicWriteError) -> anyhow::Error { - match err { - shared_fs::AtomicWriteError::CreateParentDir { path, source } => { - anyhow::Error::new(source).context(format!("failed to create dir: {}", path.display())) - } - shared_fs::AtomicWriteError::CreateTempFile { source, .. } => { - anyhow::Error::new(source).context("failed to create temp file") - } - shared_fs::AtomicWriteError::TempPathExhausted { .. 
} => anyhow::Error::new( - io::Error::new(io::ErrorKind::AlreadyExists, "temp file already exists"), - ) - .context("failed to create unique temp file"), - shared_fs::AtomicWriteError::WriteTempFile { path, source } => anyhow::Error::new(source) - .context(format!("failed to write temp file: {}", path.display())), - shared_fs::AtomicWriteError::SetPermissions { path, source } => anyhow::Error::new(source) - .context(format!("failed to set permissions: {}", path.display())), - shared_fs::AtomicWriteError::ReplaceFile { from, to, source } => anyhow::Error::new(source) - .context(format!( - "failed to rename {} -> {}", - from.display(), - to.display() - )), - } -} diff --git a/crates/codex-cli/src/lib.rs b/crates/codex-cli/src/lib.rs index 39f3f107..72b9cf94 100644 --- a/crates/codex-cli/src/lib.rs +++ b/crates/codex-cli/src/lib.rs @@ -2,7 +2,6 @@ pub mod agent; pub mod auth; pub mod config; pub mod diag_output; -pub mod fs; pub mod json; pub mod jwt; pub mod paths; diff --git a/crates/codex-cli/src/paths.rs b/crates/codex-cli/src/paths.rs index 619aeabf..e043905e 100644 --- a/crates/codex-cli/src/paths.rs +++ b/crates/codex-cli/src/paths.rs @@ -1,4 +1,4 @@ -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use crate::provider_profile::CODEX_PROVIDER_PROFILE; @@ -18,6 +18,13 @@ pub fn resolve_secret_cache_dir() -> Option { crate::runtime::resolve_secret_cache_dir() } +pub fn resolve_secret_timestamp_path(target_file: &Path) -> Option { + nils_common::provider_runtime::paths::resolve_secret_timestamp_path( + &CODEX_PROVIDER_PROFILE, + target_file, + ) +} + pub fn resolve_feature_dir() -> Option { crate::runtime::resolve_feature_dir() } diff --git a/crates/codex-cli/src/rate_limits/cache.rs b/crates/codex-cli/src/rate_limits/cache.rs index e0e3e117..59faa7d7 100644 --- a/crates/codex-cli/src/rate_limits/cache.rs +++ b/crates/codex-cli/src/rate_limits/cache.rs @@ -1,9 +1,9 @@ use anyhow::{Context, Result}; +use nils_common::fs as shared_fs; use std::fs; use 
std::path::{Path, PathBuf}; use crate::auth; -use crate::fs as codex_fs; use crate::paths; use nils_common::env as shared_env; @@ -68,7 +68,7 @@ pub fn cache_file_for_target(target_file: &Path) -> Result { } } - let hash = codex_fs::sha256_file(target_file)?; + let hash = shared_fs::sha256_file(target_file)?; Ok(cache_dir.join(format!("auth_{}.kv", hash.to_lowercase()))) } @@ -187,7 +187,7 @@ pub fn write_starship_cache( lines.push(format!("weekly_reset_epoch={weekly_reset_epoch}")); let data = lines.join("\n"); - codex_fs::write_atomic(&cache_file, data.as_bytes(), codex_fs::SECRET_FILE_MODE)?; + shared_fs::write_atomic(&cache_file, data.as_bytes(), shared_fs::SECRET_FILE_MODE)?; Ok(()) } @@ -348,8 +348,8 @@ mod tests { cache_file_for_target, clear_starship_cache, read_cache_entry, read_cache_entry_for_cached_mode, secret_name_for_target, write_starship_cache, }; - use crate::fs as codex_fs; use chrono::Utc; + use nils_common::fs as shared_fs; use nils_test_support::{EnvGuard, GlobalStateLock}; use std::fs; use std::path::Path; @@ -476,7 +476,7 @@ mod tests { let target = dir.path().join("auth.json"); fs::write(&target, "{\"tokens\":{\"access_token\":\"tok\"}}").expect("write auth file"); - let hash = codex_fs::sha256_file(&target).expect("sha256"); + let hash = shared_fs::sha256_file(&target).expect("sha256"); let cache_file = cache_file_for_target(&target).expect("cache file"); assert_eq!( cache_file, diff --git a/crates/codex-cli/src/rate_limits/mod.rs b/crates/codex-cli/src/rate_limits/mod.rs index 3d5e6100..bb323daa 100644 --- a/crates/codex-cli/src/rate_limits/mod.rs +++ b/crates/codex-cli/src/rate_limits/mod.rs @@ -10,8 +10,13 @@ use std::time::Duration; use crate::auth; use crate::diag_output; +use crate::provider_profile::CODEX_PROVIDER_PROFILE; use crate::rate_limits::client::{UsageRequest, fetch_usage}; use nils_common::env as shared_env; +use nils_common::fs; +use nils_common::provider_runtime::persistence::{ + SyncSecretsError, TimestampPolicy, 
sync_auth_to_matching_secrets, +}; use nils_term::progress::{Progress, ProgressFinish, ProgressOptions}; pub use nils_common::rate_limits_ansi as ansi; @@ -1322,63 +1327,23 @@ fn sync_auth_silent() -> Result<(i32, Option)> { None => return Ok((0, None)), }; - if !auth_file.is_file() { - return Ok((0, None)); - } - - let auth_key = match auth::identity_key_from_auth_file(&auth_file) { - Ok(Some(key)) => key, - _ => return Ok((0, None)), - }; - - let auth_last_refresh = auth::last_refresh_from_auth_file(&auth_file).unwrap_or(None); - let auth_hash = match crate::fs::sha256_file(&auth_file) { - Ok(hash) => hash, - Err(_) => { - return Ok(( - 1, - Some(format!("codex: failed to hash {}", auth_file.display())), - )); + let sync_result = match sync_auth_to_matching_secrets( + &CODEX_PROVIDER_PROFILE, + &auth_file, + fs::SECRET_FILE_MODE, + TimestampPolicy::Strict, + ) { + Ok(result) => result, + Err(SyncSecretsError::HashAuthFile { path, .. }) + | Err(SyncSecretsError::HashSecretFile { path, .. 
}) => { + return Ok((1, Some(format!("codex: failed to hash {}", path.display())))); } + Err(err) => return Err(err.into()), }; - - if let Some(secret_dir) = crate::paths::resolve_secret_dir() - && let Ok(entries) = std::fs::read_dir(&secret_dir) - { - for entry in entries.flatten() { - let path = entry.path(); - if path.extension().and_then(|s| s.to_str()) != Some("json") { - continue; - } - let candidate_key = match auth::identity_key_from_auth_file(&path) { - Ok(Some(key)) => key, - _ => continue, - }; - if candidate_key != auth_key { - continue; - } - - let secret_hash = match crate::fs::sha256_file(&path) { - Ok(hash) => hash, - Err(_) => { - return Ok((1, Some(format!("codex: failed to hash {}", path.display())))); - } - }; - if secret_hash == auth_hash { - continue; - } - - let contents = std::fs::read(&auth_file)?; - crate::fs::write_atomic(&path, &contents, crate::fs::SECRET_FILE_MODE)?; - - let timestamp_path = secret_timestamp_path(&path)?; - crate::fs::write_timestamp(×tamp_path, auth_last_refresh.as_deref())?; - } + if !sync_result.auth_file_present || !sync_result.auth_identity_present { + return Ok((0, None)); } - let auth_timestamp = secret_timestamp_path(&auth_file)?; - crate::fs::write_timestamp(&auth_timestamp, auth_last_refresh.as_deref())?; - Ok((0, None)) } @@ -1401,16 +1366,6 @@ fn maybe_sync_all_mode_auth_silent(debug_mode: bool) { } } -fn secret_timestamp_path(target_file: &Path) -> Result { - let cache_dir = crate::paths::resolve_secret_cache_dir() - .ok_or_else(|| anyhow::anyhow!("CODEX_SECRET_CACHE_DIR not resolved"))?; - let name = target_file - .file_name() - .and_then(|name| name.to_str()) - .unwrap_or("auth.json"); - Ok(cache_dir.join(format!("{name}.timestamp"))) -} - fn run_all_mode(args: &RateLimitsOptions, cached_mode: bool, debug_mode: bool) -> Result { let secret_dir = crate::paths::resolve_secret_dir().unwrap_or_default(); if !secret_dir.is_dir() { @@ -1599,11 +1554,11 @@ fn current_secret_basename(secret_files: &[PathBuf]) -> 
Option { } let auth_key = auth::identity_key_from_auth_file(&auth_file).ok().flatten(); - let auth_hash = crate::fs::sha256_file(&auth_file).ok(); + let auth_hash = fs::sha256_file(&auth_file).ok(); if let Some(auth_hash) = auth_hash.as_deref() { for secret_file in secret_files { - if let Ok(secret_hash) = crate::fs::sha256_file(secret_file) + if let Ok(secret_hash) = fs::sha256_file(secret_file) && secret_hash == auth_hash && let Some(name) = secret_file.file_name().and_then(|name| name.to_str()) { diff --git a/crates/codex-cli/src/rate_limits/writeback.rs b/crates/codex-cli/src/rate_limits/writeback.rs index 8fd5b975..3c483b8b 100644 --- a/crates/codex-cli/src/rate_limits/writeback.rs +++ b/crates/codex-cli/src/rate_limits/writeback.rs @@ -1,9 +1,9 @@ use anyhow::{Context, Result}; use chrono::{TimeZone, Utc}; +use nils_common::fs; use serde_json::{Map, Value}; use std::path::Path; -use crate::fs; use crate::json; use crate::rate_limits::render; diff --git a/crates/codex-cli/tests/fs.rs b/crates/codex-cli/tests/fs.rs index 988630f0..40c90307 100644 --- a/crates/codex-cli/tests/fs.rs +++ b/crates/codex-cli/tests/fs.rs @@ -1,4 +1,4 @@ -use codex_cli::fs::{SECRET_FILE_MODE, sha256_file, write_atomic, write_timestamp}; +use nils_common::fs::{SECRET_FILE_MODE, sha256_file, write_atomic, write_timestamp}; use pretty_assertions::assert_eq; use std::fs; diff --git a/crates/fzf-cli/tests/common.rs b/crates/fzf-cli/tests/common.rs index 4e748873..f8e062f1 100644 --- a/crates/fzf-cli/tests/common.rs +++ b/crates/fzf-cli/tests/common.rs @@ -1,17 +1,14 @@ -#[allow(unused_imports)] -pub use nils_test_support::write_exe; +#![allow(dead_code)] + use nils_test_support::{StubBinDir, cmd}; use std::path::Path; -#[allow(dead_code)] pub struct CmdOutput { pub code: i32, pub stdout: String, - #[allow(dead_code)] pub stderr: String, } -#[allow(dead_code)] pub fn run_fzf_cli( dir: &Path, args: &[&str], @@ -30,7 +27,6 @@ pub fn run_fzf_cli( } } -#[allow(dead_code)] pub fn 
run_fzf_cli_with_stub_path( dir: &Path, stub_path: &Path, @@ -50,7 +46,6 @@ pub fn run_fzf_cli_with_stub_path( } } -#[allow(dead_code)] pub fn run_fzf_cli_with_stub_only_path( dir: &Path, stub_path: &Path, @@ -71,12 +66,14 @@ pub fn run_fzf_cli_with_stub_only_path( } } -#[allow(dead_code)] pub fn make_stub_dir() -> StubBinDir { StubBinDir::new() } -#[allow(dead_code)] pub fn fzf_stub_script() -> &'static str { nils_test_support::stubs::fzf_stub_script() } + +pub fn write_exe(dir: &Path, name: &str, content: &str) { + nils_test_support::write_exe(dir, name, content); +} diff --git a/crates/gemini-cli/docs/README.md b/crates/gemini-cli/docs/README.md index 16141a6a..2a5af755 100644 --- a/crates/gemini-cli/docs/README.md +++ b/crates/gemini-cli/docs/README.md @@ -10,7 +10,7 @@ ## Runbooks -- [json-consumers.md](runbooks/json-consumers.md) (`docs/runbooks/json-consumers.md`) +- [json-consumers.md](runbooks/json-consumers.md) ## Reports diff --git a/crates/gemini-cli/src/auth/mod.rs b/crates/gemini-cli/src/auth/mod.rs index 203f4573..0d893496 100644 --- a/crates/gemini-cli/src/auth/mod.rs +++ b/crates/gemini-cli/src/auth/mod.rs @@ -12,7 +12,7 @@ use std::io; use std::path::{Path, PathBuf}; use std::time::{SystemTime, UNIX_EPOCH}; -pub(crate) const SECRET_FILE_MODE: u32 = crate::fs::SECRET_FILE_MODE; +pub(crate) const SECRET_FILE_MODE: u32 = nils_common::fs::SECRET_FILE_MODE; pub fn identity_from_auth_file(path: &Path) -> io::Result> { crate::runtime::auth::identity_from_auth_file(path).map_err(core_error_to_io) @@ -35,11 +35,11 @@ pub fn identity_key_from_auth_file(path: &Path) -> io::Result> { } pub(crate) fn write_atomic(path: &Path, contents: &[u8], mode: u32) -> io::Result<()> { - crate::fs::write_atomic(path, contents, mode) + nils_common::fs::write_atomic(path, contents, mode).map_err(io_error_from_atomic_write) } pub(crate) fn write_timestamp(path: &Path, iso: Option<&str>) -> io::Result<()> { - crate::fs::write_timestamp(path, iso) + 
nils_common::fs::write_timestamp(path, iso).map_err(io_error_from_timestamp_write) } pub(crate) fn normalize_iso(raw: &str) -> String { @@ -137,6 +137,28 @@ fn core_error_to_io(err: crate::runtime::CoreError) -> io::Error { io::Error::other(err.to_string()) } +fn io_error_from_atomic_write(err: nils_common::fs::AtomicWriteError) -> io::Error { + match err { + nils_common::fs::AtomicWriteError::CreateParentDir { source, .. } + | nils_common::fs::AtomicWriteError::CreateTempFile { source, .. } + | nils_common::fs::AtomicWriteError::WriteTempFile { source, .. } + | nils_common::fs::AtomicWriteError::SetPermissions { source, .. } + | nils_common::fs::AtomicWriteError::ReplaceFile { source, .. } => source, + nils_common::fs::AtomicWriteError::TempPathExhausted { target, .. } => io::Error::new( + io::ErrorKind::AlreadyExists, + format!("failed to create unique temp file for {}", target.display()), + ), + } +} + +fn io_error_from_timestamp_write(err: nils_common::fs::TimestampError) -> io::Error { + match err { + nils_common::fs::TimestampError::CreateParentDir { source, .. } + | nils_common::fs::TimestampError::WriteFile { source, .. } + | nils_common::fs::TimestampError::RemoveFile { source, .. 
} => source, + } +} + fn parse_u32(raw: &str) -> Option { raw.parse::().ok() } diff --git a/crates/gemini-cli/src/auth/refresh.rs b/crates/gemini-cli/src/auth/refresh.rs index 2ed152aa..f5df0db9 100644 --- a/crates/gemini-cli/src/auth/refresh.rs +++ b/crates/gemini-cli/src/auth/refresh.rs @@ -329,8 +329,7 @@ fn run_with_mode(args: &[String], output_mode: RefreshOutputMode) -> i32 { return 1; } - if let Some(cache_dir) = crate::paths::resolve_secret_cache_dir() { - let timestamp_path = cache_dir.join(format!("{}.timestamp", file_name(&target_file))); + if let Some(timestamp_path) = crate::paths::resolve_secret_timestamp_path(&target_file) { let _ = auth::write_timestamp(×tamp_path, Some(&now_iso)); } @@ -626,6 +625,7 @@ fn env_timeout(key: &str, default: u64) -> u64 { .unwrap_or(default) } +#[cfg(test)] fn file_name(path: &Path) -> String { path.file_name() .and_then(|name| name.to_str()) diff --git a/crates/gemini-cli/src/auth/remove.rs b/crates/gemini-cli/src/auth/remove.rs index 816653b8..269990f6 100644 --- a/crates/gemini-cli/src/auth/remove.rs +++ b/crates/gemini-cli/src/auth/remove.rs @@ -197,14 +197,9 @@ fn confirm_remove(target: &Path) -> io::Result { } fn remove_target_timestamp(target_file: &Path) { - let Some(cache_dir) = crate::paths::resolve_secret_cache_dir() else { + let Some(timestamp_file) = crate::paths::resolve_secret_timestamp_path(target_file) else { return; }; - let file_name = target_file - .file_name() - .and_then(|v| v.to_str()) - .unwrap_or("auth.json"); - let timestamp_file = cache_dir.join(format!("{file_name}.timestamp")); let _ = auth::write_timestamp(×tamp_file, None); } diff --git a/crates/gemini-cli/src/auth/save.rs b/crates/gemini-cli/src/auth/save.rs index ec53b20f..cff5712c 100644 --- a/crates/gemini-cli/src/auth/save.rs +++ b/crates/gemini-cli/src/auth/save.rs @@ -254,16 +254,9 @@ fn confirm_overwrite(target: &Path) -> io::Result { } fn write_target_timestamp(target_file: &Path, auth_file: &Path) -> io::Result<()> { - let 
cache_dir = match crate::paths::resolve_secret_cache_dir() { - Some(dir) => dir, - None => return Ok(()), + let Some(timestamp_file) = crate::paths::resolve_secret_timestamp_path(target_file) else { + return Ok(()); }; - - let file_name = target_file - .file_name() - .and_then(|v| v.to_str()) - .unwrap_or("auth.json"); - let timestamp_file = cache_dir.join(format!("{file_name}.timestamp")); let iso = auth::last_refresh_from_auth_file(auth_file).ok().flatten(); auth::write_timestamp(×tamp_file, iso.as_deref()) } diff --git a/crates/gemini-cli/src/auth/sync.rs b/crates/gemini-cli/src/auth/sync.rs index 13a873c1..40c36d16 100644 --- a/crates/gemini-cli/src/auth/sync.rs +++ b/crates/gemini-cli/src/auth/sync.rs @@ -1,7 +1,10 @@ -use std::path::{Path, PathBuf}; +use nils_common::provider_runtime::persistence::{ + SyncSecretsError, TimestampPolicy, sync_auth_to_matching_secrets, +}; use crate::auth; use crate::auth::output; +use crate::provider_profile::GEMINI_PROVIDER_PROFILE; pub fn run() -> i32 { run_with_json(false) @@ -27,143 +30,106 @@ pub fn run_with_json(output_json: bool) -> i32 { } }; - if !auth_file.is_file() { - if output_json { - let _ = output::emit_result( - "auth sync", - output::obj(vec![ - ("auth_file", output::s(auth_file.display().to_string())), - ("synced", output::n(0)), - ("skipped", output::n(1)), - ("failed", output::n(0)), - ("updated_files", output::arr(Vec::new())), - ]), - ); + let sync_result = match sync_auth_to_matching_secrets( + &GEMINI_PROVIDER_PROFILE, + &auth_file, + auth::SECRET_FILE_MODE, + TimestampPolicy::BestEffort, + ) { + Ok(result) => result, + Err(SyncSecretsError::ReadAuthFile { path, .. }) + | Err(SyncSecretsError::HashAuthFile { path, .. 
}) => { + if output_json { + let _ = output::emit_error( + "auth sync", + "auth-read-failed", + format!("failed to read {}", path.display()), + Some(output::obj(vec![( + "path", + output::s(path.display().to_string()), + )])), + ); + } else { + eprintln!("gemini: failed to read {}", path.display()); + } + return 1; } - return 0; - } - - let auth_key = match auth::identity_key_from_auth_file(&auth_file) { - Ok(Some(key)) => key, - _ => { + Err(SyncSecretsError::HashSecretFile { path, .. }) => { if output_json { - let _ = output::emit_result( + let _ = output::emit_error( "auth sync", - output::obj(vec![ - ("auth_file", output::s(auth_file.display().to_string())), - ("synced", output::n(0)), - ("skipped", output::n(1)), - ("failed", output::n(0)), - ("updated_files", output::arr(Vec::new())), - ]), + "secret-read-failed", + format!("failed to read {}", path.display()), + Some(output::obj(vec![( + "path", + output::s(path.display().to_string()), + )])), ); + } else { + eprintln!("gemini: failed to read {}", path.display()); } - return 0; + return 1; } - }; - - let auth_last_refresh = auth::last_refresh_from_auth_file(&auth_file).ok().flatten(); - let auth_contents = match std::fs::read(&auth_file) { - Ok(contents) => contents, - Err(_) => { + Err(SyncSecretsError::WriteSecretFile { path, .. }) + | Err(SyncSecretsError::WriteTimestampFile { path, .. 
}) => { if output_json { let _ = output::emit_error( "auth sync", - "auth-read-failed", - format!("failed to read {}", auth_file.display()), + "sync-write-failed", + format!("failed to write {}", path.display()), Some(output::obj(vec![( "path", - output::s(auth_file.display().to_string()), + output::s(path.display().to_string()), )])), ); } else { - eprintln!("gemini: failed to read {}", auth_file.display()); + eprintln!("gemini: failed to write {}", path.display()); } return 1; } }; - let mut synced = 0usize; - let mut skipped = 0usize; - let failed = 0usize; - let mut updated_files: Vec = Vec::new(); - - if let Some(secret_dir) = crate::paths::resolve_secret_dir() - && let Ok(entries) = std::fs::read_dir(&secret_dir) - { - for entry in entries.flatten() { - let path = entry.path(); - if path.extension().and_then(|s| s.to_str()) != Some("json") { - continue; - } - - let candidate_key = match auth::identity_key_from_auth_file(&path) { - Ok(Some(key)) => key, - _ => { - skipped += 1; - continue; - } - }; - - if candidate_key != auth_key { - skipped += 1; - continue; - } - - let secret_contents = match std::fs::read(&path) { - Ok(contents) => contents, - Err(_) => { - if output_json { - let _ = output::emit_error( - "auth sync", - "secret-read-failed", - format!("failed to read {}", path.display()), - Some(output::obj(vec![( - "path", - output::s(path.display().to_string()), - )])), - ); - } else { - eprintln!("gemini: failed to read {}", path.display()); - } - return 1; - } - }; - - if secret_contents == auth_contents { - skipped += 1; - continue; - } - - if auth::write_atomic(&path, &auth_contents, auth::SECRET_FILE_MODE).is_err() { - if output_json { - let _ = output::emit_error( - "auth sync", - "sync-write-failed", - format!("failed to write {}", path.display()), - Some(output::obj(vec![( - "path", - output::s(path.display().to_string()), - )])), - ); - } else { - eprintln!("gemini: failed to write {}", path.display()); - } - return 1; - } - - if let 
Some(timestamp_path) = secret_timestamp_path(&path) { - let _ = auth::write_timestamp(×tamp_path, auth_last_refresh.as_deref()); - } - synced += 1; - updated_files.push(path.display().to_string()); + if !sync_result.auth_file_present { + if output_json { + let _ = output::emit_result( + "auth sync", + output::obj(vec![ + ("auth_file", output::s(auth_file.display().to_string())), + ("synced", output::n(0)), + ("skipped", output::n(1)), + ("failed", output::n(0)), + ("updated_files", output::arr(Vec::new())), + ]), + ); } + return 0; } - if let Some(auth_timestamp) = secret_timestamp_path(&auth_file) { - let _ = auth::write_timestamp(&auth_timestamp, auth_last_refresh.as_deref()); + if !sync_result.auth_identity_present { + if output_json { + let _ = output::emit_result( + "auth sync", + output::obj(vec![ + ("auth_file", output::s(auth_file.display().to_string())), + ("synced", output::n(0)), + ("skipped", output::n(1)), + ("failed", output::n(0)), + ("updated_files", output::arr(Vec::new())), + ]), + ); + } + return 0; } + let synced = sync_result.synced; + let skipped = sync_result.skipped; + let failed = 0usize; + let updated_files: Vec = sync_result + .updated_files + .into_iter() + .map(|path| path.display().to_string()) + .collect(); + if output_json { let _ = output::emit_result( "auth sync", @@ -182,12 +148,3 @@ pub fn run_with_json(output_json: bool) -> i32 { 0 } - -fn secret_timestamp_path(target_file: &Path) -> Option { - let cache_dir = crate::paths::resolve_secret_cache_dir()?; - let name = target_file - .file_name() - .and_then(|name| name.to_str()) - .unwrap_or("auth.json"); - Some(cache_dir.join(format!("{name}.timestamp"))) -} diff --git a/crates/gemini-cli/src/auth/use_secret.rs b/crates/gemini-cli/src/auth/use_secret.rs index 02389688..811c9acb 100644 --- a/crates/gemini-cli/src/auth/use_secret.rs +++ b/crates/gemini-cli/src/auth/use_secret.rs @@ -211,12 +211,7 @@ fn resolve_by_email(secret_dir: &Path, target: &str) -> ResolveResult { } fn 
secret_timestamp_path(target_file: &Path) -> Option { - let cache_dir = crate::paths::resolve_secret_cache_dir()?; - let name = target_file - .file_name() - .and_then(|name| name.to_str()) - .unwrap_or("auth.json"); - Some(cache_dir.join(format!("{name}.timestamp"))) + crate::paths::resolve_secret_timestamp_path(target_file) } fn file_name(path: &Path) -> String { diff --git a/crates/gemini-cli/src/fs.rs b/crates/gemini-cli/src/fs.rs deleted file mode 100644 index 27fa0e10..00000000 --- a/crates/gemini-cli/src/fs.rs +++ /dev/null @@ -1,37 +0,0 @@ -use nils_common::fs as shared_fs; -use std::io; -use std::path::Path; - -pub const SECRET_FILE_MODE: u32 = shared_fs::SECRET_FILE_MODE; - -pub fn sha256_file(path: &Path) -> io::Result { - match shared_fs::sha256_file(path) { - Ok(hash) => Ok(hash), - Err(shared_fs::FileHashError::OpenFile { source, .. }) => Err(source), - Err(shared_fs::FileHashError::ReadFile { source, .. }) => Err(source), - } -} - -pub fn write_atomic(path: &Path, contents: &[u8], mode: u32) -> io::Result<()> { - match shared_fs::write_atomic(path, contents, mode) { - Ok(()) => Ok(()), - Err(shared_fs::AtomicWriteError::CreateParentDir { source, .. }) => Err(source), - Err(shared_fs::AtomicWriteError::CreateTempFile { source, .. }) => Err(source), - Err(shared_fs::AtomicWriteError::WriteTempFile { source, .. }) => Err(source), - Err(shared_fs::AtomicWriteError::SetPermissions { source, .. }) => Err(source), - Err(shared_fs::AtomicWriteError::ReplaceFile { source, .. }) => Err(source), - Err(shared_fs::AtomicWriteError::TempPathExhausted { target, .. }) => Err(io::Error::new( - io::ErrorKind::AlreadyExists, - format!("failed to create unique temp file for {}", target.display()), - )), - } -} - -pub fn write_timestamp(path: &Path, iso: Option<&str>) -> io::Result<()> { - match shared_fs::write_timestamp(path, iso) { - Ok(()) => Ok(()), - Err(shared_fs::TimestampError::CreateParentDir { source, .. 
}) => Err(source), - Err(shared_fs::TimestampError::WriteFile { source, .. }) => Err(source), - Err(shared_fs::TimestampError::RemoveFile { source, .. }) => Err(source), - } -} diff --git a/crates/gemini-cli/src/lib.rs b/crates/gemini-cli/src/lib.rs index abbebd04..dc4261b4 100644 --- a/crates/gemini-cli/src/lib.rs +++ b/crates/gemini-cli/src/lib.rs @@ -4,7 +4,6 @@ pub mod agent; pub mod auth; pub mod config; pub mod diag_output; -pub mod fs; pub mod json; pub mod jwt; pub mod paths; diff --git a/crates/gemini-cli/src/paths.rs b/crates/gemini-cli/src/paths.rs index a7164fb9..d5ff0bae 100644 --- a/crates/gemini-cli/src/paths.rs +++ b/crates/gemini-cli/src/paths.rs @@ -1,4 +1,4 @@ -use std::path::PathBuf; +use std::path::{Path, PathBuf}; pub fn resolve_secret_dir() -> Option { crate::runtime::resolve_secret_dir() @@ -12,6 +12,13 @@ pub fn resolve_secret_cache_dir() -> Option { crate::runtime::resolve_secret_cache_dir() } +pub fn resolve_secret_timestamp_path(target_file: &Path) -> Option { + nils_common::provider_runtime::paths::resolve_secret_timestamp_path( + &crate::provider_profile::GEMINI_PROVIDER_PROFILE, + target_file, + ) +} + pub fn resolve_feature_dir() -> Option { crate::runtime::resolve_feature_dir() } diff --git a/crates/gemini-cli/src/rate_limits/mod.rs b/crates/gemini-cli/src/rate_limits/mod.rs index b148d7df..eb252060 100644 --- a/crates/gemini-cli/src/rate_limits/mod.rs +++ b/crates/gemini-cli/src/rate_limits/mod.rs @@ -2,10 +2,14 @@ use std::fs; use std::path::{Path, PathBuf}; use nils_common::env as shared_env; +use nils_common::fs as shared_fs; +use nils_common::provider_runtime::persistence::{ + SyncSecretsError, TimestampPolicy, sync_auth_to_matching_secrets, +}; use crate::auth; -use crate::fs as gemini_fs; use crate::paths; +use crate::provider_profile::GEMINI_PROVIDER_PROFILE; use crate::rate_limits::client::{UsageRequest, fetch_usage}; pub use nils_common::rate_limits_ansi as ansi; @@ -186,67 +190,36 @@ fn sync_auth_silent() -> Result<(), 
String> { None => return Ok(()), }; - if !auth_file.is_file() { - return Ok(()); - } - - let auth_key = match auth::identity_key_from_auth_file(&auth_file) { - Ok(Some(key)) => key, - _ => return Ok(()), - }; - - let auth_last_refresh = auth::last_refresh_from_auth_file(&auth_file).ok().flatten(); - let auth_contents = fs::read(&auth_file) - .map_err(|_| format!("gemini-rate-limits: failed to read {}", auth_file.display()))?; - - if let Some(secret_dir) = paths::resolve_secret_dir() - && let Ok(entries) = fs::read_dir(&secret_dir) - { - for entry in entries.flatten() { - let path = entry.path(); - if path.extension().and_then(|value| value.to_str()) != Some("json") { - continue; - } - - let candidate_key = match auth::identity_key_from_auth_file(&path) { - Ok(Some(key)) => key, - _ => continue, - }; - if candidate_key != auth_key { - continue; - } - - let secret_contents = fs::read(&path) - .map_err(|_| format!("gemini-rate-limits: failed to read {}", path.display()))?; - if secret_contents == auth_contents { - continue; - } - - auth::write_atomic(&path, &auth_contents, auth::SECRET_FILE_MODE) - .map_err(|_| format!("gemini-rate-limits: failed to write {}", path.display()))?; - - if let Some(timestamp_path) = sync_timestamp_path(&path) { - let _ = auth::write_timestamp(×tamp_path, auth_last_refresh.as_deref()); - } + let sync_result = match sync_auth_to_matching_secrets( + &GEMINI_PROVIDER_PROFILE, + &auth_file, + auth::SECRET_FILE_MODE, + TimestampPolicy::BestEffort, + ) { + Ok(result) => result, + Err(SyncSecretsError::ReadAuthFile { path, .. }) + | Err(SyncSecretsError::HashAuthFile { path, .. }) + | Err(SyncSecretsError::HashSecretFile { path, .. }) => { + return Err(format!( + "gemini-rate-limits: failed to read {}", + path.display() + )); } - } - - if let Some(auth_timestamp) = sync_timestamp_path(&auth_file) { - let _ = auth::write_timestamp(&auth_timestamp, auth_last_refresh.as_deref()); + Err(SyncSecretsError::WriteSecretFile { path, .. 
}) + | Err(SyncSecretsError::WriteTimestampFile { path, .. }) => { + return Err(format!( + "gemini-rate-limits: failed to write {}", + path.display() + )); + } + }; + if !sync_result.auth_file_present || !sync_result.auth_identity_present { + return Ok(()); } Ok(()) } -fn sync_timestamp_path(target_file: &Path) -> Option { - let cache_dir = paths::resolve_secret_cache_dir()?; - let name = target_file - .file_name() - .and_then(|name| name.to_str()) - .unwrap_or("auth.json"); - Some(cache_dir.join(format!("{name}.timestamp"))) -} - fn run_single_mode(args: &RateLimitsOptions, cached_mode: bool, output_json: bool) -> i32 { let target_file = match resolve_single_target(args.secret.as_deref()) { Ok(path) => path, @@ -728,7 +701,7 @@ pub fn cache_file_for_target(target_file: &Path) -> Result { } } - let hash = gemini_fs::sha256_file(target_file).map_err(|err| err.to_string())?; + let hash = shared_fs::sha256_file(target_file).map_err(|err| err.to_string())?; Ok(cache_dir.join(format!("auth_{}.kv", hash.to_lowercase()))) } @@ -846,7 +819,7 @@ pub fn write_starship_cache( lines.push(format!("weekly_reset_epoch={weekly_reset_epoch}")); let data = lines.join("\n"); - gemini_fs::write_atomic(&cache_file, data.as_bytes(), gemini_fs::SECRET_FILE_MODE) + shared_fs::write_atomic(&cache_file, data.as_bytes(), shared_fs::SECRET_FILE_MODE) .map_err(|err| err.to_string()) } @@ -1090,10 +1063,10 @@ fn current_secret_basename(secret_files: &[PathBuf]) -> Option { return None; } - let auth_hash = gemini_fs::sha256_file(&auth_file).ok(); + let auth_hash = shared_fs::sha256_file(&auth_file).ok(); if let Some(auth_hash) = auth_hash.as_deref() { for secret_file in secret_files { - if let Ok(secret_hash) = gemini_fs::sha256_file(secret_file) + if let Ok(secret_hash) = shared_fs::sha256_file(secret_file) && secret_hash == auth_hash && let Ok(name) = secret_file_basename(secret_file) { diff --git a/crates/gemini-cli/tests/fs.rs b/crates/gemini-cli/tests/fs.rs index a1dd5005..7dbec2bc 100644 
--- a/crates/gemini-cli/tests/fs.rs +++ b/crates/gemini-cli/tests/fs.rs @@ -1,5 +1,4 @@ -use gemini_cli::fs as gemini_fs; -use gemini_fs::{SECRET_FILE_MODE, sha256_file, write_atomic, write_timestamp}; +use nils_common::fs::{SECRET_FILE_MODE, sha256_file, write_atomic, write_timestamp}; use std::fs; use std::path::{Path, PathBuf}; use std::time::{SystemTime, UNIX_EPOCH}; diff --git a/crates/git-cli/src/commit.rs b/crates/git-cli/src/commit.rs index 0bf64b65..a9fc4a26 100644 --- a/crates/git-cli/src/commit.rs +++ b/crates/git-cli/src/commit.rs @@ -5,10 +5,10 @@ use crate::commit_shared::{ is_lockfile, parse_name_status_z, trim_trailing_newlines, }; use crate::prompt; -use crate::util; use anyhow::{Result, anyhow}; use nils_common::env as shared_env; use nils_common::git::{self as common_git, GitContextError}; +use nils_common::process; use nils_common::shell::{AnsiStripMode, strip_ansi as strip_ansi_impl}; use std::env; use std::io::Write; @@ -198,7 +198,7 @@ fn git_scope_available() -> bool { if env::var("GIT_CLI_FIXTURE_GIT_SCOPE_MODE").ok().as_deref() == Some("missing") { return false; } - util::cmd_exists("git-scope") + process::cmd_exists("git-scope") } fn git_scope_output(no_color: bool) -> Result { @@ -389,7 +389,7 @@ fn file_probe(blob_ref: &str) -> Option { return None; } - if !util::cmd_exists("file") { + if !process::cmd_exists("file") { return None; } diff --git a/crates/git-cli/src/lib.rs b/crates/git-cli/src/lib.rs index 413181c9..2b39f2d4 100644 --- a/crates/git-cli/src/lib.rs +++ b/crates/git-cli/src/lib.rs @@ -9,5 +9,4 @@ pub mod open; pub mod prompt; pub mod reset; pub mod usage; -pub mod util; pub mod utils; diff --git a/crates/git-cli/src/open.rs b/crates/git-cli/src/open.rs index 1b3031cb..766a9f2f 100644 --- a/crates/git-cli/src/open.rs +++ b/crates/git-cli/src/open.rs @@ -1,4 +1,3 @@ -use crate::util; use nils_common::process; use std::io::{self, Write}; use std::process::Output; @@ -249,7 +248,7 @@ fn open_pr(args: &[String]) -> i32 { } if 
collab.provider == Provider::Github - && util::cmd_exists("gh") + && process::cmd_exists("gh") && try_open_pr_with_gh(&ctx, &collab) { return 0; @@ -571,7 +570,7 @@ fn run_gh_pr_view(repo: Option<&str>, selector: Option<&str>) -> bool { } let args: Vec<&str> = owned_args.iter().map(String::as_str).collect(); - let output = match util::run_output("gh", &args) { + let output = match run_output("gh", &args) { Ok(output) => output, Err(_) => return false, }; @@ -920,7 +919,7 @@ fn open_url(url: &str, label: &str) -> i32 { return 1; }; - let output = match util::run_output(opener, &[url]) { + let output = match run_output(opener, &[url]) { Ok(output) => output, Err(err) => { eprintln!("{err}"); @@ -970,7 +969,7 @@ fn print_usage() { } fn run_git_output(args: &[&str]) -> Option { - match util::run_output("git", args) { + match run_output("git", args) { Ok(output) => Some(output), Err(err) => { eprintln!("{err}"); @@ -979,6 +978,12 @@ fn run_git_output(args: &[&str]) -> Option { } } +fn run_output(cmd: &str, args: &[&str]) -> Result { + process::run_output(cmd, args) + .map(|output| output.into_std_output()) + .map_err(|err| format!("spawn {cmd}: {err}")) +} + fn git_stdout_trimmed(args: &[&str]) -> Result { let output = run_git_output(args).ok_or(1)?; if !output.status.success() { diff --git a/crates/git-cli/src/util.rs b/crates/git-cli/src/util.rs deleted file mode 100644 index 38bd8172..00000000 --- a/crates/git-cli/src/util.rs +++ /dev/null @@ -1,86 +0,0 @@ -use anyhow::{Context, Result}; -use nils_common::process; -use std::process::Output; - -pub fn cmd_exists(cmd: &str) -> bool { - process::cmd_exists(cmd) -} - -pub fn run_output(cmd: &str, args: &[&str]) -> Result { - process::run_output(cmd, args) - .map(|output| output.into_std_output()) - .with_context(|| format!("spawn {cmd}")) -} - -#[cfg(test)] -mod tests { - use super::*; - use nils_test_support::{EnvGuard, GlobalStateLock, StubBinDir}; - use tempfile::TempDir; - - #[test] - fn 
run_output_returns_ok_for_nonzero_exit_status() { - let lock = GlobalStateLock::new(); - let stubs = StubBinDir::new(); - stubs.write_exe( - "git", - r#"#!/bin/bash -set -euo pipefail -if [[ "${1:-}" == "rev-parse" && "${2:-}" == "--is-inside-work-tree" ]]; then - exit 128 -fi -exit 0 -"#, - ); - - let _path_guard = EnvGuard::set(&lock, "PATH", &stubs.path_str()); - let output = run_output("git", &["rev-parse", "--is-inside-work-tree"]).expect("output"); - assert!(!output.status.success()); - } - - #[test] - fn git_repo_probe_semantics_are_stable() { - fn probe() -> bool { - run_output("git", &["rev-parse", "--is-inside-work-tree"]) - .map(|output| output.status.success()) - .unwrap_or(false) - } - - let lock = GlobalStateLock::new(); - - let success_stubs = StubBinDir::new(); - success_stubs.write_exe( - "git", - r#"#!/bin/bash -set -euo pipefail -if [[ "${1:-}" == "rev-parse" && "${2:-}" == "--is-inside-work-tree" ]]; then - exit 0 -fi -exit 1 -"#, - ); - let success_path_guard = EnvGuard::set(&lock, "PATH", &success_stubs.path_str()); - assert!(probe()); - drop(success_path_guard); - - let fail_stubs = StubBinDir::new(); - fail_stubs.write_exe( - "git", - r#"#!/bin/bash -set -euo pipefail -if [[ "${1:-}" == "rev-parse" && "${2:-}" == "--is-inside-work-tree" ]]; then - exit 128 -fi -exit 1 -"#, - ); - let fail_path_guard = EnvGuard::set(&lock, "PATH", &fail_stubs.path_str()); - assert!(!probe()); - drop(fail_path_guard); - - let empty = TempDir::new().expect("tempdir"); - let empty_path = empty.path().to_string_lossy().to_string(); - let _missing_path_guard = EnvGuard::set(&lock, "PATH", &empty_path); - assert!(!probe()); - } -} diff --git a/crates/git-cli/src/utils.rs b/crates/git-cli/src/utils.rs index 609c70f6..5e3679db 100644 --- a/crates/git-cli/src/utils.rs +++ b/crates/git-cli/src/utils.rs @@ -1,4 +1,5 @@ -use crate::{clipboard, util}; +use crate::clipboard; +use nils_common::process; use nils_common::shell::quote_posix_single; use std::io::{self, Write}; 
use std::process::Output; @@ -147,7 +148,7 @@ fn commit_hash(args: &[String]) -> i32 { } fn run_git_output(args: &[&str]) -> Option { - match util::run_output("git", args) { + match run_output("git", args) { Ok(output) => Some(output), Err(err) => { eprintln!("{err}"); @@ -156,6 +157,12 @@ fn run_git_output(args: &[&str]) -> Option { } } +fn run_output(cmd: &str, args: &[&str]) -> Result { + process::run_output(cmd, args) + .map(|output| output.into_std_output()) + .map_err(|err| format!("spawn {cmd}: {err}")) +} + fn git_stdout_trimmed(args: &[&str]) -> Result { let output = run_git_output(args).ok_or(1)?; if !output.status.success() { diff --git a/crates/nils-common/README.md b/crates/nils-common/README.md index 49c864ca..83f12a15 100644 --- a/crates/nils-common/README.md +++ b/crates/nils-common/README.md @@ -7,6 +7,11 @@ user-facing output text, warnings, color behavior, or exit-code contracts. ## Shared helper policy +Workspace-wide boundary decisions for Sprint 2 extraction lanes are frozen in +[`docs/specs/workspace-shared-crate-boundary-v1.md`](../../docs/specs/workspace-shared-crate-boundary-v1.md). +Workspace-level keep/delete ownership decisions are tracked in +[`docs/specs/workspace-doc-retention-matrix-v1.md`](../../docs/specs/workspace-doc-retention-matrix-v1.md). + ### What belongs in `nils-common` - Reusable helper logic used by multiple CLI crates. 
diff --git a/crates/nils-common/src/provider_runtime/mod.rs b/crates/nils-common/src/provider_runtime/mod.rs index a168a147..63e77878 100644 --- a/crates/nils-common/src/provider_runtime/mod.rs +++ b/crates/nils-common/src/provider_runtime/mod.rs @@ -5,6 +5,7 @@ pub mod exec; pub mod json; pub mod jwt; pub mod paths; +pub mod persistence; pub mod profile; pub use error::{CoreError, CoreErrorCategory, ProviderCategoryHint}; diff --git a/crates/nils-common/src/provider_runtime/paths.rs b/crates/nils-common/src/provider_runtime/paths.rs index 8cf15e48..427813a2 100644 --- a/crates/nils-common/src/provider_runtime/paths.rs +++ b/crates/nils-common/src/provider_runtime/paths.rs @@ -59,6 +59,22 @@ pub fn resolve_secret_cache_dir(profile: &ProviderProfile) -> Option { ) } +pub fn resolve_secret_timestamp_path( + profile: &ProviderProfile, + target_file: &Path, +) -> Option { + let cache_dir = resolve_secret_cache_dir(profile)?; + Some(secret_timestamp_path(&cache_dir, target_file)) +} + +pub fn secret_timestamp_path(cache_dir: &Path, target_file: &Path) -> PathBuf { + let file_name = target_file + .file_name() + .and_then(|name| name.to_str()) + .unwrap_or("auth.json"); + cache_dir.join(format!("{file_name}.timestamp")) +} + pub fn resolve_feature_dir(profile: &ProviderProfile) -> Option { let script_dir = resolve_script_dir()?; let feature_dir = script_dir @@ -125,3 +141,22 @@ fn parent_dir(path: &Path, levels: usize) -> Option { } Some(current.to_path_buf()) } + +#[cfg(test)] +mod tests { + use super::secret_timestamp_path; + use std::path::Path; + + #[test] + fn secret_timestamp_path_defaults_file_name_when_missing() { + let cache = Path::new("/tmp/cache"); + assert_eq!( + secret_timestamp_path(cache, Path::new("/tmp/alpha.json")), + cache.join("alpha.json.timestamp") + ); + assert_eq!( + secret_timestamp_path(cache, Path::new("")), + cache.join("auth.json.timestamp") + ); + } +} diff --git a/crates/nils-common/src/provider_runtime/persistence.rs 
b/crates/nils-common/src/provider_runtime/persistence.rs new file mode 100644 index 00000000..de039746 --- /dev/null +++ b/crates/nils-common/src/provider_runtime/persistence.rs @@ -0,0 +1,386 @@ +use std::fs; +use std::path::{Path, PathBuf}; + +use thiserror::Error; + +use crate::fs as shared_fs; + +use super::auth; +use super::paths; +use super::profile::ProviderProfile; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum TimestampPolicy { + Strict, + BestEffort, +} + +#[derive(Debug, Clone, PartialEq, Eq, Default)] +pub struct SyncSecretsResult { + pub auth_file_present: bool, + pub auth_identity_present: bool, + pub synced: usize, + pub skipped: usize, + pub updated_files: Vec, +} + +impl SyncSecretsResult { + fn auth_file_missing() -> Self { + Self { + auth_file_present: false, + auth_identity_present: false, + synced: 0, + skipped: 0, + updated_files: Vec::new(), + } + } + + fn auth_identity_missing() -> Self { + Self { + auth_file_present: true, + auth_identity_present: false, + synced: 0, + skipped: 0, + updated_files: Vec::new(), + } + } +} + +#[derive(Debug, Error)] +pub enum SyncSecretsError { + #[error("failed to hash auth file {path}: {source}")] + HashAuthFile { + path: PathBuf, + #[source] + source: shared_fs::FileHashError, + }, + #[error("failed to read auth file {path}: {source}")] + ReadAuthFile { + path: PathBuf, + #[source] + source: std::io::Error, + }, + #[error("failed to hash secret file {path}: {source}")] + HashSecretFile { + path: PathBuf, + #[source] + source: shared_fs::FileHashError, + }, + #[error("failed to write secret file {path}: {source}")] + WriteSecretFile { + path: PathBuf, + #[source] + source: shared_fs::AtomicWriteError, + }, + #[error("failed to write timestamp file {path}: {source}")] + WriteTimestampFile { + path: PathBuf, + #[source] + source: shared_fs::TimestampError, + }, +} + +pub fn sync_auth_to_matching_secrets( + profile: &ProviderProfile, + auth_file: &Path, + secret_file_mode: u32, + timestamp_policy: 
TimestampPolicy, +) -> Result { + if !auth_file.is_file() { + return Ok(SyncSecretsResult::auth_file_missing()); + } + + let auth_key = match auth::identity_key_from_auth_file(auth_file).ok().flatten() { + Some(value) => value, + None => return Ok(SyncSecretsResult::auth_identity_missing()), + }; + + let auth_last_refresh = auth::last_refresh_from_auth_file(auth_file).ok().flatten(); + let auth_hash = + shared_fs::sha256_file(auth_file).map_err(|source| SyncSecretsError::HashAuthFile { + path: auth_file.to_path_buf(), + source, + })?; + let auth_contents = fs::read(auth_file).map_err(|source| SyncSecretsError::ReadAuthFile { + path: auth_file.to_path_buf(), + source, + })?; + + let mut result = SyncSecretsResult { + auth_file_present: true, + auth_identity_present: true, + synced: 0, + skipped: 0, + updated_files: Vec::new(), + }; + + if let Some(secret_dir) = paths::resolve_secret_dir(profile) + && let Ok(entries) = fs::read_dir(secret_dir) + { + for entry in entries.flatten() { + let path = entry.path(); + if path.extension().and_then(|value| value.to_str()) != Some("json") { + continue; + } + + let candidate_key = match auth::identity_key_from_auth_file(&path).ok().flatten() { + Some(value) => value, + None => { + result.skipped += 1; + continue; + } + }; + if candidate_key != auth_key { + result.skipped += 1; + continue; + } + + let secret_hash = shared_fs::sha256_file(&path).map_err(|source| { + SyncSecretsError::HashSecretFile { + path: path.clone(), + source, + } + })?; + if secret_hash == auth_hash { + result.skipped += 1; + continue; + } + + shared_fs::write_atomic(&path, &auth_contents, secret_file_mode).map_err(|source| { + SyncSecretsError::WriteSecretFile { + path: path.clone(), + source, + } + })?; + write_timestamp_for_target( + profile, + &path, + auth_last_refresh.as_deref(), + timestamp_policy, + )?; + result.synced += 1; + result.updated_files.push(path); + } + } + + write_timestamp_for_target( + profile, + auth_file, + 
auth_last_refresh.as_deref(), + timestamp_policy, + )?; + + Ok(result) +} + +fn write_timestamp_for_target( + profile: &ProviderProfile, + target_file: &Path, + iso: Option<&str>, + timestamp_policy: TimestampPolicy, +) -> Result<(), SyncSecretsError> { + let Some(timestamp_path) = paths::resolve_secret_timestamp_path(profile, target_file) else { + return Ok(()); + }; + match shared_fs::write_timestamp(×tamp_path, iso) { + Ok(()) => Ok(()), + Err(source) => match timestamp_policy { + TimestampPolicy::Strict => Err(SyncSecretsError::WriteTimestampFile { + path: timestamp_path, + source, + }), + TimestampPolicy::BestEffort => Ok(()), + }, + } +} + +#[cfg(test)] +mod tests { + use super::{TimestampPolicy, sync_auth_to_matching_secrets}; + use crate::provider_runtime::{ + ExecInvocation, ExecProfile, HomePathSelection, PathsProfile, ProviderDefaults, + ProviderEnvKeys, ProviderProfile, + }; + use nils_test_support::{EnvGuard, GlobalStateLock}; + use pretty_assertions::assert_eq; + use std::sync::atomic::AtomicBool; + + const HEADER: &str = "eyJhbGciOiJub25lIiwidHlwIjoiSldUIn0"; + const PAYLOAD_ALPHA: &str = "eyJzdWIiOiJ1c2VyXzEyMyIsImVtYWlsIjoiYWxwaGFAZXhhbXBsZS5jb20iLCJodHRwczovL2FwaS5vcGVuYWkuY29tL2F1dGgiOnsiY2hhdGdwdF91c2VyX2lkIjoidXNlcl8xMjMiLCJlbWFpbCI6ImFscGhhQGV4YW1wbGUuY29tIn19"; + const PAYLOAD_BETA: &str = "eyJzdWIiOiJ1c2VyXzQ1NiIsImVtYWlsIjoiYmV0YUBleGFtcGxlLmNvbSIsImh0dHBzOi8vYXBpLm9wZW5haS5jb20vYXV0aCI6eyJjaGF0Z3B0X3VzZXJfaWQiOiJ1c2VyXzQ1NiIsImVtYWlsIjoiYmV0YUBleGFtcGxlLmNvbSJ9fQ"; + const SECRET_HOME: &[&str] = &[".config", "test-secrets"]; + const AUTH_HOME: &[&str] = &[".config", "test-auth.json"]; + static WARNED_INVALID_ALLOW_DANGEROUS: AtomicBool = AtomicBool::new(false); + + static TEST_PROFILE: ProviderProfile = ProviderProfile { + provider_name: "test", + env: ProviderEnvKeys { + model: "TEST_MODEL", + reasoning: "TEST_REASONING", + allow_dangerous_enabled: "TEST_ALLOW_DANGEROUS", + secret_dir: "TEST_SECRET_DIR", + auth_file: "TEST_AUTH_FILE", + 
secret_cache_dir: "TEST_SECRET_CACHE_DIR", + starship_enabled: "TEST_STARSHIP_ENABLED", + auto_refresh_enabled: "TEST_AUTO_REFRESH_ENABLED", + auto_refresh_min_days: "TEST_AUTO_REFRESH_MIN_DAYS", + }, + defaults: ProviderDefaults { + model: "test-model", + reasoning: "medium", + starship_enabled: "false", + auto_refresh_enabled: "false", + auto_refresh_min_days: "5", + }, + paths: PathsProfile { + feature_name: "test", + feature_tool_script: "test-tools.zsh", + secret_dir_home: HomePathSelection::ModernOnly(SECRET_HOME), + auth_file_home: HomePathSelection::ModernOnly(AUTH_HOME), + secret_cache_home: None, + }, + exec: ExecProfile { + default_caller_prefix: "test", + missing_prompt_label: "_test_exec_dangerous", + binary_name: "test-bin", + failed_exec_message_prefix: "test-tools: failed to run test exec", + invocation: ExecInvocation::CodexStyle, + warned_invalid_allow_dangerous: &WARNED_INVALID_ALLOW_DANGEROUS, + }, + }; + + fn token(payload: &str) -> String { + format!("{HEADER}.{payload}.sig") + } + + fn auth_json( + payload: &str, + account_id: &str, + refresh_token: &str, + last_refresh: &str, + ) -> String { + format!( + r#"{{"tokens":{{"access_token":"{}","id_token":"{}","refresh_token":"{}","account_id":"{}"}},"last_refresh":"{}"}}"#, + token(payload), + token(payload), + refresh_token, + account_id, + last_refresh + ) + } + + #[test] + fn sync_auth_to_matching_secrets_updates_only_identity_matches() { + let lock = GlobalStateLock::new(); + let dir = tempfile::TempDir::new().expect("tempdir"); + let secret_dir = dir.path().join("secrets"); + let cache_dir = dir.path().join("cache"); + std::fs::create_dir_all(&secret_dir).expect("secrets"); + std::fs::create_dir_all(&cache_dir).expect("cache"); + + let auth_file = dir.path().join("auth.json"); + let alpha = secret_dir.join("alpha.json"); + let beta = secret_dir.join("beta.json"); + std::fs::write( + &auth_file, + auth_json( + PAYLOAD_ALPHA, + "acct_001", + "refresh_new", + "2025-01-20T12:34:56Z", + ), + ) + 
.expect("auth"); + std::fs::write( + &alpha, + auth_json( + PAYLOAD_ALPHA, + "acct_001", + "refresh_old", + "2025-01-19T12:34:56Z", + ), + ) + .expect("alpha"); + std::fs::write( + &beta, + auth_json( + PAYLOAD_BETA, + "acct_002", + "refresh_beta", + "2025-01-18T12:34:56Z", + ), + ) + .expect("beta"); + std::fs::write(secret_dir.join("invalid.json"), "{invalid").expect("invalid"); + + let _secret = EnvGuard::set( + &lock, + "TEST_SECRET_DIR", + secret_dir.to_string_lossy().as_ref(), + ); + let _cache = EnvGuard::set( + &lock, + "TEST_SECRET_CACHE_DIR", + cache_dir.to_string_lossy().as_ref(), + ); + + let result = sync_auth_to_matching_secrets( + &TEST_PROFILE, + &auth_file, + crate::fs::SECRET_FILE_MODE, + TimestampPolicy::Strict, + ) + .expect("sync"); + + assert!(result.auth_file_present); + assert!(result.auth_identity_present); + assert_eq!(result.synced, 1); + assert_eq!(result.skipped, 2); + assert_eq!(result.updated_files, vec![alpha.clone()]); + assert_eq!( + std::fs::read(&alpha).expect("alpha"), + std::fs::read(&auth_file).expect("auth") + ); + assert_ne!( + std::fs::read(&beta).expect("beta"), + std::fs::read(&auth_file).expect("auth") + ); + assert_eq!( + std::fs::read_to_string(cache_dir.join("alpha.json.timestamp")) + .expect("alpha timestamp"), + "2025-01-20T12:34:56Z" + ); + assert_eq!( + std::fs::read_to_string(cache_dir.join("auth.json.timestamp")).expect("auth timestamp"), + "2025-01-20T12:34:56Z" + ); + } + + #[test] + fn sync_auth_to_matching_secrets_reports_missing_identity() { + let lock = GlobalStateLock::new(); + let dir = tempfile::TempDir::new().expect("tempdir"); + let secret_dir = dir.path().join("secrets"); + std::fs::create_dir_all(&secret_dir).expect("secrets"); + + let auth_file = dir.path().join("auth.json"); + std::fs::write(&auth_file, r#"{"tokens":{"refresh_token":"only"}}"#).expect("auth"); + + let _secret = EnvGuard::set( + &lock, + "TEST_SECRET_DIR", + secret_dir.to_string_lossy().as_ref(), + ); + let result = 
sync_auth_to_matching_secrets( + &TEST_PROFILE, + &auth_file, + crate::fs::SECRET_FILE_MODE, + TimestampPolicy::BestEffort, + ) + .expect("sync"); + assert!(result.auth_file_present); + assert!(!result.auth_identity_present); + assert_eq!(result.synced, 0); + assert_eq!(result.skipped, 0); + } +} diff --git a/crates/nils-term/docs/README.md b/crates/nils-term/docs/README.md index cae1a776..e42d11df 100644 --- a/crates/nils-term/docs/README.md +++ b/crates/nils-term/docs/README.md @@ -1,6 +1,9 @@ # nils-term Docs Index > Ownership: Maintained by the `nils-term` crate maintainers. Keep this index updated when docs are added, moved, or removed. +> +> Boundary note: `nils-term` owns terminal UX primitives only. Runtime/process/fs/auth persistence helpers belong in `nils-common` +> (`docs/specs/workspace-shared-crate-boundary-v1.md`). ## Specs diff --git a/crates/nils-test-support/README.md b/crates/nils-test-support/README.md index b6ef3db0..cd84ca67 100644 --- a/crates/nils-test-support/README.md +++ b/crates/nils-test-support/README.md @@ -8,6 +8,12 @@ It provides small utilities to keep tests deterministic when they need to manipu ## Shared helper policy +Runtime shared-crate ownership boundaries are tracked in +[`docs/specs/workspace-shared-crate-boundary-v1.md`](../../docs/specs/workspace-shared-crate-boundary-v1.md) so test-surface extractions +stay aligned with production-lane decisions. +Stale-test cleanup sequencing is frozen in +[`docs/specs/workspace-test-cleanup-lane-matrix-v1.md`](../../docs/specs/workspace-test-cleanup-lane-matrix-v1.md). + ### What belongs in `nils-test-support` - Test-only utilities reused by multiple crates (guards, git helpers, command wrappers, stubs). 
@@ -30,6 +36,8 @@ It provides small utilities to keep tests deterministic when they need to manipu - Command runners - `cmd`: run binaries with captured output (`CmdOutput`) and flexible options (`CmdOptions`), including resolved workspace-binary helpers (`run_resolved*`) + - `CmdOptions::with_env_remove_many`: remove multiple env vars in one call for deterministic harness setup + - `cmd::path_with_prepend_excluding_program`: construct a PATH that prepends stubs while filtering one real binary - Workspace binaries - `bin`: `resolve` finds `CARGO_BIN_EXE_*` or falls back to `target//` - Git helpers diff --git a/crates/nils-test-support/src/cmd.rs b/crates/nils-test-support/src/cmd.rs index 775b2624..99c25945 100644 --- a/crates/nils-test-support/src/cmd.rs +++ b/crates/nils-test-support/src/cmd.rs @@ -91,6 +91,13 @@ impl CmdOptions { self } + pub fn with_env_remove_many(mut self, keys: &[&str]) -> Self { + for key in keys { + self = self.with_env_remove(key); + } + self + } + pub fn with_path_prepend(self, dir: &Path) -> Self { let base = self .envs @@ -139,6 +146,20 @@ impl CmdOptions { } } +/// Build a `PATH` value with `prepend` inserted first and any directory that +/// contains `program` removed. +pub fn path_with_prepend_excluding_program(prepend: &Path, program: &str) -> String { + let mut paths: Vec = + std::env::split_paths(&std::env::var_os("PATH").unwrap_or_default()) + .filter(|dir| !dir.join(program).is_file()) + .collect(); + paths.insert(0, prepend.to_path_buf()); + std::env::join_paths(paths) + .expect("join PATH") + .to_string_lossy() + .to_string() +} + /// Run a binary with arguments, capturing `code`, `stdout`, and `stderr`. /// /// - `envs` overrides or adds environment variables (existing vars are preserved). 
diff --git a/crates/nils-test-support/tests/bin_cmd.rs b/crates/nils-test-support/tests/bin_cmd.rs index 31e9d00a..e03dfa14 100644 --- a/crates/nils-test-support/tests/bin_cmd.rs +++ b/crates/nils-test-support/tests/bin_cmd.rs @@ -117,6 +117,28 @@ printf "%s|%s" "${NTS_REMOVE_ME-unset}" "${NTS_KEEP_ME-unset}" assert_eq!(output.stdout_text(), "unset|present"); } +#[cfg(unix)] +#[test] +fn run_with_env_remove_many_clears_all_requested_variables() { + let lock = GlobalStateLock::new(); + let temp = TempDir::new().expect("tempdir"); + let script = r#"#!/bin/sh +printf "%s|%s|%s" "${NTS_REMOVE_A-unset}" "${NTS_REMOVE_B-unset}" "${NTS_KEEP-unset}" +"#; + write_exe(temp.path(), "env-remove-many-test", script); + let bin = temp.path().join("env-remove-many-test"); + + let _remove_a = EnvGuard::set(&lock, "NTS_REMOVE_A", "present"); + let _remove_b = EnvGuard::set(&lock, "NTS_REMOVE_B", "present"); + let _keep = EnvGuard::set(&lock, "NTS_KEEP", "present"); + + let options = cmd::CmdOptions::new().with_env_remove_many(&["NTS_REMOVE_A", "NTS_REMOVE_B"]); + let output = cmd::run_with(&bin, &[], &options); + + assert_eq!(output.code, 0); + assert_eq!(output.stdout_text(), "unset|unset|present"); +} + #[cfg(unix)] #[test] fn run_resolved_in_dir_with_stdin_str_supports_optional_text_stdin() { @@ -220,6 +242,41 @@ path-pick assert_eq!(prefixed.stdout_text(), "first"); } +#[cfg(unix)] +#[test] +fn path_with_prepend_excluding_program_filters_existing_program_and_prepends_stub_dir() { + let lock = GlobalStateLock::new(); + let temp = TempDir::new().expect("tempdir"); + let existing_program_dir = temp.path().join("existing"); + let kept_dir = temp.path().join("kept"); + let prepend_dir = temp.path().join("prepend"); + std::fs::create_dir_all(&existing_program_dir).expect("create existing dir"); + std::fs::create_dir_all(&kept_dir).expect("create kept dir"); + std::fs::create_dir_all(&prepend_dir).expect("create prepend dir"); + write_exe( + &existing_program_dir, + "path-filter-test", 
+ "#!/bin/sh\nexit 0\n", + ); + + let base_path = std::env::join_paths([existing_program_dir.as_path(), kept_dir.as_path()]) + .expect("join base path") + .to_string_lossy() + .to_string(); + let _path = EnvGuard::set(&lock, "PATH", &base_path); + + let filtered = cmd::path_with_prepend_excluding_program(&prepend_dir, "path-filter-test"); + let split = std::env::split_paths(std::ffi::OsStr::new(&filtered)).collect::>(); + + assert_eq!(split[0], prepend_dir); + assert!( + split + .iter() + .all(|dir| !dir.join("path-filter-test").is_file()) + ); + assert!(split.contains(&kept_dir)); +} + #[cfg(unix)] #[test] fn run_in_dir_with_overrides_options_cwd() { diff --git a/crates/plan-issue-cli/README.md b/crates/plan-issue-cli/README.md index 445c8040..06e033be 100644 --- a/crates/plan-issue-cli/README.md +++ b/crates/plan-issue-cli/README.md @@ -120,7 +120,6 @@ plan-issue-local build-plan-task-spec \ ## Specifications - [CLI contract v2](docs/specs/plan-issue-cli-contract-v2.md) -- [CLI contract v1](docs/specs/plan-issue-cli-contract-v1.md) - [State machine and gate invariants v1](docs/specs/plan-issue-state-machine-v1.md) - [Gate matrix v1](docs/specs/plan-issue-gate-matrix-v1.md) diff --git a/crates/plan-issue-cli/docs/README.md b/crates/plan-issue-cli/docs/README.md index ab3bf9b4..9f3327d0 100644 --- a/crates/plan-issue-cli/docs/README.md +++ b/crates/plan-issue-cli/docs/README.md @@ -17,6 +17,5 @@ Current runtime ownership: ## Specs - [plan-issue CLI contract v2](specs/plan-issue-cli-contract-v2.md) -- [plan-issue CLI contract v1](specs/plan-issue-cli-contract-v1.md) - [plan-issue state machine and gates v1](specs/plan-issue-state-machine-v1.md) - [plan-issue gate matrix v1](specs/plan-issue-gate-matrix-v1.md) diff --git a/crates/plan-issue-cli/docs/specs/plan-issue-cli-contract-v1.md b/crates/plan-issue-cli/docs/specs/plan-issue-cli-contract-v1.md deleted file mode 100644 index ef463e14..00000000 --- a/crates/plan-issue-cli/docs/specs/plan-issue-cli-contract-v1.md 
+++ /dev/null @@ -1,154 +0,0 @@ -# plan-issue CLI Contract v1 - -> Historical reference: see `plan-issue-cli-contract-v2.md` for the current runtime metadata ownership model where `split-prs` outputs -> grouping primitives only and `plan-issue-cli` materializes runtime execution metadata. - -## Purpose - -Define the v1 command contract for the Rust replacement of the current plan issue orchestration shell entrypoint -(`plan-issue-delivery-loop.sh`). - -This contract is the Sprint 1 source of truth for command names, required flags, gating behavior, and deterministic task-spec compatibility. - -## Scope - -- In scope: - - Canonical command surface and subcommand matrix. - - Shared option rules (`--repo`, `--dry-run`, grouping flags, summary flags). - - Deterministic task-spec output compatibility and default artifact locations. - - Gate-oriented behavior for sprint and plan transitions. -- Out of scope for this document: - - Internal state machine details (tracked in `plan-issue-state-machine-v1.md`). - - Full text/JSON output envelope details (expanded in later implementation tasks). - - GitHub API implementation internals. 
- -## CLI Surface (v1) - -```text -plan-issue [options] -``` - -v1 subcommands: - -- `build-task-spec` -- `build-plan-task-spec` -- `start-plan` -- `status-plan` -- `ready-plan` -- `close-plan` -- `cleanup-worktrees` -- `start-sprint` -- `ready-sprint` -- `accept-sprint` -- `multi-sprint-guide` - -## Command Matrix - -| Subcommand | Scope | GitHub dependency | Required core inputs | Primary outputs | -| ---------------------- | --------------------------------------------- | --------------------------------------- | --------------------------------------------------------- | ------------------------------------------------------------------------------------------ | -| `build-task-spec` | Sprint-scoped task split preview | no | `--plan`, `--sprint`, `--pr-grouping` | sprint TSV task-spec | -| `build-plan-task-spec` | Plan-scoped task split preview | no | `--plan`, `--pr-grouping` | plan TSV task-spec | -| `start-plan` | Initialize one plan issue | yes (unless dry-run) | `--plan`, `--pr-grouping` | issue creation + rendered issue body artifact | -| `status-plan` | Plan issue status snapshot | yes (unless `--body-file`) | `--issue` or `--body-file` | status summary and optional comment | -| `ready-plan` | Request final plan review | yes (unless `--body-file` only mode) | `--issue` or `--body-file` | review-ready signal (+ label/comment controls) | -| `close-plan` | Final gate + issue close | yes (except dry-run with `--body-file`) | `--approved-comment-url` and issue context | issue close + required worktree cleanup | -| `cleanup-worktrees` | Remove all issue-assigned worktrees | yes (to read issue body) | `--issue` | deleted worktree set (or dry-run listing) | -| `start-sprint` | Begin sprint execution on existing plan issue | yes (unless dry-run) | `--plan`, `--issue`, `--sprint`, `--pr-grouping` | sprint TSV, rendered subagent prompts, issue-row runtime-truth validation, kickoff comment | -| `ready-sprint` | Request sprint acceptance review | yes | `--plan`, 
`--issue`, `--sprint` | sprint review-ready comment | -| `accept-sprint` | Enforce merged-PR gate and mark sprint done | yes | `--plan`, `--issue`, `--sprint`, `--approved-comment-url` | task status sync to `done` + acceptance comment | -| `multi-sprint-guide` | Print repeatable command flow | no (with `--dry-run`) | `--plan` | execution guide text | - -## Shared Flag and Validation Rules - -- `--repo `: pass-through repository target for GitHub operations. -- `--dry-run`: prints write actions without mutating GitHub state. -- `--pr-grouping `: - - required for `build-task-spec`, `build-plan-task-spec`, `start-plan`, `start-sprint`. - - `per-spring` must be accepted as compatibility alias for `per-sprint`. - - with `--pr-grouping group --strategy auto|deterministic`, when a sprint resolves to exactly one shared PR group, runtime-truth/render - paths normalize `Execution Mode` to `per-sprint` (single-lane semantics). -- `--pr-group `: - - repeatable. - - valid only when `--pr-grouping group`. - - task key may be generated task id (`SxTy`) or plan task id (`Task N.M`). -- `--summary` and `--summary-file` are mutually exclusive where provided (`ready-plan`, `ready-sprint`). -- `close-plan --dry-run` requires `--body-file` when no live issue read is available. - -## Task Decomposition Runtime-Truth Contract (v1) - -- `## Task Decomposition` in the plan issue body is the single runtime-truth execution table for plan/sprint orchestration. -- No second issue-body dispatch table is introduced in v1; task dispatch artifacts are derived from `Task Decomposition`. -- Column roles are split as follows: - - runtime-truth execution columns: `Owner`, `Branch`, `Worktree`, `Execution Mode`, and lane metadata tokens in `Notes`. - - runtime-progress columns: `PR`, `Status` (may remain placeholders until execution/review advances). - - descriptive row identity columns: `Task`, `Summary`. 
-- `Owner` stores a stable dispatch alias (for example `subagent-s1-t1`, or a shared-lane `dispatch` alias), not a platform-internal - ephemeral spawned-agent identifier. -- `task-spec` TSV rows and subagent prompt artifacts must be derived from the same `Task Decomposition` runtime-truth rows and must not - intentionally diverge from the issue table. -- Lane canonicalization rules: - - rows that share one execution lane (`per-sprint` or `pr-shared`) must keep canonical lane metadata (`Owner`, `Branch`, `Worktree`, - lane-note tokens) synchronized across the lane. - - `--pr-grouping group --strategy auto|deterministic` single-lane sprints normalize to `Execution Mode=per-sprint` and use canonical - per-sprint lane metadata rather than per-task pseudo-lanes. - -## Deterministic Artifact Contracts - -### Task-spec TSV - -Header must remain exactly: - -```text -# task_id\tsummary\tbranch\tworktree\towner\tnotes\tpr_group -``` - -Notes field must preserve orchestration metadata tokens used by issue-table sync: - -- `sprint=S` -- `plan-task:Task N.M` (or deterministic fallback id) -- optional `deps=...` -- optional `validate=...` -- `pr-grouping=` -- `pr-group=` -- optional `shared-pr-anchor=` - -### Default output paths - -When explicit output paths are omitted, v1 keeps AGENT_HOME-based deterministic defaults: - -- plan task-spec: - - `$AGENT_HOME/out/plan-issue-delivery/-plan-tasks.tsv` -- sprint task-spec: - - `$AGENT_HOME/out/plan-issue-delivery/-sprint--tasks.tsv` -- plan issue body artifact: - - `$AGENT_HOME/out/plan-issue-delivery/-plan-issue-body.md` -- sprint prompt directory: - - `$AGENT_HOME/out/plan-issue-delivery/-sprint--subagent-prompts/` - -## Gate Semantics (v1) - -- Single-plan issue model: one plan maps to one GitHub issue for the full delivery lifecycle. -- `Task Decomposition` runtime-truth ownership: - - sprint execution and issue-sync flows read runtime-truth lane metadata from the issue table. 
- - task-spec and prompt generation are derived outputs, not an alternate source of runtime execution truth. -- Sprint ordering gate: - - `start-sprint` for sprint `N>1` is blocked until sprint `N-1` has merged PRs and `done` task statuses. -- Acceptance gate: - - `accept-sprint` requires approval comment URL and merged PRs for that sprint. -- Final close gate: - - `close-plan` requires final approval comment URL and close-gate checks to pass before issue close. - - successful `close-plan` must enforce task worktree cleanup. - -## Exit Code Contract - -- `0`: success. -- `1`: runtime failure, dependency failure, or gate failure. -- `2`: usage/argument error. - -## Compatibility Boundary - -- Until full Rust cutover is complete, behavior must preserve user-visible orchestration semantics from the shell workflow: - - command names, - - required flags and gate inputs, - - task-spec TSV compatibility, - - sprint/plan gate outcomes. diff --git a/crates/plan-issue-cli/docs/specs/plan-issue-cli-contract-v2.md b/crates/plan-issue-cli/docs/specs/plan-issue-cli-contract-v2.md index cb724e20..eecc8483 100644 --- a/crates/plan-issue-cli/docs/specs/plan-issue-cli-contract-v2.md +++ b/crates/plan-issue-cli/docs/specs/plan-issue-cli-contract-v2.md @@ -4,14 +4,12 @@ v2 defines the runtime metadata ownership model after split-prs output decoupling. -Key change from v1: +Current contract boundary: - `plan-tooling split-prs` provides grouping primitives only (`task_id`, `summary`, `pr_group`). - `plan-issue-cli` is the authority that materializes executable runtime metadata (`Owner`, `Branch`, `Worktree`, `Notes`) for task-spec artifacts and `Task Decomposition` rows. -`plan-issue-cli-contract-v1.md` remains as a historical reference. 
- ## Runtime Metadata Materialization (v2) `plan-issue-cli` runtime metadata is derived from: diff --git a/crates/plan-issue-cli/tests/fixtures/plans/plan-issue-rust-cli-full-delivery-plan.md b/crates/plan-issue-cli/tests/fixtures/plans/plan-issue-rust-cli-full-delivery-plan.md index f0a32eb9..26a1a2f2 100644 --- a/crates/plan-issue-cli/tests/fixtures/plans/plan-issue-rust-cli-full-delivery-plan.md +++ b/crates/plan-issue-cli/tests/fixtures/plans/plan-issue-rust-cli-full-delivery-plan.md @@ -62,9 +62,9 @@ This plan delivers a shell-free Rust implementation for the current plan-issue o **Parallelizable tasks**: - `Task 1.2` and `Task 1.3` can run in parallel after `Task 1.1`. -### Task 1.1: Write Rust CLI contract v1 and command matrix +### Task 1.1: Write Rust CLI contract v2 and command matrix - **Location**: - - `crates/plan-issue-cli/docs/specs/plan-issue-cli-contract-v1.md` + - `crates/plan-issue-cli/docs/specs/plan-issue-cli-contract-v2.md` - `crates/plan-issue-cli/README.md` - **Description**: Define command surface, argument rules, mutual exclusivity, exit codes, live vs local mode boundaries, and output schemas for `text` and `json`. - **Dependencies**: @@ -75,9 +75,9 @@ This plan delivers a shell-free Rust implementation for the current plan-issue o - Contract defines required and incompatible flags for both binaries. - Contract defines deterministic error envelope for JSON mode. 
- **Validation**: - - `test -f crates/plan-issue-cli/docs/specs/plan-issue-cli-contract-v1.md` + - `test -f crates/plan-issue-cli/docs/specs/plan-issue-cli-contract-v2.md` - `test -f crates/plan-issue-cli/README.md` - - `rg -n 'build-task-spec|start-plan|start-sprint|close-plan|multi-sprint-guide|format json' crates/plan-issue-cli/docs/specs/plan-issue-cli-contract-v1.md` + - `rg -n 'build-task-spec|start-plan|start-sprint|close-plan|multi-sprint-guide|format json' crates/plan-issue-cli/docs/specs/plan-issue-cli-contract-v2.md` ### Task 1.2: Capture baseline shell fixtures for parity assertions - **Location**: diff --git a/crates/plan-tooling/README.md b/crates/plan-tooling/README.md index 0cda52db..6640868e 100644 --- a/crates/plan-tooling/README.md +++ b/crates/plan-tooling/README.md @@ -138,4 +138,3 @@ plan-tooling completion zsh > completions/zsh/_plan-tooling - [Docs index](docs/README.md) - [split-prs contract v2](docs/specs/split-prs-contract-v2.md) -- [Migration runbook](docs/runbooks/split-prs-migration.md) diff --git a/crates/plan-tooling/docs/README.md b/crates/plan-tooling/docs/README.md index dbe5156f..83827b26 100644 --- a/crates/plan-tooling/docs/README.md +++ b/crates/plan-tooling/docs/README.md @@ -13,8 +13,6 @@ - [`split-prs Build Task-Spec Cutover`](runbooks/split-prs-build-task-spec-cutover.md): command mapping and parity checks for downstream `build-task-spec` migration. -- [`split-prs Migration`](runbooks/split-prs-migration.md): rollout, compatibility contract, - and rollback checklist for adopting `split-prs`. 
## Reports diff --git a/crates/plan-tooling/docs/runbooks/split-prs-migration.md b/crates/plan-tooling/docs/runbooks/split-prs-migration.md deleted file mode 100644 index adba5603..00000000 --- a/crates/plan-tooling/docs/runbooks/split-prs-migration.md +++ /dev/null @@ -1,121 +0,0 @@ -# split-prs Migration - -## Objective - -Migrate plan task splitting to `plan-tooling split-prs` grouping primitives with deterministic and -auto strategy compatibility, while moving runtime execution metadata materialization to -`plan-issue-cli`. - -## Before / After - -Before: - -```bash -build-task-spec --plan --sprint --pr-grouping [--pr-group ]... --task-spec-out -``` - -After (grouping primitives): - -```bash -plan-tooling split-prs \ - --file \ - --scope sprint \ - --sprint \ - --pr-grouping \ - [--pr-group ]... \ - --strategy deterministic \ - --format json -``` - -Auto group example (no full manual mapping): - -```bash -plan-tooling split-prs \ - --file \ - --scope sprint \ - --sprint \ - --pr-grouping group \ - --strategy auto \ - --format json -``` - -## Required Compatibility - -- Keep grouping output deterministic and replayable across repeated runs. -- Keep reduced TSV header exactly (when `--format tsv` is used): - -```text -# task_id\tsummary\tpr_group -``` - -- Runtime execution metadata (`branch`, `worktree`, `owner`, `notes`) is no longer part of - `split-prs` output and must be materialized by `plan-issue-cli` runtime lane logic. -- Preserve deterministic ordering for records and explain anchors so repeated runs are byte-stable. - -## Grouping Guidance - -- Use `per-sprint` when one shared PR should represent all sprint tasks. -- Use `group` for mixed isolated/shared PR shapes. -- In `pr-grouping=group` + `strategy=deterministic`, every selected task must have an explicit mapping. -- In `strategy=auto`, omit `--pr-grouping`; per-sprint grouping comes from plan metadata first, then `--default-pr-grouping`. 
-- In `strategy=auto` group-resolved sprints, explicit mappings are optional pins and remaining tasks are auto-assigned. - -## Auto Strategy Status - -- `--strategy auto` is implemented in v1 runtime. -- Grouping uses deterministic heuristics from `Complexity`, dependency topology, and `Location`. -- Explicit `--pr-group` mappings in auto/group mode are optional pins and still validated. -- Output ordering and reduced record schema remain deterministic. - -## Release Gate Checklist - -1. Verify deterministic command output against fixture expectations. -2. Verify auto output is byte-stable across repeated runs. -3. Verify downstream consumers use `plan-issue-cli` runtime materialization (instead of parsing - removed split output runtime metadata columns/notes). -4. Verify fallback/rollback command sequence is documented before rollout. - -## Deterministic Rollback Command Path - -If auto rollout is disabled or deferred, keep downstream command paths pinned to deterministic mode: - -```bash -plan-tooling split-prs \ - --file \ - --scope sprint \ - --sprint \ - --pr-grouping \ - [--pr-group ]... \ - --strategy deterministic \ - --format json -``` - -## Rollback - -If downstream integration fails: - -1. Keep `split-prs` implementation intact. -2. Revert downstream invocation wiring only. -3. Re-run fixture parity checks and reopen cutover PR once fixed. - -## Emergency Switchback (Restore Auto to Not-Implemented) - -Use this only if production incidents require disabling auto runtime behavior in the CLI itself. - -Impacted files: - -- `crates/plan-tooling/src/split_prs.rs` -- `crates/plan-tooling/tests/split_prs.rs` -- `crates/plan-tooling/docs/specs/split-prs-contract-v1.md` -- `crates/plan-tooling/README.md` - -Switchback commands: - -```bash -# Revert the auto-runtime introduction commit from issue #220 Sprint 2. -git revert --no-edit e1cdc5a - -# Validate fallback behavior and deterministic guardrails. 
-cargo test -p nils-plan-tooling --test split_prs split_prs_auto_not_implemented -- --exact -cargo test -p nils-plan-tooling --test split_prs split_prs_error_group_requires_mapping -- --exact -``` diff --git a/crates/plan-tooling/tests/fixtures/split_prs/auto_regression_matrix_plan.md b/crates/plan-tooling/tests/fixtures/split_prs/auto_regression_matrix_plan.md index e705f1ac..ade1e604 100644 --- a/crates/plan-tooling/tests/fixtures/split_prs/auto_regression_matrix_plan.md +++ b/crates/plan-tooling/tests/fixtures/split_prs/auto_regression_matrix_plan.md @@ -40,10 +40,10 @@ Fixture for auto strategy matrix coverage: complexity pressure, external blocker ### Task 1.4: Publish rollback and migration notes - **Location**: - - crates/plan-tooling/docs/runbooks/split-prs-migration.md + - crates/plan-tooling/docs/runbooks/split-prs-build-task-spec-cutover.md - **Description**: Lightweight docs task depending on external blocker integration. - **Dependencies**: - Task 1.3 - **Complexity**: 2 - **Validation**: - - rg -n 'rollback' crates/plan-tooling/docs/runbooks/split-prs-migration.md + - rg -n 'Compatibility Contract|Cutover' crates/plan-tooling/docs/runbooks/split-prs-build-task-spec-cutover.md diff --git a/crates/screen-record/tests/linux_unit.rs b/crates/screen-record/tests/linux_unit.rs index c2906cb7..48f71823 100644 --- a/crates/screen-record/tests/linux_unit.rs +++ b/crates/screen-record/tests/linux_unit.rs @@ -1,9 +1,8 @@ #[cfg(target_os = "linux")] mod linux_unit { use std::fs; - use std::path::{Path, PathBuf}; - use nils_test_support::{EnvGuard, GlobalStateLock, StubBinDir, prepend_path}; + use nils_test_support::{EnvGuard, GlobalStateLock, StubBinDir, cmd, prepend_path}; use screen_record::cli::{AudioMode, ContainerFormat, ImageFormat}; use screen_record::linux::ffmpeg; use screen_record::linux::portal::PortalCapture; @@ -188,18 +187,6 @@ esac .collect() } - fn path_with_stub_without_program(stub_dir: &Path, program: &str) -> String { - let mut paths: Vec = - 
std::env::split_paths(&std::env::var_os("PATH").unwrap_or_default()) - .filter(|dir| !dir.join(program).is_file()) - .collect(); - paths.insert(0, stub_dir.to_path_buf()); - std::env::join_paths(paths) - .expect("join PATH") - .to_string_lossy() - .to_string() - } - fn portal_capture_with_remote(node_id: u32) -> PortalCapture { use std::os::fd::OwnedFd; @@ -446,7 +433,7 @@ exit 1 let stubs = StubBinDir::new(); write_ffmpeg_stub(&stubs); - let path = path_with_stub_without_program(stubs.path(), "pactl"); + let path = cmd::path_with_prepend_excluding_program(stubs.path(), "pactl"); let _path_guard = EnvGuard::set(&lock, "PATH", &path); let _display_guard = EnvGuard::set(&lock, "DISPLAY", ":99"); diff --git a/docs/plans/markdown-gh-handling-audit-remediation-plan.md b/docs/plans/markdown-gh-handling-audit-remediation-plan.md deleted file mode 100644 index 46f3d4df..00000000 --- a/docs/plans/markdown-gh-handling-audit-remediation-plan.md +++ /dev/null @@ -1,441 +0,0 @@ -# Plan: Workspace Markdown Handling Audit and gh Write Hardening - -## Overview - -This plan inventories markdown handling across all existing production Rust source files and remediates every discovered gap in a serial, -sprint-gated flow. The repository keeps GitHub operations on the current `gh` CLI path and explicitly does not introduce -`crates/nils-common/src/github.rs`. The implementation focuses on shared markdown helper consistency, guarded markdown write paths, and -contract-preserving migrations. Each sprint must pass its own validation gate before the next sprint starts. - -## Scope - -- In scope: - - Inventory all markdown and GitHub markdown-write touchpoints under `crates/**/src/**/*.rs`. - - Define a machine-checkable audit status model and close every production row. - - Harden shared markdown helper behavior and migrate existing callsites to that contract. - - Keep `gh` adapters crate-local and enforce the boundary with explicit checks. 
-- Out of scope: - - Replacing `gh` with direct GitHub API clients. - - Adding `nils-common::github` or `crates/nils-common/src/github.rs`. - - Refactoring unrelated output UX or command behavior. - -## Assumptions - -1. `plan-tooling` remains available for plan parsing/validation/splitting checks. -2. `gh` is available where live GitHub issue/PR operations are executed. -3. Existing output contracts stay stable unless updated in paired specs/tests. -4. Required checks from `DEVELOPMENT.md` remain mandatory pre-delivery gates. - -## Success criteria - -- Audit file includes all production markdown/GitHub-write touchpoints with `status=open|resolved`. -- Final audit contains zero `status=open` rows. -- Markdown helper behavior is centralized by contract and covered by regression tests. -- GitHub boundary is enforced: crate-local `gh` adapters only, no `nils-common/src/github.rs`. - -## Sprint 1: Workspace-wide inventory and boundary contract - -**Goal**: Define strict audit boundaries and produce complete, machine-checkable inventory + ownership rules. **Demo/Validation**: - -- Command(s): - - `plan-tooling validate --file docs/plans/markdown-gh-handling-audit-remediation-plan.md` - - Source inventory probe: - - ```bash - rg -n --glob 'crates/**/src/**/*.rs' \ - 'render_.*markdown|markdown::|validate_markdown_payload|canonicalize_table_cell|code_block\(|heading\(|write_temp_markdown|body-file|run_output\("gh"' \ - crates - ``` - - - `test ! -f crates/nils-common/src/github.rs` -- Verify: - - Audit boundaries and row schema are locked and reproducible. - - Inventory covers all discovered source-level touchpoints with explicit owners. - - GitHub boundary decision is documented and guarded. 
-- **PR grouping intent**: `per-sprint` -- **Execution Profile**: `serial` -- Sprint scorecard: `ExecutionProfile=serial`, `TotalComplexity=14`, `CriticalPathComplexity=14`, `MaxBatchWidth=1`, - `OverlapHotspots=docs/specs/markdown-github-handling-audit-v1.md; crates/nils-common/docs/specs/markdown-helpers-contract-v1.md`. - -### Task 1.1: Define inventory boundary and audit row schema - -- **Location**: - - docs/specs/markdown-github-handling-audit-v1.md - - docs/plans/markdown-gh-handling-audit-remediation-plan.md -- **Description**: Define inventory rules (`crates/**/src/**/*.rs` include scope, exclusions, and risk classes) and lock a markdown table - schema with required columns including `status=open|resolved` and `test_ref=...`. -- **Dependencies**: - - none -- **Complexity**: 3 -- **Acceptance criteria**: - - Audit spec includes include/exclude rules and risk class definitions. - - Audit schema requires `crate`, `file`, `function`, `risk_class`, `owner`, `status`, and `test_ref` columns. 
-- **Validation**: - - `test -f docs/specs/markdown-github-handling-audit-v1.md` - - `rg -n 'status=(open|resolved)|test_ref=|risk_class|crates/\*\*/src/\*\*/\*.rs' docs/specs/markdown-github-handling-audit-v1.md` - -### Task 1.2: Populate complete source-level markdown/GitHub-write inventory - -- **Location**: - - docs/specs/markdown-github-handling-audit-v1.md - - crates/nils-common/src/markdown.rs - - crates/plan-issue-cli/src/execute.rs - - crates/plan-issue-cli/src/github.rs - - crates/plan-issue-cli/src/issue_body.rs - - crates/plan-issue-cli/src/task_spec.rs - - crates/api-testing-core/src/markdown.rs - - crates/api-testing-core/src/report.rs - - crates/api-testing-core/src/suite/summary.rs - - crates/api-rest/src/commands/report.rs - - crates/api-gql/src/commands/report.rs - - crates/api-grpc/src/commands/report.rs - - crates/api-websocket/src/commands/report.rs - - crates/memo-cli/src/preprocess/detect.rs - - crates/memo-cli/src/preprocess/validate.rs - - crates/plan-tooling/src/parse.rs - - crates/plan-tooling/src/validate.rs - - crates/plan-tooling/src/split_prs.rs -- **Description**: Run source-only discovery and capture every production touchpoint row in the audit table with owner and initial - `status=open` or `status=resolved`. -- **Dependencies**: - - Task 1.1 -- **Complexity**: 4 -- **Acceptance criteria**: - - Inventory includes all matches from defined source-discovery commands. - - Every row has a non-empty owner and valid status value. 
-- **Validation**: - - Markdown/GitHub touchpoint discovery: - - ```bash - rg -n --glob 'crates/**/src/**/*.rs' \ - 'render_.*markdown|validate_markdown_payload|canonicalize_table_cell|write_temp_markdown|run_output\("gh"|parse_markdown_row|parse_sprint_heading|render_summary_markdown' \ - crates - ``` - - - Inventory row coverage check: - - ```bash - set -euo pipefail - for f in \ - crates/nils-common/src/markdown.rs \ - crates/plan-issue-cli/src/execute.rs \ - crates/plan-issue-cli/src/github.rs \ - crates/plan-issue-cli/src/issue_body.rs \ - crates/plan-issue-cli/src/task_spec.rs \ - crates/api-testing-core/src/markdown.rs \ - crates/api-testing-core/src/report.rs \ - crates/api-testing-core/src/suite/summary.rs \ - crates/api-rest/src/commands/report.rs \ - crates/api-gql/src/commands/report.rs \ - crates/api-grpc/src/commands/report.rs \ - crates/api-websocket/src/commands/report.rs \ - crates/memo-cli/src/preprocess/detect.rs \ - crates/memo-cli/src/preprocess/validate.rs \ - crates/plan-tooling/src/parse.rs \ - crates/plan-tooling/src/validate.rs \ - crates/plan-tooling/src/split_prs.rs - do - rg -n --fixed-strings "$f" docs/specs/markdown-github-handling-audit-v1.md >/dev/null - done - ``` - - - `rg -n 'status=(open|resolved)' docs/specs/markdown-github-handling-audit-v1.md` - -### Task 1.3: Map inventory rows to concrete regression coverage - -- **Location**: - - docs/specs/markdown-github-handling-audit-v1.md - - crates/nils-common/tests/markdown_table_canonicalization.rs - - crates/plan-issue-cli/tests/live_issue_ops.rs - - crates/plan-issue-cli/tests/parity_guardrails.rs - - crates/api-testing-core/tests/report_history.rs -- **Description**: For each inventory row, map at least one existing or planned regression test and mark rows requiring new coverage. -- **Dependencies**: - - Task 1.2 -- **Complexity**: 4 -- **Acceptance criteria**: - - Each audit row references at least one test target. 
- - Rows lacking coverage are explicitly marked with planned test file and command. -- **Validation**: - - `cargo test -p nils-common --test markdown_table_canonicalization` - - `cargo test -p nils-plan-issue-cli --test live_issue_ops` - - `cargo test -p nils-api-testing-core --test report_history` - - `rg -n 'test_ref=' docs/specs/markdown-github-handling-audit-v1.md` - - `test -f docs/specs/markdown-github-handling-audit-v1.md && ! rg -n 'test_ref=missing' docs/specs/markdown-github-handling-audit-v1.md` - -### Task 1.4: Lock GitHub boundary and enforce crate ownership rules - -- **Location**: - - crates/nils-common/README.md - - crates/nils-common/docs/specs/markdown-helpers-contract-v1.md - - crates/plan-issue-cli/docs/specs/plan-issue-cli-contract-v2.md - - docs/specs/markdown-github-handling-audit-v1.md -- **Description**: Document and enforce that GitHub writes stay on crate-local `gh` adapters and shared markdown stays in `nils-common` - without adding a shared GitHub module. -- **Dependencies**: - - Task 1.3 -- **Complexity**: 3 -- **Acceptance criteria**: - - Docs explicitly state “keep `gh`, no `nils-common/src/github.rs`”. - - Allowed `gh` owners in production source are explicitly listed. -- **Validation**: - - `test ! -f crates/nils-common/src/github.rs` - - `rg -n --glob 'crates/**/src/**/*.rs' 'run_output\("gh"' crates` - - Boundary contract wording check: - - ```bash - rg -n 'no `nils-common/src/github.rs`|crate-local `gh`' \ - crates/nils-common/README.md \ - crates/nils-common/docs/specs/markdown-helpers-contract-v1.md \ - crates/plan-issue-cli/docs/specs/plan-issue-cli-contract-v2.md \ - docs/specs/markdown-github-handling-audit-v1.md - ``` - -## Sprint 2: Shared helper hardening and serial callsite migration - -**Goal**: Implement shared markdown helper improvements and migrate major callsites in controlled serial steps. 
**Demo/Validation**: - -- Command(s): - - `plan-tooling to-json --file docs/plans/markdown-gh-handling-audit-remediation-plan.md --sprint 2 --pretty` - - `cargo test -p nils-common` - - `cargo test -p nils-api-testing-core --test report_history` - - `cargo test -p nils-plan-issue-cli --test live_issue_ops` -- Verify: - - Shared helper contract is implemented and consumed by core callsites. - - Migration preserves output/guard behavior under existing tests. -- **PR grouping intent**: `per-sprint` -- **Execution Profile**: `serial` -- Sprint scorecard: `ExecutionProfile=serial`, `TotalComplexity=16`, `CriticalPathComplexity=16`, `MaxBatchWidth=1`, - `OverlapHotspots=crates/nils-common/src/markdown.rs; crates/api-testing-core/src/markdown.rs; crates/plan-issue-cli/src/github.rs`. - -### Task 2.1: Extend `nils-common::markdown` contract and tests - -- **Location**: - - crates/nils-common/src/markdown.rs - - crates/nils-common/src/lib.rs - - crates/nils-common/tests/markdown_table_canonicalization.rs - - crates/nils-common/docs/specs/markdown-helpers-contract-v1.md -- **Description**: Extend shared markdown primitives needed by audited callsites while preserving current payload and canonicalization - semantics. -- **Dependencies**: - - Task 1.4 -- **Complexity**: 5 -- **Acceptance criteria**: - - New helper APIs are documented and covered by unit/integration tests. - - Existing behavior for escaped-control guard and table canonicalization is unchanged. 
-- **Validation**: - - `cargo test -p nils-common` - -### Task 2.2: Migrate `api-testing-core` report builders to shared helper usage - -- **Location**: - - crates/api-testing-core/src/markdown.rs - - crates/api-testing-core/src/report.rs - - crates/api-testing-core/src/rest/report.rs - - crates/api-testing-core/src/graphql/report.rs - - crates/api-testing-core/src/grpc/report.rs - - crates/api-testing-core/src/websocket/report.rs - - crates/api-testing-core/tests/report_history.rs -- **Description**: Align report markdown rendering with shared helper contract and remove drift-prone duplicate behavior. -- **Dependencies**: - - Task 2.1 -- **Complexity**: 4 -- **Acceptance criteria**: - - `api-testing-core` report markdown output remains contract-stable. - - Duplicate markdown rendering drift points are removed or wrapped by a thin stable facade. -- **Validation**: - - `cargo test -p nils-api-testing-core --test report_history` - - `cargo test -p nils-api-testing-core --test cli_report` - -### Task 2.3: Migrate API command report entrypoints to updated markdown flow - -- **Location**: - - crates/api-rest/src/commands/report.rs - - crates/api-gql/src/commands/report.rs - - crates/api-grpc/src/commands/report.rs - - crates/api-websocket/src/commands/report.rs -- **Description**: Apply shared markdown flow consistently in API command crates and keep command-level output contracts unchanged. -- **Dependencies**: - - Task 2.2 -- **Complexity**: 4 -- **Acceptance criteria**: - - All API command report entrypoints use the updated shared markdown path. - - Existing command report tests remain green. 
-- **Validation**: - - `cargo test -p nils-api-rest --tests` - - `cargo test -p nils-api-gql --tests` - - `cargo test -p nils-api-grpc --tests` - - `cargo test -p nils-api-websocket --tests` - -### Task 2.4: Harden `plan-issue-cli` markdown guard paths for body/comment writes - -- **Location**: - - crates/plan-issue-cli/src/github.rs - - crates/plan-issue-cli/src/execute.rs - - crates/plan-issue-cli/src/render.rs - - crates/plan-issue-cli/src/issue_body.rs - - crates/plan-issue-cli/src/task_spec.rs -- **Description**: Ensure all `plan-issue-cli` markdown body/comment generation and write paths consistently apply shared guard semantics, - including strict vs force-mode behavior. -- **Dependencies**: - - Task 2.3 -- **Complexity**: 3 -- **Acceptance criteria**: - - Strict mode rejects escaped-control artifacts on all write paths. - - Force mode continues to bypass only contract-allowed paths. -- **Validation**: - - `cargo test -p nils-plan-issue-cli --test live_issue_ops` - - `cargo test -p nils-plan-issue-cli --test parity_guardrails` - - `cargo test -p nils-plan-issue-cli --test task_spec_flow` - -## Sprint 3: Closure, split executability checks, and delivery gates - -**Goal**: Close audit rows, prove plan executability deterministically, and pass repo-level required checks. **Demo/Validation**: - -- Command(s): - - `plan-tooling batches --file docs/plans/markdown-gh-handling-audit-remediation-plan.md --sprint 3 --format text` - - `./.agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh` - - `mkdir -p target/coverage && cargo llvm-cov nextest --profile ci --workspace --lcov` - `--output-path target/coverage/lcov.info --fail-under-lines 85` - - `scripts/ci/coverage-summary.sh target/coverage/lcov.info` -- Verify: - - Inventory is fully resolved (`status=open` count is zero). - - Executability checks pass for per-sprint deterministic split mapping. - - Repository required checks and coverage gate pass. 
-- **PR grouping intent**: `per-sprint` -- **Execution Profile**: `serial` -- Sprint scorecard: `ExecutionProfile=serial`, `TotalComplexity=15`, `CriticalPathComplexity=15`, `MaxBatchWidth=1`, - `OverlapHotspots=docs/specs/markdown-github-handling-audit-v1.md; crates/plan-issue-cli/tests/live_issue_ops.rs; crates/api-testing-core/tests/report_history.rs`. - -### Task 3.1: Close remaining markdown touchpoints in secondary crates - -- **Location**: - - crates/memo-cli/src/preprocess/detect.rs - - crates/memo-cli/src/preprocess/validate.rs - - crates/plan-tooling/src/parse.rs - - crates/plan-tooling/src/validate.rs - - crates/plan-tooling/src/split_prs.rs - - docs/specs/markdown-github-handling-audit-v1.md -- **Description**: Resolve or explicitly classify remaining secondary markdown touchpoints found during the full-source audit and update - their row status. -- **Dependencies**: - - Task 2.4 -- **Complexity**: 4 -- **Acceptance criteria**: - - Secondary-crate rows are marked `status=resolved` or documented as non-production exclusions per schema rules. - - No unresolved production row remains in the audit table. -- **Validation**: - - `test -f docs/specs/markdown-github-handling-audit-v1.md && ! rg -n 'status=open' docs/specs/markdown-github-handling-audit-v1.md` - -### Task 3.2: Add explicit regression suites for markdown-safe GitHub lifecycle - -- **Location**: - - crates/plan-issue-cli/tests/live_issue_ops.rs - - crates/plan-issue-cli/tests/live_start_sprint_runtime_truth.rs - - crates/plan-issue-cli/tests/runtime_truth_plan_and_sprint_flow.rs - - crates/plan-issue-cli/tests/auto_single_lane_runtime_truth.rs - - crates/api-testing-core/tests/report_history.rs -- **Description**: Add/upgrade explicit integration regressions to verify strict rejection, force bypass, and markdown output stability - across lifecycle flows. 
-- **Dependencies**: - - Task 3.1 -- **Complexity**: 5 -- **Acceptance criteria**: - - Listed integration suites validate markdown lifecycle behaviors with deterministic assertions. - - Coverage matrix rows tied to these suites are marked resolved. -- **Validation**: - - `cargo test -p nils-plan-issue-cli --test live_issue_ops` - - `cargo test -p nils-plan-issue-cli --test live_start_sprint_runtime_truth` - - `cargo test -p nils-plan-issue-cli --test runtime_truth_plan_and_sprint_flow` - - `cargo test -p nils-plan-issue-cli --test auto_single_lane_runtime_truth` - - `cargo test -p nils-api-testing-core --test report_history` - -### Task 3.3: Prove split executability with deterministic per-sprint checks - -- **Location**: - - docs/plans/markdown-gh-handling-audit-remediation-plan.md -- **Description**: Execute deterministic split checks for every sprint and assert split record counts match task counts for this plan. -- **Dependencies**: - - Task 3.2 -- **Complexity**: 2 -- **Acceptance criteria**: - - `split-prs` deterministic checks pass for sprints 1-3 with per-sprint grouping. - - Record counts match expected task counts for each sprint. 
-- **Validation**: - - Sprint 1 record count check: - - ```bash - test "$( - plan-tooling split-prs --file docs/plans/markdown-gh-handling-audit-remediation-plan.md \ - --scope sprint --sprint 1 --pr-grouping per-sprint --strategy deterministic --format json | - rg -o '"task_id"' | wc -l | tr -d ' ' - )" -eq 4 - ``` - - - Sprint 2 record count check: - - ```bash - test "$( - plan-tooling split-prs --file docs/plans/markdown-gh-handling-audit-remediation-plan.md \ - --scope sprint --sprint 2 --pr-grouping per-sprint --strategy deterministic --format json | - rg -o '"task_id"' | wc -l | tr -d ' ' - )" -eq 4 - ``` - - - Sprint 3 record count check: - - ```bash - test "$( - plan-tooling split-prs --file docs/plans/markdown-gh-handling-audit-remediation-plan.md \ - --scope sprint --sprint 3 --pr-grouping per-sprint --strategy deterministic --format json | - rg -o '"task_id"' | wc -l | tr -d ' ' - )" -eq 4 - ``` - -### Task 3.4: Run required gates and publish final closure summary - -- **Location**: - - docs/specs/markdown-github-handling-audit-v1.md - - docs/plans/markdown-gh-handling-audit-remediation-plan.md - - DEVELOPMENT.md -- **Description**: Execute full required checks and coverage, then publish closure summary confirming zero open rows and boundary compliance - (`gh` retained, no shared GitHub module). -- **Dependencies**: - - Task 3.3 -- **Complexity**: 4 -- **Acceptance criteria**: - - Required checks pass and coverage gate is >= 85%. - - Closure summary explicitly confirms `test ! -f crates/nils-common/src/github.rs` and zero open rows. -- **Validation**: - - `test ! -f crates/nils-common/src/github.rs` - - `test -f docs/specs/markdown-github-handling-audit-v1.md && ! 
rg -n 'status=open' docs/specs/markdown-github-handling-audit-v1.md` - - `./.agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh` - - `mkdir -p target/coverage && cargo llvm-cov nextest --profile ci --workspace --lcov` - `--output-path target/coverage/lcov.info --fail-under-lines 85` - - `scripts/ci/coverage-summary.sh target/coverage/lcov.info` - -## Testing Strategy - -- Unit: - - `nils-common` markdown helper unit/integration tests. - - `api-testing-core` markdown report rendering tests. -- Integration: - - `plan-issue-cli` integration suites for markdown body/comment lifecycle paths. - - API command crate integration tests for markdown report outputs. -- Executability and gates: - - `plan-tooling validate`, `to-json`, `batches`, and per-sprint `split-prs` deterministic checks. - - Required repository checks and coverage gate from `DEVELOPMENT.md`. - -## Risks & gotchas - -- Shared-helper adoption can alter whitespace/newline behavior if not guarded by output tests. -- Guard strictness changes can break force-mode expectations if bypass boundaries drift. -- Full-source inventory can surface high-volume low-risk rows; exclusion criteria must stay explicit and deterministic. - -## Rollback plan - -1. Revert Sprint 2 migration commits first (shared-helper adoption and guard rewires), keep Sprint 1 inventory/spec artifacts. -2. Re-run targeted suite commands for `nils-common`, `nils-api-testing-core`, and `nils-plan-issue-cli` to confirm baseline behavior - recovery. -3. If needed, restore prior crate-local markdown rendering paths while preserving `gh` adapter ownership rules. -4. Keep audit table and mark rolled-back rows back to `status=open`; restart remediation from last green checkpoint. 
diff --git a/docs/plans/third-party-licenses-notices-release-packaging-plan.md b/docs/plans/third-party-licenses-notices-release-packaging-plan.md deleted file mode 100644 index a2e3a5ab..00000000 --- a/docs/plans/third-party-licenses-notices-release-packaging-plan.md +++ /dev/null @@ -1,327 +0,0 @@ -# Plan: Third-Party Licenses/Notices Automation and Release Packaging - -## Overview - -This plan introduces a deterministic automation flow that generates `THIRD_PARTY_LICENSES.md` and `THIRD_PARTY_NOTICES.md`, enforces -freshness in CI, and ensures both files are included in GitHub release tarballs. The rollout is sprint-gated: generator contract first, -then CI enforcement, then release packaging updates. Existing CLI behavior and runtime contracts remain unchanged outside legal/compliance -artifacts. The implementation prioritizes deterministic output, fail-closed CI behavior, and release artifact traceability. - -## Scope - -- In scope: - - Add a repository script to generate `THIRD_PARTY_LICENSES.md` and `THIRD_PARTY_NOTICES.md` from `Cargo.lock`/`cargo metadata`. - - Define and document the artifact contract (sections, deterministic ordering, failure behavior, regeneration workflow). - - Add a CI audit command that fails when generated artifacts drift from source-of-truth dependencies. - - Wire the audit into required checks and GitHub CI workflows. - - Update release packaging to include both files in tarballs. -- Out of scope: - - Legal review of every upstream dependency notice requirement beyond machine-extractable metadata and obvious notice files. - - Replacing the existing release model or changing binary build targets. - - Introducing new external package managers or non-repo build systems. - -## Assumptions - -1. `cargo metadata --format-version 1 --locked` remains the canonical dependency inventory source. -2. `python3` and `cargo` are available in local dev and CI (already part of repository tooling assumptions). -3. 
Root-level `THIRD_PARTY_LICENSES.md` and `THIRD_PARTY_NOTICES.md` are acceptable release artifact locations. -4. Some crates may not ship an explicit `NOTICE` file; the notices artifact may include deterministic "no explicit NOTICE file discovered" - markers for those entries. - -## Success criteria - -- Running a single generator command updates both third-party artifacts deterministically with no manual editing. -- CI fails when either artifact is stale relative to the current dependency graph. -- `./.agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh` includes the new audit gate. -- Release tarballs produced by `.github/workflows/release.yml` include: - - `THIRD_PARTY_LICENSES.md` - - `THIRD_PARTY_NOTICES.md` -- README release packaging contract documents both files as required shipped assets. - -## Sprint 1: Generator contract and deterministic artifacts - -**Goal**: Implement deterministic generation for licenses + notices and lock the artifact contract before CI/release integration. **Demo/Validation**: - -- Command(s): - - `plan-tooling validate --file docs/plans/third-party-licenses-notices-release-packaging-plan.md` - - `bash scripts/generate-third-party-artifacts.sh --check` - - `bash scripts/generate-third-party-artifacts.sh --write` -- Verify: - - Both root artifacts are generated from locked dependency metadata. - - Output ordering/content are deterministic and re-runs are no-op when dependencies are unchanged. 
-- **PR grouping intent**: group -- **Execution Profile**: parallel-x2 -- Sprint scorecard: - - `Execution Profile`: parallel-x2 - - `TotalComplexity`: 16 - - `CriticalPathComplexity`: 12 - - `MaxBatchWidth`: 2 - - `OverlapHotspots`: `scripts/generate-third-party-artifacts.sh`; `THIRD_PARTY_LICENSES.md`; `THIRD_PARTY_NOTICES.md` - -### Task 1.1: Define artifact schema and generation contract - -- **Location**: - - docs/specs/third-party-artifacts-contract-v1.md - - THIRD_PARTY_LICENSES.md - - THIRD_PARTY_NOTICES.md -- **Description**: Define required sections, column schema, deterministic ordering rules, and failure semantics for both generated files. -- **Dependencies**: - - none -- **Complexity**: 3 -- **Acceptance criteria**: - - Contract spec defines mandatory headers/sections for both artifacts. - - Contract includes deterministic ordering keys and regeneration triggers. - - Contract documents `--check` vs `--write` behavior. -- **Validation**: - - `test -f docs/specs/third-party-artifacts-contract-v1.md` - - `rg -n 'THIRD_PARTY_LICENSES\.md|THIRD_PARTY_NOTICES\.md|deterministic|--check|--write' docs/specs/third-party-artifacts-contract-v1.md` - -### Task 1.2: Implement generator entrypoint for both artifacts - -- **Location**: - - scripts/generate-third-party-artifacts.sh - - THIRD_PARTY_LICENSES.md - - THIRD_PARTY_NOTICES.md -- **Description**: Build a single entrypoint that reads `cargo metadata --locked`, renders license summary/list plus notice sections, and writes - both artifacts with stable sorting and formatting. -- **Dependencies**: - - Task 1.1 -- **Complexity**: 5 -- **Acceptance criteria**: - - `--write` regenerates both artifacts in one run. - - `--check` exits non-zero if in-repo artifacts differ from regenerated output. - - Generator output is stable across repeated runs with unchanged lockfile. 
-- **Validation**: - - `bash scripts/generate-third-party-artifacts.sh --write` - - `bash scripts/generate-third-party-artifacts.sh --check` - - `git diff --exit-code -- THIRD_PARTY_LICENSES.md THIRD_PARTY_NOTICES.md` - -### Task 1.3: Add deterministic notice extraction policy and fallback markers - -- **Location**: - - scripts/generate-third-party-artifacts.sh - - docs/specs/third-party-artifacts-contract-v1.md - - THIRD_PARTY_NOTICES.md -- **Description**: Implement deterministic notice extraction (crate notice/license-file references when discoverable) and explicit - fallback text when no crate-level notice file is found. -- **Dependencies**: - - Task 1.2 -- **Complexity**: 4 -- **Acceptance criteria**: - - `THIRD_PARTY_NOTICES.md` contains per-crate notice entries with deterministic ordering. - - Entries without discoverable notice files use standardized fallback wording. - - Contract spec defines fallback wording exactly. -- **Validation**: - - `bash scripts/generate-third-party-artifacts.sh --write` - - `rg -n '## Dependency Notices|No explicit NOTICE file discovered' THIRD_PARTY_NOTICES.md` - - `rg -n 'fallback wording' docs/specs/third-party-artifacts-contract-v1.md` - -### Task 1.4: Add script-level regression tests and usage docs - -- **Location**: - - tests/third-party-artifacts/generator.test.sh - - README.md - - BINARY_DEPENDENCIES.md -- **Description**: Add regression tests for `--check/--write` behavior and document the generator as a repository-local compliance tool. -- **Dependencies**: - - Task 1.2 -- **Complexity**: 4 -- **Acceptance criteria**: - - Test covers no-op determinism and drift detection paths. - - README includes regeneration command for contributors. - - `BINARY_DEPENDENCIES.md` references the new script entrypoint. 
-- **Validation**: - - `bash tests/third-party-artifacts/generator.test.sh` - - `rg -n 'generate-third-party-artifacts\.sh' README.md BINARY_DEPENDENCIES.md` - -## Sprint 2: CI freshness gate and required-check integration - -**Goal**: Enforce generated-artifact freshness in local required checks and GitHub CI for both Linux and macOS jobs. **Demo/Validation**: - -- Command(s): - - `bash scripts/ci/third-party-artifacts-audit.sh --strict` - - `./.agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh --docs-only` - - `./.agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh` -- Verify: - - CI/local checks fail closed when artifacts are stale. - - The new audit gate is part of the canonical required-checks entrypoint. -- **PR grouping intent**: group -- **Execution Profile**: parallel-x2 -- Sprint scorecard: - - `Execution Profile`: parallel-x2 - - `TotalComplexity`: 15 - - `CriticalPathComplexity`: 11 - - `MaxBatchWidth`: 2 - - `OverlapHotspots`: `scripts/ci/third-party-artifacts-audit.sh`; `.github/workflows/ci.yml`; `.agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh` - -### Task 2.1: Implement strict third-party artifact audit script - -- **Location**: - - scripts/ci/third-party-artifacts-audit.sh -- **Description**: Add a strict CI audit script that regenerates artifacts in check mode and reports precise drift diagnostics. -- **Dependencies**: - - Task 1.3 - - Task 1.4 -- **Complexity**: 4 -- **Acceptance criteria**: - - Supports `--strict` and emits PASS/WARN/FAIL style messages consistent with existing audits. - - Fails when generated output differs or required files are missing. 
-- **Validation**: - - `bash scripts/ci/third-party-artifacts-audit.sh --strict` - - `bash scripts/ci/third-party-artifacts-audit.sh` - -### Task 2.2: Add audit gate to required-checks entrypoint and contributor docs - -- **Location**: - - .agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh - - DEVELOPMENT.md - - AGENTS.md -- **Description**: Insert the new audit into full-check flows and update required-check documentation to keep local/CI policy aligned. -- **Dependencies**: - - Task 2.1 -- **Complexity**: 3 -- **Acceptance criteria**: - - Required-check script runs third-party audit before fmt/clippy/tests. - - `DEVELOPMENT.md` and root `AGENTS.md` list the new required command. -- **Validation**: - - `rg -n 'third-party-artifacts-audit\.sh' .agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh` - - `rg -n 'third-party-artifacts-audit\.sh' DEVELOPMENT.md AGENTS.md` - - `./.agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh` - -### Task 2.3: Wire audit into GitHub CI workflows (Linux + macOS) - -- **Location**: - - .github/workflows/ci.yml -- **Description**: Add explicit third-party artifact audit steps in Linux and macOS CI jobs, matching shell conventions used by each job. -- **Dependencies**: - - Task 2.1 -- **Complexity**: 4 -- **Acceptance criteria**: - - CI test and test_macos jobs each execute the new audit command. - - Failure surface is visible before expensive build/test stages when feasible. 
-- **Validation**: - - `rg -n 'third-party-artifacts-audit\.sh' .github/workflows/ci.yml` - - `test "$(rg -n 'third-party-artifacts-audit\\.sh' .github/workflows/ci.yml | wc -l | tr -d ' ')" -ge 2` - -### Task 2.4: Add CI regression tests for audit script behavior - -- **Location**: - - tests/third-party-artifacts/audit.test.sh - - scripts/ci/third-party-artifacts-audit.sh -- **Description**: Add shell regression tests that assert strict/non-strict behavior and drift detection output semantics. -- **Dependencies**: - - Task 2.2 -- **Complexity**: 4 -- **Acceptance criteria**: - - Tests assert exit code behavior for clean and dirty generated artifacts. - - Tests validate core PASS/FAIL message format. -- **Validation**: - - `bash tests/third-party-artifacts/audit.test.sh` - -## Sprint 3: Release workflow packaging and artifact verification - -**Goal**: Ensure release tarballs always include regenerated third-party artifacts and fail if packaging contract is broken. **Demo/Validation**: - -- Command(s): - - `bash scripts/generate-third-party-artifacts.sh --check` - - `host_target="$(rustc -vV | sed -n 's/^host: //p')" && cargo build --release --workspace --locked --target "$host_target"` - - `host_target="$(rustc -vV | sed -n 's/^host: //p')" && bash scripts/ci/release-tarball-third-party-audit.sh --target "$host_target"` -- Verify: - - Release packaging regenerates and ships both artifacts. - - Tarball contract fails closed when either file is missing. 
-- **PR grouping intent**: per-sprint -- **Execution Profile**: serial -- Sprint scorecard: - - `Execution Profile`: serial - - `TotalComplexity`: 14 - - `CriticalPathComplexity`: 14 - - `MaxBatchWidth`: 1 - - `OverlapHotspots`: `.github/workflows/release.yml`; `README.md`; `scripts/ci/release-tarball-third-party-audit.sh` - -### Task 3.1: Regenerate third-party artifacts in release build job - -- **Location**: - - .github/workflows/release.yml - - scripts/generate-third-party-artifacts.sh -- **Description**: Add a release workflow step that runs generator check/write deterministically before packaging and fails on generation errors. -- **Dependencies**: - - Task 2.2 - - Task 2.3 - - Task 2.4 -- **Complexity**: 4 -- **Acceptance criteria**: - - Each matrix build job runs the generator before tarball creation. - - Release job halts if generation fails. -- **Validation**: - - `rg -n 'generate-third-party-artifacts\.sh' .github/workflows/release.yml` - -### Task 3.2: Include artifacts in tarball packaging contract - -- **Location**: - - .github/workflows/release.yml - - README.md -- **Description**: Update packaging copy steps and README contract text so release tarballs explicitly contain both third-party artifacts. -- **Dependencies**: - - Task 3.1 -- **Complexity**: 3 -- **Acceptance criteria**: - - Package step copies `THIRD_PARTY_LICENSES.md` and `THIRD_PARTY_NOTICES.md` into release output directory. - - README release artifact contract lists both files. -- **Validation**: - - `rg -n 'THIRD_PARTY_LICENSES\.md|THIRD_PARTY_NOTICES\.md' .github/workflows/release.yml README.md` - - `tar -tzf dist/nils-cli-*.tar.gz | rg 'THIRD_PARTY_(LICENSES|NOTICES)\\.md'` - -### Task 3.3: Add release tarball compliance audit script - -- **Location**: - - scripts/ci/release-tarball-third-party-audit.sh - - .github/workflows/release.yml -- **Description**: Add a script that inspects generated tarball contents and fails if required third-party artifacts are absent. 
-- **Dependencies**: - - Task 3.2 -- **Complexity**: 4 -- **Acceptance criteria**: - - Audit validates tarball file list includes both third-party artifacts. - - Release workflow executes the audit after tarball creation and before upload. -- **Validation**: - - `bash scripts/ci/release-tarball-third-party-audit.sh --target x86_64-unknown-linux-gnu` - - `rg -n 'release-tarball-third-party-audit\.sh' .github/workflows/release.yml` - -### Task 3.4: Add end-to-end release fixture test for artifact presence - -- **Location**: - - tests/third-party-artifacts/release-package.test.sh - - scripts/ci/release-tarball-third-party-audit.sh -- **Description**: Add an E2E shell test that builds a local release package fixture and asserts third-party files exist in extracted archive. -- **Dependencies**: - - Task 3.3 -- **Complexity**: 3 -- **Acceptance criteria**: - - Test verifies tarball contains both root third-party artifacts and reports clear failure diagnostics. - - Test is runnable in CI-like environments without interactive dependencies. -- **Validation**: - - `bash tests/third-party-artifacts/release-package.test.sh` - -## Testing Strategy - -- Unit: - - Script-level parsing/render helpers (if split into helper functions) for deterministic sorting and fallback marker rendering. -- Integration: - - Generator `--write`/`--check` regression tests and CI audit strict/non-strict mode tests. -- E2E/manual: - - Release package fixture test and workflow-level verification that uploaded tarballs include both third-party artifacts. - -## Risks & gotchas - -- Notice completeness risk: some crates may have nuanced notice obligations not discoverable via metadata alone. -- Determinism risk: timestamps or environment-dependent paths can cause false CI drift unless explicitly normalized. -- CI runtime risk: generating notice content from vendored crate sources may increase job time; audit should remain lightweight. 
-- Release race risk: if generator runs after packaging copy operations, tarballs can ship stale artifacts. - -## Rollback plan - -- Revert CI/release workflow wiring first (`.github/workflows/ci.yml`, `.github/workflows/release.yml`) to restore previous pipeline behavior. -- Keep generator script and docs behind non-blocking manual invocation until drift/issues are resolved. -- If release packaging gate causes urgent blocker, temporarily disable only `release-tarball-third-party-audit.sh` step while retaining - `scripts/generate-third-party-artifacts.sh --check` in CI to preserve freshness signal. -- Re-run required checks and one release dry-run after rollback adjustments to confirm pipeline stability. diff --git a/docs/runbooks/cli-completion-development-standard.md b/docs/runbooks/cli-completion-development-standard.md index e6283eb4..eb4edd6c 100644 --- a/docs/runbooks/cli-completion-development-standard.md +++ b/docs/runbooks/cli-completion-development-standard.md @@ -166,7 +166,7 @@ Rules: Every completion-required CLI migration must declare and validate this metadata contract in both: -- the CLI row in `docs/reports/completion-coverage-matrix.md` +- the CLI row in `docs/specs/completion-coverage-matrix-v1.md` - the crate-local migration contract copied from `docs/specs/completion-contract-template.md` Canonical metadata tuple: @@ -208,7 +208,7 @@ Completion changes are not deliverable until required checks pass. Use this checklist for every existing CLI migration. The migration is only complete when the per-CLI contract is filled, validation evidence is captured, and acceptance criteria are checked. -1. confirm the CLI is completion-required (or explicitly excluded) in `docs/reports/completion-coverage-matrix.md`, and for `required` CLIs +1. confirm the CLI is completion-required (or explicitly excluded) in `docs/specs/completion-coverage-matrix-v1.md`, and for `required` CLIs ensure the matrix row has explicit completion enforcement metadata 2. 
create the per-CLI migration contract from `docs/specs/completion-contract-template.md`: - `mkdir -p crates//docs/reports` diff --git a/docs/runbooks/test-cleanup-governance.md b/docs/runbooks/test-cleanup-governance.md index 1e0f19a3..b17ea590 100644 --- a/docs/runbooks/test-cleanup-governance.md +++ b/docs/runbooks/test-cleanup-governance.md @@ -16,6 +16,31 @@ Use it to decide whether a candidate should be `remove`, `keep`, `rewrite`, or ` 3. Validate contract safety before merge. 4. Update CI baseline only after reviewed cleanup PRs merge. +## Deterministic Cleanup Map Inputs + +Treat these artifacts as the authoritative cleanup map for Sprint 3: + +- `$AGENT_HOME/out/workspace-test-cleanup/stale-tests.tsv` +- `$AGENT_HOME/out/workspace-test-cleanup/decision-rubric.md` +- `$AGENT_HOME/out/workspace-test-cleanup/crate-tiers.tsv` +- `$AGENT_HOME/out/workspace-test-cleanup/execution-manifest.md` +- `docs/specs/workspace-test-cleanup-lane-matrix-v1.md` + +The spec freezes the `serial` vs `parallel` lane contract and the decision-mode mapping (`remove`, `rewrite`, `keep`, `defer`) so task +lanes do not drift as cleanup work lands. + +## Frozen Serial Sequence + +The serialized crate order is fixed: + +1. `git-cli` (`serial-1`) +2. `agent-docs` (`serial-2`) +3. `macos-agent` (`serial-3`) +4. `fzf-cli` (`serial-4`) +5. `memo-cli` (`serial-5`) + +All other crates remain `parallel` unless the matrix spec is intentionally revised. + ## Evidence Rules Before marking a candidate `remove`, include all of the following: @@ -31,10 +56,19 @@ For `rewrite`, document: - Which test now guards the behavior. - Which command(s) prove parity/contract behavior still pass. +## Baseline Update Policy + +- `scripts/ci/test-stale-audit-baseline.tsv` is a constrained allowlist for known `helper_fanout` + `remove` rows only. +- New regression rows must be fixed in code; do not add them to baseline as a shortcut. 
+- During cleanup lanes, baseline changes are limited to deleting rows for helpers that were actually removed with replacement coverage. +- Any baseline expansion requires an explicit policy update to both this runbook and + `docs/specs/workspace-test-cleanup-lane-matrix-v1.md`, plus review evidence in the PR. + ## CI Guardrails - `bash scripts/ci/test-stale-audit.sh --strict` - Fails on new orphaned helper regressions relative to `scripts/ci/test-stale-audit-baseline.tsv`. + - Fails when baseline contains entries outside the frozen S3T1 allowlist. - Fails on deprecated-path leftovers (`deprecated_path_marker`) in the current inventory. - `./.agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh` - Must pass before delivery. diff --git a/docs/runbooks/wrappers-mode-usage.md b/docs/runbooks/wrappers-mode-usage.md deleted file mode 100644 index 232180ec..00000000 --- a/docs/runbooks/wrappers-mode-usage.md +++ /dev/null @@ -1,187 +0,0 @@ -# Wrapper Execution Mode Runbook - -## Purpose - -This runbook defines how `wrappers/*` choose between: - -- a locally installed binary, and -- workspace debug execution (`cargo run -q -p ...`). - -It also documents the safety rules that prevent wrapper self-recursion and ambiguous fallback behavior. -Wrappers emit a short status line on `stderr` before execution so users can see which path is active. 
- -## Scope - -This applies to all current wrappers under `wrappers/`: - -- `agent-docs` -- `api-gql` -- `api-grpc` -- `api-rest` -- `api-test` -- `api-websocket` -- `cli-template` -- `codex-cli` -- `fzf-cli` -- `gemini-cli` -- `git-cli` -- `git-lock` -- `git-scope` -- `git-summary` -- `image-processing` -- `macos-agent` -- `memo-cli` -- `plan-issue` -- `plan-issue-local` -- `plan-tooling` -- `screen-record` -- `semantic-commit` - -## Environment Contract - -### `NILS_WRAPPER_MODE` - -Supported values: - -- `auto` (default) -- `debug` -- `installed` - -Behavior summary: - -| Mode | Resolution behavior | -| --- | --- | -| `auto` | Try installed binary first; if not found, fallback to `cargo run -q -p -- ...`. | -| `debug` | Force `cargo run -q -p -- ...`; fail if `cargo` is unavailable. | -| `installed` | Force installed binary lookup only; do not fallback to `cargo`. | - -Invalid mode value exits with code `64` and an explicit error. - -Note: debug mode uses Cargo package names (for example `nils-git-scope`, `nils-codex-cli`), not -necessarily the binary name. - -### `NILS_WRAPPER_INSTALL_PREFIX` - -Optional install prefix for installed binary lookup. - -Default: - -- `~/.local/nils-cli` - -Lookup order for installed mode resolution: - -1. `${NILS_WRAPPER_INSTALL_PREFIX:-$HOME/.local/nils-cli}/` -2. `command -v ` - -Both `~` and `~/...` forms are expanded. - -### `NILS_WRAPPER_STATUS_HINTS` - -Controls whether wrappers print execution-path status lines on `stderr`. 
- -- `1` (default): print status hints -- `0`: disable status hints - -Default status hints include: - -- `exec=installed mode= path=` -- `exec=cargo mode= crate= (cargo -q: build may stay silent while compiling)` - -## Safety Guarantees - -All wrappers enforce: - -- self-recursion guard: candidate binary path must not resolve to the wrapper itself (`-ef` check) -- explicit mode validation (`auto|debug|installed` only) -- deterministic fallback and error messages by mode - -This prevents infinite recursion when `wrappers/` appears before real binaries in `PATH`. - -## `codex-cli` and `gemini-cli` parser behavior - -Both CLIs expose only the canonical groups: - -- `agent` -- `auth` -- `diag` -- `config` -- `starship` -- `completion` - -Unsupported groups/subcommands return exit `64` from clap parser validation. - -## `git-cli` Compatibility Behavior - -`git-cli` wrapper preserves the existing compatibility rule: - -- when called as `git-cli -- help ...`, arguments are normalized to `git-cli help ...` before - resolution/execution. - -## Usage Examples - -Set globally (for current shell): - -```bash -export NILS_WRAPPER_MODE=debug -``` - -Force installed binary usage: - -```bash -export NILS_WRAPPER_MODE=installed -export NILS_WRAPPER_INSTALL_PREFIX="$HOME/.local/nils-cli" -``` - -One-shot override for a single command: - -```bash -NILS_WRAPPER_MODE=auto ./wrappers/git-scope tracked -NILS_WRAPPER_MODE=debug ./wrappers/codex-cli auth current -NILS_WRAPPER_MODE=installed ./wrappers/semantic-commit --help -``` - -`.env` example: - -```dotenv -NILS_WRAPPER_MODE=debug -# NILS_WRAPPER_MODE=auto -# NILS_WRAPPER_MODE=installed -``` - -## Troubleshooting - -### `... invalid NILS_WRAPPER_MODE=...` - -- Cause: unsupported mode string -- Fix: use one of `auto`, `debug`, `installed` - -### `... cargo not found (required when NILS_WRAPPER_MODE=debug)` - -- Cause: debug mode without Cargo on `PATH` -- Fix: install Rust/Cargo, or switch mode to `auto`/`installed` - -### `... 
installed binary not found (NILS_WRAPPER_MODE=installed)` - -- Cause: installed mode cannot find target binary in prefix or `PATH` -- Fix: - 1. run install flow (`./.agents/skills/nils-cli-install-local-release-binaries/scripts/nils-cli-install-local-release-binaries.sh`) - 2. confirm `NILS_WRAPPER_INSTALL_PREFIX` and `PATH` - -### `... binary not found (install via cargo install or build the workspace)` - -- Cause: `auto` mode could not find installed binary and cannot run cargo fallback -- Fix: install binary or ensure Cargo is available - -### Need fully quiet wrapper stderr - -- Cause: status hints are enabled by default -- Fix: set `NILS_WRAPPER_STATUS_HINTS=0` - -## Verification Quick Checks - -```bash -NILS_WRAPPER_MODE=debug ./wrappers/cli-template --help -NILS_WRAPPER_MODE=auto ./wrappers/cli-template --help -NILS_WRAPPER_MODE=installed NILS_WRAPPER_INSTALL_PREFIX="$HOME/.local/nils-cli" ./wrappers/cli-template --help -scripts/ci/wrapper-mode-smoke.sh -``` diff --git a/docs/specs/completion-contract-template.md b/docs/specs/completion-contract-template.md index fca5b444..55cfd451 100644 --- a/docs/specs/completion-contract-template.md +++ b/docs/specs/completion-contract-template.md @@ -76,7 +76,7 @@ Checklist: ## completion enforcement metadata (required) Declare and validate metadata that enforces clap-first behavior and forbids runtime mode switches or alternate completion dispatch paths. -Values must match the CLI row in `docs/reports/completion-coverage-matrix.md`. +Values must match the CLI row in `docs/specs/completion-coverage-matrix-v1.md`. | metadata key | required value | declared value | enforcement location | verification method | | ------------------------------- | -------------- | -------------- | -------------------- | -------------------------------------------------------------------------- | @@ -120,7 +120,7 @@ Record command-level and repository-level checks for this migration. 
```bash rg -n "COMPLETION_MODE|completion_mode_toggles|alternate_completion_dispatch|generated_load_failure" \ - docs/reports/completion-coverage-matrix.md \ + docs/specs/completion-coverage-matrix-v1.md \ crates//docs/reports/-completion-migration-contract.md ``` diff --git a/docs/reports/completion-coverage-matrix.md b/docs/specs/completion-coverage-matrix-v1.md similarity index 99% rename from docs/reports/completion-coverage-matrix.md rename to docs/specs/completion-coverage-matrix-v1.md index 6e4f449a..9d02587f 100644 --- a/docs/reports/completion-coverage-matrix.md +++ b/docs/specs/completion-coverage-matrix-v1.md @@ -1,4 +1,4 @@ -# Completion Coverage Matrix (Workspace Binaries) +# Completion Coverage Matrix v1 (Workspace Binaries) ## Policy notes diff --git a/docs/specs/crate-docs-placement-policy.md b/docs/specs/crate-docs-placement-policy.md index cd29fecb..ea63f8d3 100644 --- a/docs/specs/crate-docs-placement-policy.md +++ b/docs/specs/crate-docs-placement-policy.md @@ -21,7 +21,7 @@ add docs without ownership drift. - `transient-dev-record`: temporary planning/reporting/adoption notes used to complete a development cycle and removed after completion unless explicitly required by CI/governance contracts. -Contributors MUST classify each new/updated documentation file into one of these two ownership +Contributors MUST classify each new/updated documentation file into one of these three ownership types before choosing a path. ## Allowed Root Docs @@ -51,11 +51,12 @@ If a historical root path must remain for compatibility, it MUST be a short stub ## Compatibility Stub Lifecycle Decision -Compatibility stubs under root `docs/` are permanent redirects (no deprecation sunset date planned). +Compatibility stubs under root `docs/` are temporary compatibility artifacts. - Stubs MUST keep a `Moved to:` target and migration metadata. - Stubs MUST remain redirect-only shims and MUST NOT carry canonical runbook/spec/report content. 
-- If governance changes later, the policy update MUST explicitly document a new sunset decision first. +- Stubs MUST be removed once inbound-reference checks show no active script/test/doc caller still + depends on the old path. Root stub retention criteria: @@ -106,7 +107,8 @@ When adding or moving docs, contributors MUST: 2. Place `crate-local` docs in `crates//docs/...` using canonical paths. 3. Keep root `docs/` paths for `workspace-level` docs only. 4. Update references so README/runbooks/specs point to canonical locations. -5. For moved root docs, leave only compatibility stubs with a `Moved to` pointer. +5. For moved root docs, leave a compatibility stub only when active callers still reference the old + path; otherwise remove the root path entirely. Contributors SHOULD: diff --git a/docs/specs/markdown-github-handling-audit-v1.md b/docs/specs/markdown-github-handling-audit-v1.md deleted file mode 100644 index 9f94fd4d..00000000 --- a/docs/specs/markdown-github-handling-audit-v1.md +++ /dev/null @@ -1,87 +0,0 @@ -# Markdown + GitHub Handling Audit v1 - -## Purpose - -Provide a workspace-wide, source-only inventory of markdown handling and GitHub markdown-write touchpoints, and track remediation status -with machine-checkable rows. - -## Inventory boundary - -- Include scope: - - `crates/**/src/**/*.rs` production Rust source files. -- Exclude scope: - - `crates/**/tests/**`, `**/fixtures/**`, `**/docs/**`, generated outputs, and shell scripts. -- Discovery baseline command: - - ```bash - rg -n --glob 'crates/**/src/**/*.rs' \ - 'render_.*markdown|markdown::|validate_markdown_payload|canonicalize_table_cell|code_block\(|heading\(|write_temp_markdown|run_output\("gh"|parse_markdown_row|parse_sprint_heading|render_summary_markdown' \ - crates - ``` - -## Risk classes - -- `payload-guard`: markdown payload safety validation before write. -- `table-canonicalization`: markdown table-cell normalization for round-trip safety. 
-- `markdown-render`: markdown string construction/rendering. -- `markdown-write`: markdown file write paths. -- `markdown-parse`: markdown parser/heading/table interpretation logic. -- `markdown-detect`: markdown type detection/validation heuristics. -- `github-markdown-write`: GitHub CLI/API write paths using markdown body/comment payloads. -- `github-non-markdown`: GitHub CLI usage not writing markdown payloads. - -## Row schema - -Each row must contain: - -- `crate=` -- `file=` -- `function=` -- `risk_class=` -- `owner=` -- `test_ref=` -- `status=` -- `notes=` - -## Audit table - -| crate | file | function | risk_class | owner | test_ref | status | notes | -| ---------------------- | ---------------------------------------------------- | ----------------------------------------------------- | --------------------------------- | --------------------------- | --------------------------------------------------------------------------------------------------------------------------- | --------------- | ------------------------------------------------------------------------------ | -| crate=nils-common | file=crates/nils-common/src/markdown.rs | function=validate_markdown_payload | risk_class=payload-guard | owner=nils-common | test_ref=cargo test -p nils-common markdown_payload_validator_rejects_literal_escaped_controls -- --exact | status=resolved | notes=Shared guard rejects literal escaped controls. | -| crate=nils-common | file=crates/nils-common/src/markdown.rs | function=canonicalize_table_cell | risk_class=table-canonicalization | owner=nils-common | test_ref=cargo test -p nils-common canonicalize_table_cell_is_idempotent -- --exact | status=resolved | notes=Shared canonical table-cell implementation. 
| -| crate=nils-common | file=crates/nils-common/src/markdown.rs | function=heading | risk_class=markdown-render | owner=nils-common | test_ref=cargo test -p nils-common markdown_heading_trims_and_clamps_level -- --exact | status=resolved | notes=Shared heading renderer clamps and normalizes. | -| crate=nils-common | file=crates/nils-common/src/markdown.rs | function=code_block | risk_class=markdown-render | owner=nils-common | test_ref=cargo test -p nils-common markdown_code_block_is_newline_stable -- --exact | status=resolved | notes=Shared code-fence renderer with newline stability. | -| crate=nils-common | file=crates/nils-common/src/markdown.rs | function=format_json_pretty_sorted | risk_class=markdown-render | owner=nils-common | test_ref=cargo test -p nils-common json_format_sorts_keys_recursively -- --exact | status=resolved | notes=Shared sorted JSON formatter for markdown sections. | -| crate=api-testing-core | file=crates/api-testing-core/src/markdown.rs | function=format_json_pretty_sorted wrapper | risk_class=markdown-render | owner=nils-api-testing-core | test_ref=cargo test -p nils-api-testing-core --test report_history | status=resolved | notes=Wrapper delegates to nils-common canonical helpers. | -| crate=api-testing-core | file=crates/api-testing-core/src/markdown.rs | function=heading wrapper | risk_class=markdown-render | owner=nils-api-testing-core | test_ref=cargo test -p nils-api-testing-core --test report_history | status=resolved | notes=Uses shared heading behavior through wrapper. | -| crate=api-testing-core | file=crates/api-testing-core/src/markdown.rs | function=code_block wrapper | risk_class=markdown-render | owner=nils-api-testing-core | test_ref=cargo test -p nils-api-testing-core --test report_history | status=resolved | notes=Uses shared code-block behavior through wrapper. 
| -| crate=api-testing-core | file=crates/api-testing-core/src/report.rs | function=ReportBuilder markdown composition | risk_class=markdown-render | owner=nils-api-testing-core | test_ref=cargo test -p nils-api-testing-core --test report_history | status=resolved | notes=Report builder composes markdown via shared wrappers. | -| crate=api-testing-core | file=crates/api-testing-core/src/rest/report.rs | function=render_rest_report_markdown | risk_class=markdown-render | owner=nils-api-testing-core | test_ref=cargo test -p nils-api-testing-core rest_report_renders_markdown_with_optional_sections -- --exact | status=resolved | notes=REST report markdown output covered by unit tests. | -| crate=api-testing-core | file=crates/api-testing-core/src/graphql/report.rs | function=render_graphql_report_markdown | risk_class=markdown-render | owner=nils-api-testing-core | test_ref=cargo test -p nils-api-testing-core --test report_history | status=resolved | notes=GraphQL report markdown output covered by integration tests. | -| crate=api-testing-core | file=crates/api-testing-core/src/grpc/report.rs | function=render_grpc_report_markdown | risk_class=markdown-render | owner=nils-api-testing-core | test_ref=cargo test -p nils-api-testing-core --test report_history | status=resolved | notes=gRPC report markdown output covered by integration tests. | -| crate=api-testing-core | file=crates/api-testing-core/src/websocket/report.rs | function=render_websocket_report_markdown | risk_class=markdown-render | owner=nils-api-testing-core | test_ref=cargo test -p nils-api-testing-core --test report_history | status=resolved | notes=WebSocket report markdown output covered by integration tests. 
| -| crate=api-testing-core | file=crates/api-testing-core/src/suite/summary.rs | function=render_summary_markdown | risk_class=markdown-render | owner=nils-api-testing-core | test_ref=cargo test -p nils-api-testing-core render_summary_markdown_handles_successful_runs -- --exact | status=resolved | notes=Suite-level markdown summary renderer has dedicated tests. | -| crate=api-rest | file=crates/api-rest/src/commands/report.rs | function=report command markdown output path | risk_class=markdown-render | owner=nils-api-rest | test_ref=cargo test -p nils-api-rest --tests | status=resolved | notes=Command uses api-testing-core markdown report pipeline. | -| crate=api-gql | file=crates/api-gql/src/commands/report.rs | function=report command markdown output path | risk_class=markdown-render | owner=nils-api-gql | test_ref=cargo test -p nils-api-gql --tests | status=resolved | notes=Command uses api-testing-core markdown report pipeline. | -| crate=api-grpc | file=crates/api-grpc/src/commands/report.rs | function=report command markdown output path | risk_class=markdown-render | owner=nils-api-grpc | test_ref=cargo test -p nils-api-grpc --tests | status=resolved | notes=Command uses api-testing-core markdown report pipeline. | -| crate=api-websocket | file=crates/api-websocket/src/commands/report.rs | function=report command markdown output path | risk_class=markdown-render | owner=nils-api-websocket | test_ref=cargo test -p nils-api-websocket --tests | status=resolved | notes=Command uses api-testing-core markdown report pipeline. | -| crate=plan-issue-cli | file=crates/plan-issue-cli/src/github.rs | function=guard_markdown_payload | risk_class=payload-guard | owner=nils-plan-issue-cli | test_ref=cargo test -p nils-plan-issue-cli gh_adapter_guard_rejects_escaped_payload_without_force -- --exact | status=resolved | notes=Live GitHub writes enforce markdown payload guard. 
| -| crate=plan-issue-cli | file=crates/plan-issue-cli/src/github.rs | function=create_issue/edit_issue_body/comment_issue | risk_class=github-markdown-write | owner=nils-plan-issue-cli | test_ref=cargo test -p nils-plan-issue-cli gh_adapter_live_methods_work_with_stubbed_gh -- --exact | status=resolved | notes=All body-file write paths are guard-wrapped. | -| crate=plan-issue-cli | file=crates/plan-issue-cli/src/github.rs | function=close_issue --comment guard | risk_class=github-markdown-write | owner=nils-plan-issue-cli | test_ref=cargo test -p nils-plan-issue-cli gh_adapter_guard_rejects_escaped_payload_without_force -- --exact | status=resolved | notes=Inline close comment guarded unless force enabled. | -| crate=plan-issue-cli | file=crates/plan-issue-cli/src/execute.rs | function=write_temp_markdown | risk_class=markdown-write | owner=nils-plan-issue-cli | test_ref=cargo test -p nils-plan-issue-cli temp_markdown_and_prompt_outputs_use_agent_home_and_expected_paths -- --exact | status=resolved | notes=Temporary markdown artifacts use deterministic output path. | -| crate=plan-issue-cli | file=crates/plan-issue-cli/src/render.rs | function=render_plan_issue_body/render_sprint_comment | risk_class=markdown-render | owner=nils-plan-issue-cli | test_ref=cargo test -p nils-plan-issue-cli render_issue_body_start_plan_falls_back_when_preface_sections_missing -- --exact | status=resolved | notes=Issue/sprint markdown rendering paths are covered. | -| crate=plan-issue-cli | file=crates/plan-issue-cli/src/render.rs | function=parse_markdown_row | risk_class=markdown-parse | owner=nils-plan-issue-cli | test_ref=cargo test -p nils-plan-issue-cli --test runtime_truth_plan_and_sprint_flow | status=resolved | notes=Table row parsing participates in runtime-truth tests. 
| -| crate=plan-issue-cli | file=crates/plan-issue-cli/src/issue_body.rs | function=canonicalize_table_value | risk_class=table-canonicalization | owner=nils-plan-issue-cli | test_ref=cargo test -p nils-plan-issue-cli --test task_spec_flow | status=resolved | notes=Task decomposition markdown cells normalized by shared helper. | -| crate=plan-issue-cli | file=crates/plan-issue-cli/src/task_spec.rs | function=notes canonicalization for task-spec rows | risk_class=table-canonicalization | owner=nils-plan-issue-cli | test_ref=cargo test -p nils-plan-issue-cli --test task_spec_flow | status=resolved | notes=Runtime metadata notes are canonicalized for markdown round-trip safety. | -| crate=memo-cli | file=crates/memo-cli/src/preprocess/detect.rs | function=looks_like_markdown | risk_class=markdown-detect | owner=nils-memo-cli | test_ref=cargo test -p nils-memo-cli --tests | status=resolved | notes=Content-type detection is parse-only and not a markdown write path. | -| crate=memo-cli | file=crates/memo-cli/src/preprocess/validate.rs | function=validate_markdown | risk_class=markdown-detect | owner=nils-memo-cli | test_ref=cargo test -p nils-memo-cli --tests | status=resolved | notes=Validation checks markdown syntax heuristics only. | -| crate=plan-tooling | file=crates/plan-tooling/src/parse.rs | function=parse_sprint_heading/parse_task_heading | risk_class=markdown-parse | owner=nils-plan-tooling | test_ref=cargo test -p nils-plan-tooling --test to_json | status=resolved | notes=Plan markdown parser behavior covered by parse/to-json tests. | -| crate=plan-tooling | file=crates/plan-tooling/src/validate.rs | function=validate_sprint_metadata/validate_task | risk_class=markdown-parse | owner=nils-plan-tooling | test_ref=cargo test -p nils-plan-tooling --test validate | status=resolved | notes=Plan markdown contract linting covered by validate tests. 
| -| crate=plan-tooling | file=crates/plan-tooling/src/split_prs.rs | function=split-prs plan markdown split logic | risk_class=markdown-parse | owner=nils-plan-tooling | test_ref=cargo test -p nils-plan-tooling --test split_prs | status=resolved | notes=Plan-derived split behavior covered by split_prs tests. | -| crate=git-cli | file=crates/git-cli/src/open.rs | function=run_gh_pr_view | risk_class=github-non-markdown | owner=nils-git-cli | test_ref=cargo test -p nils-git-cli --tests | status=resolved | notes=Uses gh web open only; not a markdown body/comment write path. | - -## Boundary decisions - -- Keep live GitHub issue/PR body/comment writes in crate-local adapters (`plan-issue-cli` ownership). -- Keep shared markdown rendering/validation primitives in `nils-common::markdown`. -- Do not add `crates/nils-common/src/github.rs`. diff --git a/docs/specs/workspace-ci-entrypoint-inventory-v1.md b/docs/specs/workspace-ci-entrypoint-inventory-v1.md new file mode 100644 index 00000000..e84ac881 --- /dev/null +++ b/docs/specs/workspace-ci-entrypoint-inventory-v1.md @@ -0,0 +1,77 @@ +# Workspace CI Entrypoint Inventory v1 + +## Purpose + +This inventory records the canonical owner for each active CI/release workflow check and defines keep/delete criteria for helper scripts. +It is the source of truth for Sprint 1 CI entrypoint consolidation. + +## Keep/Delete Criteria + +A helper path is `keep` only when at least one active caller is discoverable by repository search in one of: + +- GitHub workflow YAML (`.github/workflows/*.yml`) +- Canonical contributor docs (`README.md`, `DEVELOPMENT.md`, `docs/runbooks/**`, crate READMEs) +- Canonical runtime entrypoints (`scripts/**`, `.agents/**`, `wrappers/**`) used by active workflows/docs + +A helper path is `delete` when no active caller exists outside historical/transient planning artifacts. 
+ +## Canonical Workflow Owners + +### `.github/workflows/ci.yml` + +| Job | Step | Canonical owner | Decision | Notes | +| --- | --- | --- | --- | --- | +| `test`, `test_macos` | `Checkout`, `Set up Rust`, `Cache cargo`, `Set up Node.js`, tool bootstrap | Upstream GitHub Actions + runner bootstrap shell | keep | Platform bootstrap stays in workflow. | +| `test`, `test_macos` | `Nils CLI checks (includes third-party-artifacts-audit, Completion asset audit, docs-hygiene-audit, test-stale-audit)` | `scripts/ci/nils-cli-checks-entrypoint.sh` -> `./.agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh` | keep | Canonical verification contract after setup. | +| `test`, `test_macos` | `Third-party artifact audit` (removed) | replaced by required-checks script ordering | delete | Duplicate workflow fragment removed. | +| `test`, `test_macos` | `Completion asset audit` (removed) | replaced by required-checks script ordering | delete | Duplicate workflow fragment removed. | +| `test`, `test_macos` | `Publish JUnit report`, `Upload JUnit XML` | upstream Actions artifacts/reporting | keep | Post-check reporting only. | +| `coverage` | coverage generation/report/upload/cleanup steps | `cargo llvm-cov` + `scripts/ci/coverage-summary.sh` + upload/comment actions | keep | Coverage artifacts are created and cleaned only in this job. | +| `coverage_badge` | badge generation/publish | `scripts/ci/coverage-badge.sh` + git push flow | keep | Push-only automation path. | + +### `.github/workflows/release.yml` + +| Job | Step | Canonical owner | Decision | Notes | +| --- | --- | --- | --- | --- | +| `build` | ripgrep bootstrap | inline OS bootstrap shell | keep | Platform package manager differences remain workflow-local. | +| `build` | `Regenerate third-party artifacts` | `scripts/generate-third-party-artifacts.sh` | keep | Canonical artifact generation gate. 
| +| `build` | `Package` | `scripts/workspace-bins.sh` + inline packaging shell | keep | Packaging owns workspace binary discovery through script entrypoint. | +| `build` | `Audit release tarball third-party artifacts` | `scripts/ci/release-tarball-third-party-audit.sh` | keep | Canonical release artifact audit. | +| `release` | publish GitHub release | `softprops/action-gh-release@v2` | keep | Standard release publication action. | + +### `.github/workflows/publish-crates.yml` + +| Job | Step | Canonical owner | Decision | Notes | +| --- | --- | --- | --- | --- | +| `publish` | `Publish selected crates` | `scripts/publish-crates.sh` | keep | Canonical crates publish/dry-run entrypoint. | + +## Required-Checks Script Ownership + +`./.agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh` is canonical for verification ordering: + +1. docs/stale/third-party/completion audits (`scripts/ci/*`) +2. completion registration/parity checks +3. compile/test gates (`cargo fmt`, `cargo clippy`, workspace tests) + +No workflow may duplicate these audit commands as independent pre-steps unless the required-checks entrypoint is updated first. 
+ +## Helper Surface Decisions (Sprint 1 Scope) + +| Path | Decision | Active caller evidence | +| --- | --- | --- | +| `scripts/ci/wrapper-mode-smoke.sh` | keep | `README.md` wrapper contributor flow + wrapper smoke command examples | +| `scripts/ci/agent-docs-snapshots.sh` | keep | `crates/agent-docs/README.md` snapshot workflow | +| obsolete completion matrix shell helper | delete | no active caller outside itself | +| obsolete release package shell helper | delete | no active CI/release/contributor caller; replaced by canonical release tarball audit script | +| `wrappers/plan-tooling` | keep | `README.md` wrapper contributor flow + runbook wrapper scope includes `plan-tooling` | +| `wrappers/git-cli` | keep | `README.md` wrapper contributor flow includes `git-cli` wrapper behavior | + +## Validation Commands + +```bash +test -f docs/specs/workspace-ci-entrypoint-inventory-v1.md +rg -n 'scripts/ci/|nils-cli-verify-required-checks' \ + .github/workflows/ci.yml .github/workflows/release.yml .github/workflows/publish-crates.yml \ + DEVELOPMENT.md .agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh +rg -n 'canonical|delete|keep|workflow' docs/specs/workspace-ci-entrypoint-inventory-v1.md +``` diff --git a/docs/specs/workspace-doc-retention-matrix-v1.md b/docs/specs/workspace-doc-retention-matrix-v1.md new file mode 100644 index 00000000..0a5dd010 --- /dev/null +++ b/docs/specs/workspace-doc-retention-matrix-v1.md @@ -0,0 +1,99 @@ +# Workspace Doc Retention Matrix v1 + +## Purpose + +This matrix freezes Sprint 4 documentation ownership and retention decisions for the simplified +workspace architecture. 
+ +Decision fields: + +- `scope`: `workspace-level` | `crate-local` | `transient-dev-record` +- `lifecycle`: `canonical` | `delete` +- `decision`: `keep` | `delete` | `move` + +## Workspace-Level Inventory (Keep) + +| Path | Scope | Lifecycle | Decision | Rationale | +| --- | --- | --- | --- | --- | +| `README.md` | `workspace-level` | `canonical` | `keep` | Workspace overview and contributor entrypoint. | +| `DEVELOPMENT.md` | `workspace-level` | `canonical` | `keep` | Required checks and contributor workflow contract. | +| `AGENTS.md` | `workspace-level` | `canonical` | `keep` | Agent execution policy for this repository. | +| `BINARY_DEPENDENCIES.md` | `workspace-level` | `canonical` | `keep` | Shared runtime/tooling dependency contract. | +| `docs/runbooks/cli-completion-development-standard.md` | `workspace-level` | `canonical` | `keep` | Canonical completion architecture and checks. | +| `docs/runbooks/crates-io-status-script-runbook.md` | `workspace-level` | `canonical` | `keep` | Workspace crates.io status workflow. | +| `docs/runbooks/new-cli-crate-development-standard.md` | `workspace-level` | `canonical` | `keep` | New CLI crate standards. | +| `docs/runbooks/test-cleanup-governance.md` | `workspace-level` | `canonical` | `keep` | Stale-test lifecycle and CI guardrails. | +| `docs/specs/cli-service-json-contract-guideline-v1.md` | `workspace-level` | `canonical` | `keep` | Service-consumed CLI JSON contract guidance. | +| `docs/specs/codex-gemini-cli-parity-contract-v1.md` | `workspace-level` | `canonical` | `keep` | Shared Codex/Gemini parity contract. | +| `docs/specs/codex-gemini-runtime-contract.md` | `workspace-level` | `canonical` | `keep` | Shared provider runtime contract. | +| `docs/specs/completion-contract-template.md` | `workspace-level` | `canonical` | `keep` | Per-crate completion migration contract template. 
| +| `docs/specs/completion-coverage-matrix-v1.md` | `workspace-level` | `canonical` | `keep` | Completion obligations and enforcement metadata matrix. | +| `docs/specs/crate-docs-placement-policy.md` | `workspace-level` | `canonical` | `keep` | Workspace docs placement policy. | +| `docs/specs/third-party-artifacts-contract-v1.md` | `workspace-level` | `canonical` | `keep` | Third-party artifacts generation contract. | +| `docs/specs/workspace-ci-entrypoint-inventory-v1.md` | `workspace-level` | `canonical` | `keep` | CI owner-script inventory and keep/delete criteria. | +| `docs/specs/workspace-shared-crate-boundary-v1.md` | `workspace-level` | `canonical` | `keep` | Shared crate ownership boundaries. | +| `docs/specs/workspace-test-cleanup-lane-matrix-v1.md` | `workspace-level` | `canonical` | `keep` | Test cleanup sequencing and lane policy. | +| `docs/specs/workspace-doc-retention-matrix-v1.md` | `workspace-level` | `canonical` | `keep` | Doc ownership and retention source of truth (this file). | + +## Crate-Local Inventory (Keep) + +All paths below are classified as `scope=crate-local`, `lifecycle=canonical`, `decision=keep`. +Rationale: each file is owned by one crate and lives under `crates//docs/**`. 
+ +- `crates/agent-docs/docs/README.md` +- `crates/api-gql/docs/README.md` +- `crates/api-grpc/docs/README.md` +- `crates/api-rest/docs/README.md` +- `crates/api-test/docs/README.md` +- `crates/api-testing-core/docs/README.md` +- `crates/api-websocket/docs/README.md` +- `crates/api-websocket/docs/specs/websocket-cli-contract-v1.md` +- `crates/api-websocket/docs/specs/websocket-request-schema-v1.md` +- `crates/cli-template/docs/README.md` +- `crates/codex-cli/docs/README.md` +- `crates/codex-cli/docs/runbooks/json-consumers.md` +- `crates/codex-cli/docs/specs/codex-cli-diag-auth-json-contract-v1.md` +- `crates/fzf-cli/docs/README.md` +- `crates/gemini-cli/docs/README.md` +- `crates/gemini-cli/docs/runbooks/json-consumers.md` +- `crates/gemini-cli/docs/specs/gemini-cli-diag-auth-json-contract-v1.md` +- `crates/git-cli/docs/README.md` +- `crates/git-lock/docs/README.md` +- `crates/git-scope/docs/README.md` +- `crates/git-summary/docs/README.md` +- `crates/image-processing/docs/README.md` +- `crates/image-processing/docs/runbooks/llm-svg-workflow.md` +- `crates/macos-agent/docs/README.md` +- `crates/memo-cli/docs/README.md` +- `crates/memo-cli/docs/runbooks/memo-cli-agent-workflow.md` +- `crates/memo-cli/docs/specs/memo-cli-command-contract-v1.md` +- `crates/memo-cli/docs/specs/memo-cli-json-contract-v1.md` +- `crates/memo-cli/docs/specs/memo-cli-release-policy.md` +- `crates/memo-cli/docs/specs/memo-cli-storage-schema-v1.md` +- `crates/memo-cli/docs/specs/memo-cli-workflow-extension-contract-v1.md` +- `crates/nils-common/docs/README.md` +- `crates/nils-common/docs/specs/markdown-helpers-contract-v1.md` +- `crates/nils-term/docs/README.md` +- `crates/nils-test-support/docs/README.md` +- `crates/plan-issue-cli/docs/README.md` +- `crates/plan-issue-cli/docs/specs/plan-issue-cli-contract-v2.md` +- `crates/plan-issue-cli/docs/specs/plan-issue-gate-matrix-v1.md` +- `crates/plan-issue-cli/docs/specs/plan-issue-state-machine-v1.md` +- `crates/plan-tooling/docs/README.md` +- 
`crates/plan-tooling/docs/runbooks/split-prs-build-task-spec-cutover.md` +- `crates/plan-tooling/docs/specs/split-prs-contract-v1.md` +- `crates/plan-tooling/docs/specs/split-prs-contract-v2.md` +- `crates/screen-record/docs/README.md` +- `crates/semantic-commit/docs/README.md` + +## Transient/Obsolete Inventory (Delete or Move) + +| Path | Scope | Lifecycle | Decision | Reason | Inbound-reference proof | +| --- | --- | --- | --- | --- | --- | +| `docs/plans/markdown-gh-handling-audit-remediation-plan.md` | `transient-dev-record` | `delete` | `delete` | Completed migration plan; no active workflow caller remains. | `rg -n 'markdown-gh-handling-audit-remediation-plan\.md' README.md DEVELOPMENT.md AGENTS.md BINARY_DEPENDENCIES.md docs crates scripts tests .github` -> no matches. | +| `docs/plans/third-party-licenses-notices-release-packaging-plan.md` | `transient-dev-record` | `delete` | `delete` | Completed migration plan; contract moved to canonical spec + scripts. | `rg -n 'third-party-licenses-notices-release-packaging-plan\.md' README.md DEVELOPMENT.md AGENTS.md BINARY_DEPENDENCIES.md docs crates scripts tests .github` -> no matches. | +| `docs/reports/completion-coverage-matrix.md` | `workspace-level` | `delete` | `move` | Promoted from report to canonical workspace spec. | `rg -n 'docs/reports/completion-coverage-matrix\.md' README.md DEVELOPMENT.md AGENTS.md BINARY_DEPENDENCIES.md docs crates scripts tests .github` -> no matches. | +| `docs/runbooks/wrappers-mode-usage.md` | `transient-dev-record` | `delete` | `delete` | Compatibility-only wrapper-mode runbook superseded by canonical README guidance. | `rg -n 'wrappers-mode-usage\.md' README.md DEVELOPMENT.md AGENTS.md BINARY_DEPENDENCIES.md docs crates scripts tests .github` -> no matches. | +| `docs/specs/markdown-github-handling-audit-v1.md` | `transient-dev-record` | `delete` | `delete` | Sprint audit artifact completed; no active policy gate depends on it.
| `rg -n 'markdown-github-handling-audit-v1\.md' README.md DEVELOPMENT.md AGENTS.md BINARY_DEPENDENCIES.md docs crates scripts tests .github` -> no matches. | +| `crates/plan-tooling/docs/runbooks/split-prs-migration.md` | `crate-local` | `delete` | `delete` | Migration-only runbook superseded by cutover runbook + v2 contract docs. | `rg -n 'split-prs-migration\.md' README.md DEVELOPMENT.md AGENTS.md BINARY_DEPENDENCIES.md docs crates scripts tests .github` -> no matches. | +| `crates/plan-issue-cli/docs/specs/plan-issue-cli-contract-v1.md` | `crate-local` | `delete` | `delete` | Compatibility-era contract superseded by active v2 contract. | `rg -n 'plan-issue-cli-contract-v1\.md' README.md DEVELOPMENT.md AGENTS.md BINARY_DEPENDENCIES.md docs crates scripts tests .github` -> no matches. | diff --git a/docs/specs/workspace-shared-crate-boundary-v1.md b/docs/specs/workspace-shared-crate-boundary-v1.md new file mode 100644 index 00000000..d71b4266 --- /dev/null +++ b/docs/specs/workspace-shared-crate-boundary-v1.md @@ -0,0 +1,68 @@ +# Workspace Shared Crate Boundary v1 + +## Purpose + +This spec freezes Sprint 2 shared-crate boundaries from the canonical audit artifacts so extraction work stays deterministic across +subagent lanes. + +Audit inputs: + +- `bash scripts/dev/workspace-shared-crate-audit.sh --format tsv` +- `$AGENT_HOME/out/workspace-shared-audit/crate-matrix.tsv` +- `$AGENT_HOME/out/workspace-shared-audit/hotspots-index.tsv` +- `$AGENT_HOME/out/workspace-shared-audit/task-lanes.tsv` + +## Boundary Decisions + +### What belongs in `nils-common` (`extract`) + +- Domain-neutral filesystem primitives (`write_atomic`, `sha256_file`, timestamp persistence, secret timestamp path derivation). +- Provider-runtime path resolution and profile-driven auth-file/secret-dir/cache-dir rules. +- Shared auth persistence substrate that identifies identity-matched secrets and synchronizes auth snapshots without provider-specific UX copy.
+- Structured errors that caller crates map back to existing output/exit contracts. + +### What stays in `nils-term` (`keep-local`) + +- Progress bars/spinners and TTY rendering policy. +- ANSI/presentation behavior that is terminal UX specific rather than runtime-domain neutral. +- CLI surface decisions for progress visibility and one-line rendering modes. + +### What stays crate-local (`keep-local` / `defer`) + +- Provider-specific warning/error text, JSON envelope wording, and exit-code mapping. +- Parity-sensitive secret-dir UX behavior where Codex/Gemini semantics intentionally diverge (`defer` until characterization proves safe merge). +- Command composition and product-specific policy (auth prompts, sync messaging, diagnostics output). + +## Hotspot Lane Matrix + +| Theme | Audit signals | Decision | Owner sprint tasks | Notes | +| --- | --- | --- | --- | --- | +| process/env/no-color primitives | `manual_process_probe`, `manual_env_mutation`, `manual_no_color_logic` | `extract` to `nils-common` with crate-local adapters | `S2T2` | Keep UX text and exit semantics in crate-local adapters. | +| provider auth persistence + atomic fs | `manual_atomic_fs` | `extract` to `nils-common::provider_runtime` substrate | `S2T3` | Shared sync substrate + timestamp path rules; keep provider JSON/text copy local. | +| parity-sensitive secret-dir routing | `manual_secret_dir_resolution` | `defer` / `keep-local` until characterization remains green | `S2T3` | Do not force full Codex/Gemini secret-dir unification without explicit parity evidence. | +| redundant local wrappers post-extraction | `dependency_present` + wrapper shims | `keep-local` only if still contract-relevant, otherwise delete | `S2T4` | Wrapper removal depends on `S2T2` + `S2T3` substrate landing. | + +## Keep/Remove Rules for Runtime Helpers + +- `extract` when helper logic is domain-neutral and used by multiple crates. +- `keep-local` when helper exists only to preserve user-visible contract fidelity. 
+- `defer` when migration risks parity-sensitive behavior without characterization coverage. +- `remove` when no live caller remains after extraction and wrapper no longer provides contract value. +- Post-S2T4 baseline: `crates/codex-cli/src/fs.rs`, `crates/gemini-cli/src/fs.rs`, and `crates/git-cli/src/util.rs` are removed; callers + should consume `nils-common` primitives directly. + +## Non-goals + +- Moving provider-specific message wording into `nils-common`. +- Merging Codex and Gemini command-level UX into one behavior surface. +- Treating `nils-term` as a generic runtime helper crate. +- Keeping compatibility-only wrappers once shared helpers are canonical. + +## Validation + +```bash +test -f docs/specs/workspace-shared-crate-boundary-v1.md +bash scripts/dev/workspace-shared-crate-audit.sh --format tsv +rg -n 'What belongs|What stays crate-local|Non-goals' README.md crates/nils-common/README.md crates/nils-test-support/README.md +rg -n 'keep-local|extract|nils-common|nils-term' docs/specs/workspace-shared-crate-boundary-v1.md +``` diff --git a/docs/specs/workspace-test-cleanup-lane-matrix-v1.md b/docs/specs/workspace-test-cleanup-lane-matrix-v1.md new file mode 100644 index 00000000..584b2e0d --- /dev/null +++ b/docs/specs/workspace-test-cleanup-lane-matrix-v1.md @@ -0,0 +1,89 @@ +# Workspace Test Cleanup Lane Matrix v1 + +## Purpose + +This spec freezes the Sprint 3 stale-test cleanup map so subagent lanes keep deterministic `remove`, `rewrite`, `keep`, and `defer` +decisions while running in parallel. 
+ +Canonical audit inputs: + +- `bash scripts/dev/workspace-test-stale-audit.sh --format tsv` +- `$AGENT_HOME/out/workspace-test-cleanup/stale-tests.tsv` +- `$AGENT_HOME/out/workspace-test-cleanup/decision-rubric.md` +- `$AGENT_HOME/out/workspace-test-cleanup/crate-tiers.tsv` +- `$AGENT_HOME/out/workspace-test-cleanup/execution-manifest.md` + +## Decision Matrix + +| Signal + condition | Decision mode | Tier | Lane routing | Notes | +| --- | --- | --- | --- | --- | +| `helper_fanout` with `fanout=0`, `review=auto` | `remove` | `safe` | `serial-*` for frozen crates, otherwise `parallel` | Deterministic orphan helper candidate. | +| `helper_fanout` with `fanout=0`, `review=manual-review` | `defer` | `high-risk` | `serial-*` for frozen crates, otherwise `parallel` | Macro/reflection risk blocks automatic removal. | +| `helper_fanout` with `fanout>0` | `keep` | `medium` | `serial-*` for frozen crates, otherwise `parallel` | Helper still has live callsites. | +| `allow_dead_code` marker in tests | `rewrite` | `medium` | `serial-*` for frozen crates, otherwise `parallel` | Replace obsolete helper/test shape without dropping coverage. | +| `deprecated_path_marker` marker in tests/path | `rewrite` | `high-risk` | `serial-*` for frozen crates, otherwise `parallel` | Requires explicit replacement coverage and contract check. | +| `test_module` on contract-protected path (`contract/parity/json/exit-code`) | `keep` (or `rewrite` with equivalent coverage) | `high-risk` | `serial-*` for frozen crates, otherwise `parallel` | Never treat protected modules as opportunistic `remove`. 
| + +## Frozen Serial-Group Order + +The serialized order is frozen by Sprint 3 Task 3.1 and is no longer recomputed from top-N score drift: + +| Order | Crate | Candidates | Helper Signals | allow(dead_code) | Score | Serial Group | +| ---: | --- | ---: | ---: | ---: | ---: | --- | +| 1 | `git-cli` | 37 | 27 | 1 | 94 | `serial-1` | +| 2 | `agent-docs` | 32 | 20 | 1 | 75 | `serial-2` | +| 3 | `macos-agent` | 37 | 7 | 2 | 57 | `serial-3` | +| 4 | `fzf-cli` | 19 | 5 | 7 | 50 | `serial-4` | +| 5 | `memo-cli` | 25 | 5 | 5 | 50 | `serial-5` | + +All non-listed crates stay `parallel`. + +## Crate Tier Snapshot + +From the current `execution-manifest.md`: + +| Crate | Safe | Medium | High-Risk | Serial Group | +| --- | ---: | ---: | ---: | --- | +| agent-docs | 1 | 30 | 1 | serial-2 | +| api-gql | 0 | 5 | 1 | parallel | +| api-grpc | 0 | 2 | 1 | parallel | +| api-rest | 0 | 9 | 1 | parallel | +| api-test | 0 | 3 | 2 | parallel | +| api-testing-core | 0 | 24 | 0 | parallel | +| api-websocket | 0 | 2 | 2 | parallel | +| cli-template | 0 | 1 | 0 | parallel | +| codex-cli | 0 | 29 | 11 | parallel | +| fzf-cli | 0 | 18 | 1 | serial-4 | +| gemini-cli | 0 | 30 | 10 | parallel | +| git-cli | 13 | 22 | 2 | serial-1 | +| git-lock | 0 | 15 | 2 | parallel | +| git-scope | 0 | 15 | 3 | parallel | +| git-summary | 0 | 10 | 1 | parallel | +| image-processing | 0 | 8 | 0 | parallel | +| macos-agent | 0 | 35 | 2 | serial-3 | +| memo-cli | 0 | 22 | 3 | serial-5 | +| nils-common | 0 | 1 | 1 | parallel | +| nils-term | 0 | 1 | 0 | parallel | +| nils-test-support | 0 | 6 | 0 | parallel | +| plan-issue-cli | 0 | 17 | 3 | parallel | +| plan-tooling | 0 | 9 | 2 | parallel | +| screen-record | 0 | 19 | 1 | parallel | +| semantic-commit | 0 | 11 | 1 | parallel | + +## Baseline Update Policy (No Silent Regression Hiding) + +- `scripts/ci/test-stale-audit-baseline.tsv` remains a strict subset of the frozen S3T1 orphan-helper allowlist enforced by + `scripts/ci/test-stale-audit.sh`. 
+- Allowed baseline movement during cleanup: row deletions after replacement coverage is merged and stale helper removal is validated. +- Disallowed baseline movement: adding or rewriting rows to absorb new regressions without first updating governance/spec policy and explicit + review evidence. +- `deprecated_path_marker` regressions are never baselined; they must be rewritten or removed. + +## Validation + +```bash +test -f docs/specs/workspace-test-cleanup-lane-matrix-v1.md +bash scripts/dev/workspace-test-stale-audit.sh --format tsv +bash scripts/ci/test-stale-audit.sh --strict +rg -n 'serial|parallel|remove|rewrite|defer' docs/specs/workspace-test-cleanup-lane-matrix-v1.md +``` diff --git a/scripts/ci/completion-asset-audit.sh b/scripts/ci/completion-asset-audit.sh index 0f683358..07e08919 100755 --- a/scripts/ci/completion-asset-audit.sh +++ b/scripts/ci/completion-asset-audit.sh @@ -43,7 +43,7 @@ if [[ -z "$repo_root" || ! -d "$repo_root" ]]; then fi cd "$repo_root" -matrix_path="docs/reports/completion-coverage-matrix.md" +matrix_path="docs/specs/completion-coverage-matrix-v1.md" workspace_bins_script="scripts/workspace-bins.sh" if [[ ! -f "$matrix_path" ]]; then diff --git a/scripts/ci/completion-flag-parity-audit.sh b/scripts/ci/completion-flag-parity-audit.sh index 17b487ae..5bc7b4a9 100755 --- a/scripts/ci/completion-flag-parity-audit.sh +++ b/scripts/ci/completion-flag-parity-audit.sh @@ -629,7 +629,7 @@ main() { script_dir="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)" local repo_root repo_root="$(cd "$script_dir/../.." && pwd)" - local matrix_path="$repo_root/docs/reports/completion-coverage-matrix.md" + local matrix_path="$repo_root/docs/specs/completion-coverage-matrix-v1.md" if [[ ! 
-f "$matrix_path" ]]; then echo "FAIL: missing completion matrix: $matrix_path" >&2 diff --git a/scripts/ci/docs-hygiene-audit.sh b/scripts/ci/docs-hygiene-audit.sh index 7a2854e3..14d9b7b2 100755 --- a/scripts/ci/docs-hygiene-audit.sh +++ b/scripts/ci/docs-hygiene-audit.sh @@ -59,8 +59,15 @@ record_issue() { declare -a removed_transient_docs=( "docs/reports/codex-gemini-doc-audit.md" + "docs/reports/completion-coverage-matrix.md" "docs/plans/codex-gemini-core-merge-plan.md" + "docs/plans/markdown-gh-handling-audit-remediation-plan.md" + "docs/plans/third-party-licenses-notices-release-packaging-plan.md" "docs/runbooks/image-processing-llm-svg.md" + "docs/runbooks/wrappers-mode-usage.md" + "docs/specs/markdown-github-handling-audit-v1.md" + "crates/plan-issue-cli/docs/specs/plan-issue-cli-contract-v1.md" + "crates/plan-tooling/docs/runbooks/split-prs-migration.md" "crates/api-test/docs/runbooks/api-test-websocket-adoption.md" "crates/api-websocket/docs/runbooks/api-websocket-rollout.md" "crates/memo-cli/docs/runbooks/memo-cli-rollout.md" @@ -94,6 +101,7 @@ for path in "${removed_transient_docs[@]}"; do if [[ ${#existing_reference_roots[@]} -gt 0 ]]; then refs="$(rg -n --fixed-strings "$path" "${existing_reference_roots[@]}" \ -g '!**/docs/plans/**' \ + -g '!docs/specs/workspace-doc-retention-matrix-v1.md' \ -g '!**/tests/**' \ -g '!**/target/**' || true)" fi diff --git a/scripts/ci/docs-placement-audit.sh b/scripts/ci/docs-placement-audit.sh index 2659d778..7147823e 100755 --- a/scripts/ci/docs-placement-audit.sh +++ b/scripts/ci/docs-placement-audit.sh @@ -77,7 +77,7 @@ done runbook_is_approved_workspace_file() { local base="$1" case "$base" in - cli-completion-development-standard.md|crates-io-status-script-runbook.md|new-cli-crate-development-standard.md|test-cleanup-governance.md|wrappers-mode-usage.md) + cli-completion-development-standard.md|crates-io-status-script-runbook.md|new-cli-crate-development-standard.md|test-cleanup-governance.md) return 0 ;; *) 
diff --git a/scripts/ci/nils-cli-checks-entrypoint.sh b/scripts/ci/nils-cli-checks-entrypoint.sh new file mode 100755 index 00000000..8354cd9a --- /dev/null +++ b/scripts/ci/nils-cli-checks-entrypoint.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash +set -euo pipefail + +usage() { + cat <<'USAGE' +Usage: + scripts/ci/nils-cli-checks-entrypoint.sh [--xvfb] [verify-script args...] + +Description: + Canonical cross-platform entrypoint for CI verification jobs. + - Runs ./.agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh + - Reuses the current Bash interpreter so nested audit scripts run with the same shell version. + - Optionally wraps execution with xvfb-run for Linux runners. + +Options: + --xvfb Run checks under `xvfb-run -a` + -h, --help Show this help +USAGE +} + +use_xvfb=0 +declare -a verify_args=() +while [[ $# -gt 0 ]]; do + case "${1:-}" in + --xvfb) + use_xvfb=1 + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) + verify_args+=("${1:-}") + shift + ;; + esac +done + +repo_root="$(git rev-parse --show-toplevel 2>/dev/null || true)" +if [[ -z "$repo_root" || ! -d "$repo_root" ]]; then + echo "error: must run inside a git work tree" >&2 + exit 2 +fi +cd "$repo_root" + +verify_script="./.agents/skills/nils-cli-verify-required-checks/scripts/nils-cli-verify-required-checks.sh" +if [[ ! -f "$verify_script" ]]; then + echo "error: missing required checks script: $verify_script" >&2 + exit 2 +fi + +bash_bin="${BASH:-}" +if [[ -z "$bash_bin" || ! -x "$bash_bin" ]]; then + bash_bin="$(command -v bash || true)" +fi +if [[ -z "$bash_bin" || ! -x "$bash_bin" ]]; then + echo "error: bash not found on PATH" >&2 + exit 2 +fi + +declare -a cmd=( "$bash_bin" "$verify_script" "${verify_args[@]}" ) +if [[ "$use_xvfb" -eq 1 ]]; then + if ! 
command -v xvfb-run >/dev/null 2>&1; then + echo "error: --xvfb requested but xvfb-run is not available on PATH" >&2 + exit 2 + fi + cmd=(xvfb-run -a "${cmd[@]}") +fi + +echo "+ ${cmd[*]}" +"${cmd[@]}" diff --git a/scripts/ci/test-stale-audit.sh b/scripts/ci/test-stale-audit.sh index 55db6f5b..fcd9e0f7 100644 --- a/scripts/ci/test-stale-audit.sh +++ b/scripts/ci/test-stale-audit.sh @@ -74,12 +74,58 @@ current_orphans="${tmp_dir}/current-orphans.tsv" baseline_orphans="${tmp_dir}/baseline-orphans.tsv" new_orphans="${tmp_dir}/new-orphans.tsv" deprecated_leftovers="${tmp_dir}/deprecated-leftovers.tsv" +frozen_baseline_allowlist="${tmp_dir}/frozen-baseline-allowlist.tsv" +invalid_baseline_rows="${tmp_dir}/invalid-baseline-rows.tsv" +unexpected_baseline_rows="${tmp_dir}/unexpected-baseline-rows.tsv" cleanup() { rm -rf "$tmp_dir" } trap cleanup EXIT +write_frozen_baseline_allowlist() { + cat >"$frozen_baseline_allowlist" <<'EOF' +agent-docs crates/agent-docs/tests/common.rs drop (fanout=0) helper_fanout remove +git-cli crates/git-cli/tests/utils.rs utils_commit_hash_missing_ref_errors (fanout=0) helper_fanout remove +git-cli crates/git-cli/tests/utils.rs utils_commit_hash_outputs_sha_for_head (fanout=0) helper_fanout remove +git-cli crates/git-cli/tests/utils.rs utils_commit_hash_resolves_annotated_tag (fanout=0) helper_fanout remove +git-cli crates/git-cli/tests/utils.rs utils_copy_staged_both_outputs_diff_and_status (fanout=0) helper_fanout remove +git-cli crates/git-cli/tests/utils.rs utils_copy_staged_help_prints_usage (fanout=0) helper_fanout remove +git-cli crates/git-cli/tests/utils.rs utils_copy_staged_no_changes_warns_and_exits_1 (fanout=0) helper_fanout remove +git-cli crates/git-cli/tests/utils.rs utils_copy_staged_rejects_conflicting_modes (fanout=0) helper_fanout remove +git-cli crates/git-cli/tests/utils.rs utils_copy_staged_rejects_unknown_arg (fanout=0) helper_fanout remove +git-cli crates/git-cli/tests/utils.rs utils_copy_staged_stdout_outputs_diff_only 
(fanout=0) helper_fanout remove +git-cli crates/git-cli/tests/utils.rs utils_root_not_in_repo_errors (fanout=0) helper_fanout remove +git-cli crates/git-cli/tests/utils.rs utils_root_prints_message (fanout=0) helper_fanout remove +git-cli crates/git-cli/tests/utils.rs utils_root_shell_outputs_cd_command (fanout=0) helper_fanout remove +git-cli crates/git-cli/tests/utils.rs utils_zip_creates_backup_zip (fanout=0) helper_fanout remove +EOF + LC_ALL=C sort -u -o "$frozen_baseline_allowlist" "$frozen_baseline_allowlist" +} + +validate_baseline_file() { + local expected_header + expected_header=$'crate\tpath\tsymbol_or_test\tsignal\tproposed_action' + + if [[ "$(sed -n '1p' "$baseline_file")" != "$expected_header" ]]; then + echo "error: invalid baseline header in $baseline_file" >&2 + exit 2 + fi + + awk -F '\t' ' + NR > 1 && (NF != 5 || $4 != "helper_fanout" || $5 != "remove") {print $0} + ' "$baseline_file" | LC_ALL=C sort -u >"$invalid_baseline_rows" + + if [[ -s "$invalid_baseline_rows" ]]; then + echo "FAIL: baseline contains unsupported stale-test actions (only helper_fanout/remove rows are allowed)." >&2 + cat "$invalid_baseline_rows" >&2 + exit 1 + fi +} + +write_frozen_baseline_allowlist +validate_baseline_file + bash scripts/dev/workspace-test-stale-audit.sh --out "$inventory_file" >/dev/null awk -F '\t' ' @@ -94,6 +140,13 @@ awk -F '\t' ' } ' "$baseline_file" | LC_ALL=C sort -u >"$baseline_orphans" +comm -23 "$baseline_orphans" "$frozen_baseline_allowlist" >"$unexpected_baseline_rows" +if [[ -s "$unexpected_baseline_rows" ]]; then + echo "FAIL: baseline contains entries outside the frozen S3T1 allowlist; expand rules in docs/runbooks/test-cleanup-governance.md and docs/specs/workspace-test-cleanup-lane-matrix-v1.md before changing baseline." 
>&2 + cat "$unexpected_baseline_rows" >&2 + exit 1 +fi + comm -23 "$current_orphans" "$baseline_orphans" >"$new_orphans" awk -F '\t' ' diff --git a/scripts/dev/workspace-shared-crate-audit.sh b/scripts/dev/workspace-shared-crate-audit.sh index ec9ea73a..3ef98fef 100755 --- a/scripts/dev/workspace-shared-crate-audit.sh +++ b/scripts/dev/workspace-shared-crate-audit.sh @@ -123,8 +123,8 @@ hotspots_index_tsv="${out_dir}/hotspots-index.tsv" decision_rubric_md="${out_dir}/decision-rubric.md" task_lanes_tsv="${out_dir}/task-lanes.tsv" -tmp_hotspots="$(mktemp "${TMPDIR:-/tmp}/workspace-shared-hotspots.XXXXXX.tsv")" -tmp_matrix="$(mktemp "${TMPDIR:-/tmp}/workspace-shared-matrix.XXXXXX.tsv")" +tmp_hotspots="$(mktemp "${TMPDIR:-/tmp}/workspace-shared-hotspots.XXXXXX")" +tmp_matrix="$(mktemp "${TMPDIR:-/tmp}/workspace-shared-matrix.XXXXXX")" cleanup() { rm -f "$tmp_hotspots" "$tmp_matrix" diff --git a/scripts/dev/workspace-test-stale-audit.sh b/scripts/dev/workspace-test-stale-audit.sh index db1ffb1c..972f00cb 100755 --- a/scripts/dev/workspace-test-stale-audit.sh +++ b/scripts/dev/workspace-test-stale-audit.sh @@ -119,6 +119,15 @@ tmp_allowlist_sorted="$(mktemp "${TMPDIR:-/tmp}/workspace-stale-tests.allowlist. tmp_crate_metrics="$(mktemp "${TMPDIR:-/tmp}/workspace-stale-tests.metrics.XXXXXX.tsv")" tmp_high_overlap="$(mktemp "${TMPDIR:-/tmp}/workspace-stale-tests.high-overlap.XXXXXX.tsv")" +# Frozen serial execution order from Sprint 3 Task 3.1. 
+frozen_serial_crates=( + git-cli + agent-docs + macos-agent + fzf-cli + memo-cli +) + cleanup() { rm -f \ "$tmp_rows" \ @@ -517,19 +526,42 @@ build_crate_metrics() { select_high_overlap() { : >"$tmp_high_overlap" - awk -F '\t' '$2+0 > 0 {print}' "$tmp_crate_metrics" | LC_ALL=C sort -t $'\t' -k5,5nr -k1,1 | head -n 5 >"$tmp_high_overlap" + local crate metrics total helper allow score + for crate in "${frozen_serial_crates[@]}"; do + metrics="$(awk -F '\t' -v crate="$crate" ' + $1==crate { + print $2 "\t" $3 "\t" $4 "\t" $5 + found=1 + exit + } + END { + if (!found) { + print "0\t0\t0\t0" + } + } + ' "$tmp_crate_metrics")" + IFS=$'\t' read -r total helper allow score <<<"$metrics" + printf '%s\t%s\t%s\t%s\t%s\n' \ + "$crate" \ + "${total:-0}" \ + "${helper:-0}" \ + "${allow:-0}" \ + "${score:-0}" \ + >>"$tmp_high_overlap" + done } serial_group_for_crate() { local crate="${1:-}" local order=0 - while IFS=$'\t' read -r overlap_crate _rest; do + local serial_crate + for serial_crate in "${frozen_serial_crates[@]}"; do order=$((order + 1)) - if [[ "$crate" == "$overlap_crate" ]]; then + if [[ "$crate" == "$serial_crate" ]]; then printf 'serial-%d' "$order" return 0 fi - done <"$tmp_high_overlap" + done printf 'parallel' } @@ -612,7 +644,9 @@ render_execution_manifest() { echo "2. \`medium\` lane -> Task 2.2 (parallel allowed unless serial group set)." echo "3. \`high-risk\` lane -> Task 2.5 (execute after safe/medium plus contract guard checks)." echo - echo "## Serialized High-Overlap Crates" + echo "## Frozen Serialized Crates" + echo + echo "Order is frozen by Sprint 3 Task 3.1 to avoid serial-group drift while cleanup lanes run in parallel." 
echo echo "| Order | Crate | Candidates | Helper Signals | allow(dead_code) | Score |" echo "| ---: | --- | ---: | ---: | ---: | ---: |" diff --git a/tests/completion/coverage_matrix.sh b/tests/completion/coverage_matrix.sh deleted file mode 100755 index c355476b..00000000 --- a/tests/completion/coverage_matrix.sh +++ /dev/null @@ -1,338 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" -MATRIX_FILE="$REPO_ROOT/docs/reports/completion-coverage-matrix.md" -ZSH_COMPLETIONS_DIR="$REPO_ROOT/completions/zsh" -BASH_COMPLETIONS_DIR="$REPO_ROOT/completions/bash" -ZSH_ALIASES_FILE="$ZSH_COMPLETIONS_DIR/aliases.zsh" -BASH_ALIASES_FILE="$BASH_COMPLETIONS_DIR/aliases.bash" - -fail() { - printf 'FAIL: %s\n' "$*" >&2 - exit 1 -} - -has_zsh_compdef_header_token() { - local zsh_file="$1" - local token="$2" - - awk -v token="$token" ' -/^[[:space:]]*#compdef[[:space:]]+/ { - for (i = 2; i <= NF; i++) { - if ($i == token) { - found = 1 - } - } -} -END { - exit(found ? 0 : 1) -} -' "$zsh_file" -} - -run_zsh_registration_check() { - local binary="$1" - local zsh_file="$2" - local rc - - if zsh -f -c ' -setopt pipe_fail nounset - -typeset -ga compdef_calls -compdef() { - compdef_calls+=("$*") -} - -source "$1" - -function_name="$2" -command_name="$3" - -(( $+functions[$function_name] )) || exit 10 - -for call in "${compdef_calls[@]}"; do - if [[ " $call " == *" $function_name "* && " $call " == *" $command_name "* ]]; then - exit 0 - fi -done - -exit 11 -' -- "$zsh_file" "_$binary" "$binary"; then - return 0 - else - rc=$? 
- fi - - case "$rc" in - 10) - fail "zsh completion function '_$binary' not defined by $zsh_file" - ;; - 11) - if has_zsh_compdef_header_token "$zsh_file" "$binary"; then - return 0 - fi - fail "zsh completion '_$binary' missing registration for '$binary' (compdef/#compdef) in $zsh_file" - ;; - *) - fail "failed to source zsh completion for '$binary': $zsh_file (exit $rc)" - ;; - esac -} - -run_zsh_alias_registration_check() { - local binary="$1" - local zsh_file="$2" - local alias_prefix="$3" - local output - local rc - - if output="$(zsh -f -c ' -setopt pipe_fail nounset - -typeset -ga compdef_calls -compdef() { - compdef_calls+=("$*") -} - -source "$1" -source "$2" - -function_name="$3" -prefix="$4" -comp_file="$1" - -has_compdef_header_token() { - local target="$1" - local token="$2" - - awk -v token="$token" "BEGIN { found = 0 }\n\ -/^[[:space:]]*#compdef[[:space:]]+/ {\n\ - for (i = 2; i <= NF; i++) {\n\ - if (\\$i == token) {\n\ - found = 1\n\ - }\n\ - }\n\ -}\n\ -END {\n\ - exit(found ? 0 : 1)\n\ -}" "$target" -} - -typeset -a family_aliases -family_aliases=() - -for alias_name in ${(k)aliases}; do - [[ "$alias_name" == ${prefix}* ]] && family_aliases+=("$alias_name") -done - -(( ${#family_aliases[@]} > 0 )) || exit 20 - -for alias_name in "${family_aliases[@]}"; do - integer matched=0 - for call in "${compdef_calls[@]}"; do - if [[ " $call " == *" $function_name "* && " $call " == *" $alias_name "* ]]; then - matched=1 - break - fi - done - - if (( ! matched )); then - if ! has_compdef_header_token "$comp_file" "$alias_name"; then - print -r -- "$alias_name" - exit 21 - fi - fi -done -' -- "$zsh_file" "$ZSH_ALIASES_FILE" "_$binary" "$alias_prefix" 2>&1)"; then - return 0 - fi - - rc=$? 
- case "$rc" in - 20) - fail "no zsh aliases found for required alias prefix '${alias_prefix}*' (binary '$binary')" - ;; - 21) - fail "zsh completion '_$binary' missing alias registration for '$output' from aliases.zsh" - ;; - *) - fail "failed zsh alias registration check for '$binary': ${output:-unknown error}" - ;; - esac -} - -run_bash_registration_check() { - local binary="$1" - local bash_file="$2" - - if ! bash -c 'set -euo pipefail; source "$1"; complete -p "$2" >/dev/null' -- "$bash_file" "$binary"; then - fail "bash completion missing canonical registration for '$binary' in $bash_file" - fi -} - -run_bash_alias_registration_check() { - local binary="$1" - local bash_file="$2" - local alias_prefix="$3" - local alias_name - local -a family_aliases=() - - mapfile -t family_aliases < <( - bash -c 'set -euo pipefail; source "$1"; alias -p' -- "$BASH_ALIASES_FILE" | - awk -v prefix="$alias_prefix" ' -$1 == "alias" { - name = $2 - sub(/=.*/, "", name) - if (name ~ ("^" prefix)) { - print name - } -} -' - ) - - if [[ ${#family_aliases[@]} -eq 0 ]]; then - fail "no bash aliases found for required alias prefix '${alias_prefix}*' (binary '$binary')" - fi - - for alias_name in "${family_aliases[@]}"; do - if ! 
bash -c 'set -euo pipefail; source "$1"; complete -p "$2" >/dev/null' -- "$bash_file" "$alias_name"; then - fail "bash completion for '$binary' missing alias registration for '$alias_name'" - fi - done -} - -assert_required_completion_contract() { - local binary="$1" - local metadata_cell="$2" - local zsh_file="$3" - local bash_file="$4" - - [[ "$metadata_cell" == *"completion_mode=clap-first"* ]] || { - fail "matrix completion metadata missing completion_mode=clap-first for '$binary'" - } - [[ "$metadata_cell" == *"completion_mode_toggles=forbidden"* ]] || { - fail "matrix completion metadata missing completion_mode_toggles=forbidden for '$binary'" - } - [[ "$metadata_cell" == *"alternate_completion_dispatch=forbidden"* ]] || { - fail "matrix completion metadata missing alternate_completion_dispatch=forbidden for '$binary'" - } - [[ "$metadata_cell" == *"generated_load_failure=fail-closed"* ]] || { - fail "matrix completion metadata missing generated_load_failure=fail-closed for '$binary'" - } - - if rg -n --fixed-strings "COMPLETION_MODE" "$zsh_file" "$bash_file" >/dev/null; then - fail "runtime completion-mode toggle marker detected for '$binary' in completion assets" - fi - -} - -[[ -f "$MATRIX_FILE" ]] || fail "missing completion coverage matrix: $MATRIX_FILE" -[[ -d "$ZSH_COMPLETIONS_DIR" ]] || fail "missing zsh completions directory: $ZSH_COMPLETIONS_DIR" -[[ -d "$BASH_COMPLETIONS_DIR" ]] || fail "missing bash completions directory: $BASH_COMPLETIONS_DIR" -[[ -f "$ZSH_ALIASES_FILE" ]] || fail "missing zsh aliases file: $ZSH_ALIASES_FILE" -[[ -f "$BASH_ALIASES_FILE" ]] || fail "missing bash aliases file: $BASH_ALIASES_FILE" - -declare -a matrix_rows=() -mapfile -t matrix_rows < <( - awk ' -/^[[:space:]]*\| `[^`]+` \|/ { - split($0, cells, "|") - if (length(cells) < 7) { - next - } - - binary = cells[2] - obligation = cells[3] - zsh_cell = cells[4] - bash_cell = cells[5] - alias_cell = cells[6] - metadata_cell = cells[7] - - gsub(/^[[:space:]]+|[[:space:]]+$/, 
"", binary) - gsub(/^[[:space:]]+|[[:space:]]+$/, "", obligation) - gsub(/^[[:space:]]+|[[:space:]]+$/, "", zsh_cell) - gsub(/^[[:space:]]+|[[:space:]]+$/, "", bash_cell) - gsub(/^[[:space:]]+|[[:space:]]+$/, "", alias_cell) - gsub(/^[[:space:]]+|[[:space:]]+$/, "", metadata_cell) - - gsub(/`/, "", binary) - gsub(/`/, "", obligation) - - alias_required = (alias_cell ~ /`required`/) ? "1" : "0" - alias_prefix = "__none__" - if (match(alias_cell, /`[[:alnum:]-]+\*`/)) { - alias_pattern = substr(alias_cell, RSTART + 1, RLENGTH - 2) - sub(/\*$/, "", alias_pattern) - alias_prefix = alias_pattern - } - - printf "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", binary, obligation, zsh_cell, bash_cell, alias_required, alias_prefix, metadata_cell -} -' "$MATRIX_FILE" -) - -if [[ ${#matrix_rows[@]} -eq 0 ]]; then - fail "no matrix rows found in $MATRIX_FILE" -fi - -declare -A seen_binaries=() -declare -A required_alias_prefix_by_binary=() -required_count=0 - -for row in "${matrix_rows[@]}"; do - IFS=$'\t' read -r binary obligation zsh_cell bash_cell alias_required alias_prefix metadata_cell <<< "$row" - - [[ -n "$binary" ]] || continue - - if [[ -n "${seen_binaries[$binary]:-}" ]]; then - fail "duplicate binary row in matrix: $binary" - fi - seen_binaries[$binary]=1 - - case "$obligation" in - required) - required_count=$((required_count + 1)) - - [[ "$zsh_cell" == *'`present`'* ]] || fail "matrix marks required CLI '$binary' as zsh '$zsh_cell'" - [[ "$bash_cell" == *'`present`'* ]] || fail "matrix marks required CLI '$binary' as bash '$bash_cell'" - - zsh_file="$ZSH_COMPLETIONS_DIR/_$binary" - bash_file="$BASH_COMPLETIONS_DIR/$binary" - - [[ -f "$zsh_file" ]] || fail "required zsh completion file missing for '$binary': $zsh_file" - [[ -f "$bash_file" ]] || fail "required bash completion file missing for '$binary': $bash_file" - - run_zsh_registration_check "$binary" "$zsh_file" - run_bash_registration_check "$binary" "$bash_file" - assert_required_completion_contract "$binary" 
"$metadata_cell" "$zsh_file" "$bash_file" - - if [[ "$alias_required" == "1" ]]; then - [[ "$alias_prefix" != "__none__" ]] || fail "matrix alias requirement for '$binary' is missing prefix metadata" - required_alias_prefix_by_binary["$binary"]="$alias_prefix" - fi - ;; - excluded) - ;; - *) - fail "unsupported obligation '$obligation' for '$binary'" - ;; - esac -done - -if [[ "$required_count" -eq 0 ]]; then - fail "matrix produced zero required CLIs" -fi - -for binary in "${!required_alias_prefix_by_binary[@]}"; do - alias_prefix="${required_alias_prefix_by_binary[$binary]}" - zsh_file="$ZSH_COMPLETIONS_DIR/_$binary" - bash_file="$BASH_COMPLETIONS_DIR/$binary" - - run_zsh_alias_registration_check "$binary" "$zsh_file" "$alias_prefix" - run_bash_alias_registration_check "$binary" "$bash_file" "$alias_prefix" -done - -printf 'OK\n' diff --git a/tests/third-party-artifacts/release-package.test.sh b/tests/third-party-artifacts/release-package.test.sh deleted file mode 100644 index fb7371ab..00000000 --- a/tests/third-party-artifacts/release-package.test.sh +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" -AUDIT_SCRIPT="$REPO_ROOT/scripts/ci/release-tarball-third-party-audit.sh" -LICENSES_FILE="$REPO_ROOT/THIRD_PARTY_LICENSES.md" -NOTICES_FILE="$REPO_ROOT/THIRD_PARTY_NOTICES.md" -README_FILE="$REPO_ROOT/README.md" -LICENSE_FILE="$REPO_ROOT/LICENSE" - -fail() { - printf 'FAIL: %s\n' "$*" >&2 - exit 1 -} - -assert_contains() { - local haystack="$1" - local needle="$2" - local context="$3" - if [[ "$haystack" != *"$needle"* ]]; then - fail "$context missing '$needle'" - fi -} - -run_audit() { - local target="$1" - local tag="$2" - local dist_dir="$3" - local output_file="$4" - local rc_file="$5" - ( - cd "$REPO_ROOT" - bash "$AUDIT_SCRIPT" --target "$target" --tag "$tag" --dist-dir "$dist_dir" - ) >"$output_file" 2>&1 - printf '%s\n' "$?" 
>"$rc_file" -} - -package_fixture() { - local dist_dir="$1" - local tag="$2" - local target="$3" - local include_notices="$4" - local package_dir="$dist_dir/nils-cli-${tag}-${target}" - local tarball="$dist_dir/nils-cli-${tag}-${target}.tar.gz" - - rm -rf "$package_dir" - mkdir -p "$package_dir/bin" - printf '#!/usr/bin/env bash\nexit 0\n' >"$package_dir/bin/nils-cli-smoke" - chmod 0755 "$package_dir/bin/nils-cli-smoke" - - cp "$README_FILE" "$LICENSE_FILE" "$LICENSES_FILE" "$package_dir/" - if [[ "$include_notices" == "1" ]]; then - cp "$NOTICES_FILE" "$package_dir/" - fi - - tar -C "$dist_dir" -czf "$tarball" "nils-cli-${tag}-${target}" -} - -[[ -f "$AUDIT_SCRIPT" ]] || fail "missing audit script: $AUDIT_SCRIPT" -[[ -f "$LICENSES_FILE" ]] || fail "missing licenses artifact: $LICENSES_FILE" -[[ -f "$NOTICES_FILE" ]] || fail "missing notices artifact: $NOTICES_FILE" - -TMP_DIR="$(mktemp -d)" -TARGET="x86_64-unknown-linux-gnu" -TAG="v0.0.0-test" -DIST_DIR="$TMP_DIR/dist" -mkdir -p "$DIST_DIR" - -cleanup() { - rm -rf "$TMP_DIR" -} -trap cleanup EXIT - -# Happy path: package includes both third-party artifacts and audit passes. 
-package_fixture "$DIST_DIR" "$TAG" "$TARGET" "1" -run_audit "$TARGET" "$TAG" "$DIST_DIR" "$TMP_DIR/audit-pass.out" "$TMP_DIR/audit-pass.rc" -[[ "$(cat "$TMP_DIR/audit-pass.rc")" -eq 0 ]] || fail "audit should pass for complete package" -AUDIT_PASS_OUTPUT="$(cat "$TMP_DIR/audit-pass.out")" -assert_contains "$AUDIT_PASS_OUTPUT" "PASS: release tarball third-party audit (target=${TARGET}, missing=0" "audit pass output" - -EXTRACT_DIR="$TMP_DIR/extract" -mkdir -p "$EXTRACT_DIR" -tar -C "$EXTRACT_DIR" -xzf "$DIST_DIR/nils-cli-${TAG}-${TARGET}.tar.gz" -EXTRACT_ROOT="$EXTRACT_DIR/nils-cli-${TAG}-${TARGET}" -[[ -f "$EXTRACT_ROOT/THIRD_PARTY_LICENSES.md" ]] || fail "extracted archive missing THIRD_PARTY_LICENSES.md" -[[ -f "$EXTRACT_ROOT/THIRD_PARTY_NOTICES.md" ]] || fail "extracted archive missing THIRD_PARTY_NOTICES.md" - -# Failure diagnostics path: missing notices artifact should fail with clear messaging. -package_fixture "$DIST_DIR" "$TAG" "$TARGET" "0" -set +e -run_audit "$TARGET" "$TAG" "$DIST_DIR" "$TMP_DIR/audit-fail.out" "$TMP_DIR/audit-fail.rc" -set -e -[[ "$(cat "$TMP_DIR/audit-fail.rc")" -eq 1 ]] || fail "audit should fail when notices artifact is omitted" -AUDIT_FAIL_OUTPUT="$(cat "$TMP_DIR/audit-fail.out")" -assert_contains "$AUDIT_FAIL_OUTPUT" "FAIL: missing required file in tarball: THIRD_PARTY_NOTICES.md" "audit fail diagnostics" -assert_contains "$AUDIT_FAIL_OUTPUT" "FAIL: release tarball third-party audit (target=${TARGET}, missing=1" "audit fail summary" - -printf 'OK\n' diff --git a/tests/zsh/completion.test.zsh b/tests/zsh/completion.test.zsh index 582715e9..4ee6ca29 100755 --- a/tests/zsh/completion.test.zsh +++ b/tests/zsh/completion.test.zsh @@ -4,7 +4,7 @@ setopt pipe_fail nounset SCRIPT_PATH="${0:A}" REPO_ROOT="${SCRIPT_PATH:h:h:h}" -MATRIX_FILE="$REPO_ROOT/docs/reports/completion-coverage-matrix.md" +MATRIX_FILE="$REPO_ROOT/docs/specs/completion-coverage-matrix-v1.md" ZSH_COMPLETIONS_DIR="$REPO_ROOT/completions/zsh" 
ALIASES_FILE="$ZSH_COMPLETIONS_DIR/aliases.zsh"