diff --git a/prek.toml b/prek.toml index 82c09caa..99598f46 100644 --- a/prek.toml +++ b/prek.toml @@ -54,25 +54,10 @@ args = ["-c", "lychee --offline --no-progress --include-fragments docs/**/*.md R types = ["markdown"] pass_filenames = false -# Tests — expensive, runs only on pre-push -[[repos.hooks]] -id = "cargo-test" -name = "cargo test" -language = "system" -entry = "cargo nextest run --profile ci" -types = ["rust"] -pass_filenames = false -stages = ["pre-push"] - -# Doc tests — not covered by nextest -[[repos.hooks]] -id = "cargo-doctest" -name = "cargo doc tests" -language = "system" -entry = "cargo test --doc" -types = ["rust"] -pass_filenames = false -stages = ["pre-push"] +# NOTE: cargo-test and cargo-doctest were removed from pre-push to keep +# the gate fast-fail. CI re-runs the full suite on every PR (see +# `.github/workflows/ci.yml` `tests` job), so duplicating ~20 min of work +# locally only delays the developer feedback loop without adding signal. # Cargo deny — license & advisory & dependency checks # Only runs when Cargo.toml/lock or Rust files change. diff --git a/src/cli/config/budget.rs b/src/cli/config/budget.rs index 414a2f26..4eaa6e74 100644 --- a/src/cli/config/budget.rs +++ b/src/cli/config/budget.rs @@ -6,6 +6,7 @@ use crate::cli::BudgetUsd; /// Budget configuration #[derive(Debug, Clone, Deserialize, Serialize, Default)] +#[serde(deny_unknown_fields)] pub struct BudgetConfig { /// Global monthly hard cap in USD (0 = unlimited) #[serde(default)] diff --git a/src/cli/config/cache.rs b/src/cli/config/cache.rs index b2e42554..201ddf66 100644 --- a/src/cli/config/cache.rs +++ b/src/cli/config/cache.rs @@ -4,6 +4,7 @@ use serde::{Deserialize, Serialize}; /// LLM response cache configuration #[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] pub struct CacheConfig { /// Enable response caching (only for temperature=0 requests) #[serde(default)] diff --git a/src/cli/config/providers.rs b/src/cli/config/providers.rs index 87c9ab77..88c240f8 100644 --- a/src/cli/config/providers.rs +++ b/src/cli/config/providers.rs @@ -28,6 +28,7 @@ pub enum AuthType { /// Provider configuration from TOML. #[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] pub struct ProviderConfig { /// Unique provider name used in routing and logging. pub name: String, @@ -119,7 +120,16 @@ pub struct ProviderConfig { } impl ProviderConfig { - /// Returns `true` if the provider is enabled (defaults to `true`). + /// Returns `true` if the provider is enabled. + /// + /// Semantics: + /// - `enabled = true` → enabled. + /// - `enabled = false` → disabled. + /// - `enabled` absent → enabled (sensible default for newly added blocks). + /// + /// Typo safety: `#[serde(deny_unknown_fields)]` on [`ProviderConfig`] + /// rejects misspelled keys (e.g. `enbaled`) at parse time, so an absent + /// `enabled` field genuinely means "not specified" rather than "typo'd". pub fn is_enabled(&self) -> bool { self.enabled.unwrap_or(true) } diff --git a/src/cli/config/routing.rs b/src/cli/config/routing.rs index d0992450..0538177f 100644 --- a/src/cli/config/routing.rs +++ b/src/cli/config/routing.rs @@ -9,6 +9,7 @@ use super::user::PresetConfig; /// Router configuration #[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] pub struct RouterConfig { /// Default model for unclassified requests pub default: String, @@ -102,6 +103,7 @@ pub struct FanOutConfig { /// Model configuration with 1:N provider mappings #[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] pub struct ModelConfig { /// External model name (used in API requests) pub name: String, @@ -203,6 +205,7 @@ pub struct TierMatchCondition { /// When the scoring heuristic classifies a request, the dispatch pipeline /// resolves providers from the matching tier instead of the default model mappings. #[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] pub struct TierConfig { /// Tier name — must match a `ComplexityTier` variant (case-insensitive). pub name: String, diff --git a/src/cli/config/security.rs b/src/cli/config/security.rs index 27583a9c..c403952e 100644 --- a/src/cli/config/security.rs +++ b/src/cli/config/security.rs @@ -8,6 +8,7 @@ use super::default_true; /// Security configuration (wired into middleware stack) #[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] pub struct SecurityConfig { /// Master switch for security middleware #[serde(default = "default_true")] diff --git a/src/features/dlp/config.rs b/src/features/dlp/config.rs index f9329651..dc81c20e 100644 --- a/src/features/dlp/config.rs +++ b/src/features/dlp/config.rs @@ -3,6 +3,7 @@ use serde::{Deserialize, Serialize}; /// Top-level DLP configuration, mapped from `[dlp]` in TOML. #[derive(Debug, Clone, Deserialize, Serialize, Default)] +#[serde(deny_unknown_fields)] pub struct DlpConfig { /// Enables the DLP pipeline globally. #[serde(default)] diff --git a/src/models/config.rs b/src/models/config.rs index 63fe5196..1c87c1b5 100644 --- a/src/models/config.rs +++ b/src/models/config.rs @@ -25,6 +25,7 @@ use crate::features::tap::TapConfig; /// Application configuration #[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] pub struct AppConfig { /// Config schema version (for forward compatibility) #[serde(default, skip_serializing_if = "Option::is_none")] diff --git a/src/routing/classify/classify.rs b/src/routing/classify/classify.rs index ebe280eb..38b70b2a 100644 --- a/src/routing/classify/classify.rs +++ b/src/routing/classify/classify.rs @@ -84,6 +84,7 @@ impl Default for ScoringThresholds { /// Scoring configuration combining weights and thresholds. #[derive(Debug, Clone, Default, serde::Deserialize, serde::Serialize)] +#[serde(deny_unknown_fields)] pub struct ScoringConfig { /// Per-signal weights. pub weights: ScoringWeights, diff --git a/src/server/config_guard.rs b/src/server/config_guard.rs index b0a8132f..58993720 100644 --- a/src/server/config_guard.rs +++ b/src/server/config_guard.rs @@ -12,29 +12,76 @@ use std::sync::Arc; use tracing::info; /// Top-level TOML sections that are never writable via any config API. -const DENIED_SECTIONS: &[&str] = &["providers", "dlp"]; +/// +/// Each entry is denied because hot-reloading it cannot be done safely +/// at runtime — either the data is sensitive (and must travel through a +/// dedicated secret API), or the code path that consumes it is set up +/// once at process start and not re-initialised on `/api/config/reload`: +/// +/// | Section | Reason | +/// |-------------|-----------------------------------------------------------------------------------------| +/// | `providers` | Contains API keys; mutate via `grob connect` / secret backend, not the config API. | +/// | `dlp` | Security policy must not be weakened by an authenticated control-plane caller. | +/// | `tee` | TEE attestation runs at startup; flipping the mode mid-flight bypasses the gate. | +/// | `fips` | FIPS mode is checked once on init; toggling at runtime gives a false sense of compliance. | +/// +/// To change any of these the operator must edit `~/.grob/config.toml` +/// and restart the daemon. +const DENIED_SECTIONS: &[&str] = &["providers", "dlp", "tee", "fips"]; /// Per-section keys that are never writable via any config API. +/// +/// These are individual fields whose host section is otherwise editable, +/// but the field itself is either credential material or wired into a +/// non-reloadable subsystem: +/// +/// | Section.Key | Reason | +/// |--------------------|---------------------------------------------------------------------------------| +/// | `router.api_key` | Credential material — never round-trip through the config API. | +/// | `budget.api_key` | Same. | +/// | `cache.api_key` | Same. | +/// | `server.tls` | TLS listener is bound at startup; rebuilding it requires a daemon restart. | +/// | `secrets.backend` | The secret backend is constructed once and shared via `Arc`; swapping it at | +/// | | runtime would orphan in-flight readers and change credential resolution semantics. | const DENIED_KEYS: &[(&str, &str)] = &[ ("router", "api_key"), ("budget", "api_key"), ("cache", "api_key"), + ("server", "tls"), + ("secrets", "backend"), ]; /// Checks whether a (section, key) pair is blocked by the deny-list. /// -/// Returns `true` when the write must be rejected: -/// - The entire `providers` section (contains API keys). -/// - The entire `dlp` section (security must not be weakened). -/// - Any `api_key` field in any section. +/// Returns `true` when the write must be rejected. See [`DENIED_SECTIONS`] +/// and [`DENIED_KEYS`] for the rationale behind every entry. A denied +/// attempt is logged at INFO so the operator sees actionable guidance +/// (restart instead of expecting a silent reload to take effect). pub fn is_section_or_key_denied(section: &str, key: &str) -> bool { if DENIED_SECTIONS.contains(§ion) { + info!( + section = %section, + "config hot-reload: section is on the deny-list; restart the daemon to apply changes" + ); return true; } if key == "api_key" { + info!( + section = %section, + key = %key, + "config hot-reload: api_key fields cannot be set via the config API; use `grob connect` or the secret backend" + ); + return true; + } + if DENIED_KEYS.iter().any(|(s, k)| *s == section && *k == key) { + info!( + section = %section, + key = %key, + "config hot-reload: key is on the deny-list; restart the daemon to apply changes" + ); return true; } - DENIED_KEYS.iter().any(|(s, k)| *s == section && *k == key) + false } /// Validates a key update against the deny-list using [`ConfigSection`]. @@ -177,6 +224,27 @@ mod tests { assert!(!is_section_or_key_denied("cache", "ttl_secs")); } + #[test] + fn deny_static_init_sections() { + // tee and fips are checked once at startup; toggling them at runtime + // would bypass the gate without the operator realising. + assert!(is_section_or_key_denied("tee", "mode")); + assert!(is_section_or_key_denied("tee", "sealed_keys")); + assert!(is_section_or_key_denied("fips", "mode")); + assert!(is_section_or_key_denied("fips", "anything")); + } + + #[test] + fn deny_static_init_keys() { + // The TLS listener and secret backend are constructed once on + // process start; both require a daemon restart to swap. + assert!(is_section_or_key_denied("server", "tls")); + assert!(is_section_or_key_denied("secrets", "backend")); + // Sibling keys in the same sections must remain editable. + assert!(!is_section_or_key_denied("server", "host")); + assert!(!is_section_or_key_denied("server", "port")); + } + #[cfg(feature = "mcp")] mod mcp_compat { use super::*; diff --git a/tests/integration/mod.rs b/tests/integration/mod.rs index 398f996e..fdc68a1c 100644 --- a/tests/integration/mod.rs +++ b/tests/integration/mod.rs @@ -5,6 +5,7 @@ mod dlp_test; mod e2e_test; mod hit_test; mod http_test; +mod multi_tenant_isolation_test; mod prompt_caching_comprehensive_test; mod prompt_caching_test; mod security_test; diff --git a/tests/integration/multi_tenant_isolation_test.rs b/tests/integration/multi_tenant_isolation_test.rs new file mode 100644 index 00000000..d98fdd35 --- /dev/null +++ b/tests/integration/multi_tenant_isolation_test.rs @@ -0,0 +1,349 @@ +//! Multi-tenant isolation regression tests. +//! +//! These tests assert that tenant boundaries are strictly enforced across +//! the audit log, response cache, budget tracker, secret backends, and +//! request authentication. The audit performed before this file landed +//! found `tenant_id` declared in `src/server/dispatch/mod.rs` and used in +//! audit logging but **zero tests** validating tenant boundaries — a +//! regulatory compliance gap (cross-tenant data leak risk). +//! +//! When a test is marked `#[ignore]`, the codebase does not yet enforce +//! the property under test; the test stands as a regression target for +//! the follow-up fix and points at the relevant gap with a `TODO`. + +use grob::auth::jwt::GrobClaims; +use grob::cache::{CachedResponse, ResponseCache}; +use grob::cli::SecretsConfig; +use grob::features::token_pricing::spend::SpendTracker; +use grob::models::{CanonicalRequest, Message, MessageContent}; +use grob::security::audit_log::{AuditConfig, AuditEntry, AuditEvent, AuditLog, SigningAlgorithm}; +use grob::server::AuditEntryBuilder; +use grob::storage::secrets::build_backend; +use grob::storage::GrobStore; +use std::sync::Arc; +use tempfile::TempDir; + +// ── Helpers ──────────────────────────────────────────────────────────── + +/// Builds a minimal audit log writing into `dir` with default ECDSA signing. +fn make_audit_log(dir: &TempDir) -> AuditLog { + AuditLog::new(AuditConfig { + log_dir: dir.path().to_path_buf(), + sign_key_path: None, + signing_algorithm: SigningAlgorithm::default(), + hmac_key_path: None, + batch_size: 1, + flush_interval_ms: 5000, + include_merkle_proof: false, + }) + .expect("audit log construction") +} + +/// Reads every audit entry written to the log so a test can grep / filter. +fn read_audit_entries(dir: &TempDir) -> Vec { + let path = dir.path().join("current.jsonl"); + let content = std::fs::read_to_string(&path).expect("read audit log"); + content + .lines() + .filter(|line| !line.is_empty()) + .map(|line| serde_json::from_str::(line).expect("parse audit entry")) + .collect() +} + +/// Creates a deterministic request body shared by both tenants for cache tests. +fn shared_request() -> CanonicalRequest { + CanonicalRequest { + model: "claude-3-5-sonnet".to_string(), + messages: vec![Message { + role: "user".to_string(), + content: MessageContent::Text("ping".to_string()), + }], + max_tokens: 1024, + thinking: None, + temperature: None, + top_p: None, + top_k: None, + stop_sequences: None, + stream: None, + metadata: None, + system: None, + tools: None, + tool_choice: None, + extensions: Default::default(), + } +} + +// ── Tests ────────────────────────────────────────────────────────────── + +#[test] +fn tenant_audit_log_is_filtered() { + // REGRESSION GUARD: keeps audit-log tenant boundaries enforceable. If + // `AuditEntryBuilder` ever stops persisting `tenant_id` verbatim, the + // compliance team loses the only mechanism to filter per-tenant access + // for HDS / SecNumCloud / EU AI Act audits. + let dir = TempDir::new().expect("tempdir"); + let log = make_audit_log(&dir); + + // Tenant A makes 3 requests; tenant B makes 2. + for _ in 0..3 { + let entry = + AuditEntryBuilder::new("tenant_a", AuditEvent::Request, "anthropic", "10.0.0.1", 12) + .build(); + log.write(entry).expect("audit write"); + } + for _ in 0..2 { + let entry = + AuditEntryBuilder::new("tenant_b", AuditEvent::Request, "anthropic", "10.0.0.2", 9) + .build(); + log.write(entry).expect("audit write"); + } + + let entries = read_audit_entries(&dir); + assert_eq!(entries.len(), 5, "five total entries written"); + + let tenant_a: Vec<_> = entries + .iter() + .filter(|e| e.tenant_id == "tenant_a") + .collect(); + let tenant_b: Vec<_> = entries + .iter() + .filter(|e| e.tenant_id == "tenant_b") + .collect(); + + assert_eq!( + tenant_a.len(), + 3, + "tenant_a query returns exactly its own 3 entries" + ); + assert_eq!( + tenant_b.len(), + 2, + "tenant_b query returns exactly its own 2 entries" + ); + assert!( + tenant_a.iter().all(|e| e.tenant_id == "tenant_a"), + "tenant_a query never leaks tenant_b records" + ); + assert!( + tenant_b.iter().all(|e| e.tenant_id == "tenant_b"), + "tenant_b query never leaks tenant_a records" + ); +} + +#[test] +fn tenant_cache_response_is_not_shared() { + // REGRESSION GUARD: cache key MUST include tenant_id so two tenants with + // identical prompts cannot read each other's cached LLM output. This is + // the only protection against a cross-tenant data leak when + // `[cache] enabled = true`. + let cache = ResponseCache::new(100, 60, 1_000_000, 3); + let req = shared_request(); + + let key_a = + ResponseCache::compute_key_from_request("tenant_a", &req).expect("tenant_a cache key"); + let key_b = + ResponseCache::compute_key_from_request("tenant_b", &req).expect("tenant_b cache key"); + + assert_ne!( + key_a, key_b, + "identical request bodies MUST yield different cache keys per tenant" + ); + + // Tenant A populates the cache. + let runtime = tokio::runtime::Runtime::new().expect("tokio runtime"); + runtime.block_on(async { + cache + .put( + key_a.clone(), + CachedResponse { + body: br#"{"content":"tenant_a-only"}"#.to_vec(), + content_type: "application/json".to_string(), + provider: "anthropic".to_string(), + model: "claude-3-5-sonnet".to_string(), + }, + ) + .await; + + // Tenant A hits the cache. + let hit_a = cache.get(&key_a).await; + assert!(hit_a.is_some(), "tenant_a should hit its own cache entry"); + + // Tenant B with the IDENTICAL request body MUST NOT read A's response. + let miss_b = cache.get(&key_b).await; + assert!( + miss_b.is_none(), + "tenant_b MUST miss when only tenant_a populated the cache" + ); + }); +} + +#[test] +fn tenant_spend_storage_is_isolated() { + // REGRESSION GUARD: per-tenant spend events recorded via + // `SpendTracker::record_tenant` MUST be tagged with the tenant in the + // append-only journal so a per-tenant replay can recover only that + // tenant's costs. Without the tagging, chargeback exports collapse + // every tenant's totals onto whichever tenant runs the export. + let dir = TempDir::new().expect("tempdir"); + // GrobStore writes its journal under `/spend/YYYY-MM.jsonl` where + // `` is the parent of the path passed to `open`. + let store_root = dir.path().to_path_buf(); + let store = Arc::new(GrobStore::open(&store_root.join("grob.db")).expect("open store")); + let mut tracker = SpendTracker::with_store(store.clone()); + + // Tenant A spends $11; tenant B spends $50. + tracker.record_tenant("tenant_a", "anthropic", "claude-opus", 11.0); + tracker.record_tenant("tenant_b", "anthropic", "claude-opus", 50.0); + tracker.save(); + + // Read back the JSONL spend journal: each line is one event, tagged + // with `tenant` whenever `record_tenant` was used. + let month = chrono::Utc::now().format("%Y-%m").to_string(); + let journal_path = store_root.join("spend").join(format!("{month}.jsonl")); + let raw = std::fs::read_to_string(&journal_path).expect("read spend journal"); + let mut a_total = 0.0_f64; + let mut b_total = 0.0_f64; + for line in raw.lines().filter(|l| !l.is_empty()) { + let value: serde_json::Value = serde_json::from_str(line).expect("parse spend event"); + let cost = value["cost_usd"].as_f64().unwrap_or(0.0); + match value["tenant"].as_str() { + Some("tenant_a") => a_total += cost, + Some("tenant_b") => b_total += cost, + _ => {} + } + } + + assert!( + (a_total - 11.0).abs() < 0.001, + "tenant_a journal events sum to only A's spend (got {a_total})" + ); + assert!( + (b_total - 50.0).abs() < 0.001, + "tenant_b journal events sum to only B's spend (got {b_total})" + ); + // Tenant A's spend is not charged to tenant B's quota and vice versa. + assert!( + a_total < b_total, + "isolated tenant accounting cannot collapse two tenants' totals" + ); +} + +#[ignore = "TODO: SpendTracker::check_budget does not accept a tenant_id; per-tenant \ + budget enforcement must be added before this test can pass. \ + See audit: cross-tenant budget leak (src/features/token_pricing/spend.rs)"] +#[test] +fn tenant_budget_quota_is_isolated() { + // REGRESSION GUARD: tenant A with `monthly_limit_usd = 10` exceeding its + // budget MUST NOT block tenant B (whose own limit = 100). Today + // `record_tenant` ALSO accumulates into the global counter + // (spend.rs:139), so a tenant-scoped overspend mistakenly trips the + // global budget. This test must remain `#[ignore]` until the budget + // tracker grows a tenant parameter to `check_budget`. + let dir = TempDir::new().expect("tempdir"); + let store = Arc::new(GrobStore::open(&dir.path().join("grob.db")).expect("open store")); + let mut tracker = SpendTracker::with_store(store); + + // Tenant A spends $11 (exceeds $10 quota). + tracker.record_tenant("tenant_a", "anthropic", "claude-opus", 11.0); + // Tenant B spends $50 (well under $100 quota). + tracker.record_tenant("tenant_b", "anthropic", "claude-opus", 50.0); + + // Once per-tenant budget exists, the API will look like: + // tracker.check_tenant_budget("tenant_a", "...", "...", Some(10.0)) + // => Err(BudgetExceeded) + // tracker.check_tenant_budget("tenant_b", "...", "...", Some(100.0)) + // => Ok(()) + panic!("tenant-scoped check_budget(...) is not yet implemented"); +} + +#[ignore = "TODO: SecretBackend has no tenant scope. EnvBackend / FileBackend / \ + LocalEncryptedBackend resolve `secret:groq` globally. Per-tenant \ + credential isolation requires a `get(name, tenant)` overload or a \ + tenant-prefixed key strategy. See audit: cross-tenant credential leak \ + (src/storage/secrets.rs)"] +#[test] +fn tenant_credentials_are_scoped() { + // REGRESSION GUARD: `secret:groq` for tenant A MUST resolve to A's value + // (X), and to B's value (Y) when looked up for tenant B. Today + // `SecretBackend::get(&self, name)` accepts no tenant context, so any + // tenant can fetch any tenant's API key. This test will start passing + // when the `SecretBackend` trait grows a tenant parameter. + let dir = TempDir::new().expect("tempdir"); + let store = Arc::new(GrobStore::open(&dir.path().join("grob.db")).expect("open store")); + let cfg = SecretsConfig::default(); + let backend = build_backend(&cfg, store); + + // The "tenant_a" call site cannot disambiguate from the "tenant_b" one. + let _value: Option<_> = backend.get("groq"); + + panic!("SecretBackend::get does not accept a tenant_id parameter"); +} + +#[ignore = "TODO: `[security] strict_tenant` config does not exist. Adding it \ + requires a SecurityConfig field and a guard in auth_middleware that \ + short-circuits a 400 when neither GrobClaims nor VirtualKeyContext \ + is present. See audit: missing strict-tenant enforcement \ + (src/server/middleware.rs, src/cli/config/security.rs)"] +#[test] +fn tenant_id_required_in_strict_mode() { + // REGRESSION GUARD: when `[security] strict_tenant = true`, requests + // arriving with neither a JWT `tenant_id` claim nor a virtual-key tenant + // mapping MUST be rejected with HTTP 400 and a body that names the + // missing input. Today there is no such config flag, so the server + // happily logs `tenant_id = "anon"` for every anonymous request. + panic!("strict_tenant config flag is not yet implemented"); +} + +#[test] +fn tenant_jwt_claim_is_authoritative() { + // REGRESSION GUARD: when JWT auth is enabled, the `tenant` claim (with + // fallback to `sub`) is the source of truth used by the dispatch + // pipeline. A client-supplied `X-Tenant-ID` header MUST never override + // it. We pin the JWT-side contract here; if/when an `X-Tenant-ID` + // header is honoured by middleware, an additional handler-level test + // must assert the JWT value still wins on mismatch. + // + // PR #311 introduced `extract_tenant_id()` in `src/server/handlers.rs`, + // which prefers the VirtualKeyContext over `GrobClaims`. Both paths + // ignore any free-form header — we pin the GrobClaims contract here. + let claims = GrobClaims { + sub: "user-123".to_string(), + tenant: Some("tenant_a".to_string()), + exp: (chrono::Utc::now() + chrono::Duration::hours(1)).timestamp() as u64, + iss: None, + aud: None, + }; + assert_eq!( + claims.tenant_id(), + "tenant_a", + "explicit `tenant` claim wins over `sub`" + ); + + let claims_sub_only = GrobClaims { + sub: "tenant_b".to_string(), + tenant: None, + exp: (chrono::Utc::now() + chrono::Duration::hours(1)).timestamp() as u64, + iss: None, + aud: None, + }; + assert_eq!( + claims_sub_only.tenant_id(), + "tenant_b", + "`sub` is used as tenant when no explicit claim is present" + ); + + // Cross-tenant assertion: two distinct claims yield distinct tenant ids. + assert_ne!(claims.tenant_id(), claims_sub_only.tenant_id()); +} + +#[ignore = "TODO: ToolSpikeDetector lands in PR #308 (feat/anomaly-detection-tool-spike) \ + and is not yet on this branch. Re-enable once the detector is merged \ + and exposes a per-tenant `record_tool_call` / `is_blocked` API."] +#[test] +fn tenant_anomaly_detector_is_per_tenant() { + // REGRESSION GUARD: tenant A spiking 600 tool calls/min MUST be blocked + // independently of tenant B's traffic (50/min). The detector must key + // on `tenant_id`, not on global counters, otherwise one noisy tenant + // takes down every other tenant's tool-calling capability. + panic!("ToolSpikeDetector not yet present on this branch"); +}