diff --git a/.github/workflows/create-tag.yml b/.github/workflows/create-tag.yml
index 84b5170d..22541047 100644
--- a/.github/workflows/create-tag.yml
+++ b/.github/workflows/create-tag.yml
@@ -8,6 +8,7 @@ on:
         required: true
         type: choice
         options:
+          - iii-database
           - iii-lsp
           - iii-lsp-vscode
           - image-resize
diff --git a/.github/workflows/iii-database-e2e.yml b/.github/workflows/iii-database-e2e.yml
new file mode 100644
index 00000000..3509d249
--- /dev/null
+++ b/.github/workflows/iii-database-e2e.yml
@@ -0,0 +1,69 @@
+name: iii-database E2E
+
+on:
+  pull_request:
+    paths:
+      - 'iii-database/**'
+      - '.github/workflows/iii-database-e2e.yml'
+  workflow_dispatch:
+
+concurrency:
+  group: iii-database-e2e-${{ github.ref }}
+  cancel-in-progress: true
+
+env:
+  CARGO_TERM_COLOR: always
+
+jobs:
+  e2e:
+    name: Harness (sqlite + postgres + mysql)
+    runs-on: ubuntu-latest
+    timeout-minutes: 25
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Rewrite SSH to HTTPS for public deps
+        run: git config --global url."https://github.com/".insteadOf "ssh://git@github.com/"
+
+      - name: Install Rust toolchain
+        uses: dtolnay/rust-toolchain@stable
+
+      - name: Cache cargo registry & build
+        uses: Swatinem/rust-cache@v2
+        with:
+          workspaces: iii-database
+
+      - name: Install Node.js
+        uses: actions/setup-node@v4
+        with:
+          node-version: '20'
+          cache: 'npm'
+          cache-dependency-path: iii-database/tests/e2e/workers/harness/package-lock.json
+
+      # GHA `services:` blocks can't pass `-c wal_level=logical` to postgres,
+      # which the row-change tests require. Reuse the same docker-compose
+      # stack the harness uses locally for dev/CI parity.
+      - name: Install iii engine (latest from main)
+        run: |
+          curl -fsSL --retry 3 --retry-connrefused --retry-delay 5 \
+            https://install.iii.dev/iii/main/install.sh | sh
+          echo "$HOME/.local/bin" >> "$GITHUB_PATH"
+
+      - name: Verify engine
+        run: iii --version
+
+      - name: Run harness
+        working-directory: iii-database/tests/e2e
+        # --with-cargo-test runs `cargo test --all-features` against the
+        # already-running postgres + mysql so the gated driver/pool tests
+        # actually exercise their target DBs (otherwise they early-return).
+        run: ./run-tests.sh --with-cargo-test
+
+      - name: Upload report on failure
+        if: failure()
+        uses: actions/upload-artifact@v4
+        with:
+          name: iii-database-e2e-report
+          path: |
+            iii-database/tests/e2e/reports/
+          retention-days: 7
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 7061f1e8..6c4ee585 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -3,6 +3,7 @@ name: Release
 on:
   push:
     tags:
+      - 'iii-database/v*'
       - 'iii-lsp/v*'
       - 'image-resize/v*'
       - 'mcp/v*'
diff --git a/.gitignore b/.gitignore
index 60aa7efc..92245e39 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,8 +4,10 @@ target/
 .idea/
 .DS_Store
 docs
+.worktrees
 node_modules
 package-lock.json
 pnpm-lock.yaml
 yarn.lock
+!iii-database/tests/e2e/workers/harness/package-lock.json
 iii_workers/
\ No newline at end of file
diff --git a/iii-database/.gitignore b/iii-database/.gitignore
new file mode 100644
index 00000000..d8b7b0ac
--- /dev/null
+++ b/iii-database/.gitignore
@@ -0,0 +1,5 @@
+target/
+*.db
+*.sqlite
+*.sqlite-journal
+/data/
diff --git a/iii-database/Cargo.lock b/iii-database/Cargo.lock
new file mode 100644
index 00000000..45ebd4b0
--- /dev/null
+++ b/iii-database/Cargo.lock
@@ -0,0 +1,4201 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 4 + +[[package]] +name = "adler2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" + +[[package]] +name = "ahash" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" +dependencies = [ + "getrandom 0.2.17", + "once_cell", + "version_check", +] + +[[package]] +name = "ahash" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "aho-corasick" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" +dependencies = [ + "memchr", +] + +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anstream" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "824a212faf96e9acacdbd09febd34438f8f711fb84e09a8916013cd7815ca28d" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "940b3a0ca603d1eade50a4846a2afffd5ef57a9feac2c0e2ec2e14f9ead76000" + +[[package]] +name = "anstyle-parse" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52ce7f38b242319f7cabaa6813055467063ecdc9d355bbb4ce0c68908cd8130e" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" +dependencies = [ + "anstyle", + "once_cell_polyfill", + "windows-sys 0.61.2", +] + +[[package]] +name = "anyhow" +version = "1.0.102" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + +[[package]] +name = "async-trait" +version = "0.1.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "aws-lc-rs" +version = "1.16.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ec6fb3fe69024a75fa7e1bfb48aa6cf59706a101658ea01bfd33b2b248a038f" +dependencies = [ + "aws-lc-sys", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f50037ee5e1e41e7b8f9d161680a725bd1626cb6f8c7e901f91f942850852fe7" +dependencies = [ + "cc", + "cmake", + "dunce", + "fs_extra", +] + +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "base64ct" +version = "1.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2af50177e190e07a26ab74f8b1efbfe2ef87da2116221318cb1c2e82baf7de06" + +[[package]] +name = "bindgen" +version = "0.72.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "itertools 0.13.0", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.117", +] + +[[package]] +name = "bitflags" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4512299f36f043ab09a583e57bceb5a5aab7a73db1805848e8fef3c9e8c78b3" + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "block-buffer" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdd35008169921d80bc60d3d0ab416eecb028c4cd653352907921d95084790be" +dependencies = [ + "hybrid-array", +] + +[[package]] +name = "borsh" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfd1e3f8955a5d7de9fab72fc8373fade9fb8a703968cb200ae3dc6cf08e185a" +dependencies = [ + "borsh-derive", + "bytes", + "cfg_aliases", +] + +[[package]] +name = "borsh-derive" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfcfdc083699101d5a7965e49925975f2f55060f94f9a05e7187be95d530ca59" +dependencies = [ + "once_cell", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "btoi" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd6407f73a9b8b6162d8a2ef999fe6afd7cc15902ebf42c5cd296addf17e0ad" +dependencies = [ + "num-traits", +] + +[[package]] +name = "bumpalo" +version = "3.20.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" + +[[package]] +name = "bytecheck" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23cdc57ce23ac53c931e88a43d06d070a6fd142f2617be5855eb75efc9beb1c2" +dependencies = [ + "bytecheck_derive", + "ptr_meta", + "simdutf8", +] + +[[package]] +name = "bytecheck_derive" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" + +[[package]] +name = "cc" +version = "1.2.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d16d90359e986641506914ba71350897565610e87ce0ad9e6f28569db3dd5c6d" +dependencies = [ + "find-msvc-tools", + "jobserver", + "libc", + "shlex", +] + +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "chacha20" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f8d983286843e49675a4b7a2d174efe136dc93a18d69130dd18198a6c167601" +dependencies = [ + "cfg-if", + "cpufeatures 0.3.0", + "rand_core 0.10.1", +] + +[[package]] +name = "chrono" +version = "0.4.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c673075a2e0e5f4a1dde27ce9dee1ea4558c7ffe648f576438a20ca1d2acc4b0" +dependencies = [ + "iana-time-zone", + "js-sys", + "num-traits", + "serde", + "wasm-bindgen", + "windows-link", +] + +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "clap" +version = "4.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ddb117e43bbf7dacf0a4190fef4d345b9bad68dfc649cb349e7d17d28428e51" +dependencies = [ + "clap_builder", + "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "714a53001bf66416adb0e2ef5ac857140e7dc3a0c48fb28b2f10762fc4b5069f" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim", +] + +[[package]] +name = "clap_derive" +version = "4.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2ce8604710f6733aa641a2b3731eaa1e8b3d9973d5e3565da11800813f997a9" +dependencies 
= [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "clap_lex" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8d4a3bb8b1e0c1050499d1815f5ab16d04f0959b233085fb31653fbfc9d98f9" + +[[package]] +name = "cmake" +version = "0.1.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0f78a02292a74a88ac736019ab962ece0bc380e3f977bf72e376c5d78ff0678" +dependencies = [ + "cc", +] + +[[package]] +name = "cmov" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f88a43d011fc4a6876cb7344703e297c71dda42494fee094d5f7c76bf13f746" + +[[package]] +name = "colorchoice" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d07550c9036bf2ae0c684c4297d503f838287c83c53686d05370d0e139ae570" + +[[package]] +name = "const-hex" +version = "1.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "531185e432bb31db1ecda541e9e7ab21468d4d844ad7505e0546a49b4945d49b" +dependencies = [ + "cfg-if", + "cpufeatures 0.2.17", + "proptest", + "serde_core", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + +[[package]] +name = "const-oid" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6ef517f0926dd24a1582492c791b6a4818a4d94e789a334894aa15b0d12f55c" + +[[package]] +name = "core-foundation" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "cpufeatures" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b2a41393f66f16b0823bb79094d54ac5fbd34ab292ddafb9a0456ac9f87d201" +dependencies = [ + "libc", +] + +[[package]] +name = "crc32fast" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "crossbeam" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8" +dependencies = [ + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch", + "crossbeam-queue", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", 
+ "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-queue" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "crypto-common" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "crypto-common" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77727bb15fa921304124b128af125e7e3b968275d1b108b379190264f4423710" +dependencies = [ + "hybrid-array", +] + +[[package]] +name = "ctutils" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d5515a3834141de9eafb9717ad39eea8247b5674e6066c404e8c4b365d2a29e" +dependencies = [ + "cmov", +] + +[[package]] +name = "data-encoding" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4ae5f15dda3c708c0ade84bfee31ccab44a3da4f88015ed22f63732abe300c8" + +[[package]] +name = "deadpool" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0be2b1d1d6ec8d846f05e137292d0b89133caf95ef33695424c09568bdd39b1b" +dependencies = [ + "deadpool-runtime", + "lazy_static", + "num_cpus", + "tokio", +] + +[[package]] +name = "deadpool-postgres" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d697d376cbfa018c23eb4caab1fd1883dd9c906a8c034e8d9a3cb06a7e0bef9" +dependencies = [ + "async-trait", + "deadpool", + "getrandom 0.2.17", + "tokio", + "tokio-postgres", + "tracing", +] + +[[package]] +name = "deadpool-runtime" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "092966b41edc516079bdf31ec78a2e0588d1d0c08f78b91d8307215928642b2b" +dependencies = [ + "tokio", +] + +[[package]] +name = "der" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" +dependencies = [ + "const-oid 0.9.6", + "der_derive", + "flagset", + "zeroize", +] + +[[package]] +name = "der_derive" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8034092389675178f570469e6c3b0465d3d30b4505c294a6550db47f3c17ad18" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer 0.10.4", + "crypto-common 0.1.7", +] + +[[package]] +name = "digest" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4850db49bf08e663084f7fb5c87d202ef91a3907271aff24a94eb97ff039153c" +dependencies = [ + "block-buffer 
0.12.0", + "const-oid 0.10.2", + "crypto-common 0.2.1", + "ctutils", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + +[[package]] +name = "dyn-clone" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "fallible-iterator" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" + +[[package]] +name = "fallible-iterator" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" + +[[package]] +name = "fallible-streaming-iterator" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" + +[[package]] +name = "fastrand" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f1f227452a390804cdb637b74a86990f2a7d7ba4b7d5693aac9b4dd6defd8d6" + +[[package]] +name = "find-msvc-tools" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" + +[[package]] +name = "flagset" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7ac824320a75a52197e8f2d787f6a38b6718bb6897a35142d749af3c0e8f4fe" + +[[package]] +name = "flate2" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "843fba2746e448b37e26a819579957415c8cef339bf08564fe8b7ddbd959573c" +dependencies = [ + "crc32fast", + "miniz_oxide", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "form_urlencoded" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "fs_extra" +version = "1.3.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "futures-channel" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" + +[[package]] +name = "futures-executor" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-macro" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "futures-sink" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" + +[[package]] +name = "futures-task" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" + +[[package]] +name = "futures-util" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" +dependencies = [ + "futures-core", + "futures-macro", + "futures-sink", + "futures-task", + "pin-project-lite", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi 0.11.1+wasi-snapshot-preview1", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "r-efi 5.3.0", + "wasip2", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de51e6874e94e7bf76d726fc5d13ba782deca734ff60d5bb2fb2607c7406555" +dependencies = [ + "cfg-if", + "libc", + "r-efi 6.0.0", + "rand_core 0.10.1", + "wasip2", + "wasip3", +] + +[[package]] +name = "glob" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" + +[[package]] +name = "h2" +version = "0.4.13" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash 0.7.8", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash 0.8.12", +] + +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", +] + +[[package]] +name = "hashbrown" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f467dd6dccf739c208452f8014c75c18bb8301b050ad1cfb27153803edb0f51" + +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" + +[[package]] +name = "hmac" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6303bc9732ae41b04cb554b844a762b4115a61bfaa81e3e83050991eeb56863f" +dependencies = [ + "digest 0.11.2", +] + +[[package]] +name = "hostname" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "617aaa3557aef3810a6369d0a99fac8a080891b68bd9f9812a1eeda0c0730cbd" +dependencies = [ + "cfg-if", + "libc", + "windows-link", +] + +[[package]] +name = "http" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" +dependencies = [ + "bytes", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "hybrid-array" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"08d46837a0ed51fe95bd3b05de33cd64a1ee88fc797477ca48446872504507c5" +dependencies = [ + "typenum", +] + +[[package]] +name = "hyper" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6299f016b246a94207e63da54dbe807655bf9e00044f73ded42c3ac5305fbcca" +dependencies = [ + "atomic-waker", + "bytes", + "futures-channel", + "futures-core", + "h2", + "http", + "http-body", + "httparse", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ca68d021ef39cf6463ab54c1d0f5daf03377b70561305bb89a8f83aab66e0f" +dependencies = [ + "http", + "hyper", + "hyper-util", + "rustls", + "tokio", + "tokio-rustls", + "tower-service", + "webpki-roots 1.0.7", +] + +[[package]] +name = "hyper-timeout" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +dependencies = [ + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-channel", + "futures-util", + "http", + "http-body", + "hyper", + "ipnet", + "libc", + "percent-encoding", + "pin-project-lite", + "socket2 0.6.3", + "tokio", + "tower-service", + "tracing", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.65" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "icu_collections" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2984d1cd16c883d7935b9e07e44071dca8d917fd52ecc02c04d5fa0b5a3f191c" +dependencies = [ + "displaydoc", + "potential_utf", + "utf8_iter", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92219b62b3e2b4d88ac5119f8904c10f8f61bf7e95b640d25ba3075e6cac2c29" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c56e5ee99d6e3d33bd91c5d85458b6005a22140021cc324cea84dd0e72cff3b4" +dependencies = [ + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da3be0ae77ea334f4da67c12f149704f19f81d1adf7c51cf482943e84a2bad38" + +[[package]] +name = "icu_properties" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bee3b67d0ea5c2cca5003417989af8996f8604e34fb9ddf96208a033901e70de" +dependencies = [ + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e2bbb201e0c04f7b4b3e14382af113e17ba4f63e2c9d2ee626b720cbce54a14" + +[[package]] +name = "icu_provider" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139c4cf31c8b5f33d7e199446eff9c1e02decfc2f0eec2c8d71f65befa45b421" +dependencies = [ + "displaydoc", + "icu_locale_core", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb68373c0d6620ef8105e855e7745e18b0d00d3bdb07fb532e434244cdb9a714" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "iii-database" +version = "1.0.0" +dependencies = [ + "anyhow", + "async-trait", + "base64 0.22.1", + "bytes", + "chrono", + "clap", + "deadpool-postgres", + "futures-util", + "iii-sdk", + "mysql_async", + "postgres-protocol", + "postgres-types", + "r2d2", + "r2d2_sqlite", + "rusqlite", + "rust_decimal", + "rustls", + "rustls-native-certs", + "rustls-pemfile", + "rustls-pki-types", + "rustls-webpki", + "schemars", + "serde", + "serde_json", + "serde_yml", + "tempfile", + "thiserror 1.0.69", + "tokio", + "tokio-postgres", + "tokio-postgres-rustls", + "tracing", + "tracing-subscriber", + "url", + "uuid", +] + +[[package]] +name = "iii-sdk" +version = "0.12.0-next.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60ce447ddd571bfa8fc40270a9de6e7655a91d2b949f0d5c12842cde14e107bd" +dependencies = [ + "async-trait", + "futures-util", + "hostname", + "opentelemetry", + "opentelemetry-http", + "opentelemetry-proto", + "opentelemetry_sdk", + "prost", + "reqwest", + "schemars", + "serde", + "serde_json", + "sysinfo", + "thiserror 2.0.18", + "tokio", + "tokio-tungstenite", + "tracing", + "uuid", +] + +[[package]] +name = "indexmap" +version = "2.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d466e9454f08e4a911e14806c24e16fba1b4c121d1ea474396f396069cf949d9" +dependencies = [ + "equivalent", + "hashbrown 0.17.0", + "serde", + "serde_core", +] + +[[package]] +name = "ipnet" +version = "2.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2" + +[[package]] +name = "iri-string" +version = "0.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25e659a4bb38e810ebc252e53b5814ff908a8c58c2a9ce2fae1bbec24cbf4e20" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" 
+ +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f42a60cbdf9a97f5d2305f08a87dc4e09308d1276d28c869c684d7777685682" + +[[package]] +name = "jobserver" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" +dependencies = [ + "getrandom 0.3.4", + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.97" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1840c94c045fbcf8ba2812c95db44499f7c64910a912551aaaa541decebcacf" +dependencies = [ + "cfg-if", + "futures-util", + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "keyed_priority_queue" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee7893dab2e44ae5f9d0173f26ff4aa327c10b01b06a72b52dd9405b628640d" +dependencies = [ + "indexmap", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" + +[[package]] +name = "libc" +version = "0.2.186" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68ab91017fe16c622486840e4c83c9a37afeff978bd239b5293d61ece587de66" + +[[package]] +name = "libloading" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" +dependencies = [ + "cfg-if", + "windows-link", +] + +[[package]] +name = "libredox" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e02f3bb43d335493c96bf3fd3a321600bf6bd07ed34bc64118e9293bdffea46c" +dependencies = [ + "libc", +] + +[[package]] +name = "libsqlite3-sys" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "libyml" +version = "0.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3302702afa434ffa30847a83305f0a69d6abd74293b6554c18ec85c7ef30c980" +dependencies = [ + "anyhow", + "version_check", +] + +[[package]] +name = "linux-raw-sys" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53" + +[[package]] +name = "litemap" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92daf443525c4cce67b150400bc2316076100ce0b3686209eb8cf3c31612e6f0" + +[[package]] +name = "lock_api" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" + +[[package]] +name = "lru" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" +dependencies = [ + "hashbrown 0.15.5", +] + +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + +[[package]] +name = "matchers" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" +dependencies = [ + "regex-automata", +] + +[[package]] +name = "md-5" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69b6441f590336821bb897fb28fc622898ccceb1d6cea3fde5ea86b090c4de98" +dependencies = [ + "cfg-if", + "digest 0.11.2", +] + +[[package]] +name = "memchr" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", + "simd-adler32", +] + +[[package]] +name = "mio" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50b7e5b27aa02a74bac8c3f23f448f8d87ff11f92d3aac1a6ed369ee08cc56c1" +dependencies = [ + "libc", + "wasi 0.11.1+wasi-snapshot-preview1", + "windows-sys 0.61.2", +] + +[[package]] +name = "mysql_async" +version = "0.34.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0b66e411c31265e879d9814d03721f2daa7ad07337b6308cb4bb0cde7e6fd47" +dependencies = [ + "bytes", + "crossbeam", + "flate2", + "futures-core", + "futures-sink", + "futures-util", + "keyed_priority_queue", + "lru", + "mysql_common", + "pem", + "percent-encoding", + "pin-project", + "rand 0.8.6", + "rustls", + "rustls-pemfile", + "serde", + "serde_json", + "socket2 0.5.10", + "thiserror 1.0.69", + "tokio", + "tokio-rustls", + "tokio-util", + "twox-hash", + "url", + "webpki", + "webpki-roots 0.26.11", +] + +[[package]] +name = "mysql_common" +version = "0.32.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "478b0ff3f7d67b79da2b96f56f334431aef65e15ba4b29dd74a4236e29582bdc" +dependencies = [ + "base64 0.21.7", + "bindgen", + "bitflags", + "btoi", + "byteorder", + "bytes", + "cc", + "cmake", + "crc32fast", + "flate2", + "lazy_static", + "num-bigint", + "num-traits", + "rand 0.8.6", + "regex", + "saturating", + "serde", + "serde_json", + "sha1", + "sha2 0.10.9", + "smallvec", + "subprocess", + "thiserror 1.0.69", + "uuid", + "zstd", +] + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "ntapi" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3b335231dfd352ffb0f8017f3b6027a4917f7df785ea2143d8af2adc66980ae" +dependencies = [ + "winapi", +] + +[[package]] +name = "nu-ansi-term" +version = "0.50.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "objc2-core-foundation" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a180dd8642fa45cdb7dd721cd4c11b1cadd4929ce112ebd8b9f5803cc79d536" +dependencies = [ + "bitflags", +] + +[[package]] +name = "objc2-io-kit" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33fafba39597d6dc1fb709123dfa8289d39406734be322956a69f0931c73bb15" +dependencies = [ + "libc", + "objc2-core-foundation", +] + +[[package]] +name = "objc2-system-configuration" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7216bd11cbda54ccabcab84d523dc93b858ec75ecfb3a7d89513fa22464da396" +dependencies = [ + "objc2-core-foundation", +] + +[[package]] +name = "once_cell" +version = "1.21.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7c3e4beb33f85d45ae3e3a1792185706c8e16d043238c593331cc7cd313b50" + +[[package]] +name = "once_cell_polyfill" +version = "1.70.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" + +[[package]] +name = "openssl-probe" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" + +[[package]] +name = "opentelemetry" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b84bcd6ae87133e903af7ef497404dda70c60d0ea14895fc8a5e6722754fc2a0" +dependencies = [ + "futures-core", + "futures-sink", + "js-sys", + "pin-project-lite", + "thiserror 2.0.18", + "tracing", +] + +[[package]] +name = "opentelemetry-http" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7a6d09a73194e6b66df7c8f1b680f156d916a1a942abf2de06823dd02b7855d" +dependencies = [ + "async-trait", + "bytes", + "http", + 
"opentelemetry", + "reqwest", +] + +[[package]] +name = "opentelemetry-proto" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7175df06de5eaee9909d4805a3d07e28bb752c34cab57fa9cff549da596b30f" +dependencies = [ + "base64 0.22.1", + "const-hex", + "opentelemetry", + "opentelemetry_sdk", + "prost", + "serde", + "serde_json", + "tonic", + "tonic-prost", +] + +[[package]] +name = "opentelemetry_sdk" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e14ae4f5991976fd48df6d843de219ca6d31b01daaab2dad5af2badeded372bd" +dependencies = [ + "futures-channel", + "futures-executor", + "futures-util", + "opentelemetry", + "percent-encoding", + "rand 0.9.4", + "thiserror 2.0.18", + "tokio", + "tokio-stream", +] + +[[package]] +name = "parking_lot" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-link", +] + +[[package]] +name = "pem" +version = "3.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d30c53c26bc5b31a98cd02d20f25a7c8567146caf63ed593a9d87b2775291be" +dependencies = [ + "base64 0.22.1", + "serde_core", +] + +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + +[[package]] +name = "phf" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1562dc717473dbaa4c1f85a36410e03c047b2e7df7f45ee938fbef64ae7fadf" +dependencies = [ + "phf_shared", + "serde", +] + +[[package]] +name = "phf_shared" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e57fef6bc5981e38c2ce2d63bfa546861309f875b8a75f092d1d54ae2d64f266" +dependencies = [ + "siphasher", +] + +[[package]] +name = "pin-project" +version = "1.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1749c7ed4bcaf4c3d0a3efc28538844fb29bcdd7d2b67b2be7e20ba861ff517" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b20ed30f105399776b9c883e68e536ef602a16ae6f596d2c473591d6ad64c6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a89322df9ebe1c1578d689c92318e070967d1042b512afbe49518723f4e6d5cd" + +[[package]] +name = "pkg-config" +version = "0.3.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19f132c84eca552bf34cab8ec81f1c1dcc229b811638f9d283dceabe58c5569e" + +[[package]] +name = "postgres-protocol" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56201207dac53e2f38e848e31b4b91616a6bb6e0c7205b77718994a7f49e70fc" +dependencies = [ + "base64 0.22.1", + "byteorder", + "bytes", + "fallible-iterator 0.2.0", + "hmac", + "md-5", + 
"memchr", + "rand 0.10.1", + "sha2 0.11.0", + "stringprep", +] + +[[package]] +name = "postgres-types" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8dc729a129e682e8d24170cd30ae1aa01b336b096cbb56df6d534ffec133d186" +dependencies = [ + "bytes", + "chrono", + "fallible-iterator 0.2.0", + "postgres-protocol", + "serde_core", + "serde_json", + "uuid", +] + +[[package]] +name = "potential_utf" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0103b1cef7ec0cf76490e969665504990193874ea05c85ff9bab8b911d0a0564" +dependencies = [ + "zerovec", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn 2.0.117", +] + +[[package]] +name = "proc-macro-crate" +version = "3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e67ba7e9b2b56446f1d419b1d807906278ffa1a658a8a5d8a39dcb1f5a78614f" +dependencies = [ + "toml_edit", +] + +[[package]] +name = "proc-macro2" +version = "1.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "proptest" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b45fcc2344c680f5025fe57779faef368840d0bd1f42f216291f0dc4ace4744" +dependencies = [ + "bitflags", + "num-traits", + "rand 0.9.4", + "rand_chacha 0.9.0", + "rand_xorshift", + "regex-syntax", + "unarray", +] + +[[package]] +name = "prost" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2ea70524a2f82d518bce41317d0fae74151505651af45faf1ffbd6fd33f0568" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" +dependencies = [ + "anyhow", + "itertools 0.14.0", + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "ptr_meta" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1" +dependencies = [ + "ptr_meta_derive", +] + +[[package]] +name = "ptr_meta_derive" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "quinn" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" +dependencies = [ + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "socket2 0.6.3", + "thiserror 2.0.18", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.14" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "434b42fec591c96ef50e21e886936e66d3cc3f737104fdb9b737c40ffb94c098" +dependencies = [ + "bytes", + "getrandom 0.3.4", + "lru-slab", + "rand 0.9.4", + "ring", + "rustc-hash", + "rustls", + "rustls-pki-types", + "slab", + "thiserror 2.0.18", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2 0.6.3", + "tracing", + "windows-sys 0.60.2", +] + +[[package]] +name = "quote" +version = "1.0.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41f2619966050689382d2b44f664f4bc593e129785a36d6ee376ddf37259b924" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "r-efi" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf" + +[[package]] +name = "r2d2" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" +dependencies = [ + "log", + "parking_lot", + "scheduled-thread-pool", +] + +[[package]] +name = "r2d2_sqlite" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a982edf65c129796dba72f8775b292ef482b40d035e827a9825b3bc07ccc5f2" +dependencies = [ + "r2d2", + "rusqlite", + "uuid", +] + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca0ecfa931c29007047d1bc58e623ab12e5590e8c7cc53200d5202b69266d8a" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c5af06bb1b7d3216d91932aed5265164bf384dc89cd6ba05cf59a35f5f76ea" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.5", +] + +[[package]] +name = "rand" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2e8e8bcc7961af1fdac401278c6a831614941f6164ee3bf4ce61b7edb162207" +dependencies = [ + "chacha20", + "getrandom 0.4.2", + "rand_core 0.10.1", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.5", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.17", +] + +[[package]] +name = "rand_core" 
+version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" +dependencies = [ + "getrandom 0.3.4", +] + +[[package]] +name = "rand_core" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63b8176103e19a2643978565ca18b50549f6101881c443590420e4dc998a3c69" + +[[package]] +name = "rand_xorshift" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" +dependencies = [ + "rand_core 0.9.5", +] + +[[package]] +name = "redox_syscall" +version = "0.5.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" +dependencies = [ + "bitflags", +] + +[[package]] +name = "regex" +version = "1.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a" + +[[package]] +name = "rend" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71fe3824f5629716b1589be05dacd749f6aa084c87e00e016714a8cdfccc997c" +dependencies = [ + "bytecheck", +] + +[[package]] +name = "reqwest" +version = "0.12.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-core", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-util", + "js-sys", + "log", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tokio-rustls", + "tower", + "tower-http", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots 1.0.7", +] + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.17", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "rkyv" +version = "0.7.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2297bf9c81a3f0dc96bc9521370b88f054168c29826a75e89c55ff196e7ed6a1" +dependencies = [ + "bitvec", + "bytecheck", + "bytes", + "hashbrown 0.12.3", + "ptr_meta", + "rend", + "rkyv_derive", + "seahash", + "tinyvec", + "uuid", +] + +[[package]] +name = "rkyv_derive" +version = "0.7.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84d7b42d4b8d06048d3ac8db0eb31bcb942cbeb709f0b5f2b2ebde398d3038f5" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "rusqlite" +version 
= "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae" +dependencies = [ + "bitflags", + "chrono", + "fallible-iterator 0.3.0", + "fallible-streaming-iterator", + "hashlink", + "libsqlite3-sys", + "smallvec", +] + +[[package]] +name = "rust_decimal" +version = "1.41.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ce901f9a19d251159075a4c37af514c3b8ef99c22e02dd8c19161cf397ee94a" +dependencies = [ + "arrayvec", + "borsh", + "bytes", + "num-traits", + "postgres-types", + "rand 0.8.6", + "rkyv", + "serde", + "serde_json", + "wasm-bindgen", +] + +[[package]] +name = "rustc-hash" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94300abf3f1ae2e2b8ffb7b58043de3d399c73fa6f4b73826402a5c457614dbe" + +[[package]] +name = "rustix" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustls" +version = "0.23.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef86cd5876211988985292b91c96a8f2d298df24e75989a43a3c73f2d4d8168b" +dependencies = [ + "aws-lc-rs", + "log", + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-native-certs" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" +dependencies = [ + "openssl-probe", + "rustls-pki-types", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30a7197ae7eb376e574fe940d068c30fe0462554a3ddbe4eca7838e049c937a9" +dependencies = [ + "web-time", + "zeroize", +] + +[[package]] +name = "rustls-webpki" +version = "0.103.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61c429a8649f110dddef65e2a5ad240f747e85f7758a6bccc7e5777bd33f756e" +dependencies = [ + "aws-lc-rs", + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "ryu" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" + +[[package]] +name = "saturating" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71" + +[[package]] +name = "schannel" +version = "0.1.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91c1b7e4904c873ef0710c1f407dde2e6287de2bebc1bbbf7d430bb7cbffd939" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "scheduled-thread-pool" +version = "0.2.7" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" +dependencies = [ + "parking_lot", +] + +[[package]] +name = "schemars" +version = "0.8.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fbf2ae1b8bc8e02df939598064d22402220cd5bbcca1c76f7d6a310974d5615" +dependencies = [ + "chrono", + "dyn-clone", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "0.8.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32e265784ad618884abaea0600a9adf15393368d840e0222d101a072f3f7534d" +dependencies = [ + "proc-macro2", + "quote", + "serde_derive_internals", + "syn 2.0.117", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "seahash" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" + +[[package]] +name = "security-framework" +version = "3.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7f4bc775c73d9a02cde8bf7b2ec4c9d12743edf609006c7facc23998404cd1d" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2691df843ecc5d231c0b14ece2acc3efb62c0a398c7e1d875f3983ce020e3" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "1.0.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a7852d02fc848982e0c167ef163aaff9cd91dc640ba85e263cb1ce46fae51cd" + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "serde_derive_internals" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "serde_json" +version = "1.0.149" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" +dependencies = [ + "itoa", + "memchr", + "serde", + "serde_core", + "zmij", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + 
"serde", +] + +[[package]] +name = "serde_yml" +version = "0.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59e2dd588bf1597a252c3b920e0143eb99b0f76e4e082f4c92ce34fbc9e71ddd" +dependencies = [ + "indexmap", + "itoa", + "libyml", + "memchr", + "ryu", + "serde", + "version_check", +] + +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures 0.2.17", + "digest 0.10.7", +] + +[[package]] +name = "sha2" +version = "0.10.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +dependencies = [ + "cfg-if", + "cpufeatures 0.2.17", + "digest 0.10.7", +] + +[[package]] +name = "sha2" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "446ba717509524cb3f22f17ecc096f10f4822d76ab5c0b9822c5f9c284e825f4" +dependencies = [ + "cfg-if", + "cpufeatures 0.3.0", + "digest 0.11.2", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook-registry" +version = "1.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" +dependencies = [ + "errno", + "libc", +] + +[[package]] +name = "simd-adler32" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "703d5c7ef118737c72f1af64ad2f6f8c5e1921f818cdcb97b8fe6fc69bf66214" + +[[package]] +name = "simdutf8" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" + +[[package]] +name = "siphasher" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" + +[[package]] +name = "slab" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "socket2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "socket2" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a766e1110788c36f4fa1c2b71b387a7815aa65f88ce0229841826633d93723e" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] 
+ +[[package]] +name = "stable_deref_trait" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "stringprep" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1" +dependencies = [ + "unicode-bidi", + "unicode-normalization", + "unicode-properties", +] + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "subprocess" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c56e8662b206b9892d7a5a3f2ecdbcb455d3d6b259111373b7e08b8055158a8" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.117" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "sysinfo" +version = "0.38.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92ab6a2f8bfe508deb3c6406578252e491d299cbbf3bc0529ecc3313aee4a52f" +dependencies = [ + "libc", + "memchr", + "ntapi", + "objc2-core-foundation", + "objc2-io-kit", + "windows", +] + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "tempfile" +version = "3.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32497e9a4c7b38532efcdebeef879707aa9f794296a4f0244f6f69e9bc8574bd" +dependencies = [ + "fastrand", + "getrandom 0.4.2", + "once_cell", + "rustix", + "windows-sys 0.61.2", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.18" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" +dependencies = [ + "thiserror-impl 2.0.18", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "tinystr" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8323304221c2a851516f22236c5722a72eaa19749016521d6dff0824447d96d" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tinyvec" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e61e67053d25a4e82c844e8424039d9745781b3fc4f32b8d55ed50f5f667ef3" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tls_codec" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de2e01245e2bb89d6f05801c564fa27624dbd7b1846859876c7dad82e90bf6b" +dependencies = [ + "tls_codec_derive", + "zeroize", +] + +[[package]] +name = "tls_codec_derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d2e76690929402faae40aebdda620a2c0e25dd6d3b9afe48867dfd95991f4bd" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "tokio" +version = "1.52.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b67dee974fe86fd92cc45b7a95fdd2f99a36a6d7b0d431a231178d3d670bbcc6" +dependencies = [ + "bytes", + "libc", + "mio", + "pin-project-lite", + "signal-hook-registry", + "socket2 0.6.3", + "tokio-macros", + "windows-sys 0.61.2", +] + +[[package]] +name = "tokio-macros" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "385a6cb71ab9ab790c5fe8d67f1645e6c450a7ce006a33de03daa956cf70a496" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "tokio-postgres" +version = "0.7.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dd8df5ef180f6364759a6f00f7aadda4fbbac86cdee37480826a6ff9f3574ce" +dependencies = [ + "async-trait", + "byteorder", + "bytes", + "fallible-iterator 0.2.0", + "futures-channel", + "futures-util", + "log", + "parking_lot", + "percent-encoding", + "phf", + "pin-project-lite", + "postgres-protocol", + "postgres-types", + "rand 0.10.1", + "socket2 0.6.3", + "tokio", + "tokio-util", + "whoami", +] + +[[package]] +name = "tokio-postgres-rustls" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27d684bad428a0f2481f42241f821db42c54e2dc81d8c00db8536c506b0a0144" 
+dependencies = [ + "const-oid 0.9.6", + "ring", + "rustls", + "tokio", + "tokio-postgres", + "tokio-rustls", + "x509-cert", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" +dependencies = [ + "rustls", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-tungstenite" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25a406cddcc431a75d3d9afc6a7c0f7428d4891dd973e4d54c56b46127bf857" +dependencies = [ + "futures-util", + "log", + "rustls", + "rustls-native-certs", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tungstenite", +] + +[[package]] +name = "tokio-util" +version = "0.7.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "toml_datetime" +version = "1.1.1+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3165f65f62e28e0115a00b2ebdd37eb6f3b641855f9d636d3cd4103767159ad7" +dependencies = [ + "serde_core", +] + +[[package]] +name = "toml_edit" +version = "0.25.11+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b59c4d22ed448339746c59b905d24568fcbb3ab65a500494f7b8c3e97739f2b" +dependencies = [ + "indexmap", + "toml_datetime", + "toml_parser", + "winnow", +] + +[[package]] +name = "toml_parser" +version = "1.1.2+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2abe9b86193656635d2411dc43050282ca48aa31c2451210f4202550afb7526" +dependencies = [ + "winnow", +] + +[[package]] +name = "tonic" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fec7c61a0695dc1887c1b53952990f3ad2e3a31453e1f49f10e75424943a93ec" +dependencies = [ + "async-trait", + "base64 0.22.1", + "bytes", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "sync_wrapper", + "tokio", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tonic-prost" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a55376a0bbaa4975a3f10d009ad763d8f4108f067c7c2e74f3001fb49778d309" +dependencies = [ + "bytes", + "prost", + "tonic", +] + +[[package]] +name = "tower" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" +dependencies = [ + "futures-core", + "futures-util", + "indexmap", + "pin-project-lite", + "slab", + "sync_wrapper", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" +dependencies = [ + "bitflags", + "bytes", + "futures-util", + "http", + "http-body", + "iri-string", 
+ "pin-project-lite", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" +dependencies = [ + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "tracing-core" +version = "0.1.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7f578e5945fb242538965c2d0b04418d38ec25c79d160cd279bf0731c8d319" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex-automata", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "tungstenite" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8628dcc84e5a09eb3d8423d6cb682965dea9133204e8fb3efee74c2a0c259442" +dependencies = [ + "bytes", + "data-encoding", + "http", + "httparse", + "log", + "rand 0.9.4", + "rustls", + "rustls-pki-types", + "sha1", + "thiserror 2.0.18", + "utf-8", +] + +[[package]] +name = "twox-hash" +version = "1.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" +dependencies = [ + "cfg-if", + "rand 0.8.6", + "static_assertions", +] + +[[package]] +name = "typenum" +version = "1.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40ce102ab67701b8526c123c1bab5cbe42d7040ccfd0f64af1a385808d2f43de" + +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + +[[package]] +name = "unicode-bidi" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" + +[[package]] +name = "unicode-ident" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" + +[[package]] +name = "unicode-normalization" +version = "0.1.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fd4f6878c9cb28d874b009da9e8d183b5abc80117c40bbd187a1fde336be6e8" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-properties" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7df058c713841ad818f1dc5d3fd88063241cc61f49f5fbea4b951e8cf5a8d71d" + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "uuid" +version = "1.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddd74a9687298c6858e9b88ec8935ec45d22e8fd5e6394fa1bd4e99a87789c76" +dependencies = [ + "getrandom 0.4.2", + "js-sys", + "rand 0.10.1", + "serde_core", + "wasm-bindgen", +] + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasi" +version = "0.14.7+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "883478de20367e224c0090af9cf5f9fa85bed63a95c1abf3afc5c083ebc06e8c" +dependencies = [ + "wasip2", +] + +[[package]] +name = "wasip2" +version = "1.0.3+wasi-0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"20064672db26d7cdc89c7798c48a0fdfac8213434a1186e5ef29fd560ae223d6" +dependencies = [ + "wit-bindgen 0.57.1", +] + +[[package]] +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" +dependencies = [ + "wit-bindgen 0.51.0", +] + +[[package]] +name = "wasite" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66fe902b4a6b8028a753d5424909b764ccf79b7a209eac9bf97e59cda9f71a42" +dependencies = [ + "wasi 0.14.7+wasi-0.2.4", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.120" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df52b6d9b87e0c74c9edfa1eb2d9bf85e5d63515474513aa50fa181b3c4f5db1" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "serde", + "wasm-bindgen-macro", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.70" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af934872acec734c2d80e6617bbb5ff4f12b052dd8e6332b0817bce889516084" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.120" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b1041f495fb322e64aca85f5756b2172e35cd459376e67f2a6c9dffcedb103" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.120" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dcd0ff20416988a18ac686d4d4d0f6aae9ebf08a389ff5d29012b05af2a1b41" +dependencies = [ + "bumpalo", + "proc-macro2", + "quote", + "syn 2.0.117", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.120" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49757b3c82ebf16c57d69365a142940b384176c24df52a087fb748e2085359ea" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "wasm-encoder" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap", + "wasm-encoder", + "wasmparser", +] + +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags", + "hashbrown 0.15.5", + "indexmap", + "semver", +] + +[[package]] +name = "web-sys" +version = "0.3.97" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2eadbac71025cd7b0834f20d1fe8472e8495821b4e9801eb0a60bd1f19827602" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki" +version = "0.22.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "webpki-roots" +version = "0.26.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" +dependencies = [ + "webpki-roots 1.0.7", +] + +[[package]] +name = "webpki-roots" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52f5ee44c96cf55f1b349600768e3ece3a8f26010c05265ab73f945bb1a2eb9d" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "whoami" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "998767ef88740d1f5b0682a9c53c24431453923962269c2db68ee43788c5a40d" +dependencies = [ + "libc", + "libredox", + "objc2-system-configuration", + "wasite", + "web-sys", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "527fadee13e0c05939a6a05d5bd6eec6cd2e3dbd648b9f8e447c6518133d8580" +dependencies = [ + "windows-collections", + "windows-core", + "windows-future", + "windows-numerics", +] + +[[package]] +name = "windows-collections" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23b2d95af1a8a14a3c7367e1ed4fc9c20e0a26e79551b1454d72583c97cc6610" +dependencies = [ + "windows-core", +] + +[[package]] +name = "windows-core" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-future" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d6f90251fe18a279739e78025bd6ddc52a7e22f921070ccdc67dde84c605cb" +dependencies = [ + "windows-core", + "windows-link", + "windows-threading", +] + +[[package]] +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] 
+name = "windows-numerics" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e2e40844ac143cdb44aead537bbf727de9b044e107a0f1220392177d15b0f26" +dependencies = [ + "windows-core", + "windows-link", +] + +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", +] + +[[package]] +name = "windows-threading" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3949bd5b99cafdf1c7ca86b43ca564028dfe27d66958f2470940f73d86d75b37" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + +[[package]] +name = "winnow" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ee1708bef14716a11bae175f579062d4554d95be2c6829f518df847b7b3fdd0" +dependencies = [ + "memchr", +] + +[[package]] +name = "wit-bindgen" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +dependencies = [ + "wit-bindgen-rust-macro", +] + +[[package]] +name = "wit-bindgen" +version = "0.57.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ebf944e87a7c253233ad6766e082e3cd714b5d03812acc24c318f549614536e" + +[[package]] +name = "wit-bindgen-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] + +[[package]] +name = 
"wit-bindgen-rust" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap", + "prettyplease", + "syn 2.0.117", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] + +[[package]] +name = "wit-bindgen-rust-macro" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn 2.0.117", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags", + "indexmap", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap", + "log", + "semver", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] + +[[package]] +name = "writeable" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ffae5123b2d3fc086436f8834ae3ab053a283cfac8fe0a0b8eaae044768a4c4" + +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + +[[package]] +name = "x509-cert" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1301e935010a701ae5f8655edc0ad17c44bad3ac5ce8c39185f75453b720ae94" +dependencies = [ + "const-oid 0.9.6", + "der", + "spki", + "tls_codec", +] + +[[package]] +name = "yoke" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abe8c5fda708d9ca3df187cae8bfb9ceda00dd96231bed36e445a1a48e66f9ca" +dependencies = [ + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de844c262c8848816172cef550288e7dc6c7b7814b4ee56b3e1553f275f1858e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.8.48" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eed437bf9d6692032087e337407a86f04cd8d6a16a37199ed57949d415bd68e9" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.48" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70e3cd084b1788766f53af483dd21f93881ff30d7320490ec3ef7526d203bad4" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "zerofrom" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69faa1f2a1ea75661980b013019ed6687ed0e83d069bc1114e2cc74c6c04c4df" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.7" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "11532158c46691caf0f2593ea8358fed6bbf68a0315e80aae9bd41fbade684a1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "zerotrie" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f9152d31db0792fa83f70fb2f83148effb5c1f5b8c7686c3459e361d9bc20bf" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90f911cbc359ab6af17377d242225f4d75119aec87ea711a880987b18cd7b239" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "625dc425cab0dca6dc3c3319506e6593dcb08a9f387ea3b284dbd52a92c40555" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "zmij" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" + +[[package]] +name = "zstd" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "7.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" +dependencies = [ + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.16+zstd.1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" +dependencies = [ + "cc", + "pkg-config", +] diff --git a/iii-database/Cargo.toml b/iii-database/Cargo.toml new file mode 100644 index 00000000..05fa7b6e --- /dev/null +++ b/iii-database/Cargo.toml @@ -0,0 +1,67 @@ +[workspace] + +[package] +name = "iii-database" +version = "1.0.0" +edition = "2021" +publish = false + +[[bin]] +name = "iii-database" +path = "src/main.rs" + +[lib] +path = "src/lib.rs" + +[dependencies] +iii-sdk = "=0.12.0-next.1" +tokio = { version = "1", features = ["rt-multi-thread", "macros", "sync", "signal", "time"] } +serde = { version = "1", features = ["derive"] } +serde_json = "1" +serde_yml = "0.0.12" +schemars = { version = "0.8", features = ["chrono"] } +anyhow = "1" +thiserror = "1" +tracing = "0.1" +tracing-subscriber = { version = "0.3", features = ["fmt", "env-filter"] } +clap = { version = "4", features = ["derive"] } +async-trait = "0.1" +uuid = { version = "1", features = ["v4", "serde"] } +url = "2" +chrono = { version = "0.4", features = ["serde"] } +base64 = "0.22" +bytes = "1" +futures-util = "0.3" + +# Postgres +tokio-postgres = { version = "0.7", features = ["with-chrono-0_4", "with-serde_json-1", 
"with-uuid-1"] } +deadpool-postgres = "0.14" +postgres-types = { version = "0.2", features = ["with-chrono-0_4", "with-serde_json-1"] } +postgres-protocol = "0.6" +tokio-postgres-rustls = "0.13" +# `db-tokio-postgres` registers FromSql/ToSql for NUMERIC. postgres-types +# itself ships no NUMERIC FromSql impl — String accepts only TEXT-family OIDs, +# so without this dep `try_get::<_, String>` on a NUMERIC column fails with +# WrongType. rust_decimal is 96-bit (~28 sig digits); values exceeding that +# error out at decode rather than silently truncating. +rust_decimal = { version = "1", features = ["db-tokio-postgres"] } + +# MySQL — `rustls-tls` adds rustls-backed TLS without pulling in OpenSSL. +mysql_async = { version = "0.34", default-features = false, features = ["minimal-rust", "rustls-tls"] } + +# TLS (shared between postgres and mysql) +rustls = { version = "0.23", default-features = false, features = ["aws_lc_rs", "tls12", "logging"] } +rustls-native-certs = "0.8" +rustls-pemfile = "2" +rustls-pki-types = "1" +rustls-webpki = { version = "0.103", default-features = false, features = ["std", "aws-lc-rs"] } + +# SQLite (sync; wrapped in spawn_blocking) +rusqlite = { version = "0.31", features = ["bundled", "chrono", "column_decltype"] } +r2d2 = "0.8" +r2d2_sqlite = "0.24" + +[dev-dependencies] +tempfile = "3" +serde_json = "1" +tokio = { version = "1", features = ["rt-multi-thread", "macros", "test-util"] } diff --git a/iii-database/README.md b/iii-database/README.md new file mode 100644 index 00000000..bd039d30 --- /dev/null +++ b/iii-database/README.md @@ -0,0 +1,168 @@ +# iii-database + +> Connect to PostgreSQL, MySQL, and SQLite. Run queries, prepared statements, transactions, and subscribe to row-level change feeds. + +| field | value | +|-------|-------| +| version | 1.0.0 | +| type | binary | +| supported_targets | x86_64-apple-darwin, aarch64-apple-darwin, x86_64-unknown-linux-gnu, aarch64-unknown-linux-gnu | +| author | iii | + +## Install + +```sh +iii worker add iii-database@1.0.0 +``` + +## Configure + +Add a single `databases` block to your `config.yaml`. SQLite is the recommended starting point — no server, just a file: + +```yaml +workers: + - name: iii-database + config: + databases: + primary: + url: sqlite:./data/iii.db + pool: + max: 10 + idle_timeout_ms: 30000 + acquire_timeout_ms: 5000 + analytics: + url: ${ANALYTICS_URL} # postgres:// or mysql:// + pool: { max: 5 } +``` + +URL scheme picks the driver: `sqlite:`, `postgres://`, `postgresql://`, `mysql://`. + +### TLS (postgres + mysql) + +Postgres and mysql connections default to **`tls.mode: require`** — TLS handshake required, certificate chain validated against the system trust store, hostname verification skipped (matches libpq's `sslmode=require`). Override per-database: + +```yaml +databases: + primary: + url: postgres://app@db.example.com:5432/app + tls: + mode: verify-full # disable | require | verify-full (default: require) + ca_cert: /etc/ssl/internal-ca.pem # optional; replaces the system trust store + local: + url: postgres://dev@localhost:5432/dev + tls: + mode: disable # plaintext, local development only +``` + +- **`disable`** — plaintext. Local dev only. +- **`require`** (default) — encrypted; cert chain validated; hostname is **not** verified. Catches passive eavesdropping, doesn't catch a determined MITM with their own valid-chain cert. +- **`verify-full`** — encrypted; cert chain validated; cert hostname must match the URL host. Production default for managed services (RDS, Neon, Supabase). 
+ +`ca_cert` lets you point at a private CA bundle for self-hosted databases. When set, it **replaces** the system trust store rather than extending it. + +SQLite ignores the `tls` block (local-file driver). + +## Quick start (SQLite) + +```ts +import { call } from 'iii-sdk' + +await call('iii-database::execute', { + db: 'primary', + sql: 'CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, email TEXT)' +}) + +await call('iii-database::execute', { + db: 'primary', + sql: 'INSERT INTO users (email) VALUES (?), (?)', + params: ['a@x', 'b@x'] +}) + +const { rows } = await call('iii-database::query', { + db: 'primary', + sql: 'SELECT id, email FROM users ORDER BY id' +}) +``` + +## Functions + +| Function | Purpose | +|---|---| +| `iii-database::query` | Read SQL. Returns `{ rows, row_count, columns }`. | +| `iii-database::execute` | Write SQL. Returns `{ affected_rows, last_insert_id, returned_rows }`.
**`last_insert_id` semantics:** SQLite/MySQL surface the engine's `last_insert_rowid()` / `LAST_INSERT_ID()` (only populated for INSERT). Postgres has no equivalent — `last_insert_id` is set from the **first column of the first RETURNING row**, so put your PK first: `RETURNING id, name`, not `RETURNING name, id`. |
+| `iii-database::prepareStatement` | Pin a connection and return `{ handle: { id, expires_at } }`. |
+| `iii-database::runStatement` | Run a previously-prepared handle. (No `timeout_ms` — uses the pinned connection's session lifetime; configure via `ttl_seconds` on `prepareStatement`.) |
+| `iii-database::transaction` | Atomic sequence; rolls back on first failure. |
+
+## Triggers
+
+### `iii-database::query-poll`
+Polls a SQL query at a fixed interval, dispatches new rows, and persists a cursor inside the watched database in `__iii_cursors`.
+
+```yaml
+triggers:
+  - type: iii-database::query-poll
+    config:
+      db: primary
+      sql: SELECT id, body FROM outbox WHERE id > COALESCE(?, 0) ORDER BY id LIMIT 50
+      interval_ms: 1000
+      cursor_column: id
+```
+
+The trigger binds the cursor as the single positional parameter (`?` for SQLite/MySQL, `$1` for Postgres). On the first poll the cursor binds as `NULL`.
+
+The dispatched event includes a `cursor` field that is **always serialized as a JSON string**, regardless of the underlying column type. Callers must parse it (e.g. `parseInt(event.cursor)`) when expecting numeric comparison.
+
+### `iii-database::row-change`
+Postgres only. Streams row-level changes via logical replication (`pgoutput`).
+
+> **NOTE (v1.0.0):** Event dispatch is not yet functional. The publication and replication slot are created at startup, but the streaming decode loop is stubbed pending an upstream `tokio-postgres` replication API release. Operators can pre-provision slots and publications now; events will start flowing in a later release.
+
+```yaml
+triggers:
+  - type: iii-database::row-change
+    config:
+      db: primary
+      schema: public
+      tables: [orders, payments]
+```
+
+The worker derives slot/publication names from `trigger_id`: `iii_slot_<sanitized>_<8hex>` and `iii_pub_<sanitized>_<8hex>`, where the 8-hex-char suffix is an FNV-1a-32 hash of the original `trigger_id`. The hash guarantees that two distinct trigger_ids (e.g. `orders-v1` vs `orders.v1`) produce distinct names even though both sanitize to `orders_v1`. The sanitized prefix is truncated at 40 chars so the final name fits in Postgres' 63-byte slot-name limit. Operators can override slot/publication names explicitly with `slot_name`/`publication_name`. Drop them with `pg_drop_replication_slot('<slot_name>')` and `DROP PUBLICATION <publication_name>` if the worker is decommissioned without graceful shutdown.
+
+## Errors
+
+Returned `IIIError::Handler` bodies carry a stable `code` field:
+
+| Code | Meaning |
+|---|---|
+| `POOL_TIMEOUT` | Pool acquire exceeded `acquire_timeout_ms`. |
+| `QUERY_TIMEOUT` | Query exceeded `timeout_ms`. |
+| `STATEMENT_NOT_FOUND` | Handle expired or unknown — re-prepare. |
+| `UNKNOWN_DB` | `db` parameter doesn't match any configured database. |
+| `INVALID_PARAM` | JSON value couldn't be coerced for the target driver. |
+| `DRIVER_ERROR` | Wraps underlying driver error with `driver` and `inner_code` (nullable). `inner_code` format is per-driver: Postgres = SQLSTATE 5-char string (e.g. `42P01`), MySQL = server error number as string, SQLite = `rusqlite::ErrorCode` debug name. |
+| `REPLICATION_SLOT_EXISTS` | Startup-only: another instance owns the slot.
| +| `UNSUPPORTED` | Operation not supported on the chosen driver. | +| `CONFIG_ERROR` | Config parse, pool init, or trigger misconfiguration (e.g. `cursor_column` not in result). | + +## Driver compatibility + +A few operations are no-ops on certain drivers. They emit a `tracing::warn!` rather than an error: + +| Operation | SQLite | Postgres | MySQL | +|---|---|---|---| +| `execute` with `returning: [...]` | ✓ | ✓ | warn-once + ignore | +| `transaction` `isolation: read_committed` / `repeatable_read` | warn + use serializable | ✓ | ✓ | +| `transaction` `isolation: serializable` | ✓ (`BEGIN IMMEDIATE`) | ✓ | ✓ | +| `iii-database::row-change` trigger | — | setup-only in v1.0.0 (see above) | — | + +## Troubleshooting + +- **Pool exhausted (`POOL_TIMEOUT`)**: bump `pool.max` or shorten the longest-running query. Live `prepareStatement` handles each pin one connection from the pool until they expire. +- **`STATEMENT_NOT_FOUND` from a long-lived handle**: handles are bounded to `ttl_seconds` (default 3600, max 86400). Re-prepare and retry. +- **SQLite write contention with `query-poll`**: enable WAL mode in your DB: `PRAGMA journal_mode=WAL;` once after creation. +- **Replication slot already exists**: another instance is consuming the slot. Either reuse the slot name or run `SELECT pg_drop_replication_slot('')`. + +## License + +MIT. diff --git a/iii-database/config.yaml.example b/iii-database/config.yaml.example new file mode 100644 index 00000000..5ed41b7e --- /dev/null +++ b/iii-database/config.yaml.example @@ -0,0 +1,10 @@ +workers: + - name: database + config: + databases: + primary: + url: sqlite:./data/iii.db + pool: + max: 10 + idle_timeout_ms: 30000 + acquire_timeout_ms: 5000 diff --git a/iii-database/iii.worker.yaml b/iii-database/iii.worker.yaml new file mode 100644 index 00000000..d764755e --- /dev/null +++ b/iii-database/iii.worker.yaml @@ -0,0 +1,7 @@ +iii: v1 +name: iii-database +language: rust +deploy: binary +manifest: Cargo.toml +bin: iii-database +description: Talk to PostgreSQL, MySQL, and SQLite from iii — query, execute, transactions, prepared statements, and change feeds. diff --git a/iii-database/src/config.rs b/iii-database/src/config.rs new file mode 100644 index 00000000..b9e2da54 --- /dev/null +++ b/iii-database/src/config.rs @@ -0,0 +1,431 @@ +//! Configuration parsing for the database worker. +//! +//! The worker accepts a YAML file with a `databases:` map keyed by name. +//! Each entry has a `url` (whose scheme picks the driver) and an optional +//! `pool` block. Environment variables in the form `${NAME}` are expanded +//! against the process environment. + +use serde::Deserialize; +use std::collections::HashMap; + +/// Top-level worker config (the contents of `config.yaml`, or the `config` +/// block of `iii-config.yaml` when running embedded). +#[derive(Debug, Clone, Deserialize)] +pub struct WorkerConfig { + #[serde(default)] + pub databases: HashMap, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct DatabaseConfig { + pub url: String, + #[serde(default)] + pub pool: PoolConfig, + #[serde(default)] + pub tls: TlsConfig, + /// Populated by [`WorkerConfig::from_yaml`] from the URL scheme. + /// Do not construct `DatabaseConfig` directly without calling + /// `detect_driver` — the default `Sqlite` value will silently mismatch + /// the URL. + #[serde(skip)] + pub driver: DriverKind, +} + +/// TLS settings for a single database. Applies to postgres and mysql. +/// Sqlite is local-file and ignores this block. 
+/// +/// Default is `mode: require` — TLS handshake required, certificate chain +/// validated against the system trust store, hostname verification skipped +/// (matching libpq's `sslmode=require` semantics). Use `mode: verify-full` +/// to additionally verify the certificate hostname matches the URL host, +/// and `mode: disable` to opt out of TLS entirely (local-dev only). +#[derive(Debug, Clone, Default, Deserialize)] +pub struct TlsConfig { + #[serde(default)] + pub mode: TlsMode, + /// Optional path to a PEM file containing one or more CA certificates. + /// When set, the system trust store is **replaced** by these certs + /// (not extended). Use this for self-hosted databases with a private CA. + #[serde(default)] + pub ca_cert: Option, +} + +#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Deserialize)] +#[serde(rename_all = "kebab-case")] +pub enum TlsMode { + /// No TLS. Plaintext connection. Local-dev only. + Disable, + /// TLS handshake required; certificate chain validated; hostname NOT + /// verified. Matches libpq's `sslmode=require`. The default. + #[default] + Require, + /// TLS handshake required; certificate chain validated; certificate + /// hostname must match the URL host. Matches libpq's `sslmode=verify-full`. + #[serde(rename = "verify-full")] + VerifyFull, +} + +#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)] +pub enum DriverKind { + Postgres, + Mysql, + #[default] + Sqlite, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct PoolConfig { + #[serde(default = "default_pool_max")] + pub max: u32, + #[serde(default = "default_idle_timeout_ms")] + pub idle_timeout_ms: u64, + #[serde(default = "default_acquire_timeout_ms")] + pub acquire_timeout_ms: u64, +} + +impl Default for PoolConfig { + fn default() -> Self { + Self { + max: default_pool_max(), + idle_timeout_ms: default_idle_timeout_ms(), + acquire_timeout_ms: default_acquire_timeout_ms(), + } + } +} + +fn default_pool_max() -> u32 { + 10 +} +fn default_idle_timeout_ms() -> u64 { + 30_000 +} +fn default_acquire_timeout_ms() -> u64 { + 5_000 +} + +impl WorkerConfig { + pub fn from_yaml(yaml: &str) -> Result { + let expanded = expand_env(yaml); + let mut cfg: WorkerConfig = + serde_yml::from_str(&expanded).map_err(|e| format!("yaml parse: {e}"))?; + if cfg.databases.is_empty() { + return Err("config must declare at least one database".into()); + } + for (name, db) in cfg.databases.iter_mut() { + db.driver = detect_driver(&db.url).ok_or_else(|| { + format!( + "unknown url scheme for db `{name}`: {}", + redact_url(&db.url) + ) + })?; + } + Ok(cfg) + } + + pub fn from_file(path: &str) -> Result { + let raw = std::fs::read_to_string(path).map_err(|e| format!("read {path}: {e}"))?; + Self::from_yaml(&raw) + } +} + +/// Strip the userinfo from a URL-like string for safe logging. +/// +/// Best-effort: malformed or non-URL forms (e.g. `sqlite::memory:`) are +/// returned unchanged because the `url` crate cannot parse them and they +/// cannot carry credentials anyway. Successfully parsed URLs have their +/// password removed and any non-empty username replaced with `***`. +pub fn redact_url(input: &str) -> String { + use url::Url; + if let Ok(parsed) = Url::parse(input) { + let mut redacted = parsed; + if redacted.password().is_some() { + let _ = redacted.set_password(None); + } + if !redacted.username().is_empty() { + let _ = redacted.set_username("***"); + } + return redacted.into(); + } + input.to_string() +} + +/// Validate a SQL identifier component (table name, column name, schema, etc.). 
+/// Allows ASCII letters, digits, underscore. Must start with letter or underscore. +/// Max 63 chars (Postgres NAMEDATALEN - 1). +/// +/// This is the chokepoint for any operator-supplied identifier that gets +/// interpolated into a SQL string via `format!()` (replication slots, +/// publication names, schema/table names, cursor table). Validation is +/// strict ASCII because the alternative — quoting and escaping per-driver — +/// is fragile and the v1.0 surface does not need unicode identifiers. +pub fn validate_sql_identifier(s: &str) -> Result<(), String> { + if s.is_empty() { + return Err("identifier is empty".into()); + } + if s.len() > 63 { + return Err(format!("identifier `{s}` exceeds 63 characters")); + } + let mut chars = s.chars(); + let first = chars.next().unwrap(); + if !(first.is_ascii_alphabetic() || first == '_') { + return Err(format!( + "identifier `{s}` must start with letter or underscore" + )); + } + for c in chars { + if !(c.is_ascii_alphanumeric() || c == '_') { + return Err(format!( + "identifier `{s}` contains invalid character `{c}` (only [a-zA-Z0-9_] allowed)" + )); + } + } + Ok(()) +} + +fn detect_driver(url: &str) -> Option { + let lower = url.to_ascii_lowercase(); + if lower.starts_with("postgres://") || lower.starts_with("postgresql://") { + Some(DriverKind::Postgres) + } else if lower.starts_with("mysql://") { + Some(DriverKind::Mysql) + } else if lower.starts_with("sqlite:") { + Some(DriverKind::Sqlite) + } else { + None + } +} + +/// Expand `${NAME}` occurrences against the process environment. +/// Unknown variables expand to the empty string and emit a tracing warning. +/// Non-ASCII content outside `${...}` markers is preserved verbatim. +fn expand_env(input: &str) -> String { + let mut out = String::with_capacity(input.len()); + let mut rest = input; + while let Some(start) = rest.find("${") { + // Push the prefix verbatim (UTF-8-safe slice — start is a char boundary + // because it points at an ASCII `$`). + out.push_str(&rest[..start]); + let after = &rest[start + 2..]; + match after.find('}') { + Some(end) => { + let name = &after[..end]; + match std::env::var(name) { + Ok(v) => out.push_str(&v), + Err(_) => { + tracing::warn!(var = %name, "config references undefined env var"); + } + } + rest = &after[end + 1..]; + } + None => { + // Unterminated `${`; treat as literal. 
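+                // e.g. an input like `url: "postgres://${HOST"` comes back
+                // verbatim: the prefix, then a literal `${`, then the tail.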
+ out.push_str("${"); + rest = after; + } + } + } + out.push_str(rest); + out +} + +#[cfg(test)] +mod tests { + use super::*; + + fn cfg(yaml: &str) -> WorkerConfig { + WorkerConfig::from_yaml(yaml).unwrap() + } + + #[test] + fn parses_single_sqlite_database() { + let yaml = r#" +databases: + primary: + url: sqlite:./data/iii.db +"#; + let c = cfg(yaml); + assert_eq!(c.databases.len(), 1); + let db = &c.databases["primary"]; + assert!(matches!(db.driver, DriverKind::Sqlite)); + assert_eq!(db.url, "sqlite:./data/iii.db"); + assert_eq!(db.pool.max, 10); + assert_eq!(db.pool.idle_timeout_ms, 30_000); + assert_eq!(db.pool.acquire_timeout_ms, 5_000); + } + + #[test] + fn parses_postgres_url() { + let c = cfg("databases:\n p:\n url: postgres://u@h/db\n"); + assert!(matches!(c.databases["p"].driver, DriverKind::Postgres)); + } + + #[test] + fn parses_postgresql_alias() { + let c = cfg("databases:\n p:\n url: postgresql://u@h/db\n"); + assert!(matches!(c.databases["p"].driver, DriverKind::Postgres)); + } + + #[test] + fn parses_mysql_url() { + let c = cfg("databases:\n m:\n url: mysql://u@h/db\n"); + assert!(matches!(c.databases["m"].driver, DriverKind::Mysql)); + } + + #[test] + fn unknown_url_scheme_errors() { + let err = + WorkerConfig::from_yaml("databases:\n x:\n url: oracle://h/db\n").unwrap_err(); + assert!(err.contains("unknown url scheme"), "got: {err}"); + } + + #[test] + fn pool_overrides_take_effect() { + // URL is quoted because `sqlite::memory:` contains a trailing colon + // that YAML would otherwise interpret as a nested mapping key. + let yaml = r#" +databases: + primary: + url: "sqlite::memory:" + pool: + max: 25 + idle_timeout_ms: 1000 + acquire_timeout_ms: 250 +"#; + let c = cfg(yaml); + let p = &c.databases["primary"].pool; + assert_eq!(p.max, 25); + assert_eq!(p.idle_timeout_ms, 1000); + assert_eq!(p.acquire_timeout_ms, 250); + } + + #[test] + fn env_var_expansion_in_url() { + std::env::set_var("DATABASE_WORKER_TEST_URL", "sqlite::memory:"); + // Quote the interpolation site so the expanded value (which ends in + // a colon) is unambiguously a YAML scalar. 
+ let yaml = "databases:\n p:\n url: \"${DATABASE_WORKER_TEST_URL}\"\n"; + let c = cfg(yaml); + assert_eq!(c.databases["p"].url, "sqlite::memory:"); + std::env::remove_var("DATABASE_WORKER_TEST_URL"); + } + + #[test] + fn empty_databases_block_errors() { + let err = WorkerConfig::from_yaml("databases: {}\n").unwrap_err(); + assert!(err.contains("at least one database"), "got: {err}"); + } + + #[test] + fn env_var_expansion_multiple_in_one_url() { + std::env::set_var("DBW_TEST_USER", "alice"); + std::env::set_var("DBW_TEST_HOST", "host.example"); + std::env::set_var("DBW_TEST_DB", "shop"); + let yaml = "databases:\n p:\n url: \"postgres://${DBW_TEST_USER}@${DBW_TEST_HOST}/${DBW_TEST_DB}\"\n"; + let c = cfg(yaml); + assert_eq!(c.databases["p"].url, "postgres://alice@host.example/shop"); + std::env::remove_var("DBW_TEST_USER"); + std::env::remove_var("DBW_TEST_HOST"); + std::env::remove_var("DBW_TEST_DB"); + } + + #[test] + fn validate_sql_identifier_accepts_normal_names() { + assert!(validate_sql_identifier("orders").is_ok()); + assert!(validate_sql_identifier("_iii_cursors").is_ok()); + assert!(validate_sql_identifier("users_2024").is_ok()); + assert!(validate_sql_identifier("A").is_ok()); + assert!(validate_sql_identifier("_").is_ok()); + } + + #[test] + fn validate_sql_identifier_rejects_empty() { + let err = validate_sql_identifier("").unwrap_err(); + assert!(err.contains("empty"), "got: {err}"); + } + + #[test] + fn validate_sql_identifier_rejects_digit_first() { + let err = validate_sql_identifier("1users").unwrap_err(); + assert!(err.contains("start with"), "got: {err}"); + } + + #[test] + fn validate_sql_identifier_rejects_injection_chars() { + assert!(validate_sql_identifier("orders; DROP").is_err()); + assert!(validate_sql_identifier("orders'--").is_err()); + assert!(validate_sql_identifier("orders\"").is_err()); + assert!(validate_sql_identifier("a b").is_err()); + assert!(validate_sql_identifier("a.b").is_err()); + } + + #[test] + fn validate_sql_identifier_rejects_too_long() { + let s: String = "a".repeat(64); + let err = validate_sql_identifier(&s).unwrap_err(); + assert!(err.contains("exceeds 63"), "got: {err}"); + // Boundary: 63 is OK. + let ok: String = "a".repeat(63); + assert!(validate_sql_identifier(&ok).is_ok()); + } + + #[test] + fn redact_url_strips_password() { + assert_eq!( + redact_url("postgres://user:pass@host/db"), + "postgres://***@host/db" + ); + assert_eq!( + redact_url("mysql://admin:s3cret@127.0.0.1:3306/test"), + "mysql://***@127.0.0.1:3306/test" + ); + } + + #[test] + fn redact_url_handles_no_password() { + assert_eq!( + redact_url("postgres://user@host/db"), + "postgres://***@host/db" + ); + } + + #[test] + fn redact_url_handles_no_userinfo() { + let result = redact_url("postgres://host/db"); + assert!(!result.contains('@'), "no userinfo should remain: {result}"); + } + + #[test] + fn redact_url_passthrough_sqlite() { + // The `url` crate does not parse `sqlite:` URIs (no authority); the + // helper falls back to returning the input unchanged. Either way + // these forms cannot carry credentials. + assert_eq!(redact_url("sqlite::memory:"), "sqlite::memory:"); + let result = redact_url("sqlite:./data/iii.db"); + assert!( + !result.contains("user:"), + "no credentials present: {result}" + ); + } + + #[test] + fn redact_url_unknown_scheme_passthrough() { + // Malformed/unknown schemes round-trip unchanged. The caller is + // responsible for not leaking them in error messages, but redact_url + // is best-effort. 
+ assert_eq!(redact_url("not-a-url"), "not-a-url"); + } + + #[test] + fn expand_env_preserves_unicode_outside_markers() { + // Direct unit test of the expand_env helper to guard against the + // "byte-iteration mojibake" regression. The helper is private; we + // exercise it via a YAML containing a non-ASCII comment. + let yaml = "# café 日本語\ndatabases:\n p:\n url: \"sqlite::memory:\"\n"; + // Note: serde_yml strips comments, but expand_env runs on the raw + // text *before* parsing. If the helper corrupted UTF-8, the parse + // would fail because the multibyte sequence would be mangled into + // an invalid byte run inside the string we hand to serde_yml. + let c = cfg(yaml); + assert!(matches!(c.databases["p"].driver, DriverKind::Sqlite)); + } +} diff --git a/iii-database/src/cursor.rs b/iii-database/src/cursor.rs new file mode 100644 index 00000000..a711d72d --- /dev/null +++ b/iii-database/src/cursor.rs @@ -0,0 +1,217 @@ +//! __iii_cursors table CRUD. +//! +//! This table tracks the last successfully-acked cursor for each query-poll +//! trigger. It is created on first poll inside the watched database. + +use crate::driver::{self}; +use crate::error::DbError; +use crate::pool::Pool; +use crate::value::JsonParam; + +/// Default table name; overridable per-trigger via `cursor_table`. +pub const DEFAULT_CURSOR_TABLE: &str = "__iii_cursors"; + +/// Per-driver quoted form of the `cursor` column. `cursor` is a reserved +/// word in MySQL 8 (and standard SQL/PSM); without quoting, the table DDL +/// fails with ERROR 1064 syntax error. Postgres accepts unquoted but we +/// quote defensively for parity. SQLite needs no quoting but tolerates it. +fn cursor_col(pool: &Pool) -> &'static str { + match pool { + Pool::Postgres(_) => "\"cursor\"", + Pool::Mysql(_) => "`cursor`", + Pool::Sqlite(_) => "cursor", + } +} + +/// Issue `CREATE TABLE IF NOT EXISTS` for the cursor table. Called on every +/// poll tick. Intentionally NOT cached: a process-wide cache silently lies +/// when the table is dropped externally (e.g. test harness SCHEMA_RESET, +/// operator running `DROP TABLE __iii_cursors` for any reason), causing +/// every subsequent tick on the same worker process to silently fail +/// because read_cursor hits the missing table while the cache says +/// "ensured". `CREATE TABLE IF NOT EXISTS` against an existing table is a +/// system-catalog check on every driver — measured at sub-millisecond on +/// Postgres/MySQL/SQLite. At the default 1s polling interval that overhead +/// is invisible; the cache traded too much correctness for too little +/// throughput. 
+pub async fn ensure_table(pool: &Pool, table: &str) -> Result<(), DbError> { + let cursor = cursor_col(pool); + let sql = match pool { + Pool::Postgres(_) => format!( + "CREATE TABLE IF NOT EXISTS {table} (\ + trigger_id TEXT PRIMARY KEY, \ + {cursor} TEXT, \ + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW())" + ), + Pool::Mysql(_) => format!( + "CREATE TABLE IF NOT EXISTS {table} (\ + trigger_id VARCHAR(191) PRIMARY KEY, \ + {cursor} TEXT, \ + updated_at DATETIME(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6))" + ), + Pool::Sqlite(_) => format!( + "CREATE TABLE IF NOT EXISTS {table} (\ + trigger_id TEXT PRIMARY KEY, \ + {cursor} TEXT, \ + updated_at TEXT NOT NULL)" + ), + }; + match pool { + Pool::Postgres(p) => driver::postgres::execute(p, &sql, &[], &[]) + .await + .map(|_| ()), + Pool::Mysql(p) => driver::mysql::execute(p, &sql, &[], &[]).await.map(|_| ()), + Pool::Sqlite(p) => driver::sqlite::execute(p, &sql, &[], &[]).await.map(|_| ()), + } +} + +pub async fn read_cursor( + pool: &Pool, + table: &str, + trigger_id: &str, +) -> Result, DbError> { + let placeholder = match pool { + Pool::Postgres(_) => "$1", + _ => "?", + }; + let cursor = cursor_col(pool); + let sql = format!("SELECT {cursor} FROM {table} WHERE trigger_id = {placeholder} LIMIT 1"); + let result = match pool { + Pool::Postgres(p) => { + driver::postgres::query(p, &sql, &[JsonParam::Text(trigger_id.into())], 30_000).await? + } + Pool::Mysql(p) => { + driver::mysql::query(p, &sql, &[JsonParam::Text(trigger_id.into())], 30_000).await? + } + Pool::Sqlite(p) => { + driver::sqlite::query(p, &sql, &[JsonParam::Text(trigger_id.into())], 30_000).await? + } + }; + Ok(result + .rows + .first() + .and_then(|r| r.0.first().map(|v| v.to_json())) + .and_then(|v| v.as_str().map(|s| s.to_string()))) +} + +pub async fn write_cursor( + pool: &Pool, + table: &str, + trigger_id: &str, + cursor: &str, +) -> Result<(), DbError> { + // `col` rather than `cursor` here: the function parameter is also named + // `cursor` (the cursor value), so we must avoid shadowing it — otherwise + // the bind below sends the column-name string instead of the value. 
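+    // (Concretely: with the shadowing bug, the row's cursor column would end
+    // up holding the quoted identifier text, e.g. `"cursor"` on Postgres,
+    // instead of the actual cursor value.)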
+ let col = cursor_col(pool); + let sql = match pool { + Pool::Postgres(_) => format!( + "INSERT INTO {table} (trigger_id, {col}, updated_at) VALUES ($1, $2, NOW()) \ + ON CONFLICT (trigger_id) DO UPDATE SET {col} = EXCLUDED.{col}, updated_at = NOW()" + ), + Pool::Mysql(_) => format!( + "INSERT INTO {table} (trigger_id, {col}, updated_at) VALUES (?, ?, CURRENT_TIMESTAMP(6)) \ + ON DUPLICATE KEY UPDATE {col} = VALUES({col}), updated_at = CURRENT_TIMESTAMP(6)" + ), + Pool::Sqlite(_) => format!( + "INSERT INTO {table} (trigger_id, {col}, updated_at) VALUES (?, ?, datetime('now')) \ + ON CONFLICT(trigger_id) DO UPDATE SET {col} = excluded.{col}, updated_at = datetime('now')" + ), + }; + let params = vec![ + JsonParam::Text(trigger_id.into()), + JsonParam::Text(cursor.into()), + ]; + match pool { + Pool::Postgres(p) => driver::postgres::execute(p, &sql, ¶ms, &[]) + .await + .map(|_| ()), + Pool::Mysql(p) => driver::mysql::execute(p, &sql, ¶ms, &[]) + .await + .map(|_| ()), + Pool::Sqlite(p) => driver::sqlite::execute(p, &sql, ¶ms, &[]) + .await + .map(|_| ()), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::config::PoolConfig; + use crate::pool::SqlitePool; + + fn pool() -> SqlitePool { + SqlitePool::new("sqlite::memory:", &PoolConfig::default()).unwrap() + } + + #[tokio::test(flavor = "multi_thread")] + async fn ensure_table_creates_in_sqlite() { + let p = Pool::Sqlite(pool()); + ensure_table(&p, DEFAULT_CURSOR_TABLE).await.unwrap(); + // running again is idempotent + ensure_table(&p, DEFAULT_CURSOR_TABLE).await.unwrap(); + } + + #[tokio::test(flavor = "multi_thread")] + async fn read_returns_none_for_unknown_trigger() { + let p = Pool::Sqlite(pool()); + ensure_table(&p, DEFAULT_CURSOR_TABLE).await.unwrap(); + let v = read_cursor(&p, DEFAULT_CURSOR_TABLE, "trig-1") + .await + .unwrap(); + assert!(v.is_none()); + } + + #[tokio::test(flavor = "multi_thread")] + async fn ensure_table_survives_external_drop() { + // Regression: a process-wide ensure_table cache previously made the + // second call a no-op, but if the table was dropped externally + // between calls (test harness SCHEMA_RESET, operator action, ...) + // every subsequent poll silently failed. Removing the cache makes + // ensure_table idempotent under external drops. + let p = Pool::Sqlite(pool()); + let table = "test_drop_resilience_xj"; + ensure_table(&p, table).await.unwrap(); + // External drop simulation. + match &p { + Pool::Sqlite(sp) => { + crate::driver::sqlite::execute( + sp, + &format!("DROP TABLE IF EXISTS {table}"), + &[], + &[], + ) + .await + .unwrap(); + } + _ => unreachable!(), + } + // Second call must re-create — not silently believe it's already done. + ensure_table(&p, table).await.unwrap(); + // Verify the table exists by writing+reading a cursor row. 
+        write_cursor(&p, table, "trig-survive", "1").await.unwrap();
+        let v = read_cursor(&p, table, "trig-survive").await.unwrap();
+        assert_eq!(v.as_deref(), Some("1"));
+    }
+
+    #[tokio::test(flavor = "multi_thread")]
+    async fn write_then_read_round_trips() {
+        let p = Pool::Sqlite(pool());
+        ensure_table(&p, DEFAULT_CURSOR_TABLE).await.unwrap();
+        write_cursor(&p, DEFAULT_CURSOR_TABLE, "trig-1", "42")
+            .await
+            .unwrap();
+        let v = read_cursor(&p, DEFAULT_CURSOR_TABLE, "trig-1")
+            .await
+            .unwrap();
+        assert_eq!(v.as_deref(), Some("42"));
+        write_cursor(&p, DEFAULT_CURSOR_TABLE, "trig-1", "100")
+            .await
+            .unwrap();
+        let v = read_cursor(&p, DEFAULT_CURSOR_TABLE, "trig-1")
+            .await
+            .unwrap();
+        assert_eq!(v.as_deref(), Some("100"));
+    }
+}
diff --git a/iii-database/src/driver/mod.rs b/iii-database/src/driver/mod.rs
new file mode 100644
index 00000000..a40b903e
--- /dev/null
+++ b/iii-database/src/driver/mod.rs
@@ -0,0 +1,54 @@
+//! Shared driver-facing types. Each driver (postgres / mysql / sqlite)
+//! exposes async functions returning these types so the dispatch layer in
+//! `pool::Pool` is uniform.
+
+use crate::value::{JsonParam, RowValue};
+use schemars::JsonSchema;
+use serde::Serialize;
+
+pub mod mysql;
+pub mod postgres;
+pub mod sqlite;
+
+#[derive(Debug, Clone, Serialize, JsonSchema)]
+pub struct ColumnMeta {
+    pub name: String,
+    #[serde(rename = "type")]
+    pub ty: String,
+}
+
+#[derive(Debug, Clone)]
+pub struct Row(pub Vec<RowValue>);
+
+#[derive(Debug)]
+pub struct QueryResult {
+    pub columns: Vec<ColumnMeta>,
+    pub rows: Vec<Row>,
+}
+
+#[derive(Debug, Default)]
+pub struct ExecuteResult {
+    pub affected_rows: u64,
+    pub last_insert_id: Option<String>,
+    pub returned_rows: Vec<Row>,
+    pub returned_columns: Vec<ColumnMeta>,
+}
+
+#[derive(Debug, Clone, Copy)]
+pub enum Isolation {
+    ReadCommitted,
+    RepeatableRead,
+    Serializable,
+}
+
+#[derive(Debug)]
+pub struct TxStatement {
+    pub sql: String,
+    pub params: Vec<JsonParam>,
+}
+
+#[derive(Debug)]
+pub struct TxStepResult {
+    pub affected_rows: u64,
+    pub rows: Vec<Row>,
+}
diff --git a/iii-database/src/driver/mysql.rs b/iii-database/src/driver/mysql.rs
new file mode 100644
index 00000000..39771c77
--- /dev/null
+++ b/iii-database/src/driver/mysql.rs
@@ -0,0 +1,479 @@
+//! MySQL driver: query/execute/transaction/prepare.
+
+use crate::driver::{
+    ColumnMeta, ExecuteResult, Isolation, QueryResult, Row, TxStatement, TxStepResult,
+};
+use crate::error::DbError;
+use crate::pool::MysqlPool;
+use crate::value::{JsonParam, RowValue};
+use mysql_async::prelude::Queryable;
+use mysql_async::{params::Params, Value as MyValue};
+use std::time::Duration;
+
+pub async fn query(
+    pool: &MysqlPool,
+    sql: &str,
+    params: &[JsonParam],
+    timeout_ms: u64,
+) -> Result<QueryResult, DbError> {
+    let mut conn = pool.acquire().await?;
+    let bound = bind_params(params);
+
+    // mysql_async's `exec_iter` resolves once the server returns column
+    // metadata; row payloads are streamed lazily by `collect()`. Wrapping
+    // only `exec_iter` would leave row collection unbounded — a slow-row
+    // query (e.g., per-row SLEEP, or a slow scan) would silently bypass
+    // `timeout_ms`. Wrap the whole prepare→stream pipeline.
+ let work = async { + let mut result = conn.exec_iter(sql, bound).await.map_err(map_err)?; + let cols: Vec = result + .columns_ref() + .iter() + .map(|c| ColumnMeta { + name: c.name_str().to_string(), + ty: format!("{:?}", c.column_type()), + }) + .collect(); + let raw_rows: Vec = result.collect().await.map_err(map_err)?; + Ok::<_, DbError>((cols, raw_rows)) + }; + let (cols, raw_rows) = tokio::time::timeout(Duration::from_millis(timeout_ms), work) + .await + .map_err(|_| DbError::QueryTimeout { + db: "(mysql)".into(), + timeout_ms, + })??; + + let mut out_rows: Vec = Vec::with_capacity(raw_rows.len()); + for row in raw_rows { + let cells = row_cells(&row); + out_rows.push(Row(cells)); + } + Ok(QueryResult { + columns: cols, + rows: out_rows, + }) +} + +fn bind_params(params: &[JsonParam]) -> Params { + let v: Vec = params.iter().map(json_param_to_my).collect(); + if v.is_empty() { + Params::Empty + } else { + Params::Positional(v) + } +} + +fn json_param_to_my(p: &JsonParam) -> MyValue { + match p { + JsonParam::Null => MyValue::NULL, + JsonParam::Bool(b) => MyValue::Int(if *b { 1 } else { 0 }), + JsonParam::Int(i) => MyValue::Int(*i), + JsonParam::Float(f) => MyValue::Double(*f), + JsonParam::Text(s) => MyValue::Bytes(s.as_bytes().to_vec()), + JsonParam::Json(v) => MyValue::Bytes(v.to_string().into_bytes()), + } +} + +fn row_cells(row: &mysql_async::Row) -> Vec { + let mut cells = Vec::with_capacity(row.columns_ref().len()); + for i in 0..row.columns_ref().len() { + let v: MyValue = row.as_ref(i).cloned().unwrap_or(MyValue::NULL); + cells.push(my_to_row_value(v)); + } + cells +} + +fn my_to_row_value(v: MyValue) -> RowValue { + match v { + MyValue::NULL => RowValue::Null, + MyValue::Int(i) => RowValue::Int(i), + MyValue::UInt(u) => { + if u <= i64::MAX as u64 { + RowValue::Int(u as i64) + } else { + RowValue::Decimal(u.to_string()) + } + } + MyValue::Float(f) => RowValue::Float(f as f64), + MyValue::Double(f) => RowValue::Float(f), + MyValue::Bytes(b) => match std::str::from_utf8(&b) { + Ok(s) => RowValue::Text(s.to_string()), + Err(_) => RowValue::Bytes(b), + }, + MyValue::Date(y, mo, d, h, mi, s, _us) => { + use chrono::{TimeZone, Utc}; + match Utc.with_ymd_and_hms(y as i32, mo as u32, d as u32, h as u32, mi as u32, s as u32) + { + chrono::LocalResult::Single(t) => RowValue::Timestamp(t), + _ => RowValue::Null, + } + } + MyValue::Time(_, _, _, _, _, _) => RowValue::Text(format!("{v:?}")), + } +} + +pub(crate) fn map_err(e: mysql_async::Error) -> DbError { + let code = match &e { + mysql_async::Error::Server(s) => Some(s.code.to_string()), + _ => None, + }; + DbError::DriverError { + driver: "mysql".into(), + code, + message: e.to_string(), + failed_index: None, + } +} + +pub async fn execute( + pool: &MysqlPool, + sql: &str, + params: &[JsonParam], + returning: &[String], +) -> Result { + if !returning.is_empty() { + tracing::warn!( + driver = "mysql", + "RETURNING not supported on MySQL; ignoring `returning` array" + ); + } + let mut conn = pool.acquire().await?; + let bound = bind_params(params); + conn.exec_drop(sql, bound).await.map_err(map_err)?; + let affected = conn.affected_rows(); + // mysql_async's last_insert_id() is sticky per-connection: an UPDATE on + // a pool-reused connection that previously ran an INSERT will still + // return Some(prior_id). Gate on the SQL prefix so non-INSERTs always + // report None. 
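+    // Illustrative sequence on one pooled connection:
+    //   INSERT ...                      -> conn.last_insert_id() == Some(7)
+    //   UPDATE ... (later call, same conn) -> conn.last_insert_id() still Some(7)
+    // so without the is_insert() gate the UPDATE would wrongly report id 7.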
+ let last_insert_id = if is_insert(sql) { + conn.last_insert_id().map(|i| i.to_string()) + } else { + None + }; + Ok(ExecuteResult { + affected_rows: affected, + last_insert_id, + returned_rows: vec![], + returned_columns: vec![], + }) +} + +/// Same naïve prefix check as `driver::sqlite::is_insert`. False-negatives on +/// `REPLACE INTO …` and CTE-prefixed INSERTs fall through to +/// `last_insert_id: None`, which is safer than reporting a stale id from a +/// pool-reused connection. +fn is_insert(sql: &str) -> bool { + sql.trim_start().to_ascii_uppercase().starts_with("INSERT") +} + +pub async fn transaction( + pool: &MysqlPool, + statements: Vec, + isolation: Option, +) -> Result, DbError> { + let mut conn = pool.acquire().await?; + let iso_sql = match isolation { + Some(Isolation::ReadCommitted) => "SET TRANSACTION ISOLATION LEVEL READ COMMITTED", + Some(Isolation::RepeatableRead) => "SET TRANSACTION ISOLATION LEVEL REPEATABLE READ", + Some(Isolation::Serializable) => "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE", + None => "", + }; + if !iso_sql.is_empty() { + conn.query_drop(iso_sql).await.map_err(map_err)?; + } + conn.query_drop("START TRANSACTION") + .await + .map_err(map_err)?; + + let mut results: Vec = Vec::with_capacity(statements.len()); + + for (idx, stmt) in statements.iter().enumerate() { + let upper = stmt.sql.to_ascii_uppercase(); + let returns_rows = upper.trim_start().starts_with("SELECT"); + let bound = bind_params(&stmt.params); + + let step_result: Result = if returns_rows { + match conn.exec_iter(stmt.sql.as_str(), bound).await { + Ok(mut iter) => { + let raw: Result, _> = iter.collect().await; + match raw { + Ok(raw_rows) => { + let cells_rows: Vec = + raw_rows.iter().map(|r| Row(row_cells(r))).collect(); + Ok(TxStepResult { + affected_rows: cells_rows.len() as u64, + rows: cells_rows, + }) + } + Err(e) => Err(step_err(idx, e)), + } + } + Err(e) => Err(step_err(idx, e)), + } + } else { + match conn.exec_drop(stmt.sql.as_str(), bound).await { + Ok(_) => Ok(TxStepResult { + affected_rows: conn.affected_rows(), + rows: vec![], + }), + Err(e) => Err(step_err(idx, e)), + } + }; + match step_result { + Ok(s) => results.push(s), + Err(e) => { + let _ = conn.query_drop("ROLLBACK").await; + return Err(e); + } + } + } + conn.query_drop("COMMIT").await.map_err(map_err)?; + Ok(results) +} + +fn step_err(idx: usize, e: mysql_async::Error) -> DbError { + DbError::DriverError { + driver: "mysql".into(), + code: match &e { + mysql_async::Error::Server(s) => Some(s.code.to_string()), + _ => None, + }, + message: e.to_string(), + failed_index: Some(idx), + } +} + +pub async fn run_prepared( + conn: &mut crate::pool::mysql::MysqlConn, + sql: &str, + params: &[JsonParam], +) -> Result { + let bound = bind_params(params); + let mut iter = conn.exec_iter(sql, bound).await.map_err(map_err)?; + let cols: Vec = iter + .columns_ref() + .iter() + .map(|c| ColumnMeta { + name: c.name_str().to_string(), + ty: format!("{:?}", c.column_type()), + }) + .collect(); + let raw_rows: Vec = iter.collect().await.map_err(map_err)?; + let out_rows: Vec = raw_rows.iter().map(|r| Row(row_cells(r))).collect(); + Ok(QueryResult { + columns: cols, + rows: out_rows, + }) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::config::PoolConfig; + use crate::pool::MysqlPool; + use crate::value::{JsonParam, RowValue}; + + fn url() -> Option { + std::env::var("TEST_MYSQL_URL").ok() + } + + async fn pool() -> Option { + let tls = crate::config::TlsConfig { + mode: crate::config::TlsMode::Disable, + 
ca_cert: None, + }; + Some(MysqlPool::new(&url()?, &PoolConfig::default(), &tls).unwrap()) + } + + #[tokio::test(flavor = "multi_thread")] + async fn my_query_returns_int_text_null() { + let Some(p) = pool().await else { return }; + let r = query(&p, "SELECT 1 AS a, 'x' AS b, NULL AS c", &[], 30_000) + .await + .unwrap(); + assert_eq!(r.columns.len(), 3); + assert!(matches!( + &r.rows[0].0[0], + RowValue::Int(1) | RowValue::BigInt(1) + )); + assert!(matches!(&r.rows[0].0[1], RowValue::Text(s) if s == "x")); + assert!(matches!(&r.rows[0].0[2], RowValue::Null)); + } + + #[tokio::test(flavor = "multi_thread")] + async fn my_query_with_positional_params() { + let Some(p) = pool().await else { return }; + let r = query( + &p, + "SELECT ? + ? AS sum", + &[JsonParam::Int(40), JsonParam::Int(2)], + 30_000, + ) + .await + .unwrap(); + // MySQL types `?+?` as MYSQL_TYPE_DOUBLE (parameter placeholders carry no + // declared type, so the optimizer picks DOUBLE for the result column). + // Accept any numeric variant equal to 42 — the test asserts "positional + // params bind correctly", not "integer arithmetic preserves type". + let v = &r.rows[0].0[0]; + let ok = match v { + RowValue::Int(42) | RowValue::BigInt(42) => true, + RowValue::Float(f) => (f - 42.0).abs() < 1e-9, + _ => false, + }; + assert!(ok, "expected ~42, got {v:?}"); + } + + #[tokio::test(flavor = "multi_thread")] + async fn my_execute_insert_reports_affected_and_last_insert_id() { + let Some(p) = pool().await else { return }; + let _ = execute(&p, "DROP TABLE IF EXISTS db_w_t", &[], &[]).await; + let _ = execute( + &p, + "CREATE TABLE db_w_t (id INT AUTO_INCREMENT PRIMARY KEY, n INT)", + &[], + &[], + ) + .await + .unwrap(); + let r = execute( + &p, + "INSERT INTO db_w_t (n) VALUES (?), (?)", + &[JsonParam::Int(1), JsonParam::Int(2)], + &[], + ) + .await + .unwrap(); + assert_eq!(r.affected_rows, 2); + assert!(r.last_insert_id.is_some()); + } + + #[tokio::test(flavor = "multi_thread")] + async fn my_execute_with_returning_warns_and_ignores() { + let Some(p) = pool().await else { return }; + let _ = execute(&p, "DROP TABLE IF EXISTS db_w_t2", &[], &[]).await; + let _ = execute( + &p, + "CREATE TABLE db_w_t2 (id INT AUTO_INCREMENT PRIMARY KEY, n INT)", + &[], + &[], + ) + .await + .unwrap(); + let r = execute( + &p, + "INSERT INTO db_w_t2 (n) VALUES (?)", + &[JsonParam::Int(7)], + &["id".into()], + ) + .await + .unwrap(); + assert_eq!(r.affected_rows, 1); + assert!(r.returned_rows.is_empty()); + } + + #[tokio::test(flavor = "multi_thread")] + async fn my_transaction_commits() { + let Some(p) = pool().await else { return }; + let _ = execute(&p, "DROP TABLE IF EXISTS db_w_tx", &[], &[]).await; + let _ = execute(&p, "CREATE TABLE db_w_tx (n INT)", &[], &[]) + .await + .unwrap(); + let stmts = vec![ + TxStatement { + sql: "INSERT INTO db_w_tx VALUES (?)".into(), + params: vec![JsonParam::Int(1)], + }, + TxStatement { + sql: "INSERT INTO db_w_tx VALUES (?)".into(), + params: vec![JsonParam::Int(2)], + }, + ]; + let res = transaction(&p, stmts, Some(Isolation::RepeatableRead)) + .await + .unwrap(); + assert_eq!(res.len(), 2); + } + + #[tokio::test(flavor = "multi_thread")] + async fn my_transaction_rolls_back_on_failure() { + let Some(p) = pool().await else { return }; + let _ = execute(&p, "DROP TABLE IF EXISTS db_w_tx2", &[], &[]).await; + let _ = execute(&p, "CREATE TABLE db_w_tx2 (n INT NOT NULL)", &[], &[]) + .await + .unwrap(); + let stmts = vec![ + TxStatement { + sql: "INSERT INTO db_w_tx2 VALUES (?)".into(), + params: 
vec![JsonParam::Int(1)], + }, + TxStatement { + sql: "INSERT INTO db_w_tx2 VALUES (?)".into(), + params: vec![JsonParam::Null], + }, + ]; + let err = transaction(&p, stmts, None).await.unwrap_err(); + assert!(matches!(err, DbError::DriverError { .. })); + let r = query(&p, "SELECT COUNT(*) AS c FROM db_w_tx2", &[], 30_000) + .await + .unwrap(); + assert!(matches!( + &r.rows[0].0[0], + RowValue::Int(0) | RowValue::BigInt(0) + )); + } + + /// Regression: `tokio::time::timeout` must wrap both the `exec_iter` + /// dispatch *and* the `result.collect()` row stream. mysql_async's + /// `exec_iter` resolves once the server returns column metadata, so a + /// query whose planner is fast but whose row stream is slow (here: + /// `SELECT SLEEP(N)` per-row) would silently bypass the timeout if only + /// `exec_iter` were wrapped. The fix wraps the whole pipeline; this test + /// asserts the timeout fires. + #[tokio::test(flavor = "multi_thread")] + async fn my_query_timeout_applies_to_row_streaming() { + let Some(p) = pool().await else { return }; + let start = std::time::Instant::now(); + // 3 rows × SLEEP(2) = ~6s of server-side row generation. With + // timeout_ms = 500, the fix's whole-pipeline wrap fires within ~1s. + // The buggy code would wait the full ~6s and return rows. + let res = query( + &p, + "SELECT SLEEP(2), n FROM (SELECT 1 AS n UNION SELECT 2 UNION SELECT 3) AS t", + &[], + 500, + ) + .await; + let elapsed = start.elapsed(); + assert!( + matches!(res, Err(DbError::QueryTimeout { .. })), + "expected QueryTimeout, got {res:?}" + ); + assert!( + elapsed < std::time::Duration::from_secs(3), + "timeout should fire well before the ~6s row stream completes; elapsed={elapsed:?}" + ); + } + + #[tokio::test(flavor = "multi_thread")] + async fn my_run_prepared_executes_with_params() { + let Some(p) = pool().await else { return }; + let mut conn = p.acquire().await.unwrap(); + let result = run_prepared( + &mut conn, + "SELECT ? + ? AS total", + &[JsonParam::Int(40), JsonParam::Int(2)], + ) + .await + .unwrap(); + // See note in `my_query_with_positional_params`: `?+?` returns DOUBLE. + let v = &result.rows[0].0[0]; + let ok = match v { + RowValue::Int(42) | RowValue::BigInt(42) => true, + RowValue::Float(f) => (f - 42.0).abs() < 1e-9, + _ => false, + }; + assert!(ok, "expected ~42, got {v:?}"); + } +} diff --git a/iii-database/src/driver/postgres.rs b/iii-database/src/driver/postgres.rs new file mode 100644 index 00000000..92f27919 --- /dev/null +++ b/iii-database/src/driver/postgres.rs @@ -0,0 +1,991 @@ +//! Postgres driver: query/execute/transaction/prepare. + +use crate::driver::{ + ColumnMeta, ExecuteResult, Isolation, QueryResult, Row, TxStatement, TxStepResult, +}; +use crate::error::DbError; +use crate::pool::PostgresPool; +use crate::value::{JsonParam, RowValue}; +use chrono::{DateTime, NaiveDateTime, TimeZone, Utc}; +use postgres_types::{ToSql, Type}; +use serde_json::Value as JsonValue; +use std::time::Duration; + +pub async fn query( + pool: &PostgresPool, + sql: &str, + params: &[JsonParam], + timeout_ms: u64, +) -> Result { + let client = pool.acquire().await?; + let bound = bind_params(params); + let bound_refs: Vec<&(dyn ToSql + Sync)> = + bound.iter().map(|p| p as &(dyn ToSql + Sync)).collect(); + + let fut = client.query(sql, bound_refs.as_slice()); + let rows = tokio::time::timeout(Duration::from_millis(timeout_ms), fut) + .await + .map_err(|_| DbError::QueryTimeout { + db: "(pg)".into(), + timeout_ms, + })? 
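+        // (The `?` above fires when the timeout elapses; the map_err below
+        // converts an error from the query itself into a DriverError.)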
+ .map_err(map_err)?; + + if rows.is_empty() { + return Ok(QueryResult { + columns: vec![], + rows: vec![], + }); + } + + let columns: Vec = rows[0] + .columns() + .iter() + .map(|c| ColumnMeta { + name: c.name().to_string(), + ty: c.type_().name().to_string(), + }) + .collect(); + + let mut out_rows: Vec = Vec::with_capacity(rows.len()); + for row in rows { + let mut cells = Vec::with_capacity(row.columns().len()); + for (i, col) in row.columns().iter().enumerate() { + cells.push(pg_cell_to_row_value(&row, i, col.type_())?); + } + out_rows.push(Row(cells)); + } + Ok(QueryResult { + columns, + rows: out_rows, + }) +} + +fn bind_params(params: &[JsonParam]) -> Vec { + params.iter().map(PgBind::from_param).collect() +} + +#[derive(Debug)] +enum PgBind { + Null, + Bool(bool), + Int(i64), + Float(f64), + Text(String), + Json(JsonValue), +} + +impl PgBind { + fn from_param(p: &JsonParam) -> Self { + match p { + JsonParam::Null => PgBind::Null, + JsonParam::Bool(b) => PgBind::Bool(*b), + JsonParam::Int(i) => PgBind::Int(*i), + JsonParam::Float(f) => PgBind::Float(*f), + JsonParam::Text(s) => PgBind::Text(s.clone()), + JsonParam::Json(v) => PgBind::Json(v.clone()), + } + } +} + +impl ToSql for PgBind { + fn to_sql( + &self, + ty: &Type, + out: &mut bytes::BytesMut, + ) -> Result> { + // Postgres binary protocol requires the wire-format byte width to + // match the column's declared type. JsonParam carries i64/f64 but + // columns are commonly INT4 / FLOAT4 / etc. Without dispatching on + // `ty`, an `i64.to_sql(INT4, ...)` writes 8 bytes where the server + // expects 4, producing SQLSTATE 22P03 (invalid_binary_representation). + // Coerce numeric variants to the column's actual type before binding. + match self { + PgBind::Null => Ok(postgres_types::IsNull::Yes), + PgBind::Bool(b) => b.to_sql(ty, out), + PgBind::Int(i) => match *ty { + // Reject overflow rather than silently wrapping. Without + // try_from, `(*i as i16)` truncates 40000 → -25536 and writes + // it to the column; that's silent data corruption with no + // server-side error since the wire bytes are technically valid. + Type::INT2 => i16::try_from(*i) + .map_err(|_| format!("value {i} out of range for INT2 (i16)").into()) + .and_then(|v: i16| v.to_sql(ty, out)), + Type::INT4 => i32::try_from(*i) + .map_err(|_| format!("value {i} out of range for INT4 (i32)").into()) + .and_then(|v: i32| v.to_sql(ty, out)), + Type::INT8 => i.to_sql(ty, out), + Type::FLOAT4 => (*i as f32).to_sql(ty, out), + Type::FLOAT8 => (*i as f64).to_sql(ty, out), + _ => i.to_sql(ty, out), + }, + PgBind::Float(f) => match *ty { + Type::FLOAT4 => (*f as f32).to_sql(ty, out), + _ => f.to_sql(ty, out), + }, + PgBind::Text(s) => s.to_sql(ty, out), + PgBind::Json(v) => v.to_sql(ty, out), + } + } + + fn accepts(_ty: &Type) -> bool { + true + } + + postgres_types::to_sql_checked!(); +} + +fn pg_cell_to_row_value( + row: &tokio_postgres::Row, + idx: usize, + ty: &Type, +) -> Result { + macro_rules! 
get { + ($t:ty) => {{ + let v: Option<$t> = row.try_get(idx).map_err(map_err)?; + v + }}; + } + use tokio_postgres::types::Type as T; + Ok(match *ty { + T::BOOL => match get!(bool) { + Some(b) => RowValue::Bool(b), + None => RowValue::Null, + }, + T::INT2 => match get!(i16) { + Some(i) => RowValue::Int(i as i64), + None => RowValue::Null, + }, + T::INT4 => match get!(i32) { + Some(i) => RowValue::Int(i as i64), + None => RowValue::Null, + }, + T::INT8 => match get!(i64) { + Some(i) => RowValue::BigInt(i), + None => RowValue::Null, + }, + T::FLOAT4 => match get!(f32) { + Some(f) => RowValue::Float(f as f64), + None => RowValue::Null, + }, + T::FLOAT8 => match get!(f64) { + Some(f) => RowValue::Float(f), + None => RowValue::Null, + }, + T::TEXT | T::VARCHAR | T::BPCHAR | T::NAME | T::UUID => match get!(String) { + Some(s) => RowValue::Text(s), + None => RowValue::Null, + }, + T::BYTEA => match get!(Vec) { + Some(b) => RowValue::Bytes(b), + None => RowValue::Null, + }, + // postgres-types' chrono FromSql impls bind by exact OID: + // `DateTime` declares `accepts!(TIMESTAMPTZ)` and `NaiveDateTime` + // declares `accepts!(TIMESTAMP)`. Decoding TIMESTAMP (no tz) as + // `DateTime` fails at runtime with WrongType. Split the arms: + // TIMESTAMP → NaiveDateTime, then assume UTC for the wire envelope + // so RowValue::Timestamp keeps its DateTime shape. + T::TIMESTAMPTZ => match get!(DateTime) { + Some(t) => RowValue::Timestamp(t), + None => RowValue::Null, + }, + T::TIMESTAMP => match get!(NaiveDateTime) { + Some(n) => RowValue::Timestamp(Utc.from_utc_datetime(&n)), + None => RowValue::Null, + }, + T::JSON | T::JSONB => match get!(JsonValue) { + Some(v) => RowValue::Json(v), + None => RowValue::Null, + }, + T::NUMERIC => { + // Layered decode: + // 1. rust_decimal::Decimal — fast, well-tested, handles 99% of + // real-world NUMERIC values (96-bit / ~28 sig digits). + // 2. Custom binary parser fallback — for values rust_decimal + // rejects: NaN, ±Infinity, and arbitrary-precision NUMERIC + // beyond 96 bits (rust_decimal-1.41/src/postgres/driver.rs:91 + // returns `Err(ConversionTo)` for special signs and line 109 + // returns `Err(ExceedsMaximumPossibleValue)` for overflow). + // 3. Null on parse failure. + // + // The reviewer originally suggested `get!(String)` as the + // fallback, but `String: FromSql::accepts` is gated to TEXT- + // family OIDs (postgres-types-0.2/src/lib.rs:729) and rejects + // NUMERIC at runtime — confirmed earlier in this branch. The + // custom parser sidesteps that by accepting NUMERIC directly + // and stringifying the binary digits. + match row.try_get::<_, Option>(idx) { + Ok(Some(d)) => RowValue::Decimal(d.to_string()), + Ok(None) => RowValue::Null, + Err(e) => { + tracing::debug!( + column = idx, + error = %e, + "NUMERIC outside rust_decimal range; falling back to binary parser" + ); + match row.try_get::<_, Option>(idx) { + Ok(Some(t)) => RowValue::Decimal(t.0), + Ok(None) => RowValue::Null, + Err(e) => { + tracing::warn!( + column = idx, + error = %e, + "NUMERIC binary parser also failed; surfacing Null" + ); + RowValue::Null + } + } + } + } + } + _ => { + // Unknown / unmapped type — fall back to text representation. + match row.try_get::<_, Option>(idx).map_err(map_err)? 
{ + Some(s) => RowValue::Text(s), + None => RowValue::Null, + } + } + }) +} + +pub(crate) fn map_err(e: tokio_postgres::Error) -> DbError { + let code = e.code().map(|c| c.code().to_string()); + DbError::DriverError { + driver: "postgres".into(), + code, + message: e.to_string(), + failed_index: None, + } +} + +pub async fn execute( + pool: &PostgresPool, + sql: &str, + params: &[JsonParam], + _returning: &[String], +) -> Result { + let client = pool.acquire().await?; + let bound = bind_params(params); + let bound_refs: Vec<&(dyn ToSql + Sync)> = + bound.iter().map(|p| p as &(dyn ToSql + Sync)).collect(); + + let upper = sql.to_ascii_uppercase(); + if upper.contains(" RETURNING ") { + let rows = client + .query(sql, bound_refs.as_slice()) + .await + .map_err(map_err)?; + let columns: Vec = rows + .first() + .map(|r| { + r.columns() + .iter() + .map(|c| ColumnMeta { + name: c.name().to_string(), + ty: c.type_().name().to_string(), + }) + .collect() + }) + .unwrap_or_default(); + + let mut returned: Vec = Vec::with_capacity(rows.len()); + let mut last_insert_id: Option = None; + + // Postgres has no `last_insert_rowid()` equivalent; we extract + // `last_insert_id` from the first cell of the first RETURNING row. + // This means the caller's RETURNING clause column ORDER is part of + // the contract: `RETURNING id, name` produces last_insert_id = the + // id column; `RETURNING name, id` produces last_insert_id = the + // name column (which is rarely useful). + // + // Convention for callers who want the row's PK as last_insert_id: + // put it first in RETURNING. + for (ri, row) in rows.iter().enumerate() { + let mut cells = Vec::with_capacity(row.columns().len()); + for (i, col) in row.columns().iter().enumerate() { + cells.push(pg_cell_to_row_value(row, i, col.type_())?); + } + if ri == 0 { + if let Some(first) = cells.first() { + last_insert_id = match first { + RowValue::Int(i) => Some(i.to_string()), + RowValue::BigInt(i) => Some(i.to_string()), + RowValue::Text(s) => Some(s.clone()), + _ => None, + }; + } + } + returned.push(Row(cells)); + } + + Ok(ExecuteResult { + affected_rows: returned.len() as u64, + last_insert_id, + returned_rows: returned, + returned_columns: columns, + }) + } else { + let n = client + .execute(sql, bound_refs.as_slice()) + .await + .map_err(map_err)?; + Ok(ExecuteResult { + affected_rows: n, + last_insert_id: None, + returned_rows: vec![], + returned_columns: vec![], + }) + } +} + +pub async fn transaction( + pool: &PostgresPool, + statements: Vec, + isolation: Option, +) -> Result, DbError> { + let mut client = pool.acquire().await?; + let begin_sql = match isolation { + Some(Isolation::ReadCommitted) => "BEGIN ISOLATION LEVEL READ COMMITTED", + Some(Isolation::RepeatableRead) => "BEGIN ISOLATION LEVEL REPEATABLE READ", + Some(Isolation::Serializable) => "BEGIN ISOLATION LEVEL SERIALIZABLE", + None => "BEGIN", + }; + let tx_client = &mut *client; + tx_client.batch_execute(begin_sql).await.map_err(map_err)?; + + let mut results: Vec = Vec::with_capacity(statements.len()); + + for (idx, stmt) in statements.iter().enumerate() { + let bound = bind_params(&stmt.params); + let bound_refs: Vec<&(dyn ToSql + Sync)> = + bound.iter().map(|p| p as &(dyn ToSql + Sync)).collect(); + let upper = stmt.sql.to_ascii_uppercase(); + let returns_rows = + upper.trim_start().starts_with("SELECT") || upper.contains(" RETURNING "); + + let step = if returns_rows { + match tx_client.query(&stmt.sql, bound_refs.as_slice()).await { + Ok(rows) => { + let mut cells_rows: Vec = 
Vec::with_capacity(rows.len()); + for row in &rows { + let mut cells = Vec::with_capacity(row.columns().len()); + for (i, col) in row.columns().iter().enumerate() { + cells.push(pg_cell_to_row_value(row, i, col.type_())?); + } + cells_rows.push(Row(cells)); + } + TxStepResult { + affected_rows: cells_rows.len() as u64, + rows: cells_rows, + } + } + Err(e) => { + let _ = tx_client.batch_execute("ROLLBACK").await; + return Err(step_err(idx, e)); + } + } + } else { + match tx_client.execute(&stmt.sql, bound_refs.as_slice()).await { + Ok(n) => TxStepResult { + affected_rows: n, + rows: vec![], + }, + Err(e) => { + let _ = tx_client.batch_execute("ROLLBACK").await; + return Err(step_err(idx, e)); + } + } + }; + results.push(step); + } + + if let Err(e) = tx_client.batch_execute("COMMIT").await { + // Best-effort ROLLBACK so the connection isn't returned to the pool + // mid-transaction. deadpool's Fast recycler does not issue ROLLBACK, + // so without this the next caller on this connection sees + // "current transaction is aborted, commands ignored". + let _ = tx_client.batch_execute("ROLLBACK").await; + return Err(map_err(e)); + } + Ok(results) +} + +fn step_err(idx: usize, e: tokio_postgres::Error) -> DbError { + let code = e.code().map(|c| c.code().to_string()); + DbError::DriverError { + driver: "postgres".into(), + code, + message: e.to_string(), + failed_index: Some(idx), + } +} + +pub async fn run_prepared( + client: &mut crate::pool::postgres::PgClient, + sql: &str, + params: &[JsonParam], +) -> Result { + let bound = bind_params(params); + let bound_refs: Vec<&(dyn ToSql + Sync)> = + bound.iter().map(|p| p as &(dyn ToSql + Sync)).collect(); + let stmt = client.prepare(sql).await.map_err(map_err)?; + let rows = client + .query(&stmt, bound_refs.as_slice()) + .await + .map_err(map_err)?; + let columns: Vec = stmt + .columns() + .iter() + .map(|c| ColumnMeta { + name: c.name().to_string(), + ty: c.type_().name().to_string(), + }) + .collect(); + let mut out_rows: Vec = Vec::with_capacity(rows.len()); + for row in rows { + let mut cells = Vec::with_capacity(row.columns().len()); + for (i, col) in row.columns().iter().enumerate() { + cells.push(pg_cell_to_row_value(&row, i, col.type_())?); + } + out_rows.push(Row(cells)); + } + Ok(QueryResult { + columns, + rows: out_rows, + }) +} + +/// Fallback NUMERIC decoder used when `rust_decimal` rejects a value +/// (NaN, ±Infinity, or precision beyond 96 bits). Captures the raw +/// Postgres binary numeric format and stringifies it directly so the +/// caller sees a precision-preserving decimal/special-value string. +struct PgNumericText(String); + +impl<'a> postgres_types::FromSql<'a> for PgNumericText { + fn from_sql( + _ty: &Type, + raw: &'a [u8], + ) -> Result> { + stringify_pg_numeric_binary(raw) + .map(PgNumericText) + .map_err(|e| -> Box { e.into() }) + } + + fn accepts(ty: &Type) -> bool { + *ty == Type::NUMERIC + } +} + +/// Postgres binary NUMERIC layout (network byte order, all 16-bit fields): +/// +/// u16 ndigits — number of base-10000 digit groups +/// i16 weight — weight of first group (signed) +/// u16 sign — 0x0000=+, 0x4000=−, 0xC000=NaN, 0xD000=+Inf, 0xF000=−Inf +/// u16 dscale — display scale (decimal fractional digits) +/// u16 digits[] — base-10000 digits, MSB first +/// +/// References: rust_decimal-1.41/src/postgres/driver.rs (the impl we fall +/// back from, which uses the same layout) and the Postgres source at +/// `src/backend/utils/adt/numeric.c`. 
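+/// Worked example: `1.23` arrives as ndigits=2, weight=0, sign=0x0000,
+/// dscale=2, digits=[1, 2300]; the integer group prints as "1" and the
+/// fractional group "2300" is trimmed to dscale, giving "1.23".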
The custom stringifier handles every +/// shape rust_decimal rejects so callers see a value rather than an error. +fn stringify_pg_numeric_binary(raw: &[u8]) -> Result { + if raw.len() < 8 { + return Err("numeric: header too short"); + } + let ndigits = u16::from_be_bytes([raw[0], raw[1]]) as usize; + let weight = i16::from_be_bytes([raw[2], raw[3]]) as i32; + let sign = u16::from_be_bytes([raw[4], raw[5]]); + let dscale = u16::from_be_bytes([raw[6], raw[7]]) as usize; + + match sign { + 0xC000 => return Ok("NaN".to_string()), + 0xD000 => return Ok("Infinity".to_string()), + 0xF000 => return Ok("-Infinity".to_string()), + 0x0000 | 0x4000 => {} // positive / negative — fall through + _ => return Err("numeric: unknown sign"), + } + + if raw.len() < 8 + ndigits * 2 { + return Err("numeric: digit buffer truncated"); + } + + let mut digits: Vec = Vec::with_capacity(ndigits); + for i in 0..ndigits { + let off = 8 + i * 2; + let d = u16::from_be_bytes([raw[off], raw[off + 1]]); + if d >= 10000 { + return Err("numeric: digit out of range"); + } + digits.push(d); + } + + // Integer part. Empty digits or weight < 0 → "0". + let mut int_part = String::new(); + if ndigits == 0 || weight < 0 { + int_part.push('0'); + } else { + let weight_u = weight as usize; + let stored_int_count = std::cmp::min(ndigits, weight_u + 1); + for (i, &d) in digits.iter().take(stored_int_count).enumerate() { + if i == 0 { + int_part.push_str(&d.to_string()); + } else { + int_part.push_str(&format!("{d:04}")); + } + } + // Append zero groups for integer positions beyond stored digits. + let trailing_zero_groups = (weight_u + 1).saturating_sub(stored_int_count); + for _ in 0..trailing_zero_groups { + int_part.push_str("0000"); + } + } + + // Fractional part. Only emit if dscale > 0. + let mut frac_part = String::new(); + if dscale > 0 { + // Leading zero groups when weight < -1 (the value is < 0.0001). + if weight < -1 { + let leading = ((-weight) - 1) as usize; + for _ in 0..leading { + frac_part.push_str("0000"); + } + } + // Stored fractional digits start where integer digits ended. + let frac_start_idx = if weight < 0 { 0 } else { (weight as usize) + 1 }; + for &d in digits.iter().skip(frac_start_idx) { + frac_part.push_str(&format!("{d:04}")); + } + // Trim or right-pad to exactly dscale chars. 
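+        //   e.g. digits=[1, 2000] with dscale=1: frac "2000" trims to "2";
+        //   and if fewer digit groups are stored than dscale needs (say
+        //   digits=[1], dscale=4), the right-pad yields "0000" -> "1.0000".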
+ if frac_part.len() > dscale { + frac_part.truncate(dscale); + } + while frac_part.len() < dscale { + frac_part.push('0'); + } + } + + let mut out = String::new(); + if sign == 0x4000 { + out.push('-'); + } + out.push_str(&int_part); + if !frac_part.is_empty() { + out.push('.'); + out.push_str(&frac_part); + } + Ok(out) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::config::PoolConfig; + use crate::pool::PostgresPool; + use crate::value::{JsonParam, RowValue}; + + fn url() -> Option { + std::env::var("TEST_POSTGRES_URL").ok() + } + + async fn fresh_pool() -> Option { + let u = url()?; + let tls = crate::config::TlsConfig { + mode: crate::config::TlsMode::Disable, + ca_cert: None, + }; + Some( + PostgresPool::new(&u, &PoolConfig::default(), &tls) + .await + .unwrap(), + ) + } + + #[tokio::test(flavor = "multi_thread")] + async fn pg_query_returns_rows_with_int_text_bool_null() { + let Some(p) = fresh_pool().await else { return }; + let r = query( + &p, + "SELECT 1::int AS a, 'x'::text AS b, true AS c, NULL::int AS d", + &[], + 30_000, + ) + .await + .unwrap(); + assert_eq!(r.columns.len(), 4); + assert!(matches!(&r.rows[0].0[0], RowValue::Int(1))); + assert!(matches!(&r.rows[0].0[1], RowValue::Text(s) if s == "x")); + assert!(matches!(&r.rows[0].0[2], RowValue::Bool(true))); + assert!(matches!(&r.rows[0].0[3], RowValue::Null)); + } + + #[tokio::test(flavor = "multi_thread")] + async fn pg_query_with_positional_params() { + let Some(p) = fresh_pool().await else { return }; + let r = query( + &p, + "SELECT $1::int + $2::int AS sum", + &[JsonParam::Int(2), JsonParam::Int(3)], + 30_000, + ) + .await + .unwrap(); + assert!(matches!(&r.rows[0].0[0], RowValue::Int(5))); + } + + #[tokio::test(flavor = "multi_thread")] + async fn pg_query_jsonb_round_trips_as_value() { + let Some(p) = fresh_pool().await else { return }; + let r = query(&p, "SELECT '{\"k\":1}'::jsonb AS j", &[], 30_000) + .await + .unwrap(); + match &r.rows[0].0[0] { + RowValue::Json(v) => assert_eq!(v["k"], 1), + other => panic!("expected Json, got {other:?}"), + } + } + + #[tokio::test(flavor = "multi_thread")] + async fn pg_query_bigint_returns_string() { + let Some(p) = fresh_pool().await else { return }; + let r = query(&p, "SELECT 9007199254740993::bigint AS big", &[], 30_000) + .await + .unwrap(); + assert!(matches!( + &r.rows[0].0[0], + RowValue::BigInt(9_007_199_254_740_993) + )); + } + + /// Regression: `String: FromSql::accepts` is gated to TEXT-family OIDs + /// (postgres-types-0.2/src/lib.rs:729), so the previous + /// `try_get::<_, Option>` on a NUMERIC column failed at runtime + /// with WrongType and the entire RPC call rejected. The driver now + /// decodes via `rust_decimal::Decimal` (which declares + /// `accepts!(NUMERIC)` under the `db-tokio-postgres` feature) and + /// stringifies to keep RowValue::Decimal precision-preserving on the + /// wire. This test pins both that the decode succeeds and that the + /// stringified form matches the source literal. 
+ #[tokio::test(flavor = "multi_thread")] + async fn pg_query_decodes_numeric_to_string() { + let Some(p) = fresh_pool().await else { return }; + let r = query( + &p, + "SELECT 12345.6789::numeric AS exact, \ + -0.001::numeric AS negf, \ + 0::numeric AS zero", + &[], + 30_000, + ) + .await + .unwrap(); + match &r.rows[0].0[0] { + RowValue::Decimal(s) => assert_eq!(s, "12345.6789"), + other => panic!("exact: expected Decimal, got {other:?}"), + } + match &r.rows[0].0[1] { + RowValue::Decimal(s) => assert_eq!(s, "-0.001"), + other => panic!("negf: expected Decimal, got {other:?}"), + } + // rust_decimal stringifies zero as "0" (no trailing decimals when + // dscale=0); we just assert it's a Decimal variant carrying "0". + match &r.rows[0].0[2] { + RowValue::Decimal(s) => assert_eq!(s, "0"), + other => panic!("zero: expected Decimal, got {other:?}"), + } + } + + /// Regression: integration test for the layered NUMERIC decode path + /// against a live postgres. Exercises three values that previously + /// failed the entire query because rust_decimal returned Err for them: + /// - 'NaN'::numeric → rust_decimal Err(ConversionTo) + /// - very large numeric → rust_decimal Err(ExceedsMaximumPossibleValue) + /// - 'Infinity'::numeric → rust_decimal Err(ConversionTo) + /// The fix routes these through PgNumericText, which decodes the + /// Postgres binary format directly and surfaces a precision-preserving + /// string. This test asserts the wiring: `try_get` errors, + /// `try_get` succeeds, and the final RowValue carries + /// the right string. Gated on TEST_POSTGRES_URL like the other pg tests. + #[tokio::test(flavor = "multi_thread")] + async fn pg_query_falls_back_to_binary_parser_for_numeric_edge_cases() { + let Some(p) = fresh_pool().await else { return }; + let r = query( + &p, + "SELECT 'NaN'::numeric AS nan, \ + 'Infinity'::numeric AS pinf, \ + '-Infinity'::numeric AS ninf, \ + 100000000000000000000000000000::numeric AS big", + &[], + 30_000, + ) + .await + .unwrap(); + match &r.rows[0].0[0] { + RowValue::Decimal(s) => assert_eq!(s, "NaN"), + other => panic!("nan: expected Decimal(\"NaN\"), got {other:?}"), + } + match &r.rows[0].0[1] { + RowValue::Decimal(s) => assert_eq!(s, "Infinity"), + other => panic!("pinf: expected Decimal(\"Infinity\"), got {other:?}"), + } + match &r.rows[0].0[2] { + RowValue::Decimal(s) => assert_eq!(s, "-Infinity"), + other => panic!("ninf: expected Decimal(\"-Infinity\"), got {other:?}"), + } + // 10^29 — beyond rust_decimal's ~10^28 cap. Pre-fix this exploded the + // entire query; now the binary parser stringifies it exactly. + match &r.rows[0].0[3] { + RowValue::Decimal(s) => { + assert_eq!(s, "100000000000000000000000000000"); + } + other => panic!("big: expected Decimal, got {other:?}"), + } + } + + /// Unit tests for the binary NUMERIC fallback parser. Drive it with + /// crafted byte sequences in the documented Postgres format so we don't + /// need a live postgres connection — the parser is the part of the fix + /// that handles values rust_decimal rejects (NaN, ±Infinity, large + /// magnitudes), and the production code path `try_get → from_sql` only + /// exercises it when the primary decode errors. 
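+    ///
+    /// Each crafted buffer uses the documented layout: an 8-byte header of
+    /// four big-endian 16-bit fields (ndigits, weight, sign, dscale) followed
+    /// by the base-10000 digit groups. For example, 1234.5 is
+    /// [0, 2, 0, 0, 0, 0, 0, 1, 0x04, 0xD2, 0x13, 0x88].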
+ #[test] + fn stringify_pg_numeric_handles_zero() { + // ndigits=0, weight=0, sign=+, dscale=0 → "0" + let raw = [0, 0, 0, 0, 0, 0, 0, 0]; + assert_eq!(stringify_pg_numeric_binary(&raw).unwrap(), "0"); + } + + #[test] + fn stringify_pg_numeric_handles_zero_with_scale() { + // ndigits=0, weight=0, sign=+, dscale=5 → "0.00000" + let raw = [0, 0, 0, 0, 0, 0, 0, 5]; + assert_eq!(stringify_pg_numeric_binary(&raw).unwrap(), "0.00000"); + } + + #[test] + fn stringify_pg_numeric_handles_simple_fraction() { + // 1234.5 → ndigits=2, weight=0, sign=+, dscale=1, digits=[1234, 5000] + let raw = [ + 0, 2, // ndigits + 0, 0, // weight + 0, 0, // sign + + 0, 1, // dscale + 0x04, 0xD2, // 1234 + 0x13, 0x88, // 5000 + ]; + assert_eq!(stringify_pg_numeric_binary(&raw).unwrap(), "1234.5"); + } + + #[test] + fn stringify_pg_numeric_handles_small_fraction() { + // 0.001 → ndigits=1, weight=-1, sign=+, dscale=3, digits=[10] + let raw = [ + 0, 1, // ndigits + 0xFF, 0xFF, // weight = -1 + 0, 0, // sign + + 0, 3, // dscale + 0, 10, // digit + ]; + assert_eq!(stringify_pg_numeric_binary(&raw).unwrap(), "0.001"); + } + + #[test] + fn stringify_pg_numeric_handles_negative() { + // -42 → ndigits=1, weight=0, sign=0x4000, dscale=0, digits=[42] + let raw = [ + 0, 1, // ndigits + 0, 0, // weight + 0x40, 0x00, // sign - + 0, 0, // dscale + 0, 42, // digit + ]; + assert_eq!(stringify_pg_numeric_binary(&raw).unwrap(), "-42"); + } + + #[test] + fn stringify_pg_numeric_handles_large_value_beyond_rust_decimal() { + // 10^30 = 100 * 10000^7. ndigits=1, weight=7, sign=+, dscale=0, + // digits=[100]. This value overflows rust_decimal (96-bit cap ~10^28), + // so before the fallback parser it failed query calls outright. + let raw = [ + 0, 1, // ndigits + 0, 7, // weight + 0, 0, // sign + + 0, 0, // dscale + 0, 100, // digit + ]; + assert_eq!( + stringify_pg_numeric_binary(&raw).unwrap(), + "1000000000000000000000000000000" + ); + } + + #[test] + fn stringify_pg_numeric_handles_special_signs() { + let nan = [0, 0, 0, 0, 0xC0, 0x00, 0, 0]; + assert_eq!(stringify_pg_numeric_binary(&nan).unwrap(), "NaN"); + + let pinf = [0, 0, 0, 0, 0xD0, 0x00, 0, 0]; + assert_eq!(stringify_pg_numeric_binary(&pinf).unwrap(), "Infinity"); + + let ninf = [0, 0, 0, 0, 0xF0, 0x00, 0, 0]; + assert_eq!(stringify_pg_numeric_binary(&ninf).unwrap(), "-Infinity"); + } + + #[test] + fn stringify_pg_numeric_rejects_truncated_buffers() { + // Header too short + assert!(stringify_pg_numeric_binary(&[0, 0]).is_err()); + // Header claims 3 digits but buffer is too small + let raw = [0, 3, 0, 0, 0, 0, 0, 0, 0, 1]; + assert!(stringify_pg_numeric_binary(&raw).is_err()); + } + + /// Regression: `DateTime: FromSql` declares `accepts!(TIMESTAMPTZ)` + /// (postgres-types-0.2/src/chrono_04.rs:48), so decoding a TIMESTAMP (no + /// tz) column as `DateTime` fails at runtime with WrongType. The + /// driver now decodes TIMESTAMP via `NaiveDateTime` and folds it into + /// `RowValue::Timestamp(DateTime)` by treating the naive value as + /// UTC. This test pins both the failing-before path (TIMESTAMP) and the + /// working path (TIMESTAMPTZ) so a regression on either side fails fast. 
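+    /// Note: folding the naive value as UTC means a TIMESTAMP written under a
+    /// non-UTC session timezone is reinterpreted as UTC on the wire; callers
+    /// that care about zone fidelity should store TIMESTAMPTZ.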
+ #[tokio::test(flavor = "multi_thread")] + async fn pg_query_decodes_timestamp_without_tz_and_with_tz() { + let Some(p) = fresh_pool().await else { return }; + let r = query( + &p, + "SELECT \ + '2026-04-29 12:00:00'::timestamp AS naive, \ + '2026-04-29 12:00:00+00'::timestamptz AS with_tz", + &[], + 30_000, + ) + .await + .unwrap(); + // Both columns surface as RowValue::Timestamp; both round-trip through + // RFC 3339 UTC at the wire. The buggy code panicked on the `naive` + // column with a WrongType error before reaching this assertion. + match &r.rows[0].0[0] { + RowValue::Timestamp(t) => { + assert_eq!(t.to_rfc3339(), "2026-04-29T12:00:00+00:00"); + } + other => panic!("expected Timestamp for TIMESTAMP column, got {other:?}"), + } + match &r.rows[0].0[1] { + RowValue::Timestamp(t) => { + assert_eq!(t.to_rfc3339(), "2026-04-29T12:00:00+00:00"); + } + other => panic!("expected Timestamp for TIMESTAMPTZ column, got {other:?}"), + } + } + + #[tokio::test(flavor = "multi_thread")] + async fn pg_execute_insert_returns_affected_count() { + let Some(p) = fresh_pool().await else { return }; + let _ = execute(&p, "DROP TABLE IF EXISTS db_w_t", &[], &[]).await; + let _ = execute( + &p, + "CREATE TABLE db_w_t (id SERIAL PRIMARY KEY, n INT)", + &[], + &[], + ) + .await + .unwrap(); + let r = execute( + &p, + "INSERT INTO db_w_t (n) VALUES ($1), ($2)", + &[JsonParam::Int(1), JsonParam::Int(2)], + &[], + ) + .await + .unwrap(); + assert_eq!(r.affected_rows, 2); + assert!(r.last_insert_id.is_none()); + } + + #[tokio::test(flavor = "multi_thread")] + async fn pg_execute_with_returning_populates_rows_and_last_insert_id() { + let Some(p) = fresh_pool().await else { return }; + let _ = execute(&p, "DROP TABLE IF EXISTS db_w_t2", &[], &[]).await; + let _ = execute( + &p, + "CREATE TABLE db_w_t2 (id SERIAL PRIMARY KEY, n INT)", + &[], + &[], + ) + .await + .unwrap(); + let r = execute( + &p, + "INSERT INTO db_w_t2 (n) VALUES ($1) RETURNING id, n", + &[JsonParam::Int(7)], + &["id".into(), "n".into()], + ) + .await + .unwrap(); + assert_eq!(r.returned_rows.len(), 1); + assert_eq!(r.returned_columns.len(), 2); + assert!(r.last_insert_id.is_some()); + } + + #[tokio::test(flavor = "multi_thread")] + async fn pg_transaction_commits() { + let Some(p) = fresh_pool().await else { return }; + let _ = execute(&p, "DROP TABLE IF EXISTS db_w_tx", &[], &[]).await; + let _ = execute(&p, "CREATE TABLE db_w_tx (n INT)", &[], &[]) + .await + .unwrap(); + let stmts = vec![ + TxStatement { + sql: "INSERT INTO db_w_tx VALUES ($1)".into(), + params: vec![JsonParam::Int(1)], + }, + TxStatement { + sql: "INSERT INTO db_w_tx VALUES ($1)".into(), + params: vec![JsonParam::Int(2)], + }, + ]; + let res = transaction(&p, stmts, Some(Isolation::ReadCommitted)) + .await + .unwrap(); + assert_eq!(res.len(), 2); + } + + #[tokio::test(flavor = "multi_thread")] + async fn pg_transaction_rolls_back_on_failure() { + let Some(p) = fresh_pool().await else { return }; + let _ = execute(&p, "DROP TABLE IF EXISTS db_w_tx2", &[], &[]).await; + let _ = execute(&p, "CREATE TABLE db_w_tx2 (n INT NOT NULL)", &[], &[]) + .await + .unwrap(); + let stmts = vec![ + TxStatement { + sql: "INSERT INTO db_w_tx2 VALUES ($1)".into(), + params: vec![JsonParam::Int(1)], + }, + TxStatement { + sql: "INSERT INTO db_w_tx2 VALUES ($1)".into(), + params: vec![JsonParam::Null], + }, + ]; + let err = transaction(&p, stmts, None).await.unwrap_err(); + assert!(matches!(err, DbError::DriverError { .. 
})); + let r = query(&p, "SELECT COUNT(*) FROM db_w_tx2", &[], 30_000) + .await + .unwrap(); + assert!(matches!(&r.rows[0].0[0], RowValue::BigInt(0))); + } + + #[tokio::test(flavor = "multi_thread")] + async fn pg_run_prepared_executes_with_params() { + let Some(p) = fresh_pool().await else { return }; + let mut client = p.acquire().await.unwrap(); + let result = run_prepared( + &mut client, + "SELECT $1::int + $2::int AS total", + &[JsonParam::Int(40), JsonParam::Int(2)], + ) + .await + .unwrap(); + assert!(matches!(&result.rows[0].0[0], RowValue::Int(42))); + } +} diff --git a/iii-database/src/driver/sqlite.rs b/iii-database/src/driver/sqlite.rs new file mode 100644 index 00000000..c8ee6b0e --- /dev/null +++ b/iii-database/src/driver/sqlite.rs @@ -0,0 +1,968 @@ +//! SQLite driver methods. Each function takes the pool, runs work via +//! `spawn_blocking`, and returns the shared driver types. + +use crate::driver::{ + ColumnMeta, ExecuteResult, Isolation, QueryResult, Row, TxStatement, TxStepResult, +}; +use crate::error::DbError; +use crate::pool::SqlitePool; +use crate::value::{JsonParam, RowValue}; +use rusqlite::types::{Value as SqlValue, ValueRef}; + +pub async fn query( + pool: &SqlitePool, + sql: &str, + params: &[JsonParam], + _timeout_ms: u64, // SQLite has no per-query timeout; honored via spawn_blocking budget upstream +) -> Result { + let conn = pool.acquire().await?; + let sql = sql.to_string(); + let params = params.to_vec(); + + tokio::task::spawn_blocking(move || -> Result { + conn.with(|c| { + let mut stmt = c.prepare(&sql).map_err(map_err)?; + let columns: Vec = stmt + .columns() + .into_iter() + .map(|col| ColumnMeta { + name: col.name().to_string(), + ty: col.decl_type().unwrap_or("").to_string(), + }) + .collect(); + + let bound: Vec = params.iter().map(json_param_to_sql).collect(); + let bound_refs: Vec<&dyn rusqlite::ToSql> = + bound.iter().map(|v| v as &dyn rusqlite::ToSql).collect(); + + let n = columns.len(); + let mut rows_out: Vec = Vec::new(); + let mut rows = stmt.query(bound_refs.as_slice()).map_err(map_err)?; + while let Some(row) = rows.next().map_err(map_err)? { + let mut vals = Vec::with_capacity(n); + for i in 0..n { + vals.push(row_value_at(row, i)?); + } + rows_out.push(Row(vals)); + } + Ok(QueryResult { + columns, + rows: rows_out, + }) + }) + }) + .await + .map_err(|e| DbError::DriverError { + driver: "sqlite".into(), + code: None, + message: format!("spawn_blocking join: {e}"), + failed_index: None, + })? 
+} + +fn json_param_to_sql(p: &JsonParam) -> SqlValue { + match p { + JsonParam::Null => SqlValue::Null, + JsonParam::Bool(b) => SqlValue::Integer(if *b { 1 } else { 0 }), + JsonParam::Int(i) => SqlValue::Integer(*i), + JsonParam::Float(f) => SqlValue::Real(*f), + JsonParam::Text(s) => SqlValue::Text(s.clone()), + JsonParam::Json(v) => SqlValue::Text(v.to_string()), + } +} + +fn row_value_at(row: &rusqlite::Row<'_>, idx: usize) -> Result { + let r: ValueRef = row.get_ref(idx).map_err(map_err)?; + Ok(match r { + ValueRef::Null => RowValue::Null, + ValueRef::Integer(i) => RowValue::Int(i), + ValueRef::Real(f) => RowValue::Float(f), + ValueRef::Text(t) => RowValue::Text(String::from_utf8_lossy(t).into_owned()), + ValueRef::Blob(b) => RowValue::Bytes(b.to_vec()), + }) +} + +pub(crate) fn map_err(e: rusqlite::Error) -> DbError { + let code = match &e { + rusqlite::Error::SqliteFailure(f, _) => Some(format!("{:?}", f.code)), + _ => None, + }; + DbError::DriverError { + driver: "sqlite".into(), + code, + message: e.to_string(), + failed_index: None, + } +} + +/// Stamp a transaction-step index onto an existing `DbError`. Used inside +/// `run_tx_steps` to preserve the failed-step index when an error bubbles up +/// from a helper (e.g. `row_value_at`) that has no notion of "which step is +/// running". Existing `failed_index` values are preserved (an inner step may +/// have already attributed itself); only the `None` case is filled in. +fn with_failed_index(e: DbError, idx: usize) -> DbError { + match e { + DbError::DriverError { + driver, + code, + message, + failed_index, + } => DbError::DriverError { + driver, + code, + message, + failed_index: failed_index.or(Some(idx)), + }, + other => other, + } +} + +/// Pessimistic multi-statement detector. After stripping trailing +/// whitespace and semicolons, any remaining `;` is treated as a separator. +/// String-literal edge cases (e.g. a `;` inside a quoted string) are not +/// handled — for v1.0, false positives are an acceptable price for +/// preventing silent statement-drop in `Connection::execute`. +fn looks_like_multi_statement(sql: &str) -> bool { + let trimmed = sql.trim_end_matches(|c: char| c.is_whitespace() || c == ';'); + trimmed.contains(';') +} + +/// True when the SQL statement is an INSERT. Used to gate `last_insert_rowid()` +/// reporting: that function is sticky per-connection and pool reuse means a +/// non-INSERT statement on a connection that previously inserted will still +/// see the prior rowid. +/// +/// Naïve prefix check by design: false-negatives (e.g. `REPLACE INTO …` or +/// `WITH cte AS (…) INSERT …`) fall through to `last_insert_id: None`, which +/// is safe — the alternative is leaking a stale rowid from a prior pool +/// caller's INSERT, which is what we're guarding against. +fn is_insert(sql: &str) -> bool { + sql.trim_start().to_ascii_uppercase().starts_with("INSERT") +} + +/// Returns true if the prepared statement will surface result rows, or the +/// caller explicitly requested row capture via `returning`. SQLite's +/// `Statement::column_count` is the planner's source-of-truth: it returns +/// `> 0` for any statement shape that produces rows — `SELECT`, +/// `WITH cte AS (...) SELECT ...`, `VALUES (...)`, `PRAGMA foreign_keys`, +/// `EXPLAIN QUERY PLAN`, and any DML with a `RETURNING` clause regardless +/// of casing or whitespace. Replaces brittle text-prefix matches that +/// false-negatived CTE-prefixed SELECTs and aborted transactions for them. 
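+/// For example, `column_count` is 0 for `INSERT INTO t VALUES (1)`, 3 for
+/// `VALUES (1, 2, 3)`, and 2 for `INSERT INTO t (n) VALUES (1) RETURNING id, n`.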
+fn statement_returns_rows(stmt: &rusqlite::Statement<'_>, returning: &[String]) -> bool { + !returning.is_empty() || stmt.column_count() > 0 +} + +pub async fn execute( + pool: &SqlitePool, + sql: &str, + params: &[JsonParam], + returning: &[String], +) -> Result { + if looks_like_multi_statement(sql) { + return Err(DbError::DriverError { + driver: "sqlite".into(), + code: Some("MULTI_STATEMENT".into()), + message: "rusqlite execute() supports only a single statement; \ + use multiple execute() calls or execute_batch via DDL" + .into(), + failed_index: None, + }); + } + let conn = pool.acquire().await?; + let sql = sql.to_string(); + let params = params.to_vec(); + let returning = returning.to_vec(); + + tokio::task::spawn_blocking(move || -> Result { + conn.with(|c| { + let bound: Vec = params.iter().map(json_param_to_sql).collect(); + let bound_refs: Vec<&dyn rusqlite::ToSql> = + bound.iter().map(|v| v as &dyn rusqlite::ToSql).collect(); + + // Always prepare first: `Statement::column_count` is the planner's + // source of truth for whether the statement produces rows, and it + // works uniformly for SELECT, CTE-prefixed SELECT, VALUES, PRAGMA, + // EXPLAIN, and DML-with-RETURNING regardless of casing/whitespace. + // The previous text-prefix heuristic missed CTE-prefixed SELECTs + // and DML-with-RETURNING split across lines, falling through to + // `c.execute(...)` which errored with ExecuteReturnedResults. + let (affected_rows, returned_rows, returned_columns) = { + let mut stmt = c.prepare(&sql).map_err(map_err)?; + if statement_returns_rows(&stmt, &returning) { + let columns: Vec = stmt + .columns() + .into_iter() + .map(|col| ColumnMeta { + name: col.name().to_string(), + ty: col.decl_type().unwrap_or("").to_string(), + }) + .collect(); + let n = columns.len(); + let mut returned: Vec = Vec::new(); + let mut rows = stmt.query(bound_refs.as_slice()).map_err(map_err)?; + while let Some(row) = rows.next().map_err(map_err)? { + let mut vals = Vec::with_capacity(n); + for i in 0..n { + vals.push(row_value_at(row, i)?); + } + returned.push(Row(vals)); + } + (returned.len() as u64, returned, columns) + } else { + let affected = stmt.execute(bound_refs.as_slice()).map_err(map_err)?; + (affected as u64, vec![], vec![]) + } + }; + + // last_insert_rowid() is sticky per-connection: it retains the + // rowid from any prior INSERT on this physical connection and + // survives intervening UPDATE/DELETE. The pool reuses connections, + // so a non-INSERT statement here would otherwise report a stale + // rowid from someone else's earlier INSERT. Read it after the + // prepared statement is dropped so we hold no stale borrow. + let last_insert_id = if is_insert(&sql) { + let r = c.last_insert_rowid(); + if r != 0 { + Some(r.to_string()) + } else { + None + } + } else { + None + }; + Ok(ExecuteResult { + affected_rows, + last_insert_id, + returned_rows, + returned_columns, + }) + }) + }) + .await + .map_err(|e| DbError::DriverError { + driver: "sqlite".into(), + code: None, + message: format!("spawn_blocking join: {e}"), + failed_index: None, + })? +} + +/// Returns an `Err(DbError::DriverError {..})` carrying `failed_index` set +/// to the 0-based index of the failing statement. The handler layer in +/// `handlers::transaction` reads this directly to build the spec's +/// `{committed: false, failed_index, error}` envelope. 
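+///
+/// Isolation mapping on SQLite: `Serializable` → `BEGIN IMMEDIATE` (take the
+/// write lock up front); `ReadCommitted`/`RepeatableRead` → `BEGIN DEFERRED`
+/// plus a warning, since SQLite transactions are serializable either way.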
+pub async fn transaction( + pool: &SqlitePool, + statements: Vec, + isolation: Option, +) -> Result, DbError> { + let conn = pool.acquire().await?; + + tokio::task::spawn_blocking(move || -> Result, DbError> { + let mut conn = conn; + conn.with_mut(|c| { + let begin_sql = match isolation { + Some(Isolation::Serializable) => "BEGIN IMMEDIATE", + Some(Isolation::ReadCommitted) | Some(Isolation::RepeatableRead) => { + tracing::warn!( + "sqlite ignores requested isolation; using BEGIN DEFERRED (always serializable in practice)" + ); + "BEGIN DEFERRED" + } + None => "BEGIN DEFERRED", + }; + c.execute_batch(begin_sql).map_err(map_err)?; + + let inner = run_tx_steps(c, &statements); + match inner { + Ok(results) => { + c.execute_batch("COMMIT").map_err(|e| { + // COMMIT failed: best-effort rollback to release the + // implicit txn on the pooled connection. + let _ = c.execute_batch("ROLLBACK"); + map_err(e) + })?; + Ok(results) + } + Err(e) => { + // Best-effort rollback; ignore rollback errors (e.g. txn + // already aborted by SQLite). + let _ = c.execute_batch("ROLLBACK"); + Err(e) + } + } + }) + }) + .await + .map_err(|e| DbError::DriverError { + driver: "sqlite".into(), + code: None, + message: format!("spawn_blocking join: {e}"), + failed_index: None, + })? +} + +fn step_err(idx: usize, e: rusqlite::Error) -> DbError { + let code = match &e { + rusqlite::Error::SqliteFailure(f, _) => Some(format!("{:?}", f.code)), + _ => None, + }; + DbError::DriverError { + driver: "sqlite".into(), + code, + message: e.to_string(), + failed_index: Some(idx), + } +} + +/// Execute the body of a transaction (after BEGIN, before COMMIT/ROLLBACK). +/// On error, returns Err so the caller can issue an explicit ROLLBACK. +fn run_tx_steps( + c: &mut rusqlite::Connection, + statements: &[TxStatement], +) -> Result, DbError> { + let mut results: Vec = Vec::with_capacity(statements.len()); + + for (idx, stmt) in statements.iter().enumerate() { + // Symmetric with execute()'s single-statement guard: rusqlite's + // prepare_v2 only parses the first statement and silently ignores + // the rest, so `INSERT ...; DELETE ...` in a TxStatement.sql would + // run only the INSERT. Reject up-front and attribute to this step. + if looks_like_multi_statement(&stmt.sql) { + return Err(DbError::DriverError { + driver: "sqlite".into(), + code: Some("MULTI_STATEMENT".into()), + message: "rusqlite transaction step supports only a single statement; \ + split into multiple TxStatement entries" + .into(), + failed_index: Some(idx), + }); + } + + let bound: Vec = stmt.params.iter().map(json_param_to_sql).collect(); + let bound_refs: Vec<&dyn rusqlite::ToSql> = + bound.iter().map(|v| v as &dyn rusqlite::ToSql).collect(); + + // Route via SQLite's planner (Statement::column_count) instead of + // text matching on the SQL prefix. Previously, statements like + // `WITH cte AS (...) SELECT ...`, `VALUES (1),(2)`, `PRAGMA ...`, + // `EXPLAIN QUERY PLAN ...`, or `INSERT ... RETURNING` with the + // RETURNING keyword on a new line slipped past the + // `is_select || is_returning` heuristic and fell through to + // `c.execute(...)`, which errors with ExecuteReturnedResults and + // aborts the entire transaction. 
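+        // Every prepare/query/execute error below is wrapped by step_err, and
+        // cell-decode errors by with_failed_index, so the error envelope's
+        // failed_index always names this statement's 0-based position.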
+ let mut prepared = c.prepare(&stmt.sql).map_err(|e| step_err(idx, e))?; + if statement_returns_rows(&prepared, &[]) { + let n = prepared.columns().len(); + let mut rows_out: Vec = Vec::new(); + let mut rows = prepared + .query(bound_refs.as_slice()) + .map_err(|e| step_err(idx, e))?; + while let Some(row) = rows.next().map_err(|e| step_err(idx, e))? { + let mut vals = Vec::with_capacity(n); + for i in 0..n { + // row_value_at returns DbError::DriverError with + // failed_index: None (it has no step context). Stamp the + // current step idx so the wire envelope's failed_index + // points at the right TxStatement instead of None. + vals.push(row_value_at(row, i).map_err(|e| with_failed_index(e, idx))?); + } + rows_out.push(Row(vals)); + } + results.push(TxStepResult { + affected_rows: rows_out.len() as u64, + rows: rows_out, + }); + } else { + let affected = prepared + .execute(bound_refs.as_slice()) + .map_err(|e| step_err(idx, e))?; + results.push(TxStepResult { + affected_rows: affected as u64, + rows: vec![], + }); + } + } + Ok(results) +} + +/// Run an arbitrary SELECT/RETURNING-bearing statement against a pinned +/// connection held in an Option slot (the registry's `PinnedConn::Sqlite` +/// variant). The slot is `.take()`-en to move the connection into +/// `spawn_blocking` and `.replace()`-d after the work completes. +/// +/// The Option indirection lets us hand the connection to `spawn_blocking` +/// (which requires `'static`) without allocating a throwaway in-memory pool +/// just to satisfy `mem::replace`. +/// +/// Note: SQLite re-prepares cheaply via its statement cache; the "handle" +/// in this driver is really a pinned connection rather than a server-side +/// plan. Callers pass the same SQL each time. +pub async fn run_prepared( + conn_slot: &mut Option, + sql: &str, + params: &[JsonParam], +) -> Result { + let owned = conn_slot.take().ok_or_else(|| DbError::DriverError { + driver: "sqlite".into(), + code: None, + message: "pinned connection already taken (concurrent run_prepared?)".into(), + failed_index: None, + })?; + let sql = sql.to_string(); + let params = params.to_vec(); + + let (result, returned) = tokio::task::spawn_blocking( + move || -> (Result, crate::pool::sqlite::SqliteConn) { + let mut owned = owned; + let result = owned.with_mut(|c| -> Result { + let bound: Vec = params.iter().map(json_param_to_sql).collect(); + let bound_refs: Vec<&dyn rusqlite::ToSql> = + bound.iter().map(|v| v as &dyn rusqlite::ToSql).collect(); + let mut stmt = c.prepare(&sql).map_err(map_err)?; + let columns: Vec = stmt + .columns() + .into_iter() + .map(|col| ColumnMeta { + name: col.name().to_string(), + ty: col.decl_type().unwrap_or("").to_string(), + }) + .collect(); + let n = columns.len(); + let mut rows_out: Vec = Vec::new(); + let mut rows = stmt.query(bound_refs.as_slice()).map_err(map_err)?; + while let Some(row) = rows.next().map_err(map_err)? 
{ + let mut vals = Vec::with_capacity(n); + for i in 0..n { + vals.push(row_value_at(row, i)?); + } + rows_out.push(Row(vals)); + } + Ok(QueryResult { + columns, + rows: rows_out, + }) + }); + (result, owned) + }, + ) + .await + .map_err(|e| DbError::DriverError { + driver: "sqlite".into(), + code: None, + message: format!("spawn_blocking join: {e}"), + failed_index: None, + })?; + + *conn_slot = Some(returned); + result +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::config::PoolConfig; + use crate::value::{JsonParam, RowValue}; + + async fn pool() -> SqlitePool { + SqlitePool::new("sqlite::memory:", &PoolConfig::default()).unwrap() + } + + #[tokio::test(flavor = "multi_thread")] + async fn query_returns_rows_and_columns() { + let p = pool().await; + let setup = p.acquire().await.unwrap(); + tokio::task::spawn_blocking(move || { + setup.with(|c| { + c.execute_batch( + "CREATE TABLE t (id INTEGER PRIMARY KEY, name TEXT NOT NULL); \ + INSERT INTO t (id, name) VALUES (1, 'alice'), (2, 'bob');", + ) + }) + }) + .await + .unwrap() + .unwrap(); + + let result = query(&p, "SELECT id, name FROM t ORDER BY id", &[], 30_000) + .await + .unwrap(); + assert_eq!(result.columns.len(), 2); + assert_eq!(result.columns[0].name, "id"); + assert_eq!(result.columns[1].name, "name"); + assert_eq!(result.rows.len(), 2); + assert!(matches!(&result.rows[0].0[0], RowValue::Int(1))); + assert!(matches!(&result.rows[0].0[1], RowValue::Text(s) if s == "alice")); + } + + #[tokio::test(flavor = "multi_thread")] + async fn query_with_positional_params() { + let p = pool().await; + let setup = p.acquire().await.unwrap(); + tokio::task::spawn_blocking(move || { + setup.with(|c| { + c.execute_batch("CREATE TABLE t (n INTEGER); INSERT INTO t VALUES (1),(2),(3);") + }) + }) + .await + .unwrap() + .unwrap(); + + let r = query( + &p, + "SELECT n FROM t WHERE n > ? ORDER BY n", + &[JsonParam::Int(1)], + 30_000, + ) + .await + .unwrap(); + assert_eq!(r.rows.len(), 2); + } + + #[tokio::test(flavor = "multi_thread")] + async fn query_returns_null_for_null_columns() { + let p = pool().await; + let setup = p.acquire().await.unwrap(); + tokio::task::spawn_blocking(move || { + setup.with(|c| c.execute_batch("CREATE TABLE t (x TEXT); INSERT INTO t VALUES (NULL);")) + }) + .await + .unwrap() + .unwrap(); + + let r = query(&p, "SELECT x FROM t", &[], 30_000).await.unwrap(); + assert_eq!(r.rows.len(), 1); + assert!(matches!(r.rows[0].0[0], RowValue::Null)); + } + + #[tokio::test(flavor = "multi_thread")] + async fn malformed_sql_returns_driver_error() { + let p = pool().await; + let err = query(&p, "SELEKT 1", &[], 30_000).await.unwrap_err(); + match err { + DbError::DriverError { driver, .. 
} => assert_eq!(driver, "sqlite"), + other => panic!("expected DriverError, got {other:?}"), + } + } + + #[tokio::test(flavor = "multi_thread")] + async fn execute_insert_reports_affected_and_last_insert_id() { + let p = pool().await; + let s = p.acquire().await.unwrap(); + tokio::task::spawn_blocking(move || { + s.with(|c| c.execute_batch("CREATE TABLE t (id INTEGER PRIMARY KEY, n INT);")) + }) + .await + .unwrap() + .unwrap(); + + let r = execute( + &p, + "INSERT INTO t (n) VALUES (?), (?)", + &[JsonParam::Int(1), JsonParam::Int(2)], + &[], + ) + .await + .unwrap(); + assert_eq!(r.affected_rows, 2); + assert_eq!(r.last_insert_id.as_deref(), Some("2")); + assert!(r.returned_rows.is_empty()); + } + + #[tokio::test(flavor = "multi_thread")] + async fn execute_with_returning_populates_returned_rows() { + let p = pool().await; + let s = p.acquire().await.unwrap(); + tokio::task::spawn_blocking(move || { + s.with(|c| c.execute_batch("CREATE TABLE t (id INTEGER PRIMARY KEY, n INT);")) + }) + .await + .unwrap() + .unwrap(); + + let r = execute( + &p, + "INSERT INTO t (n) VALUES (?) RETURNING id, n", + &[JsonParam::Int(7)], + &["id".into(), "n".into()], + ) + .await + .unwrap(); + assert_eq!(r.returned_rows.len(), 1); + assert_eq!(r.returned_columns.len(), 2); + } + + #[tokio::test(flavor = "multi_thread")] + async fn execute_rejects_multi_statement_sql() { + let p = pool().await; + let s = p.acquire().await.unwrap(); + tokio::task::spawn_blocking(move || s.with(|c| c.execute_batch("CREATE TABLE t (n INT);"))) + .await + .unwrap() + .unwrap(); + let err = execute( + &p, + "INSERT INTO t VALUES (1); INSERT INTO t VALUES (2)", + &[], + &[], + ) + .await + .unwrap_err(); + match err { + DbError::DriverError { driver, code, .. } => { + assert_eq!(driver, "sqlite"); + assert_eq!(code.as_deref(), Some("MULTI_STATEMENT")); + } + other => panic!("expected DriverError, got {other:?}"), + } + } + + /// Regression: `row_value_at` returns `DriverError { failed_index: None }` + /// because it has no step context. Inside `run_tx_steps`, the previous + /// `vals.push(row_value_at(row, i)?)` propagated that error verbatim, so + /// any cell-decode failure during a transaction lost its step attribution + /// — the wire envelope said "tx failed" but not "at step N". The + /// `with_failed_index` helper stamps the current step idx onto a + /// failed_index-less error while preserving any pre-existing index. + #[test] + fn with_failed_index_stamps_idx_when_missing() { + let e = DbError::DriverError { + driver: "sqlite".into(), + code: None, + message: "x".into(), + failed_index: None, + }; + match with_failed_index(e, 3) { + DbError::DriverError { failed_index, .. } => assert_eq!(failed_index, Some(3)), + other => panic!("expected DriverError, got {other:?}"), + } + } + + #[test] + fn with_failed_index_preserves_existing_idx() { + // If an inner helper already attributed itself to step 7, the outer + // `with_failed_index(_, 3)` must not clobber that — `Option::or` + // semantics keep the inner. + let e = DbError::DriverError { + driver: "sqlite".into(), + code: None, + message: "x".into(), + failed_index: Some(7), + }; + match with_failed_index(e, 3) { + DbError::DriverError { failed_index, .. 
} => assert_eq!(failed_index, Some(7)), + other => panic!("expected DriverError, got {other:?}"), + } + } + + #[test] + fn with_failed_index_passes_through_non_driver_errors() { + // Non-DriverError variants (PoolTimeout, UnknownDb, …) carry no + // failed_index field; the helper must not synthesize one onto a + // different variant. + let e = DbError::UnknownDb { + db: "primary".into(), + }; + assert!(matches!(with_failed_index(e, 3), DbError::UnknownDb { .. })); + } + + /// Regression: `transaction()` must reject multi-statement SQL inside a + /// single TxStatement, mirroring `execute()`'s guard. SQLite's prepare_v2 + /// silently parses only the first statement, so without the guard a + /// caller writing `INSERT ...; DELETE ...` would commit a partial + /// transaction (just the INSERT) without diagnostic. + #[tokio::test(flavor = "multi_thread")] + async fn transaction_rejects_multi_statement_in_step() { + let p = pool().await; + let s = p.acquire().await.unwrap(); + tokio::task::spawn_blocking(move || s.with(|c| c.execute_batch("CREATE TABLE t (n INT)"))) + .await + .unwrap() + .unwrap(); + + let stmts = vec![ + TxStatement { + sql: "INSERT INTO t VALUES (1)".into(), + params: vec![], + }, + // step idx 1 contains two statements separated by ';' + TxStatement { + sql: "INSERT INTO t VALUES (2); DELETE FROM t".into(), + params: vec![], + }, + ]; + let err = transaction(&p, stmts, None).await.unwrap_err(); + match err { + DbError::DriverError { + code, + failed_index, + driver, + .. + } => { + assert_eq!(driver, "sqlite"); + assert_eq!(code.as_deref(), Some("MULTI_STATEMENT")); + assert_eq!(failed_index, Some(1)); + } + other => panic!("expected MULTI_STATEMENT, got {other:?}"), + } + + // Verify rollback: step 0's INSERT must have been undone — no rows. + let r = query(&p, "SELECT COUNT(*) AS c FROM t", &[], 30_000) + .await + .unwrap(); + assert!(matches!( + &r.rows[0].0[0], + RowValue::Int(0) | RowValue::BigInt(0) + )); + } + + /// Regression: `is_select || is_returning` text matching missed + /// CTE-prefixed SELECTs (start with `WITH`, not `SELECT`) and aborted + /// the entire transaction by routing them to `c.execute(...)` which + /// errors with `ExecuteReturnedResults`. After switching to + /// `Statement::column_count` routing, all row-producing statement + /// shapes flow through the row-capture path correctly. + #[tokio::test(flavor = "multi_thread")] + async fn transaction_handles_cte_select_values_and_multiline_returning() { + let p = pool().await; + let s = p.acquire().await.unwrap(); + tokio::task::spawn_blocking(move || { + s.with(|c| c.execute_batch("CREATE TABLE t (id INTEGER PRIMARY KEY, n INT)")) + }) + .await + .unwrap() + .unwrap(); + + let stmts = vec![ + // CTE-prefixed SELECT — does not start with "SELECT" + TxStatement { + sql: "WITH cte AS (SELECT 1 AS n) SELECT n FROM cte".into(), + params: vec![], + }, + // VALUES — produces rows with no SELECT or RETURNING keyword + TxStatement { + sql: "VALUES (10), (20), (30)".into(), + params: vec![], + }, + // INSERT...RETURNING with the keyword on a new line — fails the + // `contains(" RETURNING ")` text check (no surrounding space on + // the right side). + TxStatement { + sql: "INSERT INTO t (n) VALUES (?)\nRETURNING\nid, n".into(), + params: vec![JsonParam::Int(42)], + }, + // Plain DML — doesn't produce rows. 
+ TxStatement { + sql: "UPDATE t SET n = n + 1 WHERE id = ?".into(), + params: vec![JsonParam::Int(1)], + }, + ]; + + let results = transaction(&p, stmts, None).await.unwrap(); + assert_eq!(results.len(), 4); + // CTE SELECT → 1 row + assert_eq!(results[0].rows.len(), 1); + assert_eq!(results[0].affected_rows, 1); + // VALUES → 3 rows + assert_eq!(results[1].rows.len(), 3); + // INSERT...RETURNING → 1 returned row, columns id+n + assert_eq!(results[2].rows.len(), 1); + // UPDATE → no rows, affected_rows reflects the actual update count + assert!(results[3].rows.is_empty()); + } + + #[tokio::test(flavor = "multi_thread")] + async fn execute_with_select_does_not_throw_and_surfaces_rows() { + // Cross-driver invariant: execute(SELECT) must not throw — rusqlite's + // Connection::execute returns ExecuteReturnedResults for row-producing + // statements, which previously the driver caught with a fallback that + // drained rows and reported 0 affected. After switching to + // `statement_returns_rows` routing (planner-driven via column_count), + // SELECT-via-execute now goes through the row-capture path and + // surfaces the result rows on `returned_rows`. Strictly more useful + // than silently dropping the rows the caller's SQL produced. + let p = pool().await; + let r = execute(&p, "SELECT 1 AS v", &[], &[]).await.unwrap(); + assert_eq!(r.affected_rows, 1); + assert_eq!(r.returned_columns.len(), 1); + assert_eq!(r.returned_columns[0].name, "v"); + assert_eq!(r.returned_rows.len(), 1); + assert!(r.last_insert_id.is_none()); + } + + #[tokio::test(flavor = "multi_thread")] + async fn execute_allows_trailing_semicolon() { + let p = pool().await; + let s = p.acquire().await.unwrap(); + tokio::task::spawn_blocking(move || s.with(|c| c.execute_batch("CREATE TABLE t (n INT);"))) + .await + .unwrap() + .unwrap(); + // Trailing `;` and whitespace must not trigger multi-statement detection. 
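+        // looks_like_multi_statement strips trailing whitespace and ';' before
+        // scanning, so "INSERT INTO t VALUES (1); " has no interior ';' left
+        // and is treated as a single statement.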
+ let r = execute(&p, "INSERT INTO t VALUES (1); ", &[], &[]) + .await + .unwrap(); + assert_eq!(r.affected_rows, 1); + } + + #[tokio::test(flavor = "multi_thread")] + async fn execute_update_reports_affected_only() { + let p = pool().await; + let s = p.acquire().await.unwrap(); + tokio::task::spawn_blocking(move || { + s.with(|c| c.execute_batch("CREATE TABLE t (n INT); INSERT INTO t VALUES (1),(2),(3);")) + }) + .await + .unwrap() + .unwrap(); + + let r = execute( + &p, + "UPDATE t SET n = n + 10 WHERE n > ?", + &[JsonParam::Int(1)], + &[], + ) + .await + .unwrap(); + assert_eq!(r.affected_rows, 2); + } + + #[tokio::test(flavor = "multi_thread")] + async fn transaction_commits_when_all_statements_succeed() { + let p = pool().await; + let s = p.acquire().await.unwrap(); + tokio::task::spawn_blocking(move || s.with(|c| c.execute_batch("CREATE TABLE t (n INT);"))) + .await + .unwrap() + .unwrap(); + + let stmts = vec![ + TxStatement { + sql: "INSERT INTO t VALUES (?)".into(), + params: vec![JsonParam::Int(1)], + }, + TxStatement { + sql: "INSERT INTO t VALUES (?)".into(), + params: vec![JsonParam::Int(2)], + }, + ]; + let res = transaction(&p, stmts, None).await.unwrap(); + assert_eq!(res.len(), 2); + assert_eq!(res[0].affected_rows, 1); + } + + #[tokio::test(flavor = "multi_thread")] + async fn transaction_rolls_back_on_failure_and_returns_failed_index() { + let p = pool().await; + let s = p.acquire().await.unwrap(); + tokio::task::spawn_blocking(move || { + s.with(|c| c.execute_batch("CREATE TABLE t (n INT NOT NULL);")) + }) + .await + .unwrap() + .unwrap(); + + let stmts = vec![ + TxStatement { + sql: "INSERT INTO t VALUES (?)".into(), + params: vec![JsonParam::Int(1)], + }, + TxStatement { + sql: "INSERT INTO t VALUES (?)".into(), + params: vec![JsonParam::Null], // violates NOT NULL + }, + ]; + let err = transaction(&p, stmts, None).await.unwrap_err(); + match err { + DbError::DriverError { + driver, + message, + failed_index, + .. + } => { + assert_eq!(driver, "sqlite"); + assert_eq!(failed_index, Some(1)); + assert!( + message.contains("NOT NULL") || message.contains("constraint"), + "got: {message}" + ); + } + other => panic!("expected DriverError, got {other:?}"), + } + + // Verify rollback: table should be empty. + let r = query(&p, "SELECT COUNT(*) FROM t", &[], 30_000) + .await + .unwrap(); + assert!(matches!(&r.rows[0].0[0], RowValue::Int(0))); + } + + #[tokio::test(flavor = "multi_thread")] + async fn transaction_serializable_uses_begin_immediate() { + // Smoke: running with Serializable should not error on SQLite. + let p = pool().await; + let s = p.acquire().await.unwrap(); + tokio::task::spawn_blocking(move || s.with(|c| c.execute_batch("CREATE TABLE t (n INT);"))) + .await + .unwrap() + .unwrap(); + + let stmts = vec![TxStatement { + sql: "INSERT INTO t VALUES (1)".into(), + params: vec![], + }]; + let res = transaction(&p, stmts, Some(Isolation::Serializable)) + .await + .unwrap(); + assert_eq!(res.len(), 1); + } + + #[tokio::test(flavor = "multi_thread")] + async fn prepare_then_run_executes_with_params() { + let p = pool().await; + let s = p.acquire().await.unwrap(); + tokio::task::spawn_blocking(move || { + s.with(|c| { + c.execute_batch("CREATE TABLE t (n INT); INSERT INTO t VALUES (10),(20),(30);") + }) + }) + .await + .unwrap() + .unwrap(); + + let mut conn_slot = Some(p.acquire().await.unwrap()); + let result = run_prepared( + &mut conn_slot, + "SELECT n FROM t WHERE n > ? 
ORDER BY n", + &[JsonParam::Int(15)], + ) + .await + .unwrap(); + assert_eq!(result.rows.len(), 2); + assert!( + conn_slot.is_some(), + "conn should be returned to the slot after run_prepared" + ); + } + + #[tokio::test(flavor = "multi_thread")] + async fn run_prepared_returns_error_when_conn_slot_empty() { + // Defends the race-guard at the top of `run_prepared`: if two callers + // hit the same registry entry concurrently, the second `.take()` sees + // None and must return a DriverError rather than panicking. + let mut empty: Option = None; + let err = run_prepared(&mut empty, "SELECT 1", &[]).await.unwrap_err(); + match err { + DbError::DriverError { + driver, message, .. + } => { + assert_eq!(driver, "sqlite"); + assert!( + message.contains("already taken") || message.contains("pinned"), + "got: {message}" + ); + } + other => panic!("expected DriverError, got {other:?}"), + } + } +} diff --git a/iii-database/src/error.rs b/iii-database/src/error.rs new file mode 100644 index 00000000..f6f5eea3 --- /dev/null +++ b/iii-database/src/error.rs @@ -0,0 +1,128 @@ +//! Discriminated error codes returned to the engine. +//! +//! The `code` field is stable; clients should match on it. The remaining +//! fields are diagnostic. + +use serde::Serialize; +use thiserror::Error; + +#[derive(Debug, Error, Serialize)] +#[serde(tag = "code")] +pub enum DbError { + #[serde(rename = "POOL_TIMEOUT")] + #[error("pool acquire timed out for db {db} after {waited_ms}ms")] + PoolTimeout { db: String, waited_ms: u64 }, + + #[serde(rename = "QUERY_TIMEOUT")] + #[error("query exceeded timeout {timeout_ms}ms on db {db}")] + QueryTimeout { db: String, timeout_ms: u64 }, + + #[serde(rename = "STATEMENT_NOT_FOUND")] + #[error("statement handle {handle_id} not found or expired")] + StatementNotFound { handle_id: String }, + + #[serde(rename = "UNKNOWN_DB")] + #[error("unknown db {db}")] + UnknownDb { db: String }, + + #[serde(rename = "INVALID_PARAM")] + #[error("invalid parameter at index {index}: {reason}")] + InvalidParam { index: usize, reason: String }, + + #[serde(rename = "DRIVER_ERROR")] + #[error("driver {driver} error: {message}")] + DriverError { + driver: String, + #[serde(rename = "inner_code")] + code: Option, + message: String, + /// Set when this error occurred during a multi-statement transaction. + /// The 0-based index of the statement that failed. 
+ #[serde(skip_serializing_if = "Option::is_none")] + failed_index: Option, + }, + + #[serde(rename = "REPLICATION_SLOT_EXISTS")] + #[error("replication slot {slot} already in use")] + ReplicationSlotExists { slot: String }, + + #[serde(rename = "UNSUPPORTED")] + #[error("operation {op} not supported on driver {driver}")] + Unsupported { op: String, driver: String }, + + #[serde(rename = "CONFIG_ERROR")] + #[error("config error: {message}")] + ConfigError { message: String }, +} + +impl From for iii_sdk::IIIError { + fn from(e: DbError) -> Self { + let body = serde_json::to_string(&e) + .expect("DbError serialization is infallible (only primitive fields)"); + iii_sdk::IIIError::Handler(body) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn pool_timeout_serializes_with_stable_code() { + let e = DbError::PoolTimeout { + db: "primary".into(), + waited_ms: 5000, + }; + let v: serde_json::Value = serde_json::to_value(&e).unwrap(); + assert_eq!(v["code"], "POOL_TIMEOUT"); + assert_eq!(v["db"], "primary"); + assert_eq!(v["waited_ms"], 5000); + } + + #[test] + fn unknown_db_serializes_with_stable_code() { + let e = DbError::UnknownDb { + db: "missing".into(), + }; + let v: serde_json::Value = serde_json::to_value(&e).unwrap(); + assert_eq!(v["code"], "UNKNOWN_DB"); + } + + #[test] + fn driver_error_carries_driver_name_and_inner() { + let e = DbError::DriverError { + driver: "postgres".into(), + code: Some("42P01".into()), + message: "relation \"x\" does not exist".into(), + failed_index: None, + }; + let v: serde_json::Value = serde_json::to_value(&e).unwrap(); + assert_eq!(v["code"], "DRIVER_ERROR"); + assert_eq!(v["driver"], "postgres"); + assert_eq!(v["inner_code"], "42P01"); + assert!(v.get("failed_index").is_none()); + } + + #[test] + fn driver_error_serializes_failed_index_when_set() { + let e = DbError::DriverError { + driver: "sqlite".into(), + code: None, + message: "constraint failed".into(), + failed_index: Some(2), + }; + let v: serde_json::Value = serde_json::to_value(&e).unwrap(); + assert_eq!(v["failed_index"], 2); + } + + #[test] + fn into_iii_error_preserves_json_body() { + let e = DbError::QueryTimeout { + db: "primary".into(), + timeout_ms: 30000, + }; + let iii_e: iii_sdk::IIIError = e.into(); + let body = format!("{iii_e:?}"); + assert!(body.contains("QUERY_TIMEOUT")); + } +} diff --git a/iii-database/src/handle.rs b/iii-database/src/handle.rs new file mode 100644 index 00000000..15b08c2f --- /dev/null +++ b/iii-database/src/handle.rs @@ -0,0 +1,203 @@ +//! Handle registry — UUID → pinned connection + SQL. +//! +//! Each entry owns a `tokio::sync::Mutex` so async drivers can +//! acquire the connection across `.await`. The outer map is a `tokio::sync::RwLock`. + +use crate::error::DbError; +use chrono::{DateTime, Duration as CDuration, Utc}; +use schemars::JsonSchema; +use serde::Serialize; +use std::collections::HashMap; +use std::sync::Arc; +use std::time::Duration; +use tokio::sync::{Mutex, OwnedMutexGuard, RwLock}; +use uuid::Uuid; + +#[derive(Debug, Clone, Serialize, JsonSchema)] +pub struct HandleResponse { + pub id: String, + pub expires_at: DateTime, +} + +pub enum PinnedConn { + /// SQLite is wrapped in `Option` so the SQLite driver's blocking-task + /// shim can `.take()` ownership into `spawn_blocking` and `.replace()` + /// it on return without needing a throwaway placeholder pool. The slot + /// is `Some` between `prepareStatement` and the registry entry's TTL, + /// and transiently `None` only inside an in-flight `runStatement`. 
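+    /// The Postgres and MySQL variants below hold their clients directly:
+    /// those drivers are async, so a pinned connection never has to be
+    /// moved into `spawn_blocking` the way the SQLite one does.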
+    Sqlite(Option<crate::pool::sqlite::SqliteConn>),
+    Postgres(crate::pool::postgres::PgClient),
+    Mysql(crate::pool::mysql::MysqlConn),
+}
+
+struct Entry {
+    sql: String,
+    expires_at: DateTime<Utc>,
+    conn: Arc<Mutex<PinnedConn>>,
+}
+
+#[derive(Clone, Default)]
+pub struct HandleRegistry {
+    inner: Arc<RwLock<HashMap<String, Entry>>>,
+}
+
+impl HandleRegistry {
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    pub async fn insert_sqlite(
+        &self,
+        sql: String,
+        conn: crate::pool::sqlite::SqliteConn,
+        ttl: Duration,
+    ) -> HandleResponse {
+        self.insert(sql, PinnedConn::Sqlite(Some(conn)), ttl).await
+    }
+
+    pub async fn insert_postgres(
+        &self,
+        sql: String,
+        conn: crate::pool::postgres::PgClient,
+        ttl: Duration,
+    ) -> HandleResponse {
+        self.insert(sql, PinnedConn::Postgres(conn), ttl).await
+    }
+
+    pub async fn insert_mysql(
+        &self,
+        sql: String,
+        conn: crate::pool::mysql::MysqlConn,
+        ttl: Duration,
+    ) -> HandleResponse {
+        self.insert(sql, PinnedConn::Mysql(conn), ttl).await
+    }
+
+    async fn insert(&self, sql: String, conn: PinnedConn, ttl: Duration) -> HandleResponse {
+        let id = Uuid::new_v4().to_string();
+        let expires_at =
+            Utc::now() + CDuration::from_std(ttl).unwrap_or_else(|_| CDuration::seconds(3600));
+        self.inner.write().await.insert(
+            id.clone(),
+            Entry {
+                sql,
+                expires_at,
+                conn: Arc::new(Mutex::new(conn)),
+            },
+        );
+        HandleResponse { id, expires_at }
+    }
+
+    pub async fn contains(&self, id: &str) -> bool {
+        self.inner.read().await.contains_key(id)
+    }
+
+    pub async fn evict_expired(&self) {
+        let now = Utc::now();
+        self.inner.write().await.retain(|_, e| e.expires_at > now);
+    }
+
+    /// Acquire a pinned conn lock guard. Returns `STATEMENT_NOT_FOUND` if the
+    /// id is unknown or expired. The caller drives `.await` against the lock.
+    pub async fn lock(&self, id: &str) -> Result<(String, OwnedMutexGuard<PinnedConn>), DbError> {
+        let g = self.inner.read().await;
+        let entry = g.get(id).ok_or_else(|| DbError::StatementNotFound {
+            handle_id: id.to_string(),
+        })?;
+        if entry.expires_at <= Utc::now() {
+            drop(g);
+            self.inner.write().await.remove(id);
+            return Err(DbError::StatementNotFound {
+                handle_id: id.to_string(),
+            });
+        }
+        let sql = entry.sql.clone();
+        let arc = Arc::clone(&entry.conn);
+        drop(g);
+        Ok((sql, arc.lock_owned().await))
+    }
+
+    pub fn spawn_evictor(self: &Arc<Self>) -> tokio::task::JoinHandle<()> {
+        let me = Arc::clone(self);
+        tokio::spawn(async move {
+            let mut interval = tokio::time::interval(Duration::from_secs(30));
+            loop {
+                interval.tick().await;
+                me.evict_expired().await;
+            }
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::config::PoolConfig;
+    use crate::pool::SqlitePool;
+    use std::time::Duration;
+
+    fn pool() -> SqlitePool {
+        SqlitePool::new("sqlite::memory:", &PoolConfig::default()).unwrap()
+    }
+
+    #[tokio::test(flavor = "multi_thread")]
+    async fn insert_then_lookup() {
+        let reg = HandleRegistry::new();
+        let p = pool();
+        let conn = p.acquire().await.unwrap();
+        let handle = reg
+            .insert_sqlite("hot SQL".into(), conn, Duration::from_secs(60))
+            .await;
+        assert!(reg.contains(&handle.id).await);
+    }
+
+    #[tokio::test(flavor = "multi_thread")]
+    async fn expired_handles_get_evicted() {
+        let reg = HandleRegistry::new();
+        let p = pool();
+        let conn = p.acquire().await.unwrap();
+        let handle = reg
+            .insert_sqlite("hot SQL".into(), conn, Duration::from_millis(50))
+            .await;
+        tokio::time::sleep(Duration::from_millis(120)).await;
+        reg.evict_expired().await;
+        assert!(!reg.contains(&handle.id).await);
+    }
+
+    #[tokio::test(flavor = "multi_thread")]
+    async fn lookup_unknown_returns_statement_not_found() {
+        let reg = HandleRegistry::new();
+        let id = "00000000-0000-0000-0000-000000000000";
+        let result = reg.lock(id).await;
+        assert!(matches!(result, Err(DbError::StatementNotFound { .. })));
+    }
+
+    /// `lock()` has two STATEMENT_NOT_FOUND paths: (1) the id is missing
+    /// outright, (2) the id is present but its TTL has elapsed and the
+    /// background evictor has not yet run. This covers path (2) — the lazy
+    /// expiry branch — and confirms it removes the stale entry as a side
+    /// effect without relying on `evict_expired()`.
+    #[tokio::test(flavor = "multi_thread")]
+    async fn lock_on_expired_handle_returns_statement_not_found_and_evicts() {
+        let reg = HandleRegistry::new();
+        let p = pool();
+        let conn = p.acquire().await.unwrap();
+        let h = reg
+            .insert_sqlite("SELECT 1".into(), conn, Duration::from_millis(1))
+            .await;
+        // Wait past the TTL.
+        tokio::time::sleep(Duration::from_millis(20)).await;
+        // Do NOT call evict_expired() — exercise the lazy path in lock().
+        // `OwnedMutexGuard` doesn't impl Debug, so we can't
+        // unwrap_err()/format the Result; pattern-match the error directly.
+        match reg.lock(&h.id).await {
+            Err(DbError::StatementNotFound { .. }) => {}
+            Err(other) => panic!("expected StatementNotFound, got {other:?}"),
+            Ok(_) => panic!("expected StatementNotFound, got Ok"),
+        }
+        assert!(
+            !reg.contains(&h.id).await,
+            "expired entry should be evicted by lock()"
+        );
+    }
+}
diff --git a/iii-database/src/handlers/execute.rs b/iii-database/src/handlers/execute.rs
new file mode 100644
index 00000000..f5633156
--- /dev/null
+++ b/iii-database/src/handlers/execute.rs
@@ -0,0 +1,185 @@
+//! `iii-database::execute` — write SQL.
+
+use super::AppState;
+use crate::driver;
+use crate::handlers::query::err_to_str;
+use crate::pool::Pool;
+use crate::value::JsonParam;
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
+use serde_json::Value;
+
+#[derive(Deserialize, JsonSchema)]
+pub struct ExecuteReq {
+    pub db: String,
+    pub sql: String,
+    #[serde(default)]
+    pub params: Vec<Value>,
+    #[serde(default)]
+    pub returning: Vec<String>,
+}
+
+#[derive(Serialize, JsonSchema)]
+pub struct ExecuteResp {
+    pub affected_rows: u64,
+    pub last_insert_id: Option<String>,
+    pub returned_rows: Vec<serde_json::Map<String, Value>>,
+}
+
+pub async fn handle(state: &AppState, req: ExecuteReq) -> Result<ExecuteResp, String> {
+    let pool = state.pool(&req.db).map_err(err_to_str)?;
+    // Reject empty SQL uniformly. See the matching guard in query.rs for why
+    // this is at the handler boundary rather than per-driver: postgres' driver
+    // accepts empty SQL as a no-op success, sqlite/mysql reject — guarding
+    // here keeps the worker's contract symmetric across all three.
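+    // For example `{"db": "primary", "sql": "   "}` is rejected here with a
+    // DRIVER_ERROR / "empty SQL" envelope before any connection is acquired.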
+ if req.sql.trim().is_empty() { + return Err(err_to_str(crate::error::DbError::DriverError { + driver: format!("{:?}", pool.driver()), + code: None, + message: "empty SQL".into(), + failed_index: None, + })); + } + let params = JsonParam::from_json_slice(&req.params).map_err(err_to_str)?; + + let result = match pool { + Pool::Sqlite(p) => driver::sqlite::execute(p, &req.sql, ¶ms, &req.returning).await, + Pool::Postgres(p) => driver::postgres::execute(p, &req.sql, ¶ms, &req.returning).await, + Pool::Mysql(p) => driver::mysql::execute(p, &req.sql, ¶ms, &req.returning).await, + } + .map_err(err_to_str)?; + + let returned_rows = + crate::handlers::query_rows_to_objects(&result.returned_columns, result.returned_rows); + Ok(ExecuteResp { + affected_rows: result.affected_rows, + last_insert_id: result.last_insert_id, + returned_rows, + }) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::config::PoolConfig; + use crate::handle::HandleRegistry; + use crate::handlers::AppState; + use crate::pool::{Pool, SqlitePool}; + use serde_json::json; + use std::collections::HashMap; + use std::sync::Arc; + + fn state() -> AppState { + let pool = SqlitePool::new("sqlite::memory:", &PoolConfig::default()).unwrap(); + let mut pools = HashMap::new(); + pools.insert("primary".to_string(), Pool::Sqlite(pool)); + AppState { + pools: Arc::new(pools), + handles: Arc::new(HandleRegistry::new()), + } + } + + fn req(v: Value) -> ExecuteReq { + serde_json::from_value(v).unwrap() + } + + #[tokio::test(flavor = "multi_thread")] + async fn execute_insert_returns_envelope() { + let st = state(); + handle( + &st, + req(json!({ + "db": "primary", + "sql": "CREATE TABLE t (id INTEGER PRIMARY KEY, n INT)" + })), + ) + .await + .unwrap(); + + let resp = handle( + &st, + req(json!({ + "db": "primary", + "sql": "INSERT INTO t (n) VALUES (?)", + "params": [42] + })), + ) + .await + .unwrap(); + assert_eq!(resp.affected_rows, 1); + assert_eq!(resp.last_insert_id.as_deref(), Some("1")); + } + + #[tokio::test(flavor = "multi_thread")] + async fn execute_update_with_no_prior_insert_returns_null_last_insert_id() { + // SQLite's `last_insert_rowid()` is sticky per-connection — it stays + // set across non-INSERT statements until another INSERT runs. To + // exercise the None branch we run an UPDATE against a freshly-created + // table without any prior INSERT on this pool's connection. + let st = state(); + handle( + &st, + req(json!({ + "db": "primary", + "sql": "CREATE TABLE t (n INT)" + })), + ) + .await + .unwrap(); + let resp = handle( + &st, + req(json!({ + "db": "primary", + "sql": "UPDATE t SET n = ? WHERE n = ?", + "params": [99, 1] + })), + ) + .await + .unwrap(); + assert_eq!(resp.affected_rows, 0); + // No INSERT has ever run on this connection, so last_insert_rowid() + // is 0 → driver returns None → JSON null (NOT the empty string ""). + assert!( + resp.last_insert_id.is_none(), + "expected None, got {:?}", + resp.last_insert_id + ); + } + + #[tokio::test(flavor = "multi_thread")] + async fn execute_update_after_insert_does_not_carry_stale_last_insert_id() { + // Regression: SQLite's last_insert_rowid() is sticky per-connection, + // and the pool reuses connections. Without an is_insert() guard, an + // UPDATE running on a connection whose prior caller ran an INSERT + // would report the prior INSERT's rowid as last_insert_id — a phantom + // success signal that corrupts caller logic. 
+ let st = state(); + handle( + &st, + req(json!({"db":"primary","sql":"CREATE TABLE t (id INTEGER PRIMARY KEY, n INT)"})), + ) + .await + .unwrap(); + let ins = handle( + &st, + req(json!({"db":"primary","sql":"INSERT INTO t (n) VALUES (?)","params":[1]})), + ) + .await + .unwrap(); + assert_eq!(ins.last_insert_id.as_deref(), Some("1")); + // Same pool, same connection (default max). The UPDATE must NOT + // surface the rowid the INSERT just set. + let upd = handle( + &st, + req(json!({"db":"primary","sql":"UPDATE t SET n = ? WHERE id = ?","params":[2, 1]})), + ) + .await + .unwrap(); + assert_eq!(upd.affected_rows, 1); + assert!( + upd.last_insert_id.is_none(), + "UPDATE response leaked stale rowid: {:?}", + upd.last_insert_id + ); + } +} diff --git a/iii-database/src/handlers/mod.rs b/iii-database/src/handlers/mod.rs new file mode 100644 index 00000000..434ef469 --- /dev/null +++ b/iii-database/src/handlers/mod.rs @@ -0,0 +1,31 @@ +//! RPC handlers for `database::*` functions. Each handler accepts a JSON +//! payload from the SDK, validates it, dispatches to the configured pool, +//! and serializes the result. + +use crate::error::DbError; +use crate::handle::HandleRegistry; +use crate::pool::Pool; +use std::collections::HashMap; +use std::sync::Arc; + +pub mod execute; +pub mod prepare; +pub mod query; +pub mod run_statement; +pub mod transaction; + +pub(crate) use query::rows_to_objects as query_rows_to_objects; + +#[derive(Clone)] +pub struct AppState { + pub pools: Arc>, + pub handles: Arc, +} + +impl AppState { + pub fn pool(&self, db: &str) -> Result<&Pool, DbError> { + self.pools + .get(db) + .ok_or_else(|| DbError::UnknownDb { db: db.to_string() }) + } +} diff --git a/iii-database/src/handlers/prepare.rs b/iii-database/src/handlers/prepare.rs new file mode 100644 index 00000000..56c83ce3 --- /dev/null +++ b/iii-database/src/handlers/prepare.rs @@ -0,0 +1,148 @@ +//! `iii-database::prepareStatement` — pin a connection and return a UUID handle. + +use super::AppState; +use crate::handle::HandleResponse; +use crate::handlers::query::err_to_str; +use crate::pool::Pool; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use std::time::Duration; + +#[derive(Deserialize, JsonSchema)] +pub struct PrepareReq { + pub db: String, + pub sql: String, + #[serde(default = "default_ttl")] + pub ttl_seconds: u64, +} + +#[derive(Debug, Serialize, JsonSchema)] +pub struct PrepareResp { + pub handle: HandleResponse, +} + +fn default_ttl() -> u64 { + 3600 +} + +const MAX_TTL_SECONDS: u64 = 86_400; + +pub async fn handle(state: &AppState, req: PrepareReq) -> Result { + let ttl = Duration::from_secs(req.ttl_seconds.min(MAX_TTL_SECONDS)); + let pool = state.pool(&req.db).map_err(err_to_str)?; + // Reject empty SQL at the handler boundary, mirroring query.rs / execute.rs. + // Without this, prepareStatement happily acquires a pool connection and + // pins it under a UUID handle that can never run successfully — the + // connection is leaked until the TTL expires. 
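    // For illustration (hypothetical call): without this guard,
    //
    //     prepareStatement { "db": "primary", "sql": "", "ttl_seconds": 3600 }
    //
    // would pin one pooled connection for the full default TTL (an hour) even
    // though runStatement on the returned handle can never succeed.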
+ if req.sql.trim().is_empty() { + return Err(err_to_str(crate::error::DbError::DriverError { + driver: format!("{:?}", pool.driver()), + code: None, + message: "empty SQL".into(), + failed_index: None, + })); + } + + let h = match pool { + Pool::Sqlite(p) => { + let conn = p.acquire().await.map_err(err_to_str)?; + state + .handles + .insert_sqlite(req.sql.clone(), conn, ttl) + .await + } + Pool::Postgres(p) => { + let conn = p.acquire().await.map_err(err_to_str)?; + state + .handles + .insert_postgres(req.sql.clone(), conn, ttl) + .await + } + Pool::Mysql(p) => { + let conn = p.acquire().await.map_err(err_to_str)?; + state.handles.insert_mysql(req.sql.clone(), conn, ttl).await + } + }; + + Ok(PrepareResp { handle: h }) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::config::PoolConfig; + use crate::handle::HandleRegistry; + use crate::handlers::AppState; + use crate::pool::{Pool, SqlitePool}; + use serde_json::{json, Value}; + use std::collections::HashMap; + use std::sync::Arc; + + fn state() -> AppState { + let pool = SqlitePool::new("sqlite::memory:", &PoolConfig::default()).unwrap(); + let mut pools = HashMap::new(); + pools.insert("primary".to_string(), Pool::Sqlite(pool)); + AppState { + pools: Arc::new(pools), + handles: Arc::new(HandleRegistry::new()), + } + } + + fn req(v: Value) -> PrepareReq { + serde_json::from_value(v).unwrap() + } + + #[tokio::test(flavor = "multi_thread")] + async fn prepare_returns_handle_with_uuid_and_expiry() { + let st = state(); + let resp = handle( + &st, + req(json!({ + "db": "primary", + "sql": "SELECT 1" + })), + ) + .await + .unwrap(); + let id = &resp.handle.id; + assert_eq!(id.len(), 36); // UUID + assert!(st.handles.contains(id).await); + } + + #[tokio::test(flavor = "multi_thread")] + async fn prepare_clamps_ttl_to_max() { + let st = state(); + let resp = handle( + &st, + req(json!({ + "db": "primary", + "sql": "SELECT 1", + "ttl_seconds": 999_999 // exceeds max 86400 + })), + ) + .await + .unwrap(); + // Should not error; expires_at should be ~24h out, not 11 days. + assert!(!resp.handle.id.is_empty()); + } + + #[tokio::test(flavor = "multi_thread")] + async fn prepare_rejects_empty_sql() { + // Without the handler-boundary guard, an empty SQL leaks the pool + // connection until TTL expiry: prepareStatement acquires a connection + // and pins it under a UUID handle that can never run successfully. + let st = state(); + let err = handle(&st, req(json!({"db": "primary", "sql": ""}))) + .await + .unwrap_err(); + assert!( + err.contains("DRIVER_ERROR") && err.contains("empty SQL"), + "expected DRIVER_ERROR/empty SQL, got: {err}" + ); + // Whitespace-only is the same case. + let err2 = handle(&st, req(json!({"db": "primary", "sql": " \n\t"}))) + .await + .unwrap_err(); + assert!(err2.contains("empty SQL"), "got: {err2}"); + } +} diff --git a/iii-database/src/handlers/query.rs b/iii-database/src/handlers/query.rs new file mode 100644 index 00000000..92a473f6 --- /dev/null +++ b/iii-database/src/handlers/query.rs @@ -0,0 +1,165 @@ +//! `iii-database::query` — read-only SQL. 
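//!
//! For illustration, a round-trip as the tests below exercise it (values are
//! made up; defaults shown explicitly; `columns` entries carry at least a
//! `name` field — see ColumnMeta):
//!
//! ```text
//! request:  { "db": "primary", "sql": "SELECT n FROM t ORDER BY n", "params": [], "timeout_ms": 30000 }
//! response: { "rows": [{"n": 1}, {"n": 2}], "row_count": 2, "columns": [{"name": "n", ...}] }
//! ```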
+ +use super::AppState; +use crate::driver::{self, ColumnMeta}; +use crate::error::DbError; +use crate::pool::Pool; +use crate::value::JsonParam; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +#[derive(Debug, Deserialize, JsonSchema)] +pub struct QueryReq { + pub db: String, + pub sql: String, + #[serde(default)] + pub params: Vec, + #[serde(default = "default_timeout")] + pub timeout_ms: u64, +} + +#[derive(Debug, Serialize, JsonSchema)] +pub struct QueryResp { + pub rows: Vec>, + pub row_count: usize, + pub columns: Vec, +} + +fn default_timeout() -> u64 { + 30_000 +} + +/// Returns a JSON string body suitable to wrap in IIIError on failure. +pub async fn handle(state: &AppState, req: QueryReq) -> Result { + let pool = state.pool(&req.db).map_err(err_to_str)?; + // Reject empty SQL uniformly. Postgres' tokio-postgres treats `client.query("")` + // as a valid no-op and returns Ok([]), but sqlite (rusqlite) and mysql + // (mysql_async) reject it at parse time — without this guard the worker's + // contract diverges per driver. + if req.sql.trim().is_empty() { + return Err(err_to_str(DbError::DriverError { + driver: format!("{:?}", pool.driver()), + code: None, + message: "empty SQL".into(), + failed_index: None, + })); + } + let params = JsonParam::from_json_slice(&req.params).map_err(err_to_str)?; + + let result = match pool { + Pool::Sqlite(p) => driver::sqlite::query(p, &req.sql, ¶ms, req.timeout_ms).await, + Pool::Postgres(p) => driver::postgres::query(p, &req.sql, ¶ms, req.timeout_ms).await, + Pool::Mysql(p) => driver::mysql::query(p, &req.sql, ¶ms, req.timeout_ms).await, + } + .map_err(err_to_str)?; + + let row_count = result.rows.len(); + let rows = rows_to_objects(&result.columns, result.rows); + Ok(QueryResp { + rows, + row_count, + columns: result.columns, + }) +} + +/// Project a result set into row-of-objects JSON. Consumes `rows` so each +/// `RowValue` cell can be moved into its `Value` form via `into_json` instead +/// of cloned — on a 1000-row × 10-col SELECT this removes ~10k allocations +/// of the cell payload data. Column names are still cloned per row because +/// `serde_json::Map` requires owned `String` keys; that's an unavoidable cost +/// of the row-of-objects shape and is dominated by the cell-data win. 
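///
/// For example (illustrative values): `columns = [{name: "id"}, {name: "n"}]`
/// and `rows = [[1, 10], [2, 20]]` project to
/// `[{"id": 1, "n": 10}, {"id": 2, "n": 20}]`; any extra cell beyond the
/// column list is dropped by the `columns.get(i)` guard below.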
+pub(crate) fn rows_to_objects( + columns: &[crate::driver::ColumnMeta], + rows: Vec, +) -> Vec> { + rows.into_iter() + .map(|row| { + let mut obj = serde_json::Map::with_capacity(columns.len()); + for (i, v) in row.0.into_iter().enumerate() { + if let Some(col) = columns.get(i) { + obj.insert(col.name.clone(), v.into_json()); + } + } + obj + }) + .collect() +} + +pub(crate) fn err_to_str(e: DbError) -> String { + serde_json::to_string(&e).unwrap_or_else(|_| { + format!( + "{{\"code\":\"DRIVER_ERROR\",\"message\":{:?}}}", + e.to_string() + ) + }) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::config::PoolConfig; + use crate::handle::HandleRegistry; + use crate::pool::{Pool, SqlitePool}; + use serde_json::json; + use std::collections::HashMap; + use std::sync::Arc; + + fn state() -> AppState { + let pool = SqlitePool::new("sqlite::memory:", &PoolConfig::default()).unwrap(); + let mut pools = HashMap::new(); + pools.insert("primary".to_string(), Pool::Sqlite(pool)); + AppState { + pools: Arc::new(pools), + handles: Arc::new(HandleRegistry::new()), + } + } + + fn req(v: Value) -> QueryReq { + serde_json::from_value(v).unwrap() + } + + #[tokio::test(flavor = "multi_thread")] + async fn query_returns_rows_envelope() { + let st = state(); + if let Pool::Sqlite(p) = st.pool("primary").unwrap() { + let c = p.acquire().await.unwrap(); + tokio::task::spawn_blocking(move || { + c.with(|c| c.execute_batch("CREATE TABLE t (n INT); INSERT INTO t VALUES (1),(2);")) + }) + .await + .unwrap() + .unwrap(); + } + let resp = handle( + &st, + req(json!({"db":"primary","sql":"SELECT n FROM t ORDER BY n"})), + ) + .await + .unwrap(); + assert_eq!(resp.row_count, 2); + assert_eq!(resp.rows[0]["n"], 1); + assert_eq!(resp.columns[0].name, "n"); + } + + #[tokio::test(flavor = "multi_thread")] + async fn query_unknown_db_errors() { + let st = state(); + let err = handle(&st, req(json!({"db":"missing","sql":"SELECT 1"}))) + .await + .unwrap_err(); + assert!(err.contains("UNKNOWN_DB"), "got: {err}"); + } + + #[tokio::test(flavor = "multi_thread")] + async fn query_missing_db_field_errors() { + // Deserialization moved into the SDK once we switched to typed + // RegisterFunction::new_async — at the handler boundary the input is + // already a QueryReq. This test keeps the missing-field contract by + // exercising the deserialization step explicitly. + let err = serde_json::from_value::(json!({"sql":"SELECT 1"})) + .unwrap_err() + .to_string(); + assert!(err.contains("missing field"), "got: {err}"); + } +} diff --git a/iii-database/src/handlers/run_statement.rs b/iii-database/src/handlers/run_statement.rs new file mode 100644 index 00000000..69066ac8 --- /dev/null +++ b/iii-database/src/handlers/run_statement.rs @@ -0,0 +1,152 @@ +//! `iii-database::runStatement` — run a previously-prepared handle. 
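//!
//! For illustration, the two-step flow the tests below drive (values made up):
//!
//! ```text
//! prepareStatement { "db": "primary", "sql": "SELECT n FROM t WHERE n > ?" }
//!   → { "handle": { "id": "<uuid>", ... } }
//! runStatement     { "handle_id": "<uuid>", "params": [1] }
//!   → { "rows": [...], "row_count": 2, "columns": [...] }
//! ```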
+ +use super::AppState; +use crate::driver; +use crate::handle::PinnedConn; +use crate::handlers::query::QueryResp; +use crate::handlers::{query::err_to_str, query_rows_to_objects}; +use crate::value::JsonParam; +use schemars::JsonSchema; +use serde::Deserialize; +use serde_json::Value; + +#[derive(Deserialize, JsonSchema)] +pub struct RunReq { + pub handle_id: String, + #[serde(default)] + pub params: Vec, +} + +pub async fn handle(state: &AppState, req: RunReq) -> Result { + let params = JsonParam::from_json_slice(&req.params).map_err(err_to_str)?; + let (sql, mut guard) = state + .handles + .lock(&req.handle_id) + .await + .map_err(err_to_str)?; + + let result = match &mut *guard { + PinnedConn::Sqlite(slot) => driver::sqlite::run_prepared(slot, &sql, ¶ms).await, + PinnedConn::Postgres(conn) => driver::postgres::run_prepared(conn, &sql, ¶ms).await, + PinnedConn::Mysql(conn) => driver::mysql::run_prepared(conn, &sql, ¶ms).await, + } + .map_err(err_to_str)?; + + let row_count = result.rows.len(); + let rows = query_rows_to_objects(&result.columns, result.rows); + Ok(QueryResp { + rows, + row_count, + columns: result.columns, + }) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::config::PoolConfig; + use crate::handle::HandleRegistry; + use crate::handlers::{prepare, AppState}; + use crate::pool::{Pool, SqlitePool}; + use serde_json::json; + use std::collections::HashMap; + use std::sync::Arc; + + fn state_in_memory() -> AppState { + let pool = SqlitePool::new("sqlite::memory:", &PoolConfig::default()).unwrap(); + let mut pools = HashMap::new(); + pools.insert("primary".to_string(), Pool::Sqlite(pool)); + AppState { + pools: Arc::new(pools), + handles: Arc::new(HandleRegistry::new()), + } + } + + /// Build an AppState backed by a tempfile-backed SQLite DB. + /// Returned `_tmp` keeps the file alive for the test duration. + fn state_on_disk() -> (AppState, tempfile::NamedTempFile) { + let tmp = tempfile::NamedTempFile::new().unwrap(); + let url = format!("sqlite:{}", tmp.path().display()); + let pool = SqlitePool::new(&url, &PoolConfig::default()).unwrap(); + let mut pools = HashMap::new(); + pools.insert("primary".to_string(), Pool::Sqlite(pool)); + let st = AppState { + pools: Arc::new(pools), + handles: Arc::new(HandleRegistry::new()), + }; + (st, tmp) + } + + fn run_req(v: Value) -> RunReq { + serde_json::from_value(v).unwrap() + } + + #[tokio::test(flavor = "multi_thread")] + async fn run_unknown_handle_returns_statement_not_found() { + let st = state_in_memory(); + let err = handle( + &st, + run_req(json!({ + "handle_id": "00000000-0000-0000-0000-000000000000", + "params": [] + })), + ) + .await + .unwrap_err(); + assert!(err.contains("STATEMENT_NOT_FOUND"), "got: {err}"); + } + + #[tokio::test(flavor = "multi_thread")] + async fn prepare_then_run_returns_rows() { + // Use a file-backed SQLite so that the `execute` setup conn and the + // pinned `prepareStatement` conn see the same database. + let (st, _tmp) = state_on_disk(); + // execute() runs a single statement at a time, so issue them separately. 
+ crate::handlers::execute::handle( + &st, + serde_json::from_value(json!({ + "db": "primary", + "sql": "CREATE TABLE t (n INT)" + })) + .unwrap(), + ) + .await + .unwrap(); + for n in [1, 2, 3] { + crate::handlers::execute::handle( + &st, + serde_json::from_value(json!({ + "db": "primary", + "sql": "INSERT INTO t (n) VALUES (?)", + "params": [n] + })) + .unwrap(), + ) + .await + .unwrap(); + } + + let prep = prepare::handle( + &st, + serde_json::from_value(json!({ + "db": "primary", + "sql": "SELECT n FROM t WHERE n > ? ORDER BY n" + })) + .unwrap(), + ) + .await + .unwrap(); + let id = prep.handle.id.clone(); + + let resp = handle( + &st, + run_req(json!({ + "handle_id": id, + "params": [1] + })), + ) + .await + .unwrap(); + assert_eq!(resp.row_count, 2); + } +} diff --git a/iii-database/src/handlers/transaction.rs b/iii-database/src/handlers/transaction.rs new file mode 100644 index 00000000..36056ae6 --- /dev/null +++ b/iii-database/src/handlers/transaction.rs @@ -0,0 +1,286 @@ +//! `iii-database::transaction` — atomic sequence of statements. + +use super::AppState; +use crate::driver::{self, Isolation, TxStatement}; +use crate::handlers::query::err_to_str; +use crate::pool::Pool; +use crate::value::JsonParam; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Value}; + +#[derive(Deserialize, JsonSchema)] +pub struct TxReq { + pub db: String, + pub statements: Vec, + #[serde(default)] + pub isolation: Option, +} + +#[derive(Deserialize, JsonSchema)] +pub struct TxStmtReq { + pub sql: String, + #[serde(default)] + pub params: Vec, +} + +#[derive(Serialize, JsonSchema)] +pub struct TxStepResp { + pub affected_rows: u64, + pub rows: Vec>, +} + +#[derive(Serialize, JsonSchema)] +pub struct TxResp { + pub committed: bool, + #[serde(skip_serializing_if = "Option::is_none")] + pub results: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + pub failed_index: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub error: Option, +} + +/// Extract the per-statement index from a driver error if and only if it is +/// a `DriverError` carrying one. Non-step failures (pool acquire timeout, +/// connection-level errors, multi-statement guard hits *without* an index, +/// `UnknownDb`, `ConfigError`, etc.) yield `None` so the wire envelope's +/// `failed_index` stays absent rather than falsely pointing at step 0. +fn failed_index_of(e: &crate::error::DbError) -> Option { + match e { + crate::error::DbError::DriverError { failed_index, .. 
} => *failed_index, + _ => None, + } +} + +pub async fn handle(state: &AppState, req: TxReq) -> Result { + let pool = state.pool(&req.db).map_err(err_to_str)?; + + let isolation = match req.isolation.as_deref() { + Some("read_committed") => Some(Isolation::ReadCommitted), + Some("repeatable_read") => Some(Isolation::RepeatableRead), + Some("serializable") => Some(Isolation::Serializable), + Some(other) => { + return Err(err_to_str(crate::error::DbError::InvalidParam { + index: 0, + reason: format!("unknown isolation `{other}`"), + })) + } + None => None, + }; + + let mut stmts: Vec = Vec::with_capacity(req.statements.len()); + for s in req.statements { + let params = JsonParam::from_json_slice(&s.params).map_err(err_to_str)?; + stmts.push(TxStatement { sql: s.sql, params }); + } + + let result = match pool { + Pool::Sqlite(p) => driver::sqlite::transaction(p, stmts, isolation).await, + Pool::Postgres(p) => driver::postgres::transaction(p, stmts, isolation).await, + Pool::Mysql(p) => driver::mysql::transaction(p, stmts, isolation).await, + }; + + match result { + Ok(steps) => Ok(TxResp { + committed: true, + results: Some( + steps + .into_iter() + .map(|s| TxStepResp { + affected_rows: s.affected_rows, + rows: s + .rows + .into_iter() + .map(|r| r.0.into_iter().map(|v| v.into_json()).collect::>()) + .collect::>(), + }) + .collect(), + ), + failed_index: None, + error: None, + }), + Err(e) => { + // Preserve None for non-step failures (pool acquire, BEGIN, etc.) + // — those errors don't have a specific statement index, and + // unwrap_or(0) would falsely attribute them to step 0. + let failed_index = failed_index_of(&e); + let error_value = serde_json::to_value(&e) + .unwrap_or_else(|_| json!({"code": "DRIVER_ERROR", "message": e.to_string()})); + Ok(TxResp { + committed: false, + results: None, + failed_index, + error: Some(error_value), + }) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::config::PoolConfig; + use crate::handle::HandleRegistry; + use crate::handlers::AppState; + use crate::pool::{Pool, SqlitePool}; + use serde_json::json; + use std::collections::HashMap; + use std::sync::Arc; + + fn state() -> AppState { + let pool = SqlitePool::new("sqlite::memory:", &PoolConfig::default()).unwrap(); + let mut pools = HashMap::new(); + pools.insert("primary".to_string(), Pool::Sqlite(pool)); + AppState { + pools: Arc::new(pools), + handles: Arc::new(HandleRegistry::new()), + } + } + + fn tx_req(v: Value) -> TxReq { + serde_json::from_value(v).unwrap() + } + + /// Regression: `failed_index` must stay None for non-step failures + /// (PoolTimeout, UnknownDb, ConfigError, DriverError without an index). + /// The previous `unwrap_or(0)` falsely attributed every connection-level + /// failure to "statement 0", confusing wire callers about where things + /// went wrong. 
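    ///
    /// Wire-level consequence (illustrative): a step failure reports
    /// `{"committed": false, "failed_index": 1, "error": {...}}`, while a
    /// BEGIN/acquire failure reports `{"committed": false, "error": {...}}`
    /// with `failed_index` absent entirely (see the skip-serializing test below).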
+ #[test] + fn failed_index_extraction_preserves_none_for_non_step_errors() { + // DriverError carrying a step index → preserved + let driver_with_idx = crate::error::DbError::DriverError { + driver: "sqlite".into(), + code: None, + message: "x".into(), + failed_index: Some(2), + }; + assert_eq!(failed_index_of(&driver_with_idx), Some(2)); + + // DriverError without an index → None (was: Some(0)) + let driver_no_idx = crate::error::DbError::DriverError { + driver: "sqlite".into(), + code: None, + message: "x".into(), + failed_index: None, + }; + assert_eq!(failed_index_of(&driver_no_idx), None); + + // Non-DriverError variants → None + assert_eq!( + failed_index_of(&crate::error::DbError::UnknownDb { db: "x".into() }), + None + ); + assert_eq!( + failed_index_of(&crate::error::DbError::PoolTimeout { + db: "x".into(), + waited_ms: 100, + }), + None + ); + assert_eq!( + failed_index_of(&crate::error::DbError::ConfigError { + message: "x".into(), + }), + None + ); + } + + #[tokio::test(flavor = "multi_thread")] + async fn tx_commits_when_all_succeed() { + let st = state(); + crate::handlers::execute::handle( + &st, + serde_json::from_value(json!({ + "db": "primary", + "sql": "CREATE TABLE t (n INT)" + })) + .unwrap(), + ) + .await + .unwrap(); + let resp = handle( + &st, + tx_req(json!({ + "db": "primary", + "statements": [ + {"sql": "INSERT INTO t VALUES (?)", "params": [1]}, + {"sql": "INSERT INTO t VALUES (?)", "params": [2]}, + ] + })), + ) + .await + .unwrap(); + assert!(resp.committed); + assert_eq!(resp.results.as_ref().unwrap().len(), 2); + assert!(resp.failed_index.is_none()); + assert!(resp.error.is_none()); + } + + #[tokio::test(flavor = "multi_thread")] + async fn tx_returns_failed_index_on_rollback() { + let st = state(); + crate::handlers::execute::handle( + &st, + serde_json::from_value(json!({ + "db": "primary", + "sql": "CREATE TABLE t (n INT NOT NULL)" + })) + .unwrap(), + ) + .await + .unwrap(); + let resp = handle( + &st, + tx_req(json!({ + "db": "primary", + "statements": [ + {"sql": "INSERT INTO t VALUES (?)", "params": [1]}, + {"sql": "INSERT INTO t VALUES (?)", "params": [null]}, + ] + })), + ) + .await + .unwrap(); + assert!(!resp.committed); + assert_eq!(resp.failed_index, Some(1)); + let err = resp.error.as_ref().expect("error should be present"); + assert!( + err.is_object(), + "error should be a structured object, got {err:?}" + ); + assert_eq!(err["code"], "DRIVER_ERROR"); + assert_eq!(err["driver"], "sqlite"); + assert_eq!(err["failed_index"], 1); + assert!(resp.results.is_none()); + } + + #[test] + fn tx_resp_skips_none_fields_on_wire() { + // Wire-format invariant: success shape has no `failed_index`/`error`, + // failure shape has no `results`. Verifies the + // skip_serializing_if = "Option::is_none" attributes are wired up. 
+ let success = TxResp { + committed: true, + results: Some(vec![]), + failed_index: None, + error: None, + }; + let v = serde_json::to_value(&success).unwrap(); + assert!(v.get("failed_index").is_none()); + assert!(v.get("error").is_none()); + assert!(v.get("results").is_some()); + + let failure = TxResp { + committed: false, + results: None, + failed_index: Some(0), + error: Some(json!({"code": "DRIVER_ERROR"})), + }; + let v = serde_json::to_value(&failure).unwrap(); + assert!(v.get("results").is_none()); + assert!(v.get("failed_index").is_some()); + assert!(v.get("error").is_some()); + } +} diff --git a/iii-database/src/lib.rs b/iii-database/src/lib.rs new file mode 100644 index 00000000..459c0f2e --- /dev/null +++ b/iii-database/src/lib.rs @@ -0,0 +1,15 @@ +//! iii-database worker — public surface for the binary and tests. + +pub mod config; +pub(crate) mod cursor; +pub(crate) mod driver; +pub mod error; +pub mod handle; +pub mod handlers; +pub mod pool; +pub mod triggers; +pub mod value; + +pub fn worker_name() -> &'static str { + "iii-database" +} diff --git a/iii-database/src/main.rs b/iii-database/src/main.rs new file mode 100644 index 00000000..ab34a0b6 --- /dev/null +++ b/iii-database/src/main.rs @@ -0,0 +1,217 @@ +use anyhow::{Context, Result}; +use clap::Parser; +use iii_database::config::WorkerConfig; +use iii_database::handle::HandleRegistry; +use iii_database::handlers::{ + execute::{self, ExecuteReq}, + prepare::{self, PrepareReq}, + query::{self, QueryReq}, + run_statement::{self, RunReq}, + transaction::{self, TxReq}, + AppState, +}; +use iii_database::pool; +use iii_database::triggers::handler::{QueryPollTrigger, RowChangeTrigger}; +use iii_sdk::{register_worker, InitOptions, OtelConfig, RegisterFunction, RegisterTriggerType}; +use std::collections::HashMap; +use std::sync::Arc; + +#[derive(Parser, Debug)] +#[command( + name = "iii-database", + about = "iii-database worker (PostgreSQL, MySQL, SQLite)" +)] +struct Cli { + /// Path to config.yaml file + #[arg(long, default_value = "./config.yaml")] + config: String, + + /// WebSocket URL of the iii engine + #[arg(long, default_value = "ws://127.0.0.1:49134")] + url: String, +} + +#[tokio::main] +async fn main() -> Result<()> { + tracing_subscriber::fmt() + .with_env_filter( + tracing_subscriber::EnvFilter::try_from_default_env() + .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")), + ) + .init(); + + let cli = Cli::parse(); + tracing::info!( + name = iii_database::worker_name(), + config = %cli.config, + url = %redact_url(&cli.url), + "starting" + ); + + let cfg = WorkerConfig::from_file(&cli.config) + .map_err(|e| anyhow::anyhow!(e)) + .with_context(|| format!("loading config from {}", cli.config))?; + + let mut pools = HashMap::new(); + for (name, db) in &cfg.databases { + let p = pool::build(name, db) + .await + .map_err(|e| anyhow::anyhow!(serde_json::to_string(&e).unwrap_or_default())) + .with_context(|| format!("building pool for db `{name}`"))?; + tracing::info!(db = %name, driver = ?p.driver(), "pool ready"); + pools.insert(name.clone(), p); + } + + let handles = Arc::new(HandleRegistry::new()); + let state = AppState { + pools: Arc::new(pools), + handles: handles.clone(), + }; + + let _evictor = handles.spawn_evictor(); + + let iii = register_worker( + &cli.url, + InitOptions { + otel: Some(OtelConfig::default()), + ..Default::default() + }, + ); + + { + let st = state.clone(); + iii.register_function( + RegisterFunction::new_async("iii-database::query", move |req: QueryReq| { + let st = st.clone(); 
+ async move { query::handle(&st, req).await } + }) + .description("Run a read-only SQL query and return the result rows."), + ); + } + { + let st = state.clone(); + iii.register_function( + RegisterFunction::new_async("iii-database::execute", move |req: ExecuteReq| { + let st = st.clone(); + async move { execute::handle(&st, req).await } + }) + .description("Run a write statement (INSERT/UPDATE/DELETE/DDL)."), + ); + } + { + let st = state.clone(); + iii.register_function( + RegisterFunction::new_async( + "iii-database::prepareStatement", + move |req: PrepareReq| { + let st = st.clone(); + async move { prepare::handle(&st, req).await } + }, + ) + .description("Prepare a parameterized statement once."), + ); + } + { + let st = state.clone(); + iii.register_function( + RegisterFunction::new_async("iii-database::runStatement", move |req: RunReq| { + let st = st.clone(); + async move { run_statement::handle(&st, req).await } + }) + .description("Run a previously-prepared handle."), + ); + } + { + let st = state.clone(); + iii.register_function( + RegisterFunction::new_async("iii-database::transaction", move |req: TxReq| { + let st = st.clone(); + async move { transaction::handle(&st, req).await } + }) + .description("Run a sequence of statements atomically."), + ); + } + + let _query_poll = iii.register_trigger_type(RegisterTriggerType::new( + "iii-database::query-poll", + "Polls a SQL query at a fixed interval and dispatches new rows since the last cursor.", + QueryPollTrigger::new(state.clone(), iii.clone()), + )); + let _row_change = iii.register_trigger_type(RegisterTriggerType::new( + "iii-database::row-change", + "Postgres logical replication. Stubbed in v1.0 pending tokio-postgres replication API.", + RowChangeTrigger, + )); + + tracing::info!( + "iii-database worker registered 5 functions and 2 trigger types, waiting for invocations" + ); + wait_for_shutdown_signal().await?; + tracing::info!("iii-database worker shutting down"); + iii.shutdown_async().await; + Ok(()) +} + +/// Strip userinfo (username:password) from a URL before logging it. The +/// engine websocket URL is operator-controlled and can carry credentials in +/// `wss://user:secret@host` form; `tracing::info!(url = %cli.url, ...)` +/// would otherwise emit them. Falls back to the original string on parse +/// failure (no logging-time panics). +fn redact_url(s: &str) -> String { + match url::Url::parse(s) { + Ok(mut u) => { + let _ = u.set_username(""); + let _ = u.set_password(None); + u.to_string() + } + Err(_) => s.to_string(), + } +} + +/// Wait for SIGINT or, on Unix, SIGTERM. `tokio::signal::ctrl_c()` alone +/// only catches SIGINT, leaving Docker `docker stop` / k8s `kubectl delete` +/// (which send SIGTERM) to bypass `iii.shutdown_async()` entirely — the +/// engine connection would dangle until the process was killed. +async fn wait_for_shutdown_signal() -> std::io::Result<()> { + #[cfg(unix)] + { + use tokio::signal::unix::{signal, SignalKind}; + let mut sigterm = signal(SignalKind::terminate())?; + tokio::select! { + r = tokio::signal::ctrl_c() => r, + _ = sigterm.recv() => Ok(()), + } + } + #[cfg(not(unix))] + { + tokio::signal::ctrl_c().await + } +} + +#[cfg(test)] +mod tests { + use super::redact_url; + + /// Regression: operator-controlled engine URLs may carry credentials in + /// `wss://user:secret@host` form. `tracing::info!(url = %cli.url, ...)` + /// previously emitted them verbatim. The redactor strips userinfo and + /// preserves the rest so logs remain useful for diagnostics. 
+ #[test] + fn redact_url_strips_userinfo_only() { + // Plain URL without credentials → unchanged (modulo url crate's + // canonicalization, which adds a trailing `/` for empty paths). + assert_eq!(redact_url("ws://127.0.0.1:49134"), "ws://127.0.0.1:49134/"); + // Username + password fully stripped. + assert_eq!( + redact_url("wss://user:secret@iii.example.com:1234/path"), + "wss://iii.example.com:1234/path" + ); + // Username only. + assert_eq!( + redact_url("wss://user@iii.example.com/"), + "wss://iii.example.com/" + ); + // Garbage strings fall through unchanged — no logging-time panics. + assert_eq!(redact_url("not a url"), "not a url"); + } +} diff --git a/iii-database/src/pool/mod.rs b/iii-database/src/pool/mod.rs new file mode 100644 index 00000000..e85978a6 --- /dev/null +++ b/iii-database/src/pool/mod.rs @@ -0,0 +1,46 @@ +//! Connection pool dispatch. The `Pool` enum holds one of three concrete +//! pool types; method-level dispatch lives in driver-specific modules and +//! is wired in by `handlers::*` via `match pool { ... }`. + +pub(crate) mod mysql; +pub(crate) mod postgres; +pub(crate) mod sqlite; +pub(crate) mod tls; + +use crate::config::DriverKind; +use crate::error::DbError; +pub(crate) use mysql::MysqlPool; +pub(crate) use postgres::PostgresPool; +pub(crate) use sqlite::SqlitePool; + +#[derive(Clone)] +pub enum Pool { + Postgres(PostgresPool), + Mysql(MysqlPool), + Sqlite(SqlitePool), +} + +impl Pool { + pub fn driver(&self) -> DriverKind { + match self { + Pool::Postgres(_) => DriverKind::Postgres, + Pool::Mysql(_) => DriverKind::Mysql, + Pool::Sqlite(_) => DriverKind::Sqlite, + } + } +} + +/// Build a pool for a single configured database. Used at startup by main.rs. +pub async fn build(db_name: &str, cfg: &crate::config::DatabaseConfig) -> Result { + match cfg.driver { + DriverKind::Sqlite => { + // Sqlite is local-file; the `tls` block has no meaning for it. + SqlitePool::new(&cfg.url, &cfg.pool).map(|p| Pool::Sqlite(p.with_db_name(db_name))) + } + DriverKind::Postgres => PostgresPool::new(&cfg.url, &cfg.pool, &cfg.tls) + .await + .map(|p| Pool::Postgres(p.with_db_name(db_name))), + DriverKind::Mysql => MysqlPool::new(&cfg.url, &cfg.pool, &cfg.tls) + .map(|p| Pool::Mysql(p.with_db_name(db_name))), + } +} diff --git a/iii-database/src/pool/mysql.rs b/iii-database/src/pool/mysql.rs new file mode 100644 index 00000000..bf7c73ac --- /dev/null +++ b/iii-database/src/pool/mysql.rs @@ -0,0 +1,141 @@ +//! MySQL pool wrapping `mysql_async::Pool`. + +use crate::config::{PoolConfig, TlsConfig}; +use crate::error::DbError; +use crate::pool::tls::make_mysql_ssl_opts; +use mysql_async::{Pool as MyPool, PoolConstraints, PoolOpts}; +use std::sync::Arc; +use std::time::Duration; + +#[derive(Clone)] +pub struct MysqlPool { + inner: Arc, + db_name: Arc, + acquire_timeout: Duration, +} + +pub type MysqlConn = mysql_async::Conn; + +impl MysqlPool { + pub fn new(url: &str, pool_cfg: &PoolConfig, tls_cfg: &TlsConfig) -> Result { + let constraints = + PoolConstraints::new(0, pool_cfg.max as usize).ok_or_else(|| DbError::ConfigError { + message: "mysql pool constraints invalid".into(), + })?; + let opts = PoolOpts::default() + .with_constraints(constraints) + .with_inactive_connection_ttl(Duration::from_millis(pool_cfg.idle_timeout_ms)); + // Don't echo the underlying parse error — mysql_async's error message + // includes the offending URL verbatim, which would leak any embedded + // password into logs. Surface a generic message; the operator knows + // which db they configured. 
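        // Illustrative example (hypothetical URL): if an operator mistypes
        //
        //     mysql://app_user:s3cret@db.internal:3306/app
        //
        // badly enough that parsing fails, echoing mysql_async's error would
        // put `s3cret` in the worker log; the caller instead sees only
        // "mysql url parse failed; check the configured url".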
+ let mut url_opts = mysql_async::OptsBuilder::from_opts( + mysql_async::Opts::from_url(url).map_err(|_| DbError::ConfigError { + message: "mysql url parse failed; check the configured url".into(), + })?, + ); + url_opts = url_opts.pool_opts(opts); + // `make_mysql_ssl_opts` returns None for `mode: disable`; in that + // case we leave the SSL knob untouched (= no TLS). + if let Some(ssl) = make_mysql_ssl_opts(tls_cfg)? { + url_opts = url_opts.ssl_opts(ssl); + } + let pool = MyPool::new(url_opts); + Ok(Self { + inner: Arc::new(pool), + db_name: Arc::from("(unset)"), + acquire_timeout: Duration::from_millis(pool_cfg.acquire_timeout_ms), + }) + } + + pub fn with_db_name(mut self, name: &str) -> Self { + self.db_name = Arc::from(name); + self + } + + pub async fn acquire(&self) -> Result { + let db_name = self.db_name.to_string(); + let waited_ms = self.acquire_timeout.as_millis() as u64; + let fut = self.inner.get_conn(); + match tokio::time::timeout(self.acquire_timeout, fut).await { + Ok(Ok(conn)) => Ok(conn), + Ok(Err(e)) => { + // `mysql_async::Error::Display` can echo the server address, + // username, and other connection details. Log the full error + // operator-side via tracing; surface a generic message in + // the RPC reply so untrusted callers don't see infra details. + tracing::warn!( + driver = "mysql", + db = %db_name, + error = ?e, + "pool acquire failed" + ); + Err(DbError::DriverError { + driver: "mysql".into(), + code: None, + message: "pool connection failed; check server availability".into(), + failed_index: None, + }) + } + Err(_) => Err(DbError::PoolTimeout { + db: db_name, + waited_ms, + }), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn url() -> Option { + std::env::var("TEST_MYSQL_URL").ok() + } + + #[tokio::test(flavor = "multi_thread")] + async fn mysql_pool_acquires() { + let Some(u) = url() else { return }; + // Local docker mysql in tests is plaintext; explicitly disable TLS. + let tls = crate::config::TlsConfig { + mode: crate::config::TlsMode::Disable, + ca_cert: None, + }; + let pool = MysqlPool::new(&u, &PoolConfig::default(), &tls).unwrap(); + let mut conn = pool.acquire().await.unwrap(); + use mysql_async::prelude::Queryable; + let v: Option = conn.query_first("SELECT 1").await.unwrap(); + assert_eq!(v, Some(1)); + } + + #[tokio::test(flavor = "multi_thread")] + async fn mysql_acquire_failure_message_is_redacted() { + // Hits a port that nothing listens on so mysql_async surfaces a + // connect error — the non-Timeout path that previously echoed the + // underlying error verbatim. Asserts the RPC body uses the generic + // message and does not leak userinfo/host fragments. + let cfg = PoolConfig { + max: 1, + idle_timeout_ms: 1_000, + acquire_timeout_ms: 500, + }; + let tls = crate::config::TlsConfig { + mode: crate::config::TlsMode::Disable, + ca_cert: None, + }; + let url = "mysql://leaky_user:leaky_pass@127.0.0.1:1/some_db"; + let pool = MysqlPool::new(url, &cfg, &tls).unwrap(); + let err = pool.acquire().await.unwrap_err(); + let body = serde_json::to_string(&err).unwrap(); + assert!( + body.contains("pool connection failed"), + "expected generic message; got: {body}" + ); + for forbidden in ["leaky_user", "leaky_pass", "some_db"] { + assert!( + !body.contains(forbidden), + "leaked `{forbidden}` in RPC body: {body}" + ); + } + } +} diff --git a/iii-database/src/pool/postgres.rs b/iii-database/src/pool/postgres.rs new file mode 100644 index 00000000..3d5a5939 --- /dev/null +++ b/iii-database/src/pool/postgres.rs @@ -0,0 +1,159 @@ +//! 
Postgres pool wrapping `deadpool-postgres` over `tokio-postgres`. + +use crate::config::{PoolConfig, TlsConfig}; +use crate::error::DbError; +use crate::pool::tls::make_pg_connector; +use deadpool_postgres::{Config as DpConfig, ManagerConfig, Pool as DpPool, RecyclingMethod}; +use std::sync::Arc; +use std::time::Duration; +use tokio_postgres::NoTls; + +#[derive(Clone)] +pub struct PostgresPool { + inner: Arc, + db_name: Arc, + acquire_timeout: Duration, +} + +pub type PgClient = deadpool_postgres::Object; + +impl PostgresPool { + pub async fn new( + url: &str, + pool_cfg: &PoolConfig, + tls_cfg: &TlsConfig, + ) -> Result { + let mut dp = DpConfig::new(); + dp.url = Some(url.to_string()); + dp.manager = Some(ManagerConfig { + recycling_method: RecyclingMethod::Fast, + }); + // `queue_mode` defaults to `Fifo`; we set it implicitly via + // `..Default::default()` because `QueueMode` is not re-exported + // by `deadpool_postgres` (it lives in `deadpool::managed`), and + // adding a direct `deadpool` dep just for the explicit value would + // be needless coupling. + dp.pool = Some(deadpool_postgres::PoolConfig { + max_size: pool_cfg.max as usize, + timeouts: deadpool_postgres::Timeouts { + wait: Some(Duration::from_millis(pool_cfg.acquire_timeout_ms)), + create: Some(Duration::from_millis(pool_cfg.acquire_timeout_ms)), + recycle: Some(Duration::from_millis(pool_cfg.idle_timeout_ms)), + }, + ..Default::default() + }); + // `make_pg_connector` returns None for `mode: disable`; in that + // case we hand `NoTls` to deadpool. Otherwise we use the rustls + // connector which honors the configured chain/hostname policy. + let pool = match make_pg_connector(tls_cfg)? { + Some(connector) => dp.create_pool(Some(deadpool_postgres::Runtime::Tokio1), connector), + None => dp.create_pool(Some(deadpool_postgres::Runtime::Tokio1), NoTls), + } + .map_err(|_| DbError::ConfigError { + message: "postgres pool init failed; check the configured url".into(), + })?; + Ok(Self { + inner: Arc::new(pool), + db_name: Arc::from("(unset)"), + acquire_timeout: Duration::from_millis(pool_cfg.acquire_timeout_ms), + }) + } + + pub fn with_db_name(mut self, name: &str) -> Self { + self.db_name = Arc::from(name); + self + } + + pub async fn acquire(&self) -> Result { + let db_name = self.db_name.to_string(); + let waited_ms = self.acquire_timeout.as_millis() as u64; + self.inner.get().await.map_err(|e| match e { + deadpool_postgres::PoolError::Timeout(_) => DbError::PoolTimeout { + db: db_name.clone(), + waited_ms, + }, + other => { + // `deadpool_postgres::PoolError::Display` chains through + // `tokio_postgres::Error`, which can include the configured + // host and other connection-string fragments. Logging is + // operator-only (stderr); the RPC reply gets a generic + // message so cross-tenant callers don't see infra details. 
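                // Illustration (shape per the redaction test below): the RPC
                // body stays generic, roughly
                //
                //     {"code":"DRIVER_ERROR","driver":"postgres",
                //      "message":"pool connection failed; check server availability"}
                //
                // while host/user detail lives only in this warn! line.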
+ tracing::warn!( + driver = "postgres", + db = %db_name, + error = ?other, + "pool acquire failed" + ); + DbError::DriverError { + driver: "postgres".into(), + code: None, + message: "pool connection failed; check server availability".into(), + failed_index: None, + } + } + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::config::PoolConfig; + + fn url() -> Option { + std::env::var("TEST_POSTGRES_URL").ok() + } + + #[tokio::test(flavor = "multi_thread")] + async fn pool_acquires_a_connection() { + let Some(u) = url() else { + eprintln!("skipping: TEST_POSTGRES_URL not set"); + return; + }; + // Local docker postgres in tests is plaintext; explicitly disable + // TLS so the test passes without a server-side cert. + let tls = crate::config::TlsConfig { + mode: crate::config::TlsMode::Disable, + ca_cert: None, + }; + let pool = PostgresPool::new(&u, &PoolConfig::default(), &tls) + .await + .unwrap(); + let client = pool.acquire().await.unwrap(); + let row = client.query_one("SELECT 1::int", &[]).await.unwrap(); + let v: i32 = row.get(0); + assert_eq!(v, 1); + } + + #[tokio::test(flavor = "multi_thread")] + async fn pg_acquire_failure_message_is_redacted() { + // Hits a port that nothing listens on so deadpool returns + // `PoolError::Backend(tokio_postgres::Error)` — the non-Timeout + // path that previously echoed the underlying error verbatim. + // Asserts the RPC body uses the generic message and contains + // none of the userinfo/host fragments from the URL. + let cfg = PoolConfig { + max: 1, + idle_timeout_ms: 1_000, + acquire_timeout_ms: 500, + }; + let tls = crate::config::TlsConfig { + mode: crate::config::TlsMode::Disable, + ca_cert: None, + }; + let url = "postgres://leaky_user:leaky_pass@127.0.0.1:1/some_db"; + let pool = PostgresPool::new(url, &cfg, &tls).await.unwrap(); + let err = pool.acquire().await.unwrap_err(); + let body = serde_json::to_string(&err).unwrap(); + assert!( + body.contains("pool connection failed"), + "expected generic message; got: {body}" + ); + for forbidden in ["leaky_user", "leaky_pass", "some_db"] { + assert!( + !body.contains(forbidden), + "leaked `{forbidden}` in RPC body: {body}" + ); + } + } +} diff --git a/iii-database/src/pool/sqlite.rs b/iii-database/src/pool/sqlite.rs new file mode 100644 index 00000000..933627a0 --- /dev/null +++ b/iii-database/src/pool/sqlite.rs @@ -0,0 +1,197 @@ +//! SQLite pool wrapping `r2d2_sqlite`. Calls cross `spawn_blocking`. + +use crate::config::PoolConfig; +use crate::error::DbError; +use r2d2::{Pool as R2Pool, PooledConnection}; +use r2d2_sqlite::SqliteConnectionManager; +use std::sync::Arc; +use std::time::Duration; + +#[derive(Clone)] +pub struct SqlitePool { + inner: Arc>, + db_name: Arc, + acquire_timeout: Duration, +} + +/// A held connection from the pool. Closures run synchronously; callers wrap +/// in `tokio::task::spawn_blocking`. 
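///
/// ```ignore
/// // Usage sketch (mirrors the pool tests below):
/// let conn = pool.acquire().await?;
/// let n: i64 = tokio::task::spawn_blocking(move || {
///     conn.with(|c| c.query_row("SELECT 1", [], |row| row.get(0)))
/// })
/// .await??;
/// ```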
+#[derive(Debug)] +pub struct SqliteConn { + conn: PooledConnection, +} + +impl SqliteConn { + pub fn with(&self, f: impl FnOnce(&rusqlite::Connection) -> R) -> R { + f(&self.conn) + } + + pub fn with_mut(&mut self, f: impl FnOnce(&mut rusqlite::Connection) -> R) -> R { + f(&mut self.conn) + } +} + +impl SqlitePool { + pub fn new(url: &str, pool_cfg: &PoolConfig) -> Result { + let path = url.strip_prefix("sqlite:").unwrap_or(url); + let manager = if path == ":memory:" || path.starts_with(":memory:") { + SqliteConnectionManager::memory() + } else { + SqliteConnectionManager::file(path) + }; + let inner = R2Pool::builder() + .max_size(pool_cfg.max) + .idle_timeout(Some(Duration::from_millis(pool_cfg.idle_timeout_ms))) + .build(manager) + .map_err(|e| DbError::ConfigError { + message: format!("sqlite pool init: {e}"), + })?; + Ok(Self { + inner: Arc::new(inner), + db_name: Arc::from("(unset)"), + acquire_timeout: Duration::from_millis(pool_cfg.acquire_timeout_ms), + }) + } + + /// Tag the pool with a config name for error messages. Called by `pool::build`. + pub fn with_db_name(mut self, name: &str) -> Self { + self.db_name = Arc::from(name); + self + } + + pub async fn acquire(&self) -> Result { + let pool = Arc::clone(&self.inner); + let timeout = self.acquire_timeout; + let db_name = self.db_name.to_string(); + let res = tokio::task::spawn_blocking(move || pool.get_timeout(timeout)) + .await + .map_err(|e| DbError::DriverError { + driver: "sqlite".into(), + code: None, + message: format!("spawn_blocking join: {e}"), + failed_index: None, + })?; + match res { + Ok(conn) => Ok(SqliteConn { conn }), + Err(e) => Err(classify_acquire_error(&e.to_string(), db_name, timeout)), + } + } +} + +/// `r2d2::get_timeout` returns one error type (`r2d2::Error`) for both +/// "no connection became free in time" and "the underlying connection +/// manager kept failing to open a connection until we hit the timeout". +/// Collapsing both to `PoolTimeout` masks misconfiguration (bad SQLite +/// path, missing parent dir, locked db) as pool exhaustion. r2d2's +/// `Display` writes `"timed out waiting for connection"` for the pure +/// timeout case and `"timed out waiting for connection: "` when +/// the most recent connection attempt left a failure on the pool's +/// internal `last_error` slot — the `: ` separator is the discriminator. +/// `r2d2::Error::source()` is the default `None` so the reviewer's +/// suggested `source().is_none()` check is a no-op against this crate +/// version (verified against r2d2-0.8.10/src/lib.rs:567-571). +/// +/// Takes the formatted message rather than the `r2d2::Error` directly so +/// the classification logic can be unit-tested without constructing real +/// r2d2 errors (the inner field is private). 
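///
/// Examples (mirroring the unit tests below):
///
/// ```text
/// "timed out waiting for connection"                                → PoolTimeout
/// "timed out waiting for connection: unable to open database file"  → DriverError
/// ```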
+fn classify_acquire_error(display_msg: &str, db: String, timeout: Duration) -> DbError { + if let Some((_, inner)) = display_msg.split_once(": ") { + DbError::DriverError { + driver: "sqlite".into(), + code: None, + message: format!("pool acquire failed: {inner}"), + failed_index: None, + } + } else { + DbError::PoolTimeout { + db, + waited_ms: timeout.as_millis() as u64, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::config::PoolConfig; + + #[tokio::test(flavor = "multi_thread")] + async fn in_memory_pool_acquires_a_connection() { + let pool = SqlitePool::new("sqlite::memory:", &PoolConfig::default()).unwrap(); + let conn = pool.acquire().await.unwrap(); + let result: i64 = tokio::task::spawn_blocking(move || { + conn.with(|c| c.query_row("SELECT 1", [], |row| row.get(0))) + .unwrap() + }) + .await + .unwrap(); + assert_eq!(result, 1); + } + + /// Regression: previously `Err(_) => DbError::PoolTimeout { .. }` + /// collapsed every r2d2 acquire failure into a "pool saturated" error, + /// even when the actual cause was the connection manager failing to + /// open the database (e.g., parent directory missing, permissions, + /// locked file). Operators staring at PoolTimeout would scale the pool + /// up forever while the real fix was a path/perms issue. The classifier + /// now inspects r2d2's Display string to distinguish the two cases. + /// Tested at the helper boundary because r2d2_sqlite opens connections + /// at pool-init (build) time — bad paths fail in `SqlitePool::new` + /// before reaching `acquire()`, so we can't drive the live path with + /// a dummy file. The helper is what carries the bug-fix logic. + #[test] + fn classify_acquire_error_with_inner_reason_returns_driver_error() { + let err = classify_acquire_error( + "timed out waiting for connection: unable to open database file", + "primary".into(), + Duration::from_millis(100), + ); + match err { + DbError::DriverError { + driver, message, .. + } => { + assert_eq!(driver, "sqlite"); + assert!( + message.contains("unable to open database file"), + "got: {message}" + ); + } + other => panic!("expected DriverError, got {other:?}"), + } + } + + #[test] + fn classify_acquire_error_pure_timeout_returns_pool_timeout() { + let err = classify_acquire_error( + "timed out waiting for connection", + "primary".into(), + Duration::from_millis(150), + ); + match err { + DbError::PoolTimeout { db, waited_ms } => { + assert_eq!(db, "primary"); + assert_eq!(waited_ms, 150); + } + other => panic!("expected PoolTimeout, got {other:?}"), + } + } + + #[tokio::test(flavor = "multi_thread")] + async fn pool_timeout_when_max_one_and_held() { + let pool = SqlitePool::new( + "sqlite::memory:", + &PoolConfig { + max: 1, + idle_timeout_ms: 30_000, + acquire_timeout_ms: 50, + }, + ) + .unwrap(); + let _held = pool.acquire().await.unwrap(); + let err = pool.acquire().await.unwrap_err(); + match err { + crate::error::DbError::PoolTimeout { waited_ms, .. } => assert!(waited_ms >= 50), + other => panic!("expected PoolTimeout, got {other:?}"), + } + } +} diff --git a/iii-database/src/pool/tls.rs b/iii-database/src/pool/tls.rs new file mode 100644 index 00000000..0d496a06 --- /dev/null +++ b/iii-database/src/pool/tls.rs @@ -0,0 +1,357 @@ +//! Shared TLS connector construction for postgres and mysql pools. +//! +//! Three operator-visible modes (see `crate::config::TlsMode`): +//! +//! - `disable` → caller passes `NoTls`; this module never runs. +//! - `require` → cert chain validated, hostname NOT verified +//! 
(matches libpq `sslmode=require`). +//! - `verify-full` → cert chain + hostname verified +//! (matches libpq `sslmode=verify-full`). +//! +//! Trust roots come from the OS-provided trust store +//! (`rustls-native-certs`) by default. An optional `ca_cert` PEM file +//! **replaces** the system store with the operator-supplied certs — useful +//! for self-hosted databases with private CAs. +//! +//! `aws_lc_rs` is the rustls crypto provider; it's the modern default and +//! lets us avoid an OpenSSL system dep. `tls12` and TLS 1.3 are both +//! enabled because real-world managed Postgres still negotiates 1.2. + +use crate::config::{TlsConfig, TlsMode}; +use crate::error::DbError; +use rustls::client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier}; +use rustls::client::WebPkiServerVerifier; +use rustls::crypto::{verify_tls12_signature, verify_tls13_signature, CryptoProvider}; +use rustls::pki_types::{CertificateDer, ServerName, UnixTime}; +use rustls::{ClientConfig, DigitallySignedStruct, RootCertStore, SignatureScheme}; +use std::sync::{Arc, Once}; + +/// Install `aws_lc_rs` as the process-level default rustls `CryptoProvider`. +/// +/// rustls 0.23 requires a process default when the dep graph contains more +/// than one provider feature (we have `aws_lc_rs` direct + `aws-lc-rs` +/// transitively via `tokio-postgres-rustls`). Without this, the first +/// rustls user that doesn't take an explicit provider — notably +/// `mysql_async`'s `rustls-tls` path — **panics** on first TLS attempt. +/// The panic happens inside the spawned connection task, where it +/// invisibly crashes the pool's `get_conn()` future and presents to the +/// caller as a multi-second hang rather than an error. +/// +/// Idempotent: `install_default` returns Err on second call; `Once` +/// guarantees we only attempt it once per process. +fn ensure_crypto_provider_installed() { + static INSTALL: Once = Once::new(); + INSTALL.call_once(|| { + // Best-effort: if some other crate beat us to it, that's fine — + // either provider works for our verification path. + let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); + }); +} + +/// Build a `tokio_postgres_rustls::MakeRustlsConnect` for the given TLS +/// config. Returns `Ok(None)` when the operator chose `mode: disable` — +/// callers should fall back to `NoTls` in that case. +pub fn make_pg_connector( + tls: &TlsConfig, +) -> Result, DbError> { + if matches!(tls.mode, TlsMode::Disable) { + return Ok(None); + } + ensure_crypto_provider_installed(); + let client_config = build_client_config(tls)?; + Ok(Some(tokio_postgres_rustls::MakeRustlsConnect::new( + client_config, + ))) +} + +/// Build a `mysql_async::SslOpts` for the given TLS config. Returns +/// `Ok(None)` when `mode: disable` — callers must NOT enable any TLS opts +/// in that case. +pub fn make_mysql_ssl_opts(tls: &TlsConfig) -> Result, DbError> { + use mysql_async::SslOpts; + if matches!(tls.mode, TlsMode::Disable) { + return Ok(None); + } + // Mandatory: mysql_async's rustls-tls feature reaches for the + // process-default provider. Without this install, the first TLS + // attempt panics inside a spawned task and presents as a 30s pool + // hang to the caller. See `ensure_crypto_provider_installed` doc. + ensure_crypto_provider_installed(); + let mut opts = SslOpts::default(); + if let Some(path) = tls.ca_cert.as_deref() { + // mysql_async accepts a PEM file path directly. 
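        // e.g. (hypothetical operator config) `ca_cert: "/etc/iii/db-ca.pem"` in
        // the database's tls block arrives here as Some("/etc/iii/db-ca.pem") and
        // becomes the root-cert source for the MySQL TLS handshake.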
+ opts = opts.with_root_certs(vec![std::path::PathBuf::from(path).into()]); + } + // libpq-aligned semantics: require = chain only (skip hostname); + // verify-full = chain + hostname. + if matches!(tls.mode, TlsMode::Require) { + opts = opts + .with_danger_skip_domain_validation(true) + .with_danger_accept_invalid_certs(false); + } + Ok(Some(opts)) +} + +/// Construct the `rustls::ClientConfig` matching `tls.mode`. Used by the +/// postgres connector; the mysql side has its own knobs and doesn't share +/// this `ClientConfig`. +fn build_client_config(tls: &TlsConfig) -> Result { + let roots = build_root_store(tls.ca_cert.as_deref())?; + let provider = Arc::new(default_provider()); + + match tls.mode { + TlsMode::Disable => { + // Caller should have short-circuited; defensive panic-avoid path. + Err(DbError::ConfigError { + message: "internal: build_client_config called with mode=disable".into(), + }) + } + TlsMode::VerifyFull => { + let verifier = + WebPkiServerVerifier::builder_with_provider(Arc::new(roots), provider.clone()) + .build() + .map_err(|e| DbError::ConfigError { + message: format!("tls verifier build failed: {e}"), + })?; + let cfg = ClientConfig::builder_with_provider(provider) + .with_safe_default_protocol_versions() + .map_err(|e| DbError::ConfigError { + message: format!("tls protocol negotiation failed: {e}"), + })? + .with_webpki_verifier(verifier) + .with_no_client_auth(); + Ok(cfg) + } + TlsMode::Require => { + // Chain-only verifier. Validates the certificate chain against + // the trust store but does NOT verify the cert hostname matches + // the URL host. Same security posture as libpq's + // `sslmode=require`: catches eavesdropping, doesn't catch a + // determined MITM with their own valid-chain cert. + let verifier = Arc::new(ChainOnlyVerifier { + roots: Arc::new(roots), + provider: provider.clone(), + }); + let cfg = ClientConfig::builder_with_provider(provider) + .with_safe_default_protocol_versions() + .map_err(|e| DbError::ConfigError { + message: format!("tls protocol negotiation failed: {e}"), + })? + .dangerous() + .with_custom_certificate_verifier(verifier) + .with_no_client_auth(); + Ok(cfg) + } + } +} + +/// Build a `RootCertStore` from either an operator-supplied PEM file or +/// the OS trust store. The `ca_cert` path **replaces** the native store; +/// it is not additive. This matches the typical operator intent: "trust +/// these certs, nothing else." +pub fn build_root_store(ca_cert: Option<&str>) -> Result { + let mut store = RootCertStore::empty(); + if let Some(path) = ca_cert { + let pem = std::fs::read(path).map_err(|e| DbError::ConfigError { + message: format!("ca_cert read `{path}`: {e}"), + })?; + let mut cursor = std::io::Cursor::new(pem); + let mut added = 0usize; + for item in rustls_pemfile::certs(&mut cursor) { + let cert = item.map_err(|e| DbError::ConfigError { + message: format!("ca_cert parse `{path}`: {e}"), + })?; + store.add(cert).map_err(|e| DbError::ConfigError { + message: format!("ca_cert add `{path}`: {e}"), + })?; + added += 1; + } + if added == 0 { + return Err(DbError::ConfigError { + message: format!("ca_cert `{path}`: no PEM CERTIFICATE blocks found"), + }); + } + return Ok(store); + } + // Native trust store. `load_native_certs` returns errors as a Vec + // alongside the certs — non-fatal (one bad cert in the store + // shouldn't block startup if the rest are usable). 
+ let result = rustls_native_certs::load_native_certs(); + if result.certs.is_empty() { + return Err(DbError::ConfigError { + message: format!( + "no native CA certificates loaded ({} errors); set `tls.ca_cert` to provide them", + result.errors.len() + ), + }); + } + for cert in result.certs { + // Ignore individual `add` failures: bad cert in the OS store + // shouldn't fail the whole worker if other certs work. + let _ = store.add(cert); + } + Ok(store) +} + +fn default_provider() -> CryptoProvider { + rustls::crypto::aws_lc_rs::default_provider() +} + +/// Custom verifier that performs certificate chain validation against the +/// trust store but skips hostname verification. Used for `mode: require` +/// to match libpq's `sslmode=require` semantics. +#[derive(Debug)] +struct ChainOnlyVerifier { + roots: Arc, + provider: Arc, +} + +impl ServerCertVerifier for ChainOnlyVerifier { + fn verify_server_cert( + &self, + end_entity: &CertificateDer<'_>, + intermediates: &[CertificateDer<'_>], + _server_name: &ServerName<'_>, + _ocsp_response: &[u8], + now: UnixTime, + ) -> Result { + let cert = webpki::EndEntityCert::try_from(end_entity) + .map_err(|e| rustls::Error::General(format!("cert parse: {e}")))?; + let trust_anchors: Vec<_> = self.roots.roots.to_vec(); + let revocation: Option> = None; + cert.verify_for_usage( + self.provider.signature_verification_algorithms.all, + &trust_anchors, + intermediates, + now, + webpki::KeyUsage::server_auth(), + revocation, + None, + ) + .map_err(|e| rustls::Error::General(format!("cert chain: {e}")))?; + Ok(ServerCertVerified::assertion()) + } + + fn verify_tls12_signature( + &self, + message: &[u8], + cert: &CertificateDer<'_>, + dss: &DigitallySignedStruct, + ) -> Result { + verify_tls12_signature( + message, + cert, + dss, + &self.provider.signature_verification_algorithms, + ) + } + + fn verify_tls13_signature( + &self, + message: &[u8], + cert: &CertificateDer<'_>, + dss: &DigitallySignedStruct, + ) -> Result { + verify_tls13_signature( + message, + cert, + dss, + &self.provider.signature_verification_algorithms, + ) + } + + fn supported_verify_schemes(&self) -> Vec { + self.provider + .signature_verification_algorithms + .supported_schemes() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::io::Write; + + #[test] + fn build_root_store_loads_native_certs() { + // System trust store should have at least a few CAs on a normal + // dev machine; if this fails the dev environment is unusual. + let store = build_root_store(None).expect("native certs"); + assert!(!store.roots.is_empty(), "native trust store is empty"); + } + + #[test] + fn build_root_store_rejects_missing_file() { + let err = build_root_store(Some("/no/such/path/ca.pem")).unwrap_err(); + let body = serde_json::to_string(&err).unwrap(); + assert!(body.contains("ca_cert read"), "got: {body}"); + } + + #[test] + fn build_root_store_rejects_pem_with_no_certs() { + let mut f = tempfile::NamedTempFile::new().unwrap(); + // Write a PEM-shaped block that's not a certificate. 
+ f.write_all(b"-----BEGIN PRIVATE KEY-----\nMIIBVQ==\n-----END PRIVATE KEY-----\n") + .unwrap(); + let err = build_root_store(Some(f.path().to_str().unwrap())).unwrap_err(); + let body = serde_json::to_string(&err).unwrap(); + assert!(body.contains("no PEM CERTIFICATE"), "got: {body}"); + } + + #[test] + fn make_pg_connector_disable_returns_none() { + let tls = TlsConfig { + mode: TlsMode::Disable, + ca_cert: None, + }; + assert!(make_pg_connector(&tls).unwrap().is_none()); + } + + #[test] + fn make_pg_connector_require_returns_some() { + let tls = TlsConfig { + mode: TlsMode::Require, + ca_cert: None, + }; + let conn = make_pg_connector(&tls).expect("require mode builds"); + assert!(conn.is_some()); + } + + #[test] + fn make_pg_connector_verify_full_returns_some() { + let tls = TlsConfig { + mode: TlsMode::VerifyFull, + ca_cert: None, + }; + let conn = make_pg_connector(&tls).expect("verify-full mode builds"); + assert!(conn.is_some()); + } + + #[test] + fn make_mysql_ssl_opts_disable_returns_none() { + let tls = TlsConfig { + mode: TlsMode::Disable, + ca_cert: None, + }; + assert!(make_mysql_ssl_opts(&tls).unwrap().is_none()); + } + + #[test] + fn make_mysql_ssl_opts_require_skips_domain_validation() { + let tls = TlsConfig { + mode: TlsMode::Require, + ca_cert: None, + }; + let opts = make_mysql_ssl_opts(&tls).unwrap().unwrap(); + // SslOpts doesn't expose getters for its danger-flags directly, + // but Debug output captures the configuration. Use it as a + // proxy for the test contract: `require` mode disables domain + // validation, `verify-full` keeps it on. + let dbg = format!("{opts:?}"); + assert!( + dbg.contains("skip_domain_validation: true") + || dbg.contains("DangerSkipDomainValidation: true"), + "expected domain validation off in require mode; got: {dbg}" + ); + } +} diff --git a/iii-database/src/triggers/handler.rs b/iii-database/src/triggers/handler.rs new file mode 100644 index 00000000..49e3b045 --- /dev/null +++ b/iii-database/src/triggers/handler.rs @@ -0,0 +1,228 @@ +//! TriggerHandler implementations for `iii-database::query-poll` and +//! `iii-database::row-change`. Wired into the worker via +//! `iii.register_trigger_type` from main.rs. +//! +//! The engine routes `iii.registerTrigger({type: "iii-database::query-poll", ...})` +//! calls from any client (e.g. the test harness) back to the worker that +//! registered that trigger type. We spawn a per-instance polling loop on +//! `register_trigger` and cancel it on `unregister_trigger`. + +use crate::handlers::AppState; +use crate::triggers::query_poll::{self, Dispatch, DispatchAck, DispatchedBatch, QueryPollConfig}; +use async_trait::async_trait; +use iii_sdk::{protocol::TriggerRequest, IIIError, TriggerConfig, TriggerHandler, III}; +use std::collections::HashMap; +use std::sync::Arc; +use tokio::sync::Mutex; +use tokio::task::JoinHandle; + +/// Dispatch impl that forwards a polled batch to the engine via `iii.trigger`. +/// The engine routes the invocation to whoever registered `function_id`. 
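+///
+/// Illustrative response contract (a hedged sketch inferred from
+/// `DispatchAck` and the match below, not a normative schema). The invoked
+/// function may return any of:
+///
+/// ```text
+/// null                                      // void return: treated as ack
+/// { "ack": true }                           // ack; cursor advances to the batch max
+/// { "ack": true, "commit_cursor": "42" }    // ack; cursor pinned explicitly
+/// { "ack": false }                          // nack; next tick re-polls the same rows
+/// ```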
+struct EngineDispatch { + iii: III, + function_id: String, +} + +#[async_trait] +impl Dispatch for EngineDispatch { + async fn dispatch(&self, batch: DispatchedBatch) -> Result { + let payload = + serde_json::to_value(&batch).map_err(|e| crate::error::DbError::DriverError { + driver: "engine-dispatch".into(), + code: None, + message: format!("serialize batch: {e}"), + failed_index: None, + })?; + let req = TriggerRequest { + function_id: self.function_id.clone(), + payload, + action: None, + timeout_ms: None, + }; + match self.iii.trigger(req).await { + Ok(value) => { + // Three response shapes, three behaviors: + // 1. null → function returned void = successful completion; + // ack so the cursor advances. + // 2. valid {ack?, commit_cursor?} → use as-is. + // 3. anything else → malformed; fail-safe to ack=false so + // the next tick retries instead of silently + // dropping rows the function never processed. + if value.is_null() { + Ok(DispatchAck { + ack: true, + commit_cursor: None, + }) + } else { + Ok( + serde_json::from_value::(value).unwrap_or(DispatchAck { + ack: false, + commit_cursor: None, + }), + ) + } + } + Err(e) => Err(crate::error::DbError::DriverError { + driver: "engine-dispatch".into(), + code: None, + message: format!("trigger invocation failed: {e}"), + failed_index: None, + }), + } + } +} + +/// `iii-database::query-poll` trigger handler. Spawns a polling loop per +/// registered trigger instance; cancels on unregister. +/// +/// Tasks are tracked twice: +/// - by engine-assigned instance id (for unregister, which receives that id) +/// - by user-supplied `trigger_id` (so a re-registration with the same +/// trigger_id replaces the old task, which is essential for idempotent +/// re-runs of clients whose `unregister` is fire-and-forget across a +/// process exit). +pub struct QueryPollTrigger { + state: AppState, + iii: III, + /// Map of trigger instance id → spawned task handle. Indexed for + /// `unregister_trigger`. + tasks: Arc>>>, + /// Map of user-supplied `trigger_id` → engine-assigned instance id. + /// Used to evict stale tasks when the same `trigger_id` is registered + /// again before the prior `unregister` has reached us. + by_trigger_id: Arc>>, +} + +impl QueryPollTrigger { + pub fn new(state: AppState, iii: III) -> Self { + Self { + state, + iii, + tasks: Arc::new(Mutex::new(HashMap::new())), + by_trigger_id: Arc::new(Mutex::new(HashMap::new())), + } + } +} + +fn iii_err(err: T) -> IIIError { + IIIError::Handler(serde_json::to_string(&err).unwrap_or_else(|_| "{}".into())) +} + +#[async_trait] +impl TriggerHandler for QueryPollTrigger { + async fn register_trigger(&self, config: TriggerConfig) -> Result<(), IIIError> { + let mut cfg: QueryPollConfig = + serde_json::from_value(config.config.clone()).map_err(|e| { + iii_err(crate::error::DbError::ConfigError { + message: format!("query-poll config: {e}"), + }) + })?; + // If the user-provided trigger_id is empty (or absent — serde default), + // fall back to the engine-assigned instance id so the cursor table + // key is stable across restarts of the same instance. + if cfg.trigger_id.is_empty() { + cfg.trigger_id = config.id.clone(); + } + cfg.validate().map_err(iii_err)?; + + let pool = self + .state + .pools + .get(&cfg.db_name) + .ok_or_else(|| { + iii_err(crate::error::DbError::UnknownDb { + db: cfg.db_name.clone(), + }) + })? 
+ .clone(); + + let dispatch: Arc = Arc::new(EngineDispatch { + iii: self.iii.clone(), + function_id: config.function_id.clone(), + }); + + let trigger_id = cfg.trigger_id.clone(); + + // Acquire the registry locks *before* spawning. Previously the spawn + // ran first (outside any lock) and the JoinHandle was inserted into + // `tasks` afterward — a concurrent `unregister_trigger(config.id)` + // could fire in between, find `tasks` empty, return Ok, and the + // newly-spawned poller would leak running forever (orphaned task + // with no abort handle and no entry in either index). + // + // `tokio::spawn` returns synchronously (it just schedules the future + // on the runtime), so holding the lock across it grows the critical + // section by microseconds — well worth closing the TOCTOU race. + // + // Lock order `by_trigger_id` → `tasks` is canonical and matches + // `unregister_trigger` below; reversing would deadlock on concurrent + // calls. + { + let mut by_id = self.by_trigger_id.lock().await; + let mut tasks = self.tasks.lock().await; + + // Evict stale instance for the same user-supplied trigger_id. + let stale = by_id.insert(trigger_id.clone(), config.id.clone()); + if let Some(s) = stale { + if let Some(old_task) = tasks.remove(&s) { + old_task.abort(); + tracing::info!( + trigger_id = %trigger_id, + evicted_instance = %s, + "query-poll evicted stale task on re-registration" + ); + } + } + + // Spawn under the lock so unregister_trigger(config.id) cannot + // interleave between "task spawned" and "task in registry". + let task = tokio::spawn(async move { + query_poll::run_loop(pool, cfg, dispatch).await; + }); + tasks.insert(config.id.clone(), task); + } + + tracing::info!( + trigger_instance = %config.id, + trigger_id = %trigger_id, + function_id = %config.function_id, + "query-poll trigger registered" + ); + Ok(()) + } + + async fn unregister_trigger(&self, config: TriggerConfig) -> Result<(), IIIError> { + // Lock order: `by_trigger_id` → `tasks`, matching `register_trigger`. + // Reverse ordering would deadlock against a concurrent register. + let mut by_id = self.by_trigger_id.lock().await; + let mut tasks = self.tasks.lock().await; + if let Some(task) = tasks.remove(&config.id) { + task.abort(); + tracing::info!(trigger_instance = %config.id, "query-poll trigger unregistered"); + } + // Best-effort: drop any reverse-index entries that point at this + // instance. If a different instance has since taken the trigger_id + // slot, leave that mapping alone. + by_id.retain(|_, instance| instance != &config.id); + Ok(()) + } +} + +/// `iii-database::row-change` trigger handler. v1.0 stubs the streaming decoder +/// pending an upstream tokio-postgres replication API release. `register_trigger` +/// returns Unsupported so callers see a clear error instead of silently never +/// receiving events. +pub struct RowChangeTrigger; + +#[async_trait] +impl TriggerHandler for RowChangeTrigger { + async fn register_trigger(&self, _config: TriggerConfig) -> Result<(), IIIError> { + Err(iii_err(crate::error::DbError::Unsupported { + op: "row-change".into(), + driver: "postgres (pending tokio-postgres replication API release)".into(), + })) + } + async fn unregister_trigger(&self, _config: TriggerConfig) -> Result<(), IIIError> { + Ok(()) + } +} diff --git a/iii-database/src/triggers/mod.rs b/iii-database/src/triggers/mod.rs new file mode 100644 index 00000000..5ca0c457 --- /dev/null +++ b/iii-database/src/triggers/mod.rs @@ -0,0 +1,9 @@ +//! Trigger background tasks. 
Each trigger runs as its own tokio task spawned +//! at worker startup. +//! +//! Only `handler` is part of the public crate surface (consumed by main.rs). +//! `query_poll` and `row_change` are implementation modules. + +pub mod handler; +pub(crate) mod query_poll; +pub(crate) mod row_change; diff --git a/iii-database/src/triggers/query_poll.rs b/iii-database/src/triggers/query_poll.rs new file mode 100644 index 00000000..66d862be --- /dev/null +++ b/iii-database/src/triggers/query_poll.rs @@ -0,0 +1,390 @@ +//! query-poll trigger — cursor-based polling loop. +//! +//! On each tick: +//! 1. Read the cursor from `__iii_cursors` for this `trigger_id`. +//! 2. Run the user SQL with the cursor bound as the single positional parameter. +//! 3. If rows returned: dispatch a batch to the engine. +//! 4. On `ack: true`, write the max of `cursor_column` back to `__iii_cursors`. + +use crate::cursor; +use crate::driver; +use crate::error::DbError; +use crate::pool::Pool; +use crate::value::{JsonParam, RowValue}; +use async_trait::async_trait; +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use std::sync::Arc; +use std::time::Duration; + +#[derive(Debug, Clone, Deserialize)] +pub struct QueryPollConfig { + pub trigger_id: String, + #[serde(rename = "db")] + pub db_name: String, + pub sql: String, + #[serde(default = "default_interval_ms")] + pub interval_ms: u64, + pub cursor_column: String, + #[serde(default = "default_cursor_table")] + pub cursor_table: String, +} + +fn default_interval_ms() -> u64 { + 1000 +} +fn default_cursor_table() -> String { + cursor::DEFAULT_CURSOR_TABLE.to_string() +} + +#[derive(Debug, Clone, Serialize)] +pub struct DispatchedBatch { + pub db: String, + pub rows: Vec>, + pub cursor: Option, + pub polled_at: chrono::DateTime, +} + +#[derive(Debug, Clone, Deserialize, Default)] +pub struct DispatchAck { + #[serde(default = "default_ack")] + pub ack: bool, + #[serde(default)] + pub commit_cursor: Option, +} + +fn default_ack() -> bool { + true +} + +#[async_trait] +pub trait Dispatch: Send + Sync { + async fn dispatch(&self, batch: DispatchedBatch) -> Result; +} + +impl QueryPollConfig { + /// Validate operator-supplied identifiers that get interpolated into + /// SQL strings (currently `cursor_table`). Call this once at config-load + /// time; `run_one_tick` also defends in depth on the chance the config + /// was constructed in code without going through `from_yaml`. + pub fn validate(&self) -> Result<(), DbError> { + crate::config::validate_sql_identifier(&self.cursor_table).map_err(|e| { + DbError::ConfigError { + message: format!("query-poll cursor_table: {e}"), + } + })?; + Ok(()) + } +} + +/// Compute the max cursor value across all rows in a poll batch. +/// +/// Two-pass design keeps the integer hot path zero-alloc: +/// 1. Try to compute `i64::max` over every row's cursor cell. Pure +/// `RowValue::Int`/`BigInt` go straight in; `Text`/`Decimal` are +/// `parse::()`'d (handles stringly-typed integer cursors). +/// 2. If any row's cell can't be coerced to i64, fall back to lexicographic +/// max — but only stringify cells that aren't already textual. +/// +/// On the common integer-cursor case (the example SQL in the README), this +/// allocates exactly one String at the end (the i64::to_string of the max), +/// regardless of batch size. 
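+///
+/// Worked example (mirrors the digit-boundary regression test below): for a
+/// batch whose cursor cells include 9 and 10, the integer pass yields "10";
+/// a lexicographic max would return "9" and replay row 10 on every
+/// subsequent poll.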
+fn compute_cursor_max(rows: &[crate::driver::Row], col_idx: usize) -> Option { + let mut int_max: Option = None; + let mut all_ints = true; + for row in rows { + let Some(v) = row.0.get(col_idx) else { + continue; + }; + let parsed: Option = match v { + RowValue::Int(n) | RowValue::BigInt(n) => Some(*n), + RowValue::Text(s) | RowValue::Decimal(s) => s.parse::().ok(), + _ => None, + }; + match parsed { + Some(n) => int_max = Some(int_max.map_or(n, |m| m.max(n))), + None => { + all_ints = false; + break; + } + } + } + if all_ints { + return int_max.map(|n| n.to_string()); + } + // Fallback: stringly-typed cursor (UUIDs, ISO-8601, etc.). Borrow `&str` + // from `Text`/`Decimal` cells; for variants without a native string repr, + // stringify just-in-time. Cow keeps the per-row branch alloc-free on the + // common path where every row is text. + use std::borrow::Cow; + let mut best: Option> = None; + for row in rows { + let Some(v) = row.0.get(col_idx) else { + continue; + }; + let cur: Cow<'_, str> = match v { + RowValue::Text(s) | RowValue::Decimal(s) => Cow::Borrowed(s.as_str()), + other => Cow::Owned(other.to_json().to_string().trim_matches('"').to_string()), + }; + best = Some(match best { + Some(prev) if prev.as_ref() >= cur.as_ref() => prev, + _ => cur, + }); + } + best.map(Cow::into_owned) +} + +pub async fn run_one_tick( + pool: &Pool, + cfg: &QueryPollConfig, + dispatch: Arc, +) -> Result<(), DbError> { + cfg.validate()?; + cursor::ensure_table(pool, &cfg.cursor_table).await?; + let cur = cursor::read_cursor(pool, &cfg.cursor_table, &cfg.trigger_id).await?; + let cur_param = match cur.as_deref() { + Some(s) => match s.parse::() { + Ok(n) => JsonParam::Int(n), + Err(_) => JsonParam::Text(s.to_string()), + }, + None => JsonParam::Null, + }; + + let result = match pool { + Pool::Sqlite(p) => driver::sqlite::query(p, &cfg.sql, &[cur_param], 30_000).await?, + Pool::Postgres(p) => driver::postgres::query(p, &cfg.sql, &[cur_param], 30_000).await?, + Pool::Mysql(p) => driver::mysql::query(p, &cfg.sql, &[cur_param], 30_000).await?, + }; + + if result.rows.is_empty() { + return Ok(()); + } + + let col_idx = result + .columns + .iter() + .position(|c| c.name == cfg.cursor_column) + .ok_or_else(|| DbError::ConfigError { + message: format!( + "cursor_column `{}` not found in result columns", + cfg.cursor_column + ), + })?; + + let max_cursor: Option = compute_cursor_max(&result.rows, col_idx); + + let json_rows = crate::handlers::query_rows_to_objects(&result.columns, result.rows); + + let batch = DispatchedBatch { + db: cfg.db_name.clone(), + rows: json_rows, + cursor: max_cursor.clone(), + polled_at: chrono::Utc::now(), + }; + + let ack = dispatch.dispatch(batch).await?; + if ack.ack { + let new_cursor = ack.commit_cursor.or(max_cursor); + if let Some(c) = new_cursor { + cursor::write_cursor(pool, &cfg.cursor_table, &cfg.trigger_id, &c).await?; + } + } + Ok(()) +} + +pub async fn run_loop(pool: Pool, cfg: QueryPollConfig, dispatch: Arc) { + let mut interval = tokio::time::interval(Duration::from_millis(cfg.interval_ms)); + // Drop ticks that fire while a previous tick is still running, instead of + // bursting to catch up. Without this, a slow query would queue up multiple + // back-to-back polls and hammer the DB once the slow tick completes. 
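+    // Worked example: with interval_ms = 1000 and a tick whose query takes
+    // ~5s, the default Burst behavior would fire the ~5 missed ticks
+    // back-to-back once the slow tick returns; Skip waits for the next
+    // whole interval instead.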
+ interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + loop { + interval.tick().await; + if let Err(e) = run_one_tick(&pool, &cfg, dispatch.clone()).await { + tracing::warn!(trigger_id = %cfg.trigger_id, error = ?e, "query-poll tick failed"); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::config::PoolConfig; + use crate::pool::{Pool, SqlitePool}; + use std::sync::{Arc, Mutex}; + + #[derive(Default)] + struct CapturingDispatch { + calls: Mutex>, + ack: bool, + } + + #[async_trait] + impl Dispatch for CapturingDispatch { + async fn dispatch( + &self, + batch: DispatchedBatch, + ) -> Result { + self.calls.lock().unwrap().push(batch); + Ok(DispatchAck { + ack: self.ack, + commit_cursor: None, + }) + } + } + + #[tokio::test(flavor = "multi_thread")] + async fn poll_emits_new_rows_and_advances_cursor_on_ack() { + let p = Pool::Sqlite(SqlitePool::new("sqlite::memory:", &PoolConfig::default()).unwrap()); + + // Setup table + let s = match &p { + Pool::Sqlite(s) => s, + _ => unreachable!(), + }; + crate::driver::sqlite::execute( + s, + "CREATE TABLE outbox (id INTEGER PRIMARY KEY, body TEXT)", + &[], + &[], + ) + .await + .unwrap(); + // Use separate inserts because driver::sqlite::execute uses Connection::execute (single statement) + for body in &["a", "b", "c"] { + crate::driver::sqlite::execute( + s, + "INSERT INTO outbox (body) VALUES (?)", + &[JsonParam::Text((*body).into())], + &[], + ) + .await + .unwrap(); + } + + let dispatch = Arc::new(CapturingDispatch { + calls: Default::default(), + ack: true, + }); + let cfg = QueryPollConfig { + trigger_id: "trig-1".into(), + db_name: "primary".into(), + sql: "SELECT id, body FROM outbox WHERE id > COALESCE(?, 0) ORDER BY id LIMIT 50" + .into(), + interval_ms: 25, + cursor_column: "id".into(), + cursor_table: crate::cursor::DEFAULT_CURSOR_TABLE.into(), + }; + + run_one_tick(&p, &cfg, dispatch.clone()).await.unwrap(); + let calls_len = dispatch.calls.lock().unwrap().len(); + assert_eq!(calls_len, 1); + assert_eq!(dispatch.calls.lock().unwrap()[0].rows.len(), 3); + + // Cursor should now be "3". + let v = crate::cursor::read_cursor(&p, &cfg.cursor_table, &cfg.trigger_id) + .await + .unwrap(); + assert_eq!(v.as_deref(), Some("3")); + + // Second tick produces no new rows. 
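+        // The cursor is now "3", so `WHERE id > COALESCE(?, 0)` binds 3 and
+        // matches nothing; the dispatcher must not be called again.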
+ run_one_tick(&p, &cfg, dispatch.clone()).await.unwrap(); + assert_eq!( + dispatch.calls.lock().unwrap().len(), + 1, + "no new rows should produce no dispatch" + ); + } + + #[tokio::test(flavor = "multi_thread")] + async fn poll_does_not_advance_on_nack() { + let p = Pool::Sqlite(SqlitePool::new("sqlite::memory:", &PoolConfig::default()).unwrap()); + let s = match &p { + Pool::Sqlite(s) => s, + _ => unreachable!(), + }; + crate::driver::sqlite::execute( + s, + "CREATE TABLE outbox (id INTEGER PRIMARY KEY, body TEXT)", + &[], + &[], + ) + .await + .unwrap(); + crate::driver::sqlite::execute(s, "INSERT INTO outbox (body) VALUES ('x')", &[], &[]) + .await + .unwrap(); + + let dispatch = Arc::new(CapturingDispatch { + calls: Default::default(), + ack: false, + }); + let cfg = QueryPollConfig { + trigger_id: "trig-x".into(), + db_name: "primary".into(), + sql: "SELECT id, body FROM outbox WHERE id > COALESCE(?, 0) ORDER BY id".into(), + interval_ms: 25, + cursor_column: "id".into(), + cursor_table: crate::cursor::DEFAULT_CURSOR_TABLE.into(), + }; + run_one_tick(&p, &cfg, dispatch.clone()).await.unwrap(); + let v = crate::cursor::read_cursor(&p, &cfg.cursor_table, &cfg.trigger_id) + .await + .unwrap(); + assert!(v.is_none(), "cursor should not advance on nack"); + } + + #[tokio::test(flavor = "multi_thread")] + async fn poll_cursor_advances_numerically_across_digit_boundary() { + // Regression: lexicographic max("9","10") == "9", which would replay + // row 10 forever. The driver must compare cursor values numerically + // when every value parses as i64. + let p = Pool::Sqlite(SqlitePool::new("sqlite::memory:", &PoolConfig::default()).unwrap()); + let s = match &p { + Pool::Sqlite(s) => s, + _ => unreachable!(), + }; + crate::driver::sqlite::execute( + s, + "CREATE TABLE outbox (id INTEGER PRIMARY KEY, body TEXT)", + &[], + &[], + ) + .await + .unwrap(); + // 12 rows so the batch crosses the 9→10 boundary. + for i in 1..=12 { + crate::driver::sqlite::execute( + s, + "INSERT INTO outbox (body) VALUES (?)", + &[JsonParam::Text(format!("body-{i}"))], + &[], + ) + .await + .unwrap(); + } + + let dispatch = Arc::new(CapturingDispatch { + calls: Default::default(), + ack: true, + }); + let cfg = QueryPollConfig { + trigger_id: "trig-num".into(), + db_name: "primary".into(), + sql: "SELECT id, body FROM outbox WHERE id > COALESCE(?, 0) ORDER BY id LIMIT 50" + .into(), + interval_ms: 25, + cursor_column: "id".into(), + cursor_table: crate::cursor::DEFAULT_CURSOR_TABLE.into(), + }; + run_one_tick(&p, &cfg, dispatch.clone()).await.unwrap(); + let v = crate::cursor::read_cursor(&p, &cfg.cursor_table, &cfg.trigger_id) + .await + .unwrap(); + assert_eq!( + v.as_deref(), + Some("12"), + "cursor must be the numeric max (12), not the lexicographic max (9)" + ); + } +} diff --git a/iii-database/src/triggers/row_change.rs b/iii-database/src/triggers/row_change.rs new file mode 100644 index 00000000..ebba14db --- /dev/null +++ b/iii-database/src/triggers/row_change.rs @@ -0,0 +1,411 @@ +//! row-change trigger — Postgres logical replication via pgoutput. +//! +//! v1.0 scope: +//! - Create a publication for the configured tables (idempotent, real impl). +//! - Create a logical replication slot with output plugin `pgoutput` (idempotent, real impl). +//! - Stream events; decode INSERT/UPDATE/DELETE for the configured tables. +//! - Advance LSN only on caller `ack: true`. +//! +//! IMPLEMENTATION STATUS: setup is complete (publication + slot creation are +//! tested). 
The streaming decode loop is a STUB — see `run_loop` and +//! `connect_replication`. The decoder belongs in this same module and consumes +//! `postgres_protocol::message::backend::LogicalReplicationMessage` from a +//! `client.copy_both_simple()` stream over a *replication-mode* connection. +//! +//! The currently-pinned `tokio-postgres = "0.7.17"` does not expose the +//! replication API (the unreleased master branch on github does). When that +//! API ships, replace `connect_replication`'s stub with a real implementation +//! and fill in `run_loop`. Reference: +//! https://github.com/sfackler/rust-postgres/blob/master/tokio-postgres/tests/test/replication.rs + +// Pre-staged setup code (`connect_for_setup`, `ensure_publication_and_slot`, +// `RowChangeConfig::validate`) is exercised by gated integration tests but +// has no production caller until the streaming decode loop ships. Allow +// dead code at the module level so the lib build is clean; the items will +// become live when `run_loop` is wired up. +#![allow(dead_code)] + +use crate::error::DbError; +use serde::{Deserialize, Serialize}; +use tokio_postgres::{Client, Config, NoTls}; + +#[derive(Debug, Clone, Deserialize)] +pub struct RowChangeConfig { + pub trigger_id: String, + #[serde(rename = "db")] + pub db_name: String, + #[serde(default = "default_schema")] + pub schema: String, + pub tables: Vec, + #[serde(default)] + pub slot_name: Option, + #[serde(default)] + pub publication_name: Option, +} + +fn default_schema() -> String { + "public".into() +} + +#[derive(Debug, Clone, Serialize)] +pub struct RowChangeEvent { + pub db: String, + pub schema: String, + pub table: String, + pub op: String, // "INSERT" | "UPDATE" | "DELETE" + pub new: Option, + pub old: Option, + pub committed_at: chrono::DateTime, + pub lsn: String, +} + +pub fn derive_names(cfg: &RowChangeConfig) -> (String, String) { + // Sanitize trigger_id for use in a Postgres identifier (slot/publication + // names accept `[A-Za-z0-9_]` only). Distinct trigger_ids can sanitize to + // the same form (`Orders.v1` and `orders-v1` both become `orders_v1`); if + // we used the sanitized form alone, two registrations would silently + // share one replication slot and consume each other's events. Append an + // FNV-1a-32 hash of the *original* trigger_id so distinct inputs always + // produce distinct outputs while collision-free identifiers stay readable. + // + // Truncate the sanitized prefix at 40 chars so the final name fits inside + // Postgres' 63-byte slot_name limit: `iii_slot_` (9) + sanitized (≤40) + // + `_` + 8 hex chars = 58. + let sanitized: String = cfg + .trigger_id + .chars() + .map(|c| { + if c.is_ascii_alphanumeric() { + c.to_ascii_lowercase() + } else { + '_' + } + }) + .take(40) + .collect(); + let h = fnv1a_32(cfg.trigger_id.as_bytes()); + let slot = cfg + .slot_name + .clone() + .unwrap_or_else(|| format!("iii_slot_{sanitized}_{h:08x}")); + let pubname = cfg + .publication_name + .clone() + .unwrap_or_else(|| format!("iii_pub_{sanitized}_{h:08x}")); + (slot, pubname) +} + +fn fnv1a_32(bytes: &[u8]) -> u32 { + let mut hash: u32 = 0x811c_9dc5; + for &b in bytes { + hash ^= b as u32; + hash = hash.wrapping_mul(0x0100_0193); + } + hash +} + +/// Open a normal (non-replication-mode) connection. Suitable for setup +/// (publication + slot creation). NOT suitable for the streaming decode loop. 
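+///
+/// Illustrative call (a hedged sketch: the URL and TLS values are the e2e
+/// harness defaults, and `cfg` is assumed to be a `RowChangeConfig` built by
+/// the caller):
+///
+/// ```ignore
+/// let tls = crate::config::TlsConfig {
+///     mode: crate::config::TlsMode::Disable,
+///     ca_cert: None,
+/// };
+/// let mut client =
+///     connect_for_setup("postgres://iii:iii@127.0.0.1:55432/iii_test", &tls).await?;
+/// ensure_publication_and_slot(&mut client, &cfg).await?;
+/// ```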
+pub async fn connect_for_setup( + url: &str, + tls_cfg: &crate::config::TlsConfig, +) -> Result { + // Don't echo the underlying parse error — tokio_postgres's error message + // can include the offending URL, which would leak any embedded password + // into logs. Surface a generic message instead. + let cfg: Config = url + .parse() + .map_err(|_: tokio_postgres::Error| DbError::ConfigError { + message: "postgres url parse failed; check the configured url".into(), + })?; + // Same connector as `pool::postgres`. `disable` falls back to NoTls. + let client_and_conn = match crate::pool::tls::make_pg_connector(tls_cfg)? { + Some(connector) => cfg + .connect(connector) + .await + .map(|(c, conn)| (c, futures_util::future::Either::Left(conn))), + None => cfg + .connect(NoTls) + .await + .map(|(c, conn)| (c, futures_util::future::Either::Right(conn))), + } + .map_err(crate::driver::postgres::map_err)?; + let (client, conn) = client_and_conn; + tokio::spawn(async move { + if let Err(e) = conn.await { + tracing::error!(error = ?e, "row-change setup connection terminated"); + } + }); + Ok(client) +} + +/// STUB: open a replication-mode connection. The crates.io `tokio-postgres +/// = 0.7.17` doesn't expose the replication API. When upstream cuts a +/// release with `Config::replication_mode`, replace this stub. +#[allow(dead_code)] +pub async fn connect_replication(_url: &str) -> Result { + Err(DbError::Unsupported { + op: "connect_replication".into(), + driver: "postgres (pending tokio-postgres replication API release)".into(), + }) +} + +impl RowChangeConfig { + /// Validate operator-supplied identifiers that flow into `format!()` + /// SQL strings: `slot_name`, `publication_name`, `schema`, and each + /// element of `tables` (split on `.` for qualified names). Validation + /// uses the strict ASCII identifier rule from `crate::config`. + pub fn validate(&self) -> Result<(), DbError> { + let cfg_err = |e: String| DbError::ConfigError { message: e }; + crate::config::validate_sql_identifier(&self.schema) + .map_err(|e| cfg_err(format!("row-change schema: {e}")))?; + if let Some(slot) = &self.slot_name { + crate::config::validate_sql_identifier(slot) + .map_err(|e| cfg_err(format!("row-change slot_name: {e}")))?; + } + if let Some(pubname) = &self.publication_name { + crate::config::validate_sql_identifier(pubname) + .map_err(|e| cfg_err(format!("row-change publication_name: {e}")))?; + } + for t in &self.tables { + // Qualified names allowed (`schema.table`); validate each part. + for part in t.split('.') { + crate::config::validate_sql_identifier(part) + .map_err(|e| cfg_err(format!("row-change tables entry `{t}`: {e}")))?; + } + } + Ok(()) + } +} + +pub async fn ensure_publication_and_slot( + client: &mut Client, + cfg: &RowChangeConfig, +) -> Result<(), DbError> { + cfg.validate()?; + let (slot, pubname) = derive_names(cfg); + let pub_exists = client + .query_one( + "SELECT EXISTS(SELECT 1 FROM pg_publication WHERE pubname = $1) AS ex", + &[&pubname], + ) + .await + .map_err(crate::driver::postgres::map_err)? 
+ .get::<_, bool>("ex"); + + if !pub_exists { + let qualified: Vec = cfg + .tables + .iter() + .map(|t| { + if t.contains('.') { + t.clone() + } else { + format!("{}.{t}", cfg.schema) + } + }) + .collect(); + let stmt = format!( + "CREATE PUBLICATION {pubname} FOR TABLE {}", + qualified.join(", ") + ); + client + .simple_query(&stmt) + .await + .map_err(crate::driver::postgres::map_err)?; + } + + let slot_exists = client + .query_one( + "SELECT EXISTS(SELECT 1 FROM pg_replication_slots WHERE slot_name = $1) AS ex", + &[&slot], + ) + .await + .map_err(crate::driver::postgres::map_err)? + .get::<_, bool>("ex"); + + if !slot_exists { + let stmt = + format!("SELECT * FROM pg_create_logical_replication_slot('{slot}', 'pgoutput')"); + match client.simple_query(&stmt).await { + Ok(_) => { + tracing::info!(slot = %slot, publication = %pubname, "created replication artifacts"); + } + Err(e) => { + if e.to_string().contains("already exists") { + return Err(DbError::ReplicationSlotExists { slot }); + } else { + return Err(crate::driver::postgres::map_err(e)); + } + } + } + } + Ok(()) +} + +#[async_trait::async_trait] +pub trait QueryPollLikeDispatcher: Send + Sync { + async fn dispatch(&self, ev: RowChangeEvent) -> Result; +} + +/// STUB: streaming decoder loop. Requires a replication-mode `Client` from +/// `connect_replication` (also currently a stub). When the upstream replication +/// API ships, fill this in using `client.copy_both_simple` and +/// `postgres_protocol::message::backend::LogicalReplicationMessage::parse`. +pub async fn run_loop( + _client: Client, + _cfg: RowChangeConfig, + _dispatch: std::sync::Arc, +) -> Result<(), DbError> { + tracing::warn!( + "row-change run_loop is a stub — pgoutput decode requires the unreleased \ + tokio-postgres replication API. See module-level docstring for status." + ); + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + fn url() -> Option { + std::env::var("TEST_POSTGRES_URL").ok() + } + + #[tokio::test(flavor = "multi_thread")] + async fn slot_and_publication_names_use_sanitized_trigger_id() { + let cfg = RowChangeConfig { + trigger_id: "my:trigger.id-with/funky chars".into(), + db_name: "primary".into(), + schema: "public".into(), + tables: vec!["orders".into()], + slot_name: None, + publication_name: None, + }; + let (slot, pubname) = derive_names(&cfg); + assert!(slot.starts_with("iii_slot_")); + assert!(pubname.starts_with("iii_pub_")); + // No characters that aren't [a-z0-9_]. + assert!(slot + .chars() + .all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '_')); + assert!(pubname + .chars() + .all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '_')); + // 63-byte slot_name limit on postgres. + assert!(slot.len() <= 63, "slot name too long: {} bytes", slot.len()); + assert!( + pubname.len() <= 63, + "publication name too long: {} bytes", + pubname.len() + ); + } + + #[tokio::test(flavor = "multi_thread")] + async fn distinct_trigger_ids_produce_distinct_slot_names() { + // Regression: prior versions sanitized trigger_id to lower-alnum-with- + // underscores; `Orders.v1`, `orders-v1`, `orders_v1`, `orders v1` all + // collapsed to `orders_v1` and silently shared one replication slot, + // letting one trigger consume another's events. Distinct trigger_ids + // must produce distinct slot/publication names. 
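+        // e.g. `Orders.v1` and `orders-v1` both sanitize to `orders_v1`, but
+        // derive to `iii_slot_orders_v1_<hash>` with different 8-hex FNV-1a
+        // suffixes, so the slots no longer collide.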
+ let mk = |id: &str| RowChangeConfig { + trigger_id: id.into(), + db_name: "primary".into(), + schema: "public".into(), + tables: vec!["orders".into()], + slot_name: None, + publication_name: None, + }; + let ids = [ + "Orders.v1", + "orders-v1", + "orders_v1", + "orders v1", + "ORDERS_V1", + ]; + let mut slots = std::collections::HashSet::new(); + let mut pubs = std::collections::HashSet::new(); + for id in ids { + let (s, p) = derive_names(&mk(id)); + assert!(slots.insert(s.clone()), "slot collision on `{id}`: {s}"); + assert!(pubs.insert(p.clone()), "pub collision on `{id}`: {p}"); + } + } + + #[tokio::test(flavor = "multi_thread")] + async fn long_trigger_id_truncates_to_postgres_limit() { + // Even a pathological trigger_id must produce a valid postgres slot + // name (≤ 63 bytes). Hash suffix preserves uniqueness across the + // truncation boundary. + let cfg = RowChangeConfig { + trigger_id: "a".repeat(100), + db_name: "primary".into(), + schema: "public".into(), + tables: vec!["orders".into()], + slot_name: None, + publication_name: None, + }; + let (slot, pubname) = derive_names(&cfg); + assert!(slot.len() <= 63, "slot too long: {}", slot.len()); + assert!( + pubname.len() <= 63, + "publication too long: {}", + pubname.len() + ); + } + + #[tokio::test(flavor = "multi_thread")] + async fn explicit_slot_and_publication_names_bypass_derivation() { + let cfg = RowChangeConfig { + trigger_id: "anything".into(), + db_name: "primary".into(), + schema: "public".into(), + tables: vec!["orders".into()], + slot_name: Some("custom_slot".into()), + publication_name: Some("custom_pub".into()), + }; + let (slot, pubname) = derive_names(&cfg); + assert_eq!(slot, "custom_slot"); + assert_eq!(pubname, "custom_pub"); + } + + #[tokio::test(flavor = "multi_thread")] + async fn create_slot_and_publication_idempotent() { + let Some(u) = url() else { return }; + let cfg = RowChangeConfig { + trigger_id: "test_idem".into(), + db_name: "primary".into(), + schema: "public".into(), + tables: vec!["public.test_idem_t".into()], + slot_name: Some("iii_slot_test_idem".into()), + publication_name: Some("iii_pub_test_idem".into()), + }; + let tls = crate::config::TlsConfig { + mode: crate::config::TlsMode::Disable, + ca_cert: None, + }; + let mut client = connect_for_setup(&u, &tls).await.unwrap(); + // Cleanup from prior run + let _ = client + .simple_query("SELECT pg_drop_replication_slot('iii_slot_test_idem')") + .await; + let _ = client + .simple_query("DROP PUBLICATION IF EXISTS iii_pub_test_idem") + .await; + let _ = client + .simple_query("DROP TABLE IF EXISTS public.test_idem_t") + .await; + client + .simple_query("CREATE TABLE public.test_idem_t (id SERIAL PRIMARY KEY, n INT)") + .await + .unwrap(); + + ensure_publication_and_slot(&mut client, &cfg) + .await + .unwrap(); + // Running again is idempotent. + ensure_publication_and_slot(&mut client, &cfg) + .await + .unwrap(); + } +} diff --git a/iii-database/src/value.rs b/iii-database/src/value.rs new file mode 100644 index 00000000..89e79591 --- /dev/null +++ b/iii-database/src/value.rs @@ -0,0 +1,296 @@ +//! JSON ↔ SQL value coercion shared across drivers. +//! +//! `JsonParam` is the driver-agnostic representation of a parameter sent in +//! by a caller. Each driver translates `JsonParam` to its native bind type. +//! +//! `RowValue` is the driver-agnostic representation of a returned cell, which +//! `to_json` flattens back to `serde_json::Value` for transport. 
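+//!
+//! Illustrative round trip (a hedged summary of the mappings below): caller
+//! params of JSON `null` / `true` / `42` / `2.5` / `"x"` / `{...}` bind as
+//! `Null` / `Bool(true)` / `Int(42)` / `Float(2.5)` / `Text("x")` / `Json`;
+//! returned cells surface BIGINT as a JSON string (keeping precision past
+//! 2^53 in JS), BYTEA/BLOB as base64, and timestamps as RFC 3339 strings
+//! such as `"2026-04-29T12:00:00Z"`.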
+ +use crate::error::DbError; +use base64::{engine::general_purpose::STANDARD as B64, Engine as _}; +use chrono::{DateTime, Utc}; +use serde_json::Value; + +/// Driver-agnostic input parameter. Each driver translates this to its +/// native bind type. +#[derive(Debug, Clone, PartialEq)] +pub enum JsonParam { + Null, + Bool(bool), + Int(i64), + Float(f64), + Text(String), + Json(Value), +} + +impl JsonParam { + pub fn from_json(v: &Value) -> Result { + Ok(match v { + Value::Null => JsonParam::Null, + Value::Bool(b) => JsonParam::Bool(*b), + Value::Number(n) => { + if let Some(i) = n.as_i64() { + JsonParam::Int(i) + } else if let Some(f) = n.as_f64() { + JsonParam::Float(f) + } else { + return Err(DbError::InvalidParam { + index: 0, + reason: format!("number {n} not representable as i64 or f64"), + }); + } + } + Value::String(s) => JsonParam::Text(s.clone()), + Value::Array(_) | Value::Object(_) => JsonParam::Json(v.clone()), + }) + } + + /// Convenience: coerce a slice of JSON values, tagging each error with its index. + pub fn from_json_slice(values: &[Value]) -> Result, DbError> { + values + .iter() + .enumerate() + .map(|(i, v)| { + Self::from_json(v).map_err(|e| match e { + DbError::InvalidParam { reason, .. } => { + DbError::InvalidParam { index: i, reason } + } + other => other, + }) + }) + .collect() + } +} + +/// Driver-agnostic returned cell. Each driver maps its row types into this +/// enum; `to_json` flattens it for transport. +#[derive(Debug, Clone, PartialEq)] +pub enum RowValue { + Null, + Bool(bool), + Int(i64), + /// 64-bit identities. Serialized as JSON string to preserve precision in JS. + BigInt(i64), + Float(f64), + Text(String), + Bytes(Vec), + Timestamp(DateTime), + /// Numeric / decimal values preserved as string. + Decimal(String), + /// JSON / JSONB columns surfaced as a JSON value. + Json(Value), +} + +impl RowValue { + pub fn to_json(&self) -> Value { + match self { + RowValue::Null => Value::Null, + RowValue::Bool(b) => Value::Bool(*b), + RowValue::Int(i) => Value::from(*i), + RowValue::BigInt(i) => Value::String(i.to_string()), + RowValue::Float(f) => serde_json::Number::from_f64(*f) + .map(Value::Number) + .unwrap_or(Value::Null), + RowValue::Text(s) => Value::String(s.clone()), + RowValue::Bytes(b) => Value::String(B64.encode(b)), + RowValue::Timestamp(t) => { + Value::String(t.to_rfc3339_opts(chrono::SecondsFormat::Secs, true)) + } + RowValue::Decimal(s) => Value::String(s.clone()), + RowValue::Json(v) => v.clone(), + } + } + + /// Consuming variant of `to_json` that moves heap-allocated payloads + /// (`Text`, `Decimal`, `Json`) instead of cloning. On row-heavy SELECTs + /// this eliminates one allocation per text/json cell. 
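+    ///
+    /// e.g. a SELECT returning 10_000 TEXT cells would clone 10_000 `String`s
+    /// through `to_json`; `into_json` moves each one into the output `Value`
+    /// instead.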
+ pub fn into_json(self) -> Value { + match self { + RowValue::Null => Value::Null, + RowValue::Bool(b) => Value::Bool(b), + RowValue::Int(i) => Value::from(i), + RowValue::BigInt(i) => Value::String(i.to_string()), + RowValue::Float(f) => serde_json::Number::from_f64(f) + .map(Value::Number) + .unwrap_or(Value::Null), + RowValue::Text(s) => Value::String(s), + RowValue::Bytes(b) => Value::String(B64.encode(&b)), + RowValue::Timestamp(t) => { + Value::String(t.to_rfc3339_opts(chrono::SecondsFormat::Secs, true)) + } + RowValue::Decimal(s) => Value::String(s), + RowValue::Json(v) => v, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + #[test] + fn json_null_becomes_null_param() { + assert_eq!(JsonParam::from_json(&json!(null)).unwrap(), JsonParam::Null); + } + + #[test] + fn json_int_becomes_int_param() { + assert_eq!( + JsonParam::from_json(&json!(42)).unwrap(), + JsonParam::Int(42) + ); + } + + #[test] + fn json_negative_int_preserves_sign() { + assert_eq!( + JsonParam::from_json(&json!(-7)).unwrap(), + JsonParam::Int(-7) + ); + } + + #[test] + fn json_float_becomes_float_param() { + // 2.5 chosen as a clean fraction that avoids clippy::approx_constant + // (which flags numbers like 3.14 / 2.71 as imprecise math constants). + match JsonParam::from_json(&json!(2.5)).unwrap() { + JsonParam::Float(f) => assert!((f - 2.5).abs() < 1e-9), + other => panic!("expected Float, got {other:?}"), + } + } + + #[test] + fn json_bool_becomes_bool_param() { + assert_eq!( + JsonParam::from_json(&json!(true)).unwrap(), + JsonParam::Bool(true) + ); + assert_eq!( + JsonParam::from_json(&json!(false)).unwrap(), + JsonParam::Bool(false) + ); + } + + #[test] + fn json_string_becomes_text_param() { + assert_eq!( + JsonParam::from_json(&json!("hello")).unwrap(), + JsonParam::Text("hello".into()) + ); + } + + #[test] + fn json_object_becomes_json_param() { + let v = json!({"a": 1}); + match JsonParam::from_json(&v).unwrap() { + JsonParam::Json(inner) => assert_eq!(inner, v), + other => panic!("expected Json, got {other:?}"), + } + } + + #[test] + fn json_array_becomes_json_param() { + let v = json!([1, 2, 3]); + match JsonParam::from_json(&v).unwrap() { + JsonParam::Json(inner) => assert_eq!(inner, v), + other => panic!("expected Json, got {other:?}"), + } + } + + #[test] + fn row_value_int_to_json() { + assert_eq!(RowValue::Int(42).to_json(), json!(42)); + } + + #[test] + fn row_value_bigint_to_json_is_string() { + // BIGINT identities serialize as string to preserve precision in JS clients. 
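+        // 9_007_199_254_740_993 is 2^53 + 1, the first integer a JS `number`
+        // cannot represent exactly; returning it as a string keeps it intact.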
+ assert_eq!( + RowValue::BigInt(9_007_199_254_740_993).to_json(), + json!("9007199254740993") + ); + } + + #[test] + fn row_value_bytes_to_json_is_base64() { + let v = RowValue::Bytes(vec![0xff, 0x00, 0x10]); + assert_eq!(v.to_json(), json!("/wAQ")); + } + + #[test] + fn row_value_bytes_base64_includes_padding() { + // 1 byte → 2 base64 chars + "==" padding + assert_eq!(RowValue::Bytes(vec![0xff]).to_json(), json!("/w==")); + // 2 bytes → 3 base64 chars + "=" padding + assert_eq!(RowValue::Bytes(vec![0xff, 0x00]).to_json(), json!("/wA=")); + // 3 bytes → 4 base64 chars, no padding (already covered by row_value_bytes_to_json_is_base64) + } + + #[test] + fn row_value_decimal_to_json_is_string() { + let v = RowValue::Decimal("123.456000".into()); + assert_eq!(v.to_json(), json!("123.456000")); + } + + #[test] + fn row_value_timestamp_to_json_is_iso8601() { + use chrono::{TimeZone, Utc}; + let ts = Utc.with_ymd_and_hms(2026, 4, 29, 12, 0, 0).unwrap(); + let v = RowValue::Timestamp(ts); + assert_eq!(v.to_json(), json!("2026-04-29T12:00:00Z")); + } + + #[test] + fn row_value_json_passes_through() { + let v = RowValue::Json(json!({"k": "v"})); + assert_eq!(v.to_json(), json!({"k": "v"})); + } + + #[test] + fn row_value_null_to_json() { + assert_eq!(RowValue::Null.to_json(), json!(null)); + } + + #[test] + fn row_value_into_json_text_moves_string() { + // Smoke: same value as to_json but the consuming variant. + assert_eq!(RowValue::Text("hello".into()).into_json(), json!("hello")); + } + + #[test] + fn row_value_into_json_json_moves_value() { + let inner = json!({"k": [1, 2, 3]}); + assert_eq!(RowValue::Json(inner.clone()).into_json(), inner); + } + + #[test] + fn row_value_into_json_decimal_moves_string() { + assert_eq!( + RowValue::Decimal("123.456".into()).into_json(), + json!("123.456") + ); + } + + #[test] + fn row_value_into_json_matches_to_json_across_variants() { + // Equivalence sweep: `into_json` must produce the exact same JSON as + // `to_json` for every variant; the only difference is allocation. + use chrono::{TimeZone, Utc}; + let cases: Vec = vec![ + RowValue::Null, + RowValue::Bool(true), + RowValue::Int(-7), + RowValue::BigInt(9_007_199_254_740_993), + RowValue::Float(2.5), + RowValue::Text("x".into()), + RowValue::Bytes(vec![0xff, 0x00]), + RowValue::Timestamp(Utc.with_ymd_and_hms(2026, 4, 29, 12, 0, 0).unwrap()), + RowValue::Decimal("1.0".into()), + RowValue::Json(json!([1, "two", null])), + ]; + for v in cases { + assert_eq!(v.clone().into_json(), v.to_json()); + } + } +} diff --git a/iii-database/tests/e2e/.gitignore b/iii-database/tests/e2e/.gitignore new file mode 100644 index 00000000..468b1078 --- /dev/null +++ b/iii-database/tests/e2e/.gitignore @@ -0,0 +1,8 @@ +data/* +!data/.gitkeep +reports/* +!reports/.gitkeep +node_modules/ +dist/ +*.log +.DS_Store diff --git a/iii-database/tests/e2e/README.md b/iii-database/tests/e2e/README.md new file mode 100644 index 00000000..3d0467fd --- /dev/null +++ b/iii-database/tests/e2e/README.md @@ -0,0 +1,80 @@ +# iii-database worker — end-to-end harness + +Self-asserting smoke harness for the `iii-database` worker. Validates the 5 +core RPC functions, the `query-poll` trigger, and the `row-change` slot/ +publication derivation contract against real **SQLite**, **PostgreSQL 16**, +and **MySQL 8.4** with one command. + +Runs locally and in CI (`.github/workflows/iii-database-e2e.yml`). 
+ +## Prerequisites + +- Docker (for the postgres + mysql containers) +- Rust toolchain (`cargo` on `$PATH`) +- Node.js 20+ (`npm` on `$PATH`) +- The iii engine on `$PATH`. Install with: + ```sh + curl -fsSL https://install.iii.dev/iii/main/install.sh | sh + ``` + The script drops the binary at `$HOME/.local/bin/iii` (override with + `BIN_DIR=...` or `PREFIX=...`). + +## Run + +```sh +./run-tests.sh +``` + +Builds the worker (`cargo build --release --bin iii-database`), brings up +the docker stack with `wal_level=logical`, starts the engine, and runs ~90 +assertions across all 3 drivers. Exits 0 on PASS, 1 on any FAIL. + +## Flags + +| Flag | Effect | +|---|---| +| `--keep` | Leave docker stack up after the run for debugging | +| `--no-build` | Skip the cargo build step | +| `--filter=` | Run only one driver | + +## Env overrides + +The script auto-detects paths relative to its own location, but each can be +overridden: + +| Var | Default | Purpose | +|---|---|---| +| `WORKER_SRC` | `../..` (the `iii-database/` crate) | Where to `cargo build` | +| `III_BIN` | `$(command -v iii)` then `$HOME/.local/bin/iii` | Engine binary | +| `WORKER_BIN_TARGET` | `$WORKER_SRC/target/release/iii-database` | Built worker | +| `WORKER_BIN_LINK` | `$HOME/.iii/workers/iii-database` | Symlink the engine reads | +| `HARNESS_TIMEOUT` | `180` | Seconds to wait for the test sentinel | +| `HEALTH_TIMEOUT` | `60` | Seconds to wait for db healthchecks | + +## Layout + +| File | Role | +|---|---| +| `run-tests.sh` | Orchestrator | +| `docker-compose.yml` | Postgres (wal_level=logical) + MySQL with healthchecks | +| `config.yaml` | Engine config (queue, observability, iii-database, harness) | +| `workers/harness/` | TypeScript smoke-test worker (runs as a host process) | +| `reports/report.json` | Per-case results (latest run) | + +## CI + +The harness runs in `.github/workflows/iii-database-e2e.yml` on any PR +that touches `iii-database/**`. The workflow installs the engine via the install +script (always tracks `main`, no version pin), builds the worker, brings up +the same docker compose stack used locally, and shells out to +`./run-tests.sh`. + +## Troubleshooting + +- **Port already in use** (55432 or 53306): something else is bound to the + test ports. Stop it, or edit `docker-compose.yml`. +- **`worker binary missing`**: run without `--no-build` once. +- **`iii engine binary missing`**: install with the script above. +- **Sentinel timeout**: tail `reports/harness-*.log` for the harness output. +- **Docker daemon not running**: start Docker Desktop (or `colima start`) + and re-run. diff --git a/iii-database/tests/e2e/config.yaml b/iii-database/tests/e2e/config.yaml new file mode 100644 index 00000000..dada83a0 --- /dev/null +++ b/iii-database/tests/e2e/config.yaml @@ -0,0 +1,60 @@ +# iii engine configuration — passed via `iii -c config.yaml`. +# +# The harness TS worker is NOT registered here. It runs as a plain host +# node process launched from run-tests.sh and connects to the engine via +# WebSocket like any external client — sidesteps the libkrun-VM-based +# managed-worker setup, which is overkill for a test harness. +# +# The iii-database worker config is inlined under its worker entry. The +# engine serializes this `config:` value to /tmp/iii-database-config.yaml +# and threads `--config ` through `iii-worker start` to the spawned +# binary (see engine/src/workers/registry_worker.rs::spawn and +# crates/iii-worker/src/cli/managed.rs::start_binary_worker on branch +# feat/registry-worker-config-delivery). 
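+# Illustrative client-side registration against the databases defined below
+# (a hedged sketch; the exact SDK payload shape is defined by the harness and
+# the engine, not by this file):
+#
+#   iii.registerTrigger({
+#     type: "iii-database::query-poll",
+#     config: { trigger_id: "outbox", db: "sqlite_db", cursor_column: "id",
+#               sql: "SELECT id, body FROM outbox WHERE id > COALESCE(?, 0) ORDER BY id" }
+#   })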
+ +workers: + - name: iii-queue + config: + adapter: + name: builtin + + - name: iii-observability + config: + enabled: true + service_name: iii-database-tests + exporter: memory + logs_console_output: true + sampling_ratio: 1.0 + + - name: iii-database + config: + databases: + sqlite_db: + url: sqlite:./data/iii.db + pool: + max: 10 + idle_timeout_ms: 30000 + acquire_timeout_ms: 5000 + pg_db: + url: postgres://iii:iii@127.0.0.1:55432/iii_test + pool: + max: 10 + idle_timeout_ms: 30000 + acquire_timeout_ms: 5000 + # Local docker postgres uses a self-signed cert that doesn't + # chain to any system CA. The worker's `tls.mode` defaults to + # `require` (chain-validated) — opt out for the test harness. + tls: + mode: disable + mysql_db: + url: mysql://iii:iii@127.0.0.1:53306/iii_test + pool: + max: 10 + idle_timeout_ms: 30000 + acquire_timeout_ms: 5000 + # Local docker mysql:8.4 ships an auto-generated self-signed cert + # that doesn't chain to any system CA. Without `tls.mode: disable` + # the worker's require-mode rustls verifier rejects it and every + # mysql RPC fails with DRIVER_ERROR. + tls: + mode: disable diff --git a/iii-database/tests/e2e/data/.gitkeep b/iii-database/tests/e2e/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/iii-database/tests/e2e/docker-compose.yml b/iii-database/tests/e2e/docker-compose.yml new file mode 100644 index 00000000..0ac998b2 --- /dev/null +++ b/iii-database/tests/e2e/docker-compose.yml @@ -0,0 +1,45 @@ +services: + postgres: + image: postgres:16-alpine + environment: + POSTGRES_USER: iii + POSTGRES_PASSWORD: iii + POSTGRES_DB: iii_test + command: + - postgres + - -c + - wal_level=logical + - -c + - max_wal_senders=4 + - -c + - max_replication_slots=4 + ports: + - "55432:5432" + volumes: + - pg_data:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U iii -d iii_test"] + interval: 2s + timeout: 3s + retries: 30 + + mysql: + image: mysql:8.4 + environment: + MYSQL_ROOT_PASSWORD: iii + MYSQL_DATABASE: iii_test + MYSQL_USER: iii + MYSQL_PASSWORD: iii + ports: + - "53306:3306" + volumes: + - mysql_data:/var/lib/mysql + healthcheck: + test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-uiii", "-piii"] + interval: 2s + timeout: 3s + retries: 30 + +volumes: + pg_data: + mysql_data: diff --git a/iii-database/tests/e2e/reports/.gitkeep b/iii-database/tests/e2e/reports/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/iii-database/tests/e2e/run-tests.sh b/iii-database/tests/e2e/run-tests.sh new file mode 100755 index 00000000..e5601372 --- /dev/null +++ b/iii-database/tests/e2e/run-tests.sh @@ -0,0 +1,230 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# Path overrides (set in CI; defaults assume the harness lives at +# iii-database/tests/e2e/ inside the workers repo and the iii engine is on +# $PATH or at $HOME/.local/bin/iii — which is where the install script +# `curl -fsSL https://install.iii.dev/iii/main/install.sh | sh` puts it). +WORKER_SRC="${WORKER_SRC:-$(cd "$ROOT_DIR/../.." 
&& pwd)}" +III_BIN="${III_BIN:-$(command -v iii 2>/dev/null || echo "$HOME/.local/bin/iii")}" +WORKER_BIN_TARGET="${WORKER_BIN_TARGET:-$WORKER_SRC/target/release/iii-database}" +WORKER_BIN_LINK="${WORKER_BIN_LINK:-$HOME/.iii/workers/iii-database}" + +REPORT_PATH="$ROOT_DIR/reports/report.json" +TS=$(date +%Y%m%d-%H%M%S) +ENGINE_LOG="$ROOT_DIR/reports/engine-$TS.log" +HARNESS_LOG="$ROOT_DIR/reports/harness-$TS.log" +SENTINEL_TIMEOUT="${HARNESS_TIMEOUT:-180}" +HEALTH_TIMEOUT="${HEALTH_TIMEOUT:-60}" + +KEEP=0 +NO_BUILD=0 +WITH_CARGO_TEST=0 +FILTER="" + +for arg in "$@"; do + case "$arg" in + --keep) KEEP=1 ;; + --no-build) NO_BUILD=1 ;; + --with-cargo-test) WITH_CARGO_TEST=1 ;; + --filter=*) FILTER="${arg#--filter=}" ;; + -h|--help) + cat <] + + --keep Leave docker compose stack running after the run. + --no-build Skip cargo build of the iii-database worker. + --with-cargo-test Run \`cargo test --all-features\` after compose is healthy + with TEST_POSTGRES_URL and TEST_MYSQL_URL pointing at the + docker stack — exercises gated driver/pool tests with + real DBs. CI uses this; local dev usually doesn't need it. + --filter=KEY Run only one driver (default: all 3). + +Env overrides: + WORKER_SRC Path to the database worker crate (default: ../..). + III_BIN Path to the iii engine binary (default: \$(command -v iii) or \$HOME/.local/bin/iii). + WORKER_BIN_TARGET Path to the built worker binary (default: \$WORKER_SRC/target/release/iii-database). + WORKER_BIN_LINK Path to the symlink the engine reads (default: \$HOME/.iii/workers/iii-database). + HARNESS_TIMEOUT Seconds to wait for the harness sentinel (default: 180). + HEALTH_TIMEOUT Seconds to wait for postgres/mysql healthchecks (default: 60). +EOF + exit 0 + ;; + *) echo "unknown arg: $arg" >&2; exit 2 ;; + esac +done + +ENGINE_PID="" +HARNESS_PID="" +cleanup() { + local code=$? + if [[ -n "$HARNESS_PID" ]] && kill -0 "$HARNESS_PID" 2>/dev/null; then + kill "$HARNESS_PID" 2>/dev/null || true + wait "$HARNESS_PID" 2>/dev/null || true + fi + if [[ -n "$ENGINE_PID" ]] && kill -0 "$ENGINE_PID" 2>/dev/null; then + kill "$ENGINE_PID" 2>/dev/null || true + wait "$ENGINE_PID" 2>/dev/null || true + fi + if [[ "$KEEP" -eq 0 ]]; then + (cd "$ROOT_DIR" && docker compose down -v >/dev/null 2>&1) || true + fi + exit "$code" +} +trap cleanup EXIT INT TERM + +mkdir -p "$ROOT_DIR/reports" "$ROOT_DIR/data" "$(dirname "$WORKER_BIN_LINK")" + +# 1. Ensure binary symlink at $WORKER_BIN_LINK +if [[ ! -L "$WORKER_BIN_LINK" || "$(readlink "$WORKER_BIN_LINK")" != "$WORKER_BIN_TARGET" ]]; then + ln -sfn "$WORKER_BIN_TARGET" "$WORKER_BIN_LINK" + echo "[run-tests] symlink: $WORKER_BIN_LINK -> $WORKER_BIN_TARGET" +fi + +# 2. Build the worker (unless --no-build) +if [[ "$NO_BUILD" -eq 0 ]]; then + echo "[run-tests] cargo build --release (iii-database worker)" + (cd "$WORKER_SRC" && cargo build --release --bin iii-database) +fi +if [[ ! -x "$WORKER_BIN_TARGET" ]]; then + echo "[run-tests] FATAL: worker binary missing at $WORKER_BIN_TARGET — run without --no-build" >&2 + exit 1 +fi + +# 3. Verify engine binary +if [[ ! -x "$III_BIN" ]]; then + echo "[run-tests] FATAL: iii engine binary missing at $III_BIN" >&2 + echo "[run-tests] install with: curl -fsSL https://install.iii.dev/iii/main/install.sh | sh" >&2 + exit 1 +fi + +# 4. Bring up postgres + mysql and wait for healthchecks via compose's --wait +# (compose v2 native; exits non-zero if any service fails to become healthy +# within HEALTH_TIMEOUT). Beats parsing `compose ps --format json` with regex. 
+echo "[run-tests] docker compose up -d --wait (timeout=${HEALTH_TIMEOUT}s)" +if ! (cd "$ROOT_DIR" && docker compose up -d --wait --wait-timeout "$HEALTH_TIMEOUT"); then + echo "[run-tests] FATAL: services did not become healthy within ${HEALTH_TIMEOUT}s" >&2 + (cd "$ROOT_DIR" && docker compose logs --tail 40) >&2 + exit 1 +fi +echo "[run-tests] both services healthy" + +# 5. (Optional) Run cargo unit + integration tests against the live DBs. +# The 5 gated tests in src/{driver,pool}/{postgres,mysql}.rs and +# src/triggers/row_change.rs early-return when TEST_*_URL is unset; we set +# both here so they exercise real connections, binary param encoding, +# replication-slot creation, etc — finer-grained than the e2e harness +# alone. CI passes --with-cargo-test; local runs skip this by default. +if [[ "$WITH_CARGO_TEST" -eq 1 ]]; then + echo "[run-tests] cargo test --all-features (with TEST_POSTGRES_URL + TEST_MYSQL_URL)" + ( + cd "$WORKER_SRC" && \ + TEST_POSTGRES_URL="postgres://iii:iii@127.0.0.1:55432/iii_test" \ + TEST_MYSQL_URL="mysql://iii:iii@127.0.0.1:53306/iii_test" \ + cargo test --all-features + ) +fi + +# 6. Reset SQLite file +rm -f "$ROOT_DIR/data/test.sqlite" + +# 7. Install harness deps if needed +if [[ ! -d "$ROOT_DIR/workers/harness/node_modules" ]]; then + echo "[run-tests] npm install (harness)" + (cd "$ROOT_DIR/workers/harness" && npm install --silent) +fi + +# 8. Start the engine (default config: ./config.yaml) +echo "[run-tests] starting iii engine" +: > "$ENGINE_LOG" +: > "$HARNESS_LOG" + +( cd "$ROOT_DIR" && "$III_BIN" --no-update-check -c ./config.yaml ) > "$ENGINE_LOG" 2>&1 & +ENGINE_PID=$! +echo "[run-tests] engine pid=$ENGINE_PID" + +# 9. Wait for the engine to accept TCP on its WebSocket port (49134). +# Probing the port directly instead of grepping for an engine log line +# decouples this script from the engine's logging format — a quiet log +# refactor upstream used to silently break us as a 30s timeout. +deadline=$(( $(date +%s) + 30 )) +while :; do + if (echo > /dev/tcp/127.0.0.1/49134) 2>/dev/null; then + break + fi + if ! kill -0 "$ENGINE_PID" 2>/dev/null; then + echo "[run-tests] FATAL: engine exited before binding port; tail of engine log:" >&2 + tail -40 "$ENGINE_LOG" >&2 + exit 1 + fi + if (( $(date +%s) > deadline )); then + echo "[run-tests] FATAL: engine did not bind port 49134 within 30s; tail of engine log:" >&2 + tail -40 "$ENGINE_LOG" >&2 + exit 1 + fi + sleep 0.5 +done +echo "[run-tests] engine listening" + +# 10. Launch the harness as a host node process +echo "[run-tests] starting harness" +HARNESS_ENV=() +if [[ -n "$FILTER" ]]; then + HARNESS_ENV+=("HARNESS_FILTER=$FILTER") +fi +HARNESS_ENV+=("III_URL=ws://127.0.0.1:49134") +HARNESS_ENV+=("HARNESS_REPORT_PATH=$REPORT_PATH") + +( cd "$ROOT_DIR/workers/harness" && env "${HARNESS_ENV[@]}" npm run --silent dev ) > "$HARNESS_LOG" 2>&1 & +HARNESS_PID=$! +echo "[run-tests] harness pid=$HARNESS_PID" + +# 11. Wait for sentinel line +sentinel="" +deadline=$(( $(date +%s) + SENTINEL_TIMEOUT )) +while (( $(date +%s) < deadline )); do + if ! 
kill -0 "$HARNESS_PID" 2>/dev/null; then + if grep -m1 -E '^HARNESS_DONE: (PASS|FAIL) [0-9]+/[0-9]+$' "$HARNESS_LOG" >/dev/null 2>&1; then + sentinel=$(grep -m1 -E '^HARNESS_DONE: (PASS|FAIL) [0-9]+/[0-9]+$' "$HARNESS_LOG") + break + fi + echo "[run-tests] harness exited without sentinel; tail of harness log:" >&2 + tail -40 "$HARNESS_LOG" >&2 + exit 1 + fi + if grep -m1 -E '^HARNESS_DONE: (PASS|FAIL) [0-9]+/[0-9]+$' "$HARNESS_LOG" >/dev/null 2>&1; then + sentinel=$(grep -m1 -E '^HARNESS_DONE: (PASS|FAIL) [0-9]+/[0-9]+$' "$HARNESS_LOG") + break + fi + sleep 1 +done + +if [[ -z "$sentinel" ]]; then + echo "[run-tests] FATAL: harness did not emit sentinel within ${SENTINEL_TIMEOUT}s" >&2 + echo "[run-tests] tail of harness log:" >&2 + tail -40 "$HARNESS_LOG" >&2 + exit 1 +fi + +# 12. Print summary +echo +echo "=======================================================================" +echo "$sentinel" +if [[ -f "$REPORT_PATH" ]]; then + python3 - "$REPORT_PATH" <<'PY' 2>/dev/null || cat "$REPORT_PATH" +import json, sys +data = json.load(open(sys.argv[1])) +for r in data["results"]: + tag = "PASS" if r["status"] == "PASS" else "FAIL" + err = (" — " + r.get("error","")) if r["status"] == "FAIL" else "" + print(f" [{tag}] {r['driver']:10s} {r['case']}{err}") +PY +fi +echo "=======================================================================" + +case "$sentinel" in + *PASS*) exit 0 ;; + *) exit 1 ;; +esac diff --git a/iii-database/tests/e2e/workers/harness/iii.worker.yaml b/iii-database/tests/e2e/workers/harness/iii.worker.yaml new file mode 100644 index 00000000..0bd52690 --- /dev/null +++ b/iii-database/tests/e2e/workers/harness/iii.worker.yaml @@ -0,0 +1,8 @@ +name: harness +runtime: + language: typescript + package_manager: npm + entry: src/worker.ts +scripts: + install: 'npm install' + start: 'npm run dev' diff --git a/iii-database/tests/e2e/workers/harness/package-lock.json b/iii-database/tests/e2e/workers/harness/package-lock.json new file mode 100644 index 00000000..3cd1ffdd --- /dev/null +++ b/iii-database/tests/e2e/workers/harness/package-lock.json @@ -0,0 +1,1160 @@ +{ + "name": "iii-database-tests-harness", + "version": "0.1.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "iii-database-tests-harness", + "version": "0.1.0", + "license": "Apache-2.0", + "dependencies": { + "iii-sdk": "0.11.2" + }, + "devDependencies": { + "@types/node": "^20.0.0", + "tsx": "^4.0.0", + "typescript": "^5.4.0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.7.tgz", + "integrity": "sha512-EKX3Qwmhz1eMdEJokhALr0YiD0lhQNwDqkPYyPhiSwKrh7/4KRjQc04sZ8db+5DVVnZ1LmbNDI1uAMPEUBnQPg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.7.tgz", + "integrity": "sha512-jbPXvB4Yj2yBV7HUfE2KHe4GJX51QplCN1pGbYjvsyCZbQmies29EoJbkEc+vYuU5o45AfQn37vZlyXy4YJ8RQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.7.tgz", + "integrity": 
"sha512-62dPZHpIXzvChfvfLJow3q5dDtiNMkwiRzPylSCfriLvZeq0a1bWChrGx/BbUbPwOrsWKMn8idSllklzBy+dgQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.7.tgz", + "integrity": "sha512-x5VpMODneVDb70PYV2VQOmIUUiBtY3D3mPBG8NxVk5CogneYhkR7MmM3yR/uMdITLrC1ml/NV1rj4bMJuy9MCg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.7.tgz", + "integrity": "sha512-5lckdqeuBPlKUwvoCXIgI2D9/ABmPq3Rdp7IfL70393YgaASt7tbju3Ac+ePVi3KDH6N2RqePfHnXkaDtY9fkw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.7.tgz", + "integrity": "sha512-rYnXrKcXuT7Z+WL5K980jVFdvVKhCHhUwid+dDYQpH+qu+TefcomiMAJpIiC2EM3Rjtq0sO3StMV/+3w3MyyqQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.7.tgz", + "integrity": "sha512-B48PqeCsEgOtzME2GbNM2roU29AMTuOIN91dsMO30t+Ydis3z/3Ngoj5hhnsOSSwNzS+6JppqWsuhTp6E82l2w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.7.tgz", + "integrity": "sha512-jOBDK5XEjA4m5IJK3bpAQF9/Lelu/Z9ZcdhTRLf4cajlB+8VEhFFRjWgfy3M1O4rO2GQ/b2dLwCUGpiF/eATNQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.7.tgz", + "integrity": "sha512-RkT/YXYBTSULo3+af8Ib0ykH8u2MBh57o7q/DAs3lTJlyVQkgQvlrPTnjIzzRPQyavxtPtfg0EopvDyIt0j1rA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.7.tgz", + "integrity": "sha512-RZPHBoxXuNnPQO9rvjh5jdkRmVizktkT7TCDkDmQ0W2SwHInKCAV95GRuvdSvA7w4VMwfCjUiPwDi0ZO6Nfe9A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.7.tgz", + "integrity": "sha512-GA48aKNkyQDbd3KtkplYWT102C5sn/EZTY4XROkxONgruHPU72l+gW+FfF8tf2cFjeHaRbWpOYa/uRBz/Xq1Pg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" 
+ } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.7.tgz", + "integrity": "sha512-a4POruNM2oWsD4WKvBSEKGIiWQF8fZOAsycHOt6JBpZ+JN2n2JH9WAv56SOyu9X5IqAjqSIPTaJkqN8F7XOQ5Q==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.7.tgz", + "integrity": "sha512-KabT5I6StirGfIz0FMgl1I+R1H73Gp0ofL9A3nG3i/cYFJzKHhouBV5VWK1CSgKvVaG4q1RNpCTR2LuTVB3fIw==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.7.tgz", + "integrity": "sha512-gRsL4x6wsGHGRqhtI+ifpN/vpOFTQtnbsupUF5R5YTAg+y/lKelYR1hXbnBdzDjGbMYjVJLJTd2OFmMewAgwlQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.7.tgz", + "integrity": "sha512-hL25LbxO1QOngGzu2U5xeXtxXcW+/GvMN3ejANqXkxZ/opySAZMrc+9LY/WyjAan41unrR3YrmtTsUpwT66InQ==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.7.tgz", + "integrity": "sha512-2k8go8Ycu1Kb46vEelhu1vqEP+UeRVj2zY1pSuPdgvbd5ykAw82Lrro28vXUrRmzEsUV0NzCf54yARIK8r0fdw==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.7.tgz", + "integrity": "sha512-hzznmADPt+OmsYzw1EE33ccA+HPdIqiCRq7cQeL1Jlq2gb1+OyWBkMCrYGBJ+sxVzve2ZJEVeePbLM2iEIZSxA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.7.tgz", + "integrity": "sha512-b6pqtrQdigZBwZxAn1UpazEisvwaIDvdbMbmrly7cDTMFnw/+3lVxxCTGOrkPVnsYIosJJXAsILG9XcQS+Yu6w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.7.tgz", + "integrity": "sha512-OfatkLojr6U+WN5EDYuoQhtM+1xco+/6FSzJJnuWiUw5eVcicbyK3dq5EeV/QHT1uy6GoDhGbFpprUiHUYggrw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.7.tgz", + "integrity": 
"sha512-AFuojMQTxAz75Fo8idVcqoQWEHIXFRbOc1TrVcFSgCZtQfSdc1RXgB3tjOn/krRHENUB4j00bfGjyl2mJrU37A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.7.tgz", + "integrity": "sha512-+A1NJmfM8WNDv5CLVQYJ5PshuRm/4cI6WMZRg1by1GwPIQPCTs1GLEUHwiiQGT5zDdyLiRM/l1G0Pv54gvtKIg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.7.tgz", + "integrity": "sha512-+KrvYb/C8zA9CU/g0sR6w2RBw7IGc5J2BPnc3dYc5VJxHCSF1yNMxTV5LQ7GuKteQXZtspjFbiuW5/dOj7H4Yw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.7.tgz", + "integrity": "sha512-ikktIhFBzQNt/QDyOL580ti9+5mL/YZeUPKU2ivGtGjdTYoqz6jObj6nOMfhASpS4GU4Q/Clh1QtxWAvcYKamA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.7.tgz", + "integrity": "sha512-7yRhbHvPqSpRUV7Q20VuDwbjW5kIMwTHpptuUzV+AA46kiPze5Z7qgt6CLCK3pWFrHeNfDd1VKgyP4O+ng17CA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.7.tgz", + "integrity": "sha512-SmwKXe6VHIyZYbBLJrhOoCJRB/Z1tckzmgTLfFYOfpMAx63BJEaL9ExI8x7v0oAO3Zh6D/Oi1gVxEYr5oUCFhw==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.7.tgz", + "integrity": "sha512-56hiAJPhwQ1R4i+21FVF7V8kSD5zZTdHcVuRFMW0hn753vVfQN8xlx4uOPT4xoGH0Z/oVATuR82AiqSTDIpaHg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@opentelemetry/api": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.1.tgz", + "integrity": "sha512-gLyJlPHPZYdAk1JENA9LeHejZe1Ti77/pTeFm/nMXmQH/HFZlcS/O2XJB+L8fkbrNSqhdtlvjBVjxwUYanNH5Q==", + "license": "Apache-2.0", + "peer": true, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/@opentelemetry/api-logs": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.57.2.tgz", + "integrity": "sha512-uIX52NnTM0iBh84MShlpouI7UKqkZ7MrUszTmaypHBu4r7NofznSnQRfJ+uUeDtQDj6w8eFGg5KBLDAwAPz1+A==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/api": "^1.3.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/context-async-hooks": { + "version": 
"1.30.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/context-async-hooks/-/context-async-hooks-1.30.1.tgz", + "integrity": "sha512-s5vvxXPVdjqS3kTLKMeBMvop9hbWkwzBpu+mUO2M7sZtlkyDJGwFe33wRKnbaYDo8ExRVBIIdwIGrqpxHuKttA==", + "license": "Apache-2.0", + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/core": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-1.30.1.tgz", + "integrity": "sha512-OOCM2C/QIURhJMuKaekP3TRBxBKxG/TWWA0TL2J6nXUtDnuCtccy49LUJF8xPFXMX+0LMcxFpCo8M9cGY1W6rQ==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/semantic-conventions": "1.28.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/core/node_modules/@opentelemetry/semantic-conventions": { + "version": "1.28.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.28.0.tgz", + "integrity": "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA==", + "license": "Apache-2.0", + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/instrumentation": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation/-/instrumentation-0.57.2.tgz", + "integrity": "sha512-BdBGhQBh8IjZ2oIIX6F2/Q3LKm/FDDKi6ccYKcBTeilh6SNdNKveDOLk73BkSJjQLJk6qe4Yh+hHw1UPhCDdrg==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/api-logs": "0.57.2", + "@types/shimmer": "^1.2.0", + "import-in-the-middle": "^1.8.1", + "require-in-the-middle": "^7.1.1", + "semver": "^7.5.2", + "shimmer": "^1.2.1" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/otlp-transformer": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/otlp-transformer/-/otlp-transformer-0.57.2.tgz", + "integrity": "sha512-48IIRj49gbQVK52jYsw70+Jv+JbahT8BqT2Th7C4H7RCM9d0gZ5sgNPoMpWldmfjvIsSgiGJtjfk9MeZvjhoig==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/api-logs": "0.57.2", + "@opentelemetry/core": "1.30.1", + "@opentelemetry/resources": "1.30.1", + "@opentelemetry/sdk-logs": "0.57.2", + "@opentelemetry/sdk-metrics": "1.30.1", + "@opentelemetry/sdk-trace-base": "1.30.1", + "protobufjs": "^7.3.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/propagator-b3": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/propagator-b3/-/propagator-b3-1.30.1.tgz", + "integrity": "sha512-oATwWWDIJzybAZ4pO76ATN5N6FFbOA1otibAVlS8v90B4S1wClnhRUk7K+2CHAwN1JKYuj4jh/lpCEG5BAqFuQ==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "1.30.1" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/propagator-jaeger": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/propagator-jaeger/-/propagator-jaeger-1.30.1.tgz", + "integrity": "sha512-Pj/BfnYEKIOImirH76M4hDaBSx6HyZ2CXUqk+Kj02m6BB80c/yo4BdWkn/1gDFfU+YPY+bPR2U0DKBfdxCKwmg==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "1.30.1" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + 
"@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/resources": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-1.30.1.tgz", + "integrity": "sha512-5UxZqiAgLYGFjS4s9qm5mBVo433u+dSPUFWVWXmLAD4wB65oMCoXaJP1KJa9DIYYMeHu3z4BZcStG3LC593cWA==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "1.30.1", + "@opentelemetry/semantic-conventions": "1.28.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/resources/node_modules/@opentelemetry/semantic-conventions": { + "version": "1.28.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.28.0.tgz", + "integrity": "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA==", + "license": "Apache-2.0", + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/sdk-logs": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-logs/-/sdk-logs-0.57.2.tgz", + "integrity": "sha512-TXFHJ5c+BKggWbdEQ/inpgIzEmS2BGQowLE9UhsMd7YYlUfBQJ4uax0VF/B5NYigdM/75OoJGhAV3upEhK+3gg==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/api-logs": "0.57.2", + "@opentelemetry/core": "1.30.1", + "@opentelemetry/resources": "1.30.1" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.4.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/sdk-metrics": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-metrics/-/sdk-metrics-1.30.1.tgz", + "integrity": "sha512-q9zcZ0Okl8jRgmy7eNW3Ku1XSgg3sDLa5evHZpCwjspw7E8Is4K/haRPDJrBcX3YSn/Y7gUvFnByNYEKQNbNog==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "1.30.1", + "@opentelemetry/resources": "1.30.1" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/sdk-trace-base": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-trace-base/-/sdk-trace-base-1.30.1.tgz", + "integrity": "sha512-jVPgBbH1gCy2Lb7X0AVQ8XAfgg0pJ4nvl8/IiQA6nxOsPvS+0zMJaFSs2ltXe0J6C8dqjcnpyqINDJmU30+uOg==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "1.30.1", + "@opentelemetry/resources": "1.30.1", + "@opentelemetry/semantic-conventions": "1.28.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/sdk-trace-base/node_modules/@opentelemetry/semantic-conventions": { + "version": "1.28.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.28.0.tgz", + "integrity": "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA==", + "license": "Apache-2.0", + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/sdk-trace-node": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-trace-node/-/sdk-trace-node-1.30.1.tgz", + "integrity": "sha512-cBjYOINt1JxXdpw1e5MlHmFRc5fgj4GW/86vsKFxJCJ8AL4PdVtYH41gWwl4qd4uQjqEL1oJVrXkSy5cnduAnQ==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/context-async-hooks": "1.30.1", + "@opentelemetry/core": "1.30.1", + "@opentelemetry/propagator-b3": "1.30.1", + "@opentelemetry/propagator-jaeger": 
"1.30.1", + "@opentelemetry/sdk-trace-base": "1.30.1", + "semver": "^7.5.2" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/semantic-conventions": { + "version": "1.40.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.40.0.tgz", + "integrity": "sha512-cifvXDhcqMwwTlTK04GBNeIe7yyo28Mfby85QXFe1Yk8nmi36Ab/5UQwptOx84SsoGNRg+EVSjwzfSZMy6pmlw==", + "license": "Apache-2.0", + "engines": { + "node": ">=14" + } + }, + "node_modules/@protobufjs/aspromise": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", + "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/base64": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", + "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/codegen": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.5.tgz", + "integrity": "sha512-zgXFLzW3Ap33e6d0Wlj4MGIm6Ce8O89n/apUaGNB/jx+hw+ruWEp7EwGUshdLKVRCxZW12fp9r40E1mQrf/34g==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/eventemitter": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", + "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/fetch": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", + "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", + "license": "BSD-3-Clause", + "dependencies": { + "@protobufjs/aspromise": "^1.1.1", + "@protobufjs/inquire": "^1.1.0" + } + }, + "node_modules/@protobufjs/float": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", + "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/inquire": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.1.tgz", + "integrity": "sha512-mnzgDV26ueAvk7rsbt9L7bE0SuAoqyuys/sMMrmVcN5x9VsxpcG3rqAUSgDyLp0UZlmNfIbQ4fHfCtreVBk8Ew==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/path": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", + "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/pool": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", + "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/utf8": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.1.tgz", + "integrity": "sha512-oOAWABowe8EAbMyWKM0tYDKi8Yaox52D+HWZhAIJqQXbqe0xI/GV7FhLWqlEKreMkfDjshR5FKgi3mnle0h6Eg==", + "license": 
"BSD-3-Clause" + }, + "node_modules/@types/node": { + "version": "20.19.39", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.39.tgz", + "integrity": "sha512-orrrD74MBUyK8jOAD/r0+lfa1I2MO6I+vAkmAWzMYbCcgrN4lCrmK52gRFQq/JRxfYPfonkr4b0jcY7Olqdqbw==", + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@types/shimmer": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@types/shimmer/-/shimmer-1.2.0.tgz", + "integrity": "sha512-UE7oxhQLLd9gub6JKIAhDq06T0F6FnztwMNRvYgjeQSBeMc1ZG/tA47EwfduvkuQS8apbkM/lpLpWsaCeYsXVg==", + "license": "MIT" + }, + "node_modules/acorn": { + "version": "8.16.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.16.0.tgz", + "integrity": "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==", + "license": "MIT", + "peer": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-import-attributes": { + "version": "1.9.5", + "resolved": "https://registry.npmjs.org/acorn-import-attributes/-/acorn-import-attributes-1.9.5.tgz", + "integrity": "sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ==", + "license": "MIT", + "peerDependencies": { + "acorn": "^8" + } + }, + "node_modules/cjs-module-lexer": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.4.3.tgz", + "integrity": "sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==", + "license": "MIT" + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/esbuild": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.7.tgz", + "integrity": "sha512-IxpibTjyVnmrIQo5aqNpCgoACA/dTKLTlhMHihVHhdkxKyPO1uBBthumT0rdHmcsk9uMonIWS0m4FljWzILh3w==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.27.7", + "@esbuild/android-arm": "0.27.7", + "@esbuild/android-arm64": "0.27.7", + "@esbuild/android-x64": "0.27.7", + "@esbuild/darwin-arm64": "0.27.7", + "@esbuild/darwin-x64": "0.27.7", + "@esbuild/freebsd-arm64": "0.27.7", + "@esbuild/freebsd-x64": "0.27.7", + "@esbuild/linux-arm": "0.27.7", + "@esbuild/linux-arm64": "0.27.7", + "@esbuild/linux-ia32": "0.27.7", + "@esbuild/linux-loong64": "0.27.7", + "@esbuild/linux-mips64el": "0.27.7", + "@esbuild/linux-ppc64": "0.27.7", + "@esbuild/linux-riscv64": "0.27.7", + "@esbuild/linux-s390x": "0.27.7", + "@esbuild/linux-x64": "0.27.7", + "@esbuild/netbsd-arm64": "0.27.7", + "@esbuild/netbsd-x64": "0.27.7", + "@esbuild/openbsd-arm64": "0.27.7", + "@esbuild/openbsd-x64": "0.27.7", + "@esbuild/openharmony-arm64": "0.27.7", + 
"@esbuild/sunos-x64": "0.27.7", + "@esbuild/win32-arm64": "0.27.7", + "@esbuild/win32-ia32": "0.27.7", + "@esbuild/win32-x64": "0.27.7" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-tsconfig": { + "version": "4.14.0", + "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.14.0.tgz", + "integrity": "sha512-yTb+8DXzDREzgvYmh6s9vHsSVCHeC0G3PI5bEXNBHtmshPnO+S5O7qgLEOn0I5QvMy6kpZN8K1NKGyilLb93wA==", + "dev": true, + "license": "MIT", + "dependencies": { + "resolve-pkg-maps": "^1.0.0" + }, + "funding": { + "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1" + } + }, + "node_modules/hasown": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.3.tgz", + "integrity": "sha512-ej4AhfhfL2Q2zpMmLo7U1Uv9+PyhIZpgQLGT1F9miIGmiCJIoCgSmczFdrc97mWT4kVY72KA+WnnhJ5pghSvSg==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/iii-sdk": { + "version": "0.11.2", + "resolved": "https://registry.npmjs.org/iii-sdk/-/iii-sdk-0.11.2.tgz", + "integrity": "sha512-S8/o53j1z+IOU6Mp1f3GbivJ59hEgWhtT6hNutVpfwhJK5Q9zS2rV2LUX1Ko6+xF/Zr3Y6xodNRmBRng0qiZZA==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/api": "^1.9.0", + "@opentelemetry/api-logs": "^0.57.0", + "@opentelemetry/core": "^1.30.0", + "@opentelemetry/instrumentation": "^0.57.0", + "@opentelemetry/otlp-transformer": "^0.57.0", + "@opentelemetry/resources": "^1.30.0", + "@opentelemetry/sdk-logs": "^0.57.0", + "@opentelemetry/sdk-metrics": "^1.30.0", + "@opentelemetry/sdk-trace-base": "^1.30.0", + "@opentelemetry/sdk-trace-node": "^1.30.0", + "@opentelemetry/semantic-conventions": "^1.28.0", + "ws": "^8.18.3" + } + }, + "node_modules/import-in-the-middle": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/import-in-the-middle/-/import-in-the-middle-1.15.0.tgz", + "integrity": "sha512-bpQy+CrsRmYmoPMAE/0G33iwRqwW4ouqdRg8jgbH3aKuCtOc8lxgmYXg2dMM92CRiGP660EtBcymH/eVUpCSaA==", + "license": "Apache-2.0", + "dependencies": { + "acorn": "^8.14.0", + "acorn-import-attributes": "^1.9.5", + "cjs-module-lexer": "^1.2.2", + "module-details-from-path": "^1.0.3" + } + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/long": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/long/-/long-5.3.2.tgz", + "integrity": 
"sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==", + "license": "Apache-2.0" + }, + "node_modules/module-details-from-path": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/module-details-from-path/-/module-details-from-path-1.0.4.tgz", + "integrity": "sha512-EGWKgxALGMgzvxYF1UyGTy0HXX/2vHLkw6+NvDKW2jypWbHpjQuj4UMcqQWXHERJhVGKikolT06G3bcKe4fi7w==", + "license": "MIT" + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "license": "MIT" + }, + "node_modules/protobufjs": { + "version": "7.5.6", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.5.6.tgz", + "integrity": "sha512-M71sTMB146U3u0di3yup8iM+zv8yPRNQVr1KK4tyBitl3qFvEGucq/rGDRShD2rsJhtN02RJaJ7j5X5hmy8SJg==", + "hasInstallScript": true, + "license": "BSD-3-Clause", + "dependencies": { + "@protobufjs/aspromise": "^1.1.2", + "@protobufjs/base64": "^1.1.2", + "@protobufjs/codegen": "^2.0.5", + "@protobufjs/eventemitter": "^1.1.0", + "@protobufjs/fetch": "^1.1.0", + "@protobufjs/float": "^1.0.2", + "@protobufjs/inquire": "^1.1.1", + "@protobufjs/path": "^1.1.2", + "@protobufjs/pool": "^1.1.0", + "@protobufjs/utf8": "^1.1.1", + "@types/node": ">=13.7.0", + "long": "^5.0.0" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/require-in-the-middle": { + "version": "7.5.2", + "resolved": "https://registry.npmjs.org/require-in-the-middle/-/require-in-the-middle-7.5.2.tgz", + "integrity": "sha512-gAZ+kLqBdHarXB64XpAe2VCjB7rIRv+mU8tfRWziHRJ5umKsIHN2tLLv6EtMw7WCdP19S0ERVMldNvxYCHnhSQ==", + "license": "MIT", + "dependencies": { + "debug": "^4.3.5", + "module-details-from-path": "^1.0.3", + "resolve": "^1.22.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/resolve": { + "version": "1.22.12", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.12.tgz", + "integrity": "sha512-TyeJ1zif53BPfHootBGwPRYT1RUt6oGWsaQr8UyZW/eAm9bKoijtvruSDEmZHm92CwS9nj7/fWttqPCgzep8CA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "is-core-module": "^2.16.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-pkg-maps": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz", + "integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1" + } + }, + "node_modules/semver": { + "version": "7.7.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz", + "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/shimmer": { + "version": "1.2.1", + "resolved": 
"https://registry.npmjs.org/shimmer/-/shimmer-1.2.1.tgz", + "integrity": "sha512-sQTKC1Re/rM6XyFM6fIAGHRPVGvyXfgzIDvzoq608vM+jeyVD0Tu1E6Np0Kc2zAIFWIj963V2800iF/9LPieQw==", + "license": "BSD-2-Clause" + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/tsx": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/tsx/-/tsx-4.21.0.tgz", + "integrity": "sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "~0.27.0", + "get-tsconfig": "^4.7.5" + }, + "bin": { + "tsx": "dist/cli.mjs" + }, + "engines": { + "node": ">=18.0.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "license": "MIT" + }, + "node_modules/ws": { + "version": "8.20.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.20.0.tgz", + "integrity": "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA==", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + } + } +} diff --git a/iii-database/tests/e2e/workers/harness/package.json b/iii-database/tests/e2e/workers/harness/package.json new file mode 100644 index 00000000..cd956afc --- /dev/null +++ b/iii-database/tests/e2e/workers/harness/package.json @@ -0,0 +1,20 @@ +{ + "name": "iii-database-tests-harness", + "version": "0.1.0", + "type": "module", + "private": true, + "description": "Self-asserting smoke harness for the iii database worker (SQLite, Postgres, MySQL).", + "scripts": { + "dev": "tsx src/worker.ts", + "build": "tsc" + }, + "dependencies": { + "iii-sdk": "0.11.2" + }, + "devDependencies": { + "@types/node": "^20.0.0", + "tsx": "^4.0.0", + "typescript": "^5.4.0" + }, + "license": "Apache-2.0" +} diff --git a/iii-database/tests/e2e/workers/harness/src/cases-boundary.ts b/iii-database/tests/e2e/workers/harness/src/cases-boundary.ts new file mode 100644 index 00000000..06cb2b26 --- /dev/null +++ b/iii-database/tests/e2e/workers/harness/src/cases-boundary.ts @@ -0,0 +1,432 @@ +import type { TestCase } from './cases.ts'; +import { expect, expectEqual } from './cases.ts'; + +/** + * Boundary-value cases targeting type encoding, NULL handling, and string + * round-trip. 
Each test creates and drops its own scratch table so it stays + * independent of the shared `t` / `outbox` tables touched by the function suite. + * + * i64 boundary tests use inline SQL literals because JSON cannot carry an + * exact i64 across all values (JS Number tops out at 2^53-1). The bug surface + * the recent debugging found was on the *read-back* path (RowValue::BigInt → + * JSON string in value.rs:90), which inline-literal inserts exercise just as + * well as parameterized inserts. We test the param-decode path separately + * within Number.MAX_SAFE_INTEGER. + */ +export const BOUNDARY_CASES: TestCase[] = [ + { + name: 'i64 max round-trip (BIGINT-as-string)', + // Gated to pg_db: postgres has a BIGINT/INT8 column type the driver can map + // to RowValue::BigInt → JSON string for precision. SQLite has no column-type + // distinction (single INTEGER affinity, value-dependent storage), so its + // driver maps to RowValue::Int unconditionally → JSON Number → precision + // loss above 2^53. MySQL's BIGINT-mapping behavior is a separate finding; + // see mysql_db: i64 max round-trip failure for whether the mysql driver + // also drops to Int despite having distinct BIGINT type info available. + applies: ['pg_db'], + async run({ driver, call }) { + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE IF EXISTS bx_i64max' }); + await call('iii-database::execute', { db: driver, sql: 'CREATE TABLE bx_i64max (n BIGINT NOT NULL)' }); + await call('iii-database::execute', { + db: driver, + sql: 'INSERT INTO bx_i64max (n) VALUES (9223372036854775807)', + }); + const q = await call('iii-database::query', { db: driver, sql: 'SELECT n FROM bx_i64max' }); + const v = q.rows[0].n; + expectEqual(v, '9223372036854775807', 'i64::MAX preserved as JSON string'); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE bx_i64max' }); + }, + }, + { + name: 'i64 min round-trip (BIGINT-as-string)', + applies: ['pg_db'], + async run({ driver, call }) { + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE IF EXISTS bx_i64min' }); + await call('iii-database::execute', { db: driver, sql: 'CREATE TABLE bx_i64min (n BIGINT NOT NULL)' }); + await call('iii-database::execute', { + db: driver, + sql: 'INSERT INTO bx_i64min (n) VALUES (-9223372036854775808)', + }); + const q = await call('iii-database::query', { db: driver, sql: 'SELECT n FROM bx_i64min' }); + expectEqual(q.rows[0].n, '-9223372036854775808', 'i64::MIN preserved as JSON string'); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE bx_i64min' }); + }, + }, + { + name: 'large integer precision documentation (sqlite + mysql)', + // SQLite/MySQL drivers currently emit large i64 as JSON Number, not string. + // Number.MAX_SAFE_INTEGER = 2^53 - 1 = 9007199254740991. We assert that + // values within that bound round-trip exactly, documenting the working + // contract while the BIGINT-as-string-test (pg-only) holds the bar above. 
+ applies: ['sqlite_db', 'mysql_db'], + async run({ driver, call }) { + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE IF EXISTS bx_i64safe' }); + await call('iii-database::execute', { db: driver, sql: 'CREATE TABLE bx_i64safe (n BIGINT NOT NULL)' }); + // 9007199254740991 = Number.MAX_SAFE_INTEGER + await call('iii-database::execute', { + db: driver, + sql: 'INSERT INTO bx_i64safe (n) VALUES (9007199254740991)', + }); + const q = await call('iii-database::query', { db: driver, sql: 'SELECT n FROM bx_i64safe' }); + const v = q.rows[0].n; + expect( + v === 9007199254740991 || v === '9007199254740991', + `MAX_SAFE_INTEGER round-trip: got ${JSON.stringify(v)}`, + ); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE bx_i64safe' }); + }, + }, + { + name: 'param-decode int into INT4 column (regression)', + async run({ driver, dialect, call }) { + // Recent debugging found a bug where the postgres driver wrote 8-byte i64 into + // a 4-byte INT4 column, surfacing as `22P03 invalid_binary_representation`. + // This case binds an i64-shaped JSON number (within Number.MAX_SAFE_INTEGER) + // to a 32-bit-wide column type. Drivers must dispatch on column type width. + const ph1 = dialect.placeholder(1); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE IF EXISTS bx_int4' }); + // Use INT (postgres maps to INT4, mysql to INT, sqlite stores as INTEGER affinity). + await call('iii-database::execute', { db: driver, sql: 'CREATE TABLE bx_int4 (n INT NOT NULL)' }); + await call('iii-database::execute', { + db: driver, + sql: `INSERT INTO bx_int4 (n) VALUES (${ph1})`, + params: [12345], + }); + const q = await call('iii-database::query', { db: driver, sql: 'SELECT n FROM bx_int4' }); + expectEqual(Number(q.rows[0].n), 12345, 'INT column round-trip'); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE bx_int4' }); + }, + }, + { + name: 'NULL param insert and select', + async run({ driver, dialect, call }) { + const ph1 = dialect.placeholder(1); + const ph2 = dialect.placeholder(2); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE IF EXISTS bx_null' }); + await call('iii-database::execute', { db: driver, sql: 'CREATE TABLE bx_null (a INT NULL, b TEXT NULL)' }); + const r = await call('iii-database::execute', { + db: driver, + sql: `INSERT INTO bx_null (a, b) VALUES (${ph1}, ${ph2})`, + params: [null, null], + }); + expectEqual(r.affected_rows, 1, 'insert with null params'); + const q = await call('iii-database::query', { + db: driver, + sql: 'SELECT a, b FROM bx_null WHERE a IS NULL AND b IS NULL', + }); + expectEqual(q.row_count, 1, 'one matching row with both nulls'); + expectEqual(q.rows[0].a, null, 'a is JSON null'); + expectEqual(q.rows[0].b, null, 'b is JSON null'); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE bx_null' }); + }, + }, + { + name: 'empty string vs NULL distinction', + async run({ driver, dialect, call }) { + const ph1 = dialect.placeholder(1); + const ph2 = dialect.placeholder(2); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE IF EXISTS bx_emptynull' }); + // postgres/mysql/sqlite all distinguish '' from NULL; assert the worker doesn't conflate. 
+ await call('iii-database::execute', { + db: driver, + sql: `CREATE TABLE bx_emptynull (id ${dialect.idColumnDDL()}, s TEXT NULL)`, + }); + await call('iii-database::execute', { + db: driver, + sql: `INSERT INTO bx_emptynull (s) VALUES (${ph1}), (${ph2})`, + params: ['', null], + }); + const q = await call('iii-database::query', { + db: driver, + sql: 'SELECT s FROM bx_emptynull ORDER BY id', + }); + expectEqual(q.rows[0].s, '', 'first row is empty string, not null'); + expectEqual(q.rows[1].s, null, 'second row is null, not empty string'); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE bx_emptynull' }); + }, + }, + { + name: 'UTF-8 round-trip (emoji + RTL + combining marks)', + async run({ driver, dialect, call }) { + const ph1 = dialect.placeholder(1); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE IF EXISTS bx_utf8' }); + await call('iii-database::execute', { db: driver, sql: 'CREATE TABLE bx_utf8 (s TEXT NOT NULL)' }); + // Mix: emoji (4-byte UTF-8), RTL Arabic, Latin with combining acute, ZWSP, Han ideograph. + const payload = '🔥مرحبا é​汉'; + await call('iii-database::execute', { + db: driver, + sql: `INSERT INTO bx_utf8 (s) VALUES (${ph1})`, + params: [payload], + }); + const q = await call('iii-database::query', { db: driver, sql: 'SELECT s FROM bx_utf8' }); + expectEqual(q.rows[0].s, payload, 'utf-8 round-trip exact equality'); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE bx_utf8' }); + }, + }, + { + name: 'long string round-trip (64KB)', + async run({ driver, dialect, call }) { + const ph1 = dialect.placeholder(1); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE IF EXISTS bx_long' }); + // MySQL TEXT caps at 64KB; LONGTEXT is unbounded. Use LONGTEXT on mysql to stay clear of headers. + const colType = driver === 'mysql_db' ? 
'LONGTEXT' : 'TEXT'; + await call('iii-database::execute', { + db: driver, + sql: `CREATE TABLE bx_long (s ${colType} NOT NULL)`, + }); + const payload = 'x'.repeat(64 * 1024 - 16); + await call('iii-database::execute', { + db: driver, + sql: `INSERT INTO bx_long (s) VALUES (${ph1})`, + params: [payload], + }); + const q = await call('iii-database::query', { db: driver, sql: 'SELECT s FROM bx_long' }); + expectEqual( + (q.rows[0].s as string).length, + payload.length, + '64KB string length preserved', + ); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE bx_long' }); + }, + }, + { + name: 'float values including a very small magnitude', + async run({ driver, dialect, call }) { + const ph1 = dialect.placeholder(1); + const ph2 = dialect.placeholder(2); + const ph3 = dialect.placeholder(3); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE IF EXISTS bx_float' }); + await call('iii-database::execute', { + db: driver, + sql: `CREATE TABLE bx_float (id ${dialect.idColumnDDL()}, f DOUBLE PRECISION NOT NULL)`, + }); + await call('iii-database::execute', { + db: driver, + sql: `INSERT INTO bx_float (f) VALUES (${ph1}), (${ph2}), (${ph3})`, + params: [0.0, 2.5, 1.5e-300], + }); + const q = await call('iii-database::query', { db: driver, sql: 'SELECT f FROM bx_float ORDER BY id' }); + const fs = q.rows.map((r: any) => Number(r.f)); + expect(Math.abs(fs[0] - 0.0) < 1e-12, `f0 ≈ 0.0, got ${fs[0]}`); + expect(Math.abs(fs[1] - 2.5) < 1e-12, `f1 ≈ 2.5, got ${fs[1]}`); + expect(fs[2] < 1e-200 && fs[2] > 0, `f2 is small positive double, got ${fs[2]}`); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE bx_float' }); + }, + }, + { + name: 'JSONB column round-trip (object + array + nested null)', + // Postgres-only: SQLite has no native JSON column type and MySQL's JSON + // column is supported but the driver maps it through MyValue::Bytes → + // RowValue::Text rather than RowValue::Json (different code path; not the + // one this test targets). Postgres jsonb is decoded as RowValue::Json and + // returned via `into_json` — the move-vs-clone path fixed in [H5]. + // + // Scope is limited to JSON-shaped values (objects + arrays). The worker's + // `JsonParam::from_json` only routes Object/Array variants to + // `JsonParam::Json`; bare strings go through `JsonParam::Text` and would + // bind as TEXT, not JSONB — that's a different code path. + applies: ['pg_db'], + async run({ driver, dialect, call }) { + const ph1 = dialect.placeholder(1); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE IF EXISTS bx_jsonb' }); + await call('iii-database::execute', { + db: driver, + sql: `CREATE TABLE bx_jsonb (id ${dialect.idColumnDDL()}, body JSONB NOT NULL, label TEXT NOT NULL)`, + }); + // Each shape must round-trip exactly through the + // RowValue::Json → into_json path without re-serialization quirks.
+ const cases: Array<{ label: string; body: unknown }> = [ + { label: 'obj', body: { user: { id: 7, name: 'O\'Brien', tags: ['a', 'b'] }, count: 42 } }, + { label: 'arr', body: [1, 'two', null, true, { k: 'v' }] }, + { label: 'with_null', body: { a: null, b: 0 } }, + { label: 'empty_obj', body: {} }, + { label: 'empty_arr', body: [] }, + ]; + for (const c of cases) { + const r = await call('iii-database::execute', { + db: driver, + sql: `INSERT INTO bx_jsonb (body, label) VALUES (${ph1}, '${c.label}')`, + params: [c.body], + }); + expectEqual(r.affected_rows, 1, `inserted ${c.label}`); + } + + const q = await call('iii-database::query', { + db: driver, + sql: 'SELECT label, body FROM bx_jsonb ORDER BY id', + }); + expectEqual(q.row_count, cases.length, 'all jsonb rows returned'); + // Postgres jsonb canonicalizes both whitespace AND object key order + // (keys come back in jsonb's storage order, sorted by length then bytewise, + // not insertion order). Compare semantic equality with a stable-key + // canonicalization on both sides. + const canon = (v: unknown): unknown => { + if (Array.isArray(v)) return v.map(canon); + if (v !== null && typeof v === 'object') { + const out: Record<string, unknown> = {}; + for (const k of Object.keys(v as Record<string, unknown>).sort()) { + out[k] = canon((v as Record<string, unknown>)[k]); + } + return out; + } + return v; + }; + for (let i = 0; i < cases.length; i++) { + expectEqual(q.rows[i].label, cases[i].label, `row ${i} label`); + expectEqual(canon(q.rows[i].body), canon(cases[i].body), `row ${i} body (${cases[i].label}) round-trip`); + } + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE bx_jsonb' }); + }, + }, + { + name: 'special characters in string params (parameterized binding)', + async run({ driver, dialect, call }) { + const ph1 = dialect.placeholder(1); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE IF EXISTS bx_special' }); + await call('iii-database::execute', { db: driver, sql: 'CREATE TABLE bx_special (s TEXT NOT NULL)' }); + // If the worker were string-interpolating, single-quote would terminate the literal + // and the trailing "; DROP TABLE …" would execute. Proper parameter binding makes + // the value inert — round-trip equality + table-still-exists asserts that. + const payload = `O'Brien "the\\quoted"\n\t\r-- ; DROP TABLE bx_special`; + await call('iii-database::execute', { + db: driver, + sql: `INSERT INTO bx_special (s) VALUES (${ph1})`, + params: [payload], + }); + const q = await call('iii-database::query', { db: driver, sql: 'SELECT s FROM bx_special' }); + expectEqual(q.rows[0].s, payload, 'special-char string round-trip'); + const q2 = await call('iii-database::query', { + db: driver, + sql: 'SELECT COUNT(*) AS c FROM bx_special', + }); + expectEqual(Number(q2.rows[0].c), 1, 'table not dropped by injection-shaped payload'); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE bx_special' }); + }, + }, + { + // Regression: rust_decimal::Decimal::from_sql returns Err for NaN, + // ±Infinity, and values exceeding its 96-bit range + // (rust_decimal-1.41/src/postgres/driver.rs:91, 109). The previous + // NUMERIC arm let those errors propagate, failing the entire query. + // The driver now layers a custom binary parser (PgNumericText) as + // fallback; this case exercises the fallback end-to-end against a + // real postgres so we know the `try_get → from_sql → fall-through` + // wiring holds beyond the unit tests on the parser itself.
+ name: 'NUMERIC edge cases route through binary fallback (postgres)', + applies: ['pg_db'], + async run({ driver, call }) { + const q = await call('iii-database::query', { + db: driver, + sql: `SELECT 'NaN'::numeric AS nan, + 'Infinity'::numeric AS pinf, + '-Infinity'::numeric AS ninf, + 100000000000000000000000000000::numeric AS big`, + }); + expectEqual(q.rows[0].nan, 'NaN', 'NaN survives via binary fallback'); + expectEqual(q.rows[0].pinf, 'Infinity', '+Infinity survives via binary fallback'); + expectEqual(q.rows[0].ninf, '-Infinity', '-Infinity survives via binary fallback'); + expectEqual(q.rows[0].big, '100000000000000000000000000000', '10^29 (beyond rust_decimal) survives via binary fallback'); + }, + }, + { + // Regression: `String: FromSql::accepts` (postgres-types-0.2/src/lib.rs:729) + // is gated to TEXT/VARCHAR/BPCHAR/NAME/UNKNOWN — it rejects NUMERIC at + // runtime with WrongType. Pre-fix, every SELECT touching a NUMERIC column + // failed; the driver now decodes via rust_decimal::Decimal and stringifies + // for the wire. We exercise positive, negative, fractional, and zero + // values to make sure the conversion path holds end-to-end. + name: 'NUMERIC columns decode to string (postgres)', + applies: ['pg_db'], + async run({ driver, call }) { + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE IF EXISTS bx_numeric' }); + await call('iii-database::execute', { + db: driver, + sql: 'CREATE TABLE bx_numeric (label TEXT NOT NULL, n NUMERIC NOT NULL)', + }); + await call('iii-database::execute', { + db: driver, + sql: `INSERT INTO bx_numeric (label, n) VALUES + ('exact', 12345.6789), + ('negf', -0.001), + ('zero', 0), + ('big', 999999999999.99)`, + }); + const q = await call('iii-database::query', { + db: driver, + sql: 'SELECT label, n FROM bx_numeric ORDER BY label', + }); + const byLabel = Object.fromEntries(q.rows.map((r: Record<string, unknown>) => [r.label, r.n])); + // rust_decimal canonical stringification — no trailing zeros beyond what + // the Decimal carries. + expectEqual(byLabel.exact, '12345.6789', 'NUMERIC positive fractional'); + expectEqual(byLabel.negf, '-0.001', 'NUMERIC negative fractional'); + expectEqual(byLabel.zero, '0', 'NUMERIC zero'); + expectEqual(byLabel.big, '999999999999.99', 'NUMERIC large value'); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE bx_numeric' }); + }, + }, + { + // Regression: postgres-types' chrono FromSql impls bind by exact OID — + // `DateTime<Utc>: accepts!(TIMESTAMPTZ)`, `NaiveDateTime: accepts!(TIMESTAMP)`. + // Decoding `TIMESTAMP WITHOUT TIME ZONE` as `DateTime<Utc>` failed at + // runtime with WrongType. The driver now decodes TIMESTAMP via + // NaiveDateTime and folds it into RowValue::Timestamp by interpreting + // the naive value as UTC. The wire shape is unchanged: an RFC 3339 UTC + // string for both column types.
+ name: 'TIMESTAMP without time zone decodes (postgres)', + applies: ['pg_db'], + async run({ driver, call }) { + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE IF EXISTS bx_ts' }); + await call('iii-database::execute', { + db: driver, + sql: 'CREATE TABLE bx_ts (naive TIMESTAMP NOT NULL, with_tz TIMESTAMPTZ NOT NULL)', + }); + await call('iii-database::execute', { + db: driver, + sql: `INSERT INTO bx_ts (naive, with_tz) VALUES ('2026-04-29 12:00:00', '2026-04-29 12:00:00+00')`, + }); + const q = await call('iii-database::query', { + db: driver, + sql: 'SELECT naive, with_tz FROM bx_ts', + }); + // Pre-fix, querying the `naive` column raised `WrongType` and the entire + // call rejected. Now both columns surface as RFC 3339 UTC strings. + expectEqual(q.rows[0].naive, '2026-04-29T12:00:00Z', 'TIMESTAMP without tz round-trips'); + expectEqual(q.rows[0].with_tz, '2026-04-29T12:00:00Z', 'TIMESTAMPTZ round-trips'); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE bx_ts' }); + }, + }, + { + // Regression for a real bug in driver/mysql.rs::query: previously + // `tokio::time::timeout` wrapped only `conn.exec_iter(...)`, which in + // mysql_async resolves once the server returns column metadata. The + // subsequent `result.collect().await` that streams rows ran unbounded, + // so a query whose planner is fast but whose row stream is slow + // (per-row SLEEP, full scan) silently bypassed `timeout_ms`. + // + // Gated to mysql_db: postgres' `client.query` materializes Vec + // before resolving (entire read is inside the timeout) and SQLite runs + // synchronously on a blocking task, so neither has this failure mode. + name: 'query timeout fires during row streaming (mysql)', + applies: ['mysql_db'], + async run({ driver, call, expectError }) { + // 3 rows × SLEEP(2) = ~6s of server-side row generation. With + // timeout_ms=500 the wrapped pipeline must surface QUERY_TIMEOUT well + // before the stream completes. + const start = Date.now(); + await expectError( + () => + call('iii-database::query', { + db: driver, + sql: 'SELECT SLEEP(2), n FROM (SELECT 1 AS n UNION SELECT 2 UNION SELECT 3) AS t', + timeout_ms: 500, + }), + 'QUERY_TIMEOUT', + ); + const elapsed = Date.now() - start; + expect( + elapsed < 3_000, + `timeout should fire well before the ~6s row stream completes; elapsed=${elapsed}ms`, + ); + }, + }, +]; diff --git a/iii-database/tests/e2e/workers/harness/src/cases-concurrency.ts b/iii-database/tests/e2e/workers/harness/src/cases-concurrency.ts new file mode 100644 index 00000000..cfa63e6b --- /dev/null +++ b/iii-database/tests/e2e/workers/harness/src/cases-concurrency.ts @@ -0,0 +1,93 @@ +import type { TestCase } from './cases.ts'; +import { expect, expectEqual } from './cases.ts'; + +/** + * Pool + handle concurrency cases. Pool max=10 / acquire_timeout=5s per + * config.yaml. Tests run serially per driver, so pool exhaustion in one + * test cannot leak into another within the same driver pass. + */ +export const CONCURRENCY_CASES: TestCase[] = [ + { + name: '10 parallel SELECT 1 against same db', + async run({ driver, call }) { + // Pool max = 10. All 10 queries should finish without queue contention. 
+ const promises = Array.from({ length: 10 }, () => + call('iii-database::query', { db: driver, sql: 'SELECT 1 AS n' }), + ); + const results = await Promise.all(promises); + expectEqual(results.length, 10, '10 results'); + for (const r of results) { + expectEqual(r.row_count, 1, 'each query returned 1 row'); + } + }, + }, + { + name: 'prepared statement reused 50 times sequentially', + async run({ driver, dialect, call }) { + const ph1 = dialect.placeholder(1); + // Bare `SELECT ${ph1} AS v` defaults the param to `text` on Postgres + // because the polymorphic param has no type context — sending int4 binary + // there triggers SQL state 22021 ("character not in repertoire") at decode. + // The fix: anchor the param's type via a real schema column (matches the + // existing prepareStatement + runStatement test in cases.ts). + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE IF EXISTS bx_prep_50x' }); + await call('iii-database::execute', { db: driver, sql: 'CREATE TABLE bx_prep_50x (n INT NOT NULL)' }); + // Seed 50 rows so each iteration can match a unique value. + for (let i = 0; i < 50; i++) { + await call('iii-database::execute', { + db: driver, + sql: `INSERT INTO bx_prep_50x (n) VALUES (${ph1})`, + params: [i], + }); + } + const prep = await call('iii-database::prepareStatement', { + db: driver, + sql: `SELECT n FROM bx_prep_50x WHERE n = ${ph1} LIMIT 1`, + }); + const handleId = prep.handle?.id; + expect(typeof handleId === 'string' && handleId.length > 0, 'handle id present'); + // Default TTL is far longer than 50 iterations; this catches handle-cache + // lifetime bugs where a hot handle gets evicted mid-loop. + for (let i = 0; i < 50; i++) { + const r = await call('iii-database::runStatement', { handle_id: handleId, params: [i] }); + expectEqual(r.row_count, 1, `iter ${i} row_count`); + expectEqual(Number(r.rows[0].n), i, `iter ${i} value`); + } + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE bx_prep_50x' }); + }, + }, + { + name: 'pool exhaustion surfaces POOL_TIMEOUT', + // SQLite has no SLEEP equivalent that holds a connection in the same way; + // gating to pg+mysql keeps this driver-correct. acquire_timeout is 5s in + // config.yaml; we hold connections 6s to force the timeout. + applies: ['pg_db', 'mysql_db'], + async run({ driver, call }) { + const sleepSql = driver === 'pg_db' ? 'SELECT pg_sleep(6)' : 'SELECT SLEEP(6)'; + // 12 concurrent queries against a max=10 pool. Acquire timeout = 5s, query + // hold = 6s, so the 11th and 12th waiters must time out before any holder + // releases. We assert at least one rejection contains POOL_TIMEOUT — the + // exact count depends on scheduler timing. + const promises = Array.from({ length: 12 }, () => + call('iii-database::query', { db: driver, sql: sleepSql, timeout_ms: 30_000 }), + ); + const settled = await Promise.allSettled(promises); + const rejected = settled.filter((s) => s.status === 'rejected') as PromiseRejectedResult[]; + const fulfilled = settled.filter((s) => s.status === 'fulfilled'); + expect( + rejected.length >= 1, + `expected at least 1 POOL_TIMEOUT rejection, got 0 (fulfilled=${fulfilled.length})`, + ); + const sawPoolTimeout = rejected.some((r) => { + const msg = (r.reason as any)?.message ?? String(r.reason); + return msg.includes('POOL_TIMEOUT'); + }); + expect( + sawPoolTimeout, + `at least one rejection should be POOL_TIMEOUT; reasons: ${rejected + .map((r) => (r.reason as any)?.message ?? 
String(r.reason)) + .join(' | ')}`, + ); + }, + }, +]; diff --git a/iii-database/tests/e2e/workers/harness/src/cases-protocol.ts b/iii-database/tests/e2e/workers/harness/src/cases-protocol.ts new file mode 100644 index 00000000..62fbb783 --- /dev/null +++ b/iii-database/tests/e2e/workers/harness/src/cases-protocol.ts @@ -0,0 +1,94 @@ +import type { TestCase } from './cases.ts'; +import { expect, expectEqual } from './cases.ts'; + +/** + * Protocol-misuse cases — assert the worker returns the documented error code + * for malformed or out-of-bounds requests rather than crashing or silently + * succeeding. The worker wraps DbError as IIIError::Handler(json_string) per + * error.rs:58-64; the harness's `expectError` matches the error code as a + * substring of the rejection message. + */ +export const PROTOCOL_CASES: TestCase[] = [ + { + name: 'unknown db rejects with UNKNOWN_DB', + async run({ call, expectError }) { + await expectError( + () => call('iii-database::query', { db: 'no_such_db', sql: 'SELECT 1' }), + 'UNKNOWN_DB', + ); + }, + }, + { + name: 'empty SQL rejects with DRIVER_ERROR', + async run({ driver, call, expectError }) { + await expectError( + () => call('iii-database::query', { db: driver, sql: '' }), + 'DRIVER_ERROR', + ); + }, + }, + { + name: 'unknown handle id rejects with STATEMENT_NOT_FOUND', + async run({ call, expectError }) { + await expectError( + () => + call('iii-database::runStatement', { + handle_id: '00000000-0000-0000-0000-000000000000', + params: [], + }), + 'STATEMENT_NOT_FOUND', + ); + }, + }, + { + name: 'runStatement with wrong param count rejects', + async run({ driver, dialect, call, expectError }) { + const ph1 = dialect.placeholder(1); + // Use a fresh prepare so test order doesn't matter. + const prep = await call('iii-database::prepareStatement', { + db: driver, + sql: `SELECT ${ph1} AS v`, + }); + const handleId = prep.handle?.id; + expect(typeof handleId === 'string' && handleId.length > 0, 'handle id present'); + // Driver should reject param-count mismatch. Exact code varies by driver + // (DRIVER_ERROR with inner SQL state); we match on DRIVER_ERROR. + await expectError( + () => call('iii-database::runStatement', { handle_id: handleId, params: [] }), + 'DRIVER_ERROR', + ); + }, + }, + { + name: 'prepared statement after TTL expiry rejects', + async run({ driver, dialect, call, expectError }) { + const ph1 = dialect.placeholder(1); + const prep = await call('iii-database::prepareStatement', { + db: driver, + sql: `SELECT ${ph1} AS v`, + ttl_seconds: 1, + }); + const handleId = prep.handle?.id; + expect(typeof handleId === 'string' && handleId.length > 0, 'handle id present'); + // TTL is 1s; the registry evictor sweeps periodically. Wait long enough + // that any reasonable evictor cadence will have run. + await new Promise((r) => setTimeout(r, 1500)); + await expectError( + () => call('iii-database::runStatement', { handle_id: handleId, params: [42] }), + 'STATEMENT_NOT_FOUND', + ); + }, + }, + { + name: 'execute() with SELECT returns 0 affected_rows (does not throw)', + async run({ driver, call }) { + // Contract: execute() is for write-shape SQL but should accept SELECT + // gracefully. affected_rows is undefined for SELECT in most drivers; the + // worker normalizes that to 0. Asserting "no throw" is the main goal — + // the value of affected_rows is a softer assertion. 
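+      // A fulfilled response is expected to look roughly like { affected_rows: 0 };
+      // any additional fields are driver/worker details this case deliberately ignores.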
+ const r = await call('iii-database::execute', { db: driver, sql: 'SELECT 1 AS v' }); + expect(typeof r === 'object' && r !== null, 'response is object'); + expectEqual(typeof r.affected_rows, 'number', 'affected_rows is a number'); + }, + }, +]; diff --git a/iii-database/tests/e2e/workers/harness/src/cases-row-change.ts b/iii-database/tests/e2e/workers/harness/src/cases-row-change.ts new file mode 100644 index 00000000..eb671379 --- /dev/null +++ b/iii-database/tests/e2e/workers/harness/src/cases-row-change.ts @@ -0,0 +1,165 @@ +import type { TestCase } from './cases.ts'; +import { expect, expectEqual } from './cases.ts'; + +/** + * row-change trigger validation. The streaming decoder is stubbed in v1.0 + * (worker rejects `iii-database::row-change` registration with `UNSUPPORTED`), + * so we can't exercise the dispatch path end-to-end yet. What we CAN validate + * is the slot/publication name derivation contract that the worker pins in + * its README — distinct caller-supplied `trigger_id`s must produce distinct + * Postgres replication-slot names so two registrations don't silently share + * one slot once the streaming runtime ships. + * + * Pre-fix: `derive_names` lowercased and replaced non-alnum with `_`, so + * `orders-v1` and `orders.v1` both became `orders_v1` and the second + * registration would silently reuse the first slot. Post-fix: an FNV-1a-32 + * hash of the original trigger_id is appended, guaranteeing uniqueness. + * + * This file mirrors the Rust `derive_names` algorithm in TS so we can + * compute the same names the worker would and assert Postgres treats them + * as distinct identifiers via `pg_create_logical_replication_slot`. + */ + +/** FNV-1a-32 over UTF-8 bytes. Mirrors `triggers/row_change.rs::fnv1a_32`. */ +function fnv1a32(s: string): string { + let hash = 0x811c9dc5 >>> 0; + const bytes = Buffer.from(s, 'utf8'); + for (const b of bytes) { + hash = (hash ^ b) >>> 0; + hash = Math.imul(hash, 0x01000193) >>> 0; + } + return hash.toString(16).padStart(8, '0'); +} + +/** + * TS port of `triggers/row_change.rs::derive_names`. Lowercases ASCII + * alphanumerics, replaces every other char with `_`, truncates the + * sanitized prefix at 40 chars to fit Postgres' 63-byte slot_name limit, + * and appends an 8-hex-char FNV-1a-32 hash of the *original* trigger_id. + */ +function deriveSlotName(triggerId: string): string { + const sanitized = Array.from(triggerId) + .map((c) => (/[a-zA-Z0-9]/.test(c) ? c.toLowerCase() : '_')) + .slice(0, 40) + .join(''); + return `iii_slot_${sanitized}_${fnv1a32(triggerId)}`; +} + +async function dropSlotIfExists( + call: (id: string, payload: unknown) => Promise, + driver: string, + slot: string, +): Promise { + // pg_drop_replication_slot errors if the slot is missing; pre-check then drop. + // Quote-escape the slot name as a SQL literal: replace any `'` with `''` + // (slot names from derive_names are `[a-z0-9_]` only, so this is defensive). 
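+  // For harness-derived names (e.g. deriveSlotName('Orders.v1') yields
+  // 'iii_slot_orders_v1_' plus 8 hex chars) the escape below never changes
+  // anything; it only matters if a caller ever passes a hand-written slot name.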
+ const lit = slot.replace(/'/g, "''"); + const exists = await call('iii-database::query', { + db: driver, + sql: `SELECT 1 FROM pg_replication_slots WHERE slot_name = '${lit}'`, + }); + if (exists.row_count > 0) { + await call('iii-database::execute', { + db: driver, + sql: `SELECT pg_drop_replication_slot('${lit}')`, + }); + } +} + +export const ROW_CHANGE_CASES: TestCase[] = [ + { + name: 'row-change derive_names: collision-prone trigger_ids produce distinct postgres slots', + applies: ['pg_db'], + async run({ driver, call }) { + // These three inputs all sanitized to `orders_v1` in the pre-fix code + // (lowercase + replace non-alnum with `_`). Post-fix, the appended hash + // makes them distinct. We use 3 (not the full 5) because the docker + // postgres image is configured with `max_replication_slots=4`, leaving + // headroom for the long-trigger-id test that runs immediately after. + const ids = ['Orders.v1', 'orders-v1', 'orders v1']; + const slots = ids.map(deriveSlotName); + + // Sanity: TS-derived names must all be distinct. + const unique = new Set(slots); + expectEqual(unique.size, ids.length, 'TS-derived slot names must be unique across collision-prone inputs'); + + // Each slot must respect Postgres' 63-byte limit. + for (const s of slots) { + expect(s.length <= 63, `slot name too long (${s.length} bytes): ${s}`); + } + + // Pre-clean any leftovers from a previous run. + for (const slot of slots) { + await dropSlotIfExists(call, driver, slot); + } + + try { + // Create all five slots. If two collided, the second create call would + // fail with `replication slot ... already exists`. + for (const slot of slots) { + await call('iii-database::execute', { + db: driver, + sql: `SELECT * FROM pg_create_logical_replication_slot('${slot}', 'pgoutput')`, + }); + } + + // Verify Postgres now lists all five as distinct slots. + const inList = slots.map((s) => `'${s}'`).join(', '); + const q = await call('iii-database::query', { + db: driver, + sql: `SELECT slot_name FROM pg_replication_slots WHERE slot_name IN (${inList}) ORDER BY slot_name`, + }); + expectEqual(q.row_count, ids.length, 'all collision-prone inputs produced distinct slots in postgres'); + } finally { + // Cleanup so re-running the harness against the same docker volume is idempotent. + for (const slot of slots) { + try { + await dropSlotIfExists(call, driver, slot); + } catch { + /* best-effort cleanup */ + } + } + } + }, + }, + { + name: 'row-change derive_names: long trigger_id stays within postgres slot-name limit', + applies: ['pg_db'], + async run({ driver, call }) { + // Pathological trigger_id: 200 chars. Without truncation the derived + // name would exceed Postgres' 63-byte slot_name cap and slot creation + // would fail; the hash suffix preserves uniqueness across the truncation. + const a = 'a'.repeat(200); + const b = 'a'.repeat(200) + 'b'; // distinct trigger_id, same first-40 sanitized prefix + const slotA = deriveSlotName(a); + const slotB = deriveSlotName(b); + + expect(slotA !== slotB, `long trigger_ids collided: ${slotA}`); + expect(slotA.length <= 63, `slotA too long (${slotA.length}): ${slotA}`); + expect(slotB.length <= 63, `slotB too long (${slotB.length}): ${slotB}`); + + // Pre-clean. 
+ await dropSlotIfExists(call, driver, slotA); + await dropSlotIfExists(call, driver, slotB); + + try { + await call('iii-database::execute', { + db: driver, + sql: `SELECT * FROM pg_create_logical_replication_slot('${slotA}', 'pgoutput')`, + }); + await call('iii-database::execute', { + db: driver, + sql: `SELECT * FROM pg_create_logical_replication_slot('${slotB}', 'pgoutput')`, + }); + const q = await call('iii-database::query', { + db: driver, + sql: `SELECT slot_name FROM pg_replication_slots WHERE slot_name IN ('${slotA}', '${slotB}')`, + }); + expectEqual(q.row_count, 2, 'long-trigger-id slots created and distinct'); + } finally { + try { await dropSlotIfExists(call, driver, slotA); } catch { /* best-effort */ } + try { await dropSlotIfExists(call, driver, slotB); } catch { /* best-effort */ } + } + }, + }, +]; diff --git a/iii-database/tests/e2e/workers/harness/src/cases-transaction.ts b/iii-database/tests/e2e/workers/harness/src/cases-transaction.ts new file mode 100644 index 00000000..a4c433b7 --- /dev/null +++ b/iii-database/tests/e2e/workers/harness/src/cases-transaction.ts @@ -0,0 +1,135 @@ +import type { TestCase } from './cases.ts'; +import { expect, expectEqual } from './cases.ts'; + +/** + * Transaction edge cases. The function suite covers commit + rollback at + * failed_index=1; these target shapes the function suite leaves alone: + * empty / single-statement / mixed-read-write / failure-at-index-0. + * + * Each test creates its own scratch table to stay independent of `t`. + */ +export const TRANSACTION_EDGE_CASES: TestCase[] = [ + { + name: 'transaction with empty statements array', + async run({ driver, call }) { + // Spec ambiguity: an empty txn is a no-op. Drivers commit an empty + // transaction without error; the worker should pass that through. + const r = await call('iii-database::transaction', { db: driver, statements: [] }); + expectEqual(r.committed, true, 'empty transaction commits'); + expectEqual(Array.isArray(r.results) ? 
r.results.length : 0, 0, 'no results'); + }, + }, + { + name: 'transaction with single statement', + async run({ driver, dialect, call }) { + const ph1 = dialect.placeholder(1); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE IF EXISTS tx_single' }); + await call('iii-database::execute', { + db: driver, + sql: `CREATE TABLE tx_single (id ${dialect.idColumnDDL()}, n INT NOT NULL)`, + }); + const r = await call('iii-database::transaction', { + db: driver, + statements: [{ sql: `INSERT INTO tx_single (n) VALUES (${ph1})`, params: [7] }], + }); + expectEqual(r.committed, true, 'committed=true for single-statement txn'); + const verify = await call('iii-database::query', { + db: driver, + sql: 'SELECT n FROM tx_single', + }); + expectEqual(Number(verify.rows[0].n), 7, 'row landed'); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE tx_single' }); + }, + }, + { + name: 'transaction read-your-writes (mixed INSERT/SELECT/INSERT)', + async run({ driver, dialect, call }) { + const ph1 = dialect.placeholder(1); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE IF EXISTS tx_ryw' }); + await call('iii-database::execute', { + db: driver, + sql: `CREATE TABLE tx_ryw (id ${dialect.idColumnDDL()}, n INT NOT NULL)`, + }); + const r = await call('iii-database::transaction', { + db: driver, + statements: [ + { sql: `INSERT INTO tx_ryw (n) VALUES (${ph1})`, params: [100] }, + { sql: `SELECT n FROM tx_ryw`, params: [] }, + { sql: `INSERT INTO tx_ryw (n) VALUES (${ph1})`, params: [200] }, + ], + }); + expectEqual(r.committed, true, 'committed=true for mixed txn'); + expect(Array.isArray(r.results), 'results is array'); + expectEqual(r.results.length, 3, 'three results'); + // Note: txn results carry rows positionally (Vec>) per + // transaction.rs:64-67 — no column names. This differs from query/runStatement + // which return column-keyed objects. The first SELECT row is `[100]`, not `{n: 100}`. + const selectResult = r.results[1]; + expect( + Array.isArray(selectResult.rows) && selectResult.rows.length === 1, + `select sees exactly one row, got ${JSON.stringify(selectResult)}`, + ); + const firstRow = selectResult.rows[0]; + expect(Array.isArray(firstRow), `row is positional array, got ${JSON.stringify(firstRow)}`); + expectEqual(Number(firstRow[0]), 100, 'read-your-writes: select sees the just-inserted value'); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE tx_ryw' }); + }, + }, + { + name: 'transaction failure at index 0 reports failed_index=0', + async run({ driver, dialect, call }) { + const ph1 = dialect.placeholder(1); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE IF EXISTS tx_fail0' }); + await call('iii-database::execute', { + db: driver, + sql: `CREATE TABLE tx_fail0 (id ${dialect.idColumnDDL()}, n INT NOT NULL)`, + }); + // First statement violates NOT NULL; second never runs. failed_index must be 0. + const r = await call('iii-database::transaction', { + db: driver, + statements: [ + { sql: `INSERT INTO tx_fail0 (n) VALUES (${ph1})`, params: [null] }, + { sql: `INSERT INTO tx_fail0 (n) VALUES (${ph1})`, params: [99] }, + ], + }); + expectEqual(r.committed, false, 'committed=false'); + expectEqual(r.failed_index, 0, 'failed_index=0'); + // Confirm rollback: zero rows. 
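+      // (COUNT(*) is portable across all three drivers; the count may surface
+      // as a string, hence the Number() coercion below.)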
+ const verify = await call('iii-database::query', { + db: driver, + sql: 'SELECT COUNT(*) AS c FROM tx_fail0', + }); + expectEqual(Number(verify.rows[0].c), 0, 'rollback dropped all writes'); + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE tx_fail0' }); + }, + }, + { + // Regression: the sqlite driver previously routed transaction steps via + // brittle text matching (`starts_with("SELECT") || contains(" RETURNING ")`), + // which mis-classified row-producing statements that begin with `WITH ...` + // (CTE-prefixed SELECT) and `VALUES (...)`. Both were sent through + // `c.execute()` which errors with ExecuteReturnedResults and aborted the + // entire transaction. The driver now routes via the planner's + // column_count(), which handles every statement shape correctly. + // + // Gated to sqlite_db: postgres/mysql tx paths use their own routing and + // accept these statement shapes already. The bug was sqlite-specific. + name: 'transaction handles CTE SELECT and VALUES (sqlite)', + applies: ['sqlite_db'], + async run({ driver, call }) { + const r = await call('iii-database::transaction', { + db: driver, + statements: [ + { sql: 'WITH cte AS (SELECT 1 AS n) SELECT n FROM cte' }, + { sql: 'VALUES (10), (20), (30)' }, + ], + }); + expectEqual(r.committed, true, 'CTE+VALUES tx committed'); + expect(Array.isArray(r.results) && r.results.length === 2, 'two step results'); + // CTE SELECT → 1 row + expectEqual(r.results[0].rows.length, 1, 'CTE step row count'); + // VALUES → 3 rows + expectEqual(r.results[1].rows.length, 3, 'VALUES step row count'); + }, + }, +]; diff --git a/iii-database/tests/e2e/workers/harness/src/cases-trigger.ts b/iii-database/tests/e2e/workers/harness/src/cases-trigger.ts new file mode 100644 index 00000000..753aaba6 --- /dev/null +++ b/iii-database/tests/e2e/workers/harness/src/cases-trigger.ts @@ -0,0 +1,121 @@ +import type { TestCase } from './cases.ts'; +import { expect } from './cases.ts'; + +/** + * Trigger config + lifecycle edge cases. Gated to sqlite_db because the + * trigger logic is driver-agnostic and sqlite is the fastest path; running + * across all three drivers buys nothing here. + * + * SDK note: `iii.registerTrigger` returns synchronously without awaiting the + * worker's `register_trigger` handler, so worker-side validation errors do + * NOT surface as a JS rejection. We test broken triggers by *absence of + * dispatches* — register, seed rows, assert nothing arrives. + */ +export const TRIGGER_CASES: TestCase[] = [ + { + name: 'trigger with invalid cursor_table never dispatches', + applies: ['sqlite_db'], + async run({ driver, dialect, iii, call, resetReceived, expectSilence }) { + // Use a unique scratch table per test to stay isolated from the polling case's outbox. + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE IF EXISTS tg_bad_cursor_table' }); + await call('iii-database::execute', { + db: driver, + sql: `CREATE TABLE tg_bad_cursor_table (id ${dialect.idColumnDDL()}, body TEXT NOT NULL)`, + }); + resetReceived(); + const ph1 = dialect.placeholder(1); + // 'bad-name' contains a hyphen → fails validate_sql_identifier. Per + // query_poll.rs:70-78, validate() rejects at first tick before any + // dispatch can land. 
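+      // A valid value would be a bare identifier such as 'tg_bad_cursor_table';
+      // the hyphen is what trips the validator here.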
+ const handle = iii.registerTrigger({ + type: 'iii-database::query-poll', + function_id: 'harness::on_outbox_row', + config: { + trigger_id: `harness-bad-cursor-table-${driver}`, + db: driver, + sql: `SELECT id, body, '${driver}' AS db FROM tg_bad_cursor_table WHERE id > COALESCE(${ph1}, 0) ORDER BY id LIMIT 50`, + interval_ms: 200, + cursor_column: 'id', + cursor_table: 'bad-name', + }, + }); + try { + await call('iii-database::execute', { + db: driver, + sql: 'INSERT INTO tg_bad_cursor_table (body) VALUES (?)', + params: ['x'], + }); + await expectSilence(1500); + } finally { + try { handle.unregister(); } catch { /* ignore */ } + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE tg_bad_cursor_table' }); + } + }, + }, + { + name: 'trigger with cursor_column not in result never dispatches', + applies: ['sqlite_db'], + async run({ driver, dialect, iii, call, resetReceived, expectSilence }) { + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE IF EXISTS tg_bad_col' }); + await call('iii-database::execute', { + db: driver, + sql: `CREATE TABLE tg_bad_col (id ${dialect.idColumnDDL()}, body TEXT NOT NULL)`, + }); + resetReceived(); + const ph1 = dialect.placeholder(1); + // SQL only selects `id` and `body`, but cursor_column is `nonexistent`. + // Per query_poll.rs:107-116, run_one_tick errors at column-lookup time + // and the loop logs+swallows. No dispatch ever lands. + const handle = iii.registerTrigger({ + type: 'iii-database::query-poll', + function_id: 'harness::on_outbox_row', + config: { + trigger_id: `harness-bad-col-${driver}`, + db: driver, + sql: `SELECT id, body, '${driver}' AS db FROM tg_bad_col WHERE id > COALESCE(${ph1}, 0) ORDER BY id LIMIT 50`, + interval_ms: 200, + cursor_column: 'nonexistent', + }, + }); + try { + await call('iii-database::execute', { + db: driver, + sql: 'INSERT INTO tg_bad_col (body) VALUES (?)', + params: ['y'], + }); + await expectSilence(1500); + } finally { + try { handle.unregister(); } catch { /* ignore */ } + await call('iii-database::execute', { db: driver, sql: 'DROP TABLE tg_bad_col' }); + } + }, + }, + { + name: 'unregister of already-unregistered trigger does not throw', + applies: ['sqlite_db'], + async run({ driver, dialect, iii }) { + const ph1 = dialect.placeholder(1); + const handle = iii.registerTrigger({ + type: 'iii-database::query-poll', + function_id: 'harness::on_outbox_row', + config: { + trigger_id: `harness-double-unreg-${driver}`, + db: driver, + sql: `SELECT id, body, '${driver}' AS db FROM outbox WHERE id > COALESCE(${ph1}, 0) ORDER BY id LIMIT 50`, + interval_ms: 1000, + cursor_column: 'id', + }, + }); + // First unregister: real cleanup. + handle.unregister(); + // Second unregister: should be a no-op rather than throwing. + let threw: unknown = null; + try { + handle.unregister(); + } catch (e) { + threw = e; + } + expect(threw === null, `second unregister threw: ${threw}`); + }, + }, +]; diff --git a/iii-database/tests/e2e/workers/harness/src/cases.ts b/iii-database/tests/e2e/workers/harness/src/cases.ts new file mode 100644 index 00000000..8f42a4be --- /dev/null +++ b/iii-database/tests/e2e/workers/harness/src/cases.ts @@ -0,0 +1,201 @@ +import type { ISdk } from 'iii-sdk'; +import type { DriverKey, Dialect } from './dialect.ts'; + +export interface CaseContext { + driver: DriverKey; + dialect: Dialect; + /** Calls a database worker function; returns parsed JSON or throws on engine error. 
*/
+  call: (functionId: string, payload: unknown) => Promise<any>;
+  /** Resolves once N rows for `driver` have arrived via the query-poll sink. */
+  waitForRows: (n: number, timeoutMs: number) => Promise<Array<Record<string, unknown>>>;
+  /** Resets the per-driver received-rows buffer used by `waitForRows`. */
+  resetReceived: () => void;
+  /** Direct SDK access for trigger-config edge-case tests. */
+  iii: ISdk;
+  /**
+   * Asserts that `fn()` rejects and the rejection message contains `expectedCode`.
+   * The worker wraps DbError as `IIIError::Handler(json_string)`, which the engine
+   * surfaces as the JS Error message; substring match is more resilient than
+   * strict JSON parsing across SDK versions.
+   */
+  expectError: (fn: () => Promise<unknown>, expectedCode: string) => Promise<void>;
+  /**
+   * Resolves if NO rows arrive on the polling sink within `timeoutMs`.
+   * Used by trigger validation tests to assert a broken trigger never dispatches.
+   */
+  expectSilence: (timeoutMs: number) => Promise<void>;
+}
+
+export interface TestCase {
+  name: string;
+  /** If set, this case only runs on the listed drivers; otherwise it runs on all. */
+  applies?: readonly DriverKey[];
+  run(ctx: CaseContext): Promise<void>;
+}
+
+export function expectEqual(actual: unknown, expected: unknown, msg: string): void {
+  if (JSON.stringify(actual) !== JSON.stringify(expected)) {
+    throw new Error(`${msg}: expected ${JSON.stringify(expected)}, got ${JSON.stringify(actual)}`);
+  }
+}
+
+export function expect(cond: boolean, msg: string): asserts cond {
+  if (!cond) throw new Error(msg);
+}
+
+export const SCHEMA_RESET: TestCase = {
+  name: 'schema-reset',
+  async run({ driver, dialect, call }) {
+    await call('iii-database::execute', { db: driver, sql: 'DROP TABLE IF EXISTS outbox' });
+    await call('iii-database::execute', { db: driver, sql: 'DROP TABLE IF EXISTS t' });
+    // The query-poll trigger persists cursor state in __iii_cursors. Without
+    // dropping it here, stale cursor values from a prior run survive (Postgres
+    // and MySQL via docker volumes; SQLite via ./data/iii.db) and cause the
+    // first poll to filter out the freshly-inserted ids — producing a "got 0
+    // rows" timeout. Dropping the table here makes the test idempotent across
+    // runs without requiring a manual `docker compose down -v && rm data/iii.db`.
+    await call('iii-database::execute', { db: driver, sql: 'DROP TABLE IF EXISTS __iii_cursors' });
+    await call('iii-database::execute', {
+      db: driver,
+      sql: `CREATE TABLE t (id ${dialect.idColumnDDL()}, n INT NOT NULL)`,
+    });
+    await call('iii-database::execute', {
+      db: driver,
+      sql: `CREATE TABLE outbox (id ${dialect.idColumnDDL()}, body TEXT NOT NULL)`,
+    });
+  },
+};
+
+export const FUNCTION_CASES: TestCase[] = [
+  {
+    name: 'query SELECT 1',
+    async run({ driver, call }) {
+      const r = await call('iii-database::query', { db: driver, sql: 'SELECT 1 AS n' });
+      expectEqual(r.row_count, 1, 'row_count');
+      expect(Array.isArray(r.columns), 'columns is array');
+      expect(r.columns.length === 1, 'one column');
+      expectEqual(r.columns[0].name, 'n', 'column name');
+      // Value may be number or numeric string depending on driver — accept either.
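+      // (e.g. 1 vs '1', depending on how the driver encodes integers on the wire.)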
+ const v = r.rows[0].n; + expect(v === 1 || v === '1', `n value: ${JSON.stringify(v)}`); + }, + }, + { + name: 'execute INSERT (multi-row)', + async run({ driver, dialect, call }) { + const ph1 = dialect.placeholder(1); + const ph2 = dialect.placeholder(2); + const r = await call('iii-database::execute', { + db: driver, + sql: `INSERT INTO t (n) VALUES (${ph1}), (${ph2})`, + params: [10, 20], + }); + expectEqual(r.affected_rows, 2, 'affected_rows after multi-row insert'); + }, + }, + { + name: 'query SELECT after insert', + async run({ driver, call }) { + const r = await call('iii-database::query', { + db: driver, + sql: 'SELECT n FROM t ORDER BY id', + }); + expectEqual(r.row_count, 2, 'two rows returned'); + const ns = r.rows.map((row: any) => Number(row.n)); + expectEqual(ns, [10, 20], 'row values'); + }, + }, + { + name: 'prepareStatement + runStatement', + async run({ driver, dialect, call }) { + const ph1 = dialect.placeholder(1); + const prep = await call('iii-database::prepareStatement', { + db: driver, + sql: `SELECT n FROM t WHERE n = ${ph1}`, + }); + const handleId = prep.handle?.id; + expect(typeof handleId === 'string' && handleId.length > 0, 'handle id present'); + + const r1 = await call('iii-database::runStatement', { handle_id: handleId, params: [10] }); + expectEqual(r1.row_count, 1, 'first runStatement row_count'); + expectEqual(Number(r1.rows[0].n), 10, 'first runStatement value'); + + const r2 = await call('iii-database::runStatement', { handle_id: handleId, params: [20] }); + expectEqual(r2.row_count, 1, 'second runStatement row_count'); + expectEqual(Number(r2.rows[0].n), 20, 'second runStatement value'); + }, + }, + { + name: 'transaction commit', + async run({ driver, dialect, call }) { + const ph1 = dialect.placeholder(1); + const r = await call('iii-database::transaction', { + db: driver, + statements: [ + { sql: `UPDATE t SET n = n + 1 WHERE n = ${ph1}`, params: [10] }, + { sql: `UPDATE t SET n = n + 1 WHERE n = ${ph1}`, params: [20] }, + ], + }); + expectEqual(r.committed, true, 'committed'); + const verify = await call('iii-database::query', { db: driver, sql: 'SELECT n FROM t ORDER BY id' }); + const ns = verify.rows.map((row: any) => Number(row.n)); + expectEqual(ns, [11, 21], 'post-commit values'); + }, + }, + { + name: 'transaction rollback', + async run({ driver, dialect, call }) { + const ph1 = dialect.placeholder(1); + const before = await call('iii-database::query', { db: driver, sql: 'SELECT COUNT(*) AS c FROM t' }); + const beforeCount = Number(before.rows[0].c); + + const r = await call('iii-database::transaction', { + db: driver, + statements: [ + { sql: `INSERT INTO t (n) VALUES (${ph1})`, params: [999] }, + // Second statement violates NOT NULL — forces rollback. + { sql: `INSERT INTO t (n) VALUES (${ph1})`, params: [null] }, + ], + }); + expectEqual(r.committed, false, 'committed=false'); + expectEqual(r.failed_index, 1, 'failed_index=1'); + expect(typeof r.error === 'object' && r.error !== null, 'structured error object'); + + const after = await call('iii-database::query', { db: driver, sql: 'SELECT COUNT(*) AS c FROM t' }); + expectEqual(Number(after.rows[0].c), beforeCount, 'row count unchanged after rollback'); + }, + }, +]; + +export const POLLING_CASE: TestCase = { + name: 'query-poll dispatches new rows incrementally', + async run({ driver, dialect, call, waitForRows, resetReceived }) { + resetReceived(); + const ph1 = dialect.placeholder(1); + const ph2 = dialect.placeholder(2); + const ph3 = dialect.placeholder(3); + // Seed 3 rows. 
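+    // The runner's long-lived query-poll trigger selects outbox rows above the
+    // committed cursor, so all three inserts should be dispatched to the sink
+    // and satisfy waitForRows(3) below.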
+ await call('iii-database::execute', { + db: driver, + sql: `INSERT INTO outbox (body) VALUES (${ph1}), (${ph2}), (${ph3})`, + params: ['a', 'b', 'c'], + }); + + const first = await waitForRows(3, 5_000); + expectEqual(first.length, 3, 'first batch row count'); + const bodies1 = first.map((r) => r.body); + expectEqual(bodies1, ['a', 'b', 'c'], 'first batch body order'); + + // Insert 2 more after the first batch was acked. + resetReceived(); + await call('iii-database::execute', { + db: driver, + sql: `INSERT INTO outbox (body) VALUES (${ph1}), (${ph2})`, + params: ['d', 'e'], + }); + const second = await waitForRows(2, 5_000); + expectEqual(second.length, 2, 'second batch row count'); + const bodies2 = second.map((r) => r.body); + expectEqual(bodies2, ['d', 'e'], 'second batch is delta only'); + }, +}; diff --git a/iii-database/tests/e2e/workers/harness/src/dialect.test.ts b/iii-database/tests/e2e/workers/harness/src/dialect.test.ts new file mode 100644 index 00000000..1ecb65ec --- /dev/null +++ b/iii-database/tests/e2e/workers/harness/src/dialect.test.ts @@ -0,0 +1,26 @@ +// Run with: node --test --import tsx ./src/dialect.test.ts +import { test } from 'node:test'; +import assert from 'node:assert/strict'; +import { dialects } from './dialect.ts'; + +test('sqlite uses ? placeholder and AUTOINCREMENT id', () => { + assert.equal(dialects.sqlite_db.placeholder(1), '?'); + assert.equal(dialects.sqlite_db.placeholder(7), '?'); + assert.equal(dialects.sqlite_db.idColumnDDL(), 'INTEGER PRIMARY KEY AUTOINCREMENT'); +}); + +test('postgres uses $N placeholders and BIGSERIAL id', () => { + assert.equal(dialects.pg_db.placeholder(1), '$1'); + assert.equal(dialects.pg_db.placeholder(2), '$2'); + assert.equal(dialects.pg_db.idColumnDDL(), 'BIGSERIAL PRIMARY KEY'); +}); + +test('mysql uses ? placeholder and AUTO_INCREMENT BIGINT id', () => { + assert.equal(dialects.mysql_db.placeholder(1), '?'); + assert.equal(dialects.mysql_db.placeholder(3), '?'); + assert.equal(dialects.mysql_db.idColumnDDL(), 'BIGINT AUTO_INCREMENT PRIMARY KEY'); +}); + +test('exposes exactly three driver keys', () => { + assert.deepEqual(Object.keys(dialects).sort(), ['mysql_db', 'pg_db', 'sqlite_db']); +}); diff --git a/iii-database/tests/e2e/workers/harness/src/dialect.ts b/iii-database/tests/e2e/workers/harness/src/dialect.ts new file mode 100644 index 00000000..da3dc41f --- /dev/null +++ b/iii-database/tests/e2e/workers/harness/src/dialect.ts @@ -0,0 +1,25 @@ +export type DriverKey = 'sqlite_db' | 'pg_db' | 'mysql_db'; + +export const DRIVER_KEYS: readonly DriverKey[] = ['sqlite_db', 'pg_db', 'mysql_db'] as const; + +export interface Dialect { + /** Returns the parameter placeholder for the i-th (1-indexed) bound value. */ + placeholder(i: number): string; + /** DDL fragment for the auto-increment primary-key id column. 
*/
+  idColumnDDL(): string;
+}
+
+export const dialects: Record<DriverKey, Dialect> = {
+  sqlite_db: {
+    placeholder: () => '?',
+    idColumnDDL: () => 'INTEGER PRIMARY KEY AUTOINCREMENT',
+  },
+  pg_db: {
+    placeholder: (i: number) => `$${i}`,
+    idColumnDDL: () => 'BIGSERIAL PRIMARY KEY',
+  },
+  mysql_db: {
+    placeholder: () => '?',
+    idColumnDDL: () => 'BIGINT AUTO_INCREMENT PRIMARY KEY',
+  },
+};
diff --git a/iii-database/tests/e2e/workers/harness/src/runner.ts b/iii-database/tests/e2e/workers/harness/src/runner.ts
new file mode 100644
index 00000000..6e9b4373
--- /dev/null
+++ b/iii-database/tests/e2e/workers/harness/src/runner.ts
@@ -0,0 +1,317 @@
+import { writeFileSync, mkdirSync } from 'node:fs';
+import { resolve } from 'node:path';
+import type { ISdk } from 'iii-sdk';
+import { DRIVER_KEYS, dialects, type DriverKey } from './dialect.ts';
+import {
+  SCHEMA_RESET,
+  FUNCTION_CASES,
+  POLLING_CASE,
+  type CaseContext,
+  type TestCase,
+} from './cases.ts';
+import { BOUNDARY_CASES } from './cases-boundary.ts';
+import { PROTOCOL_CASES } from './cases-protocol.ts';
+import { TRANSACTION_EDGE_CASES } from './cases-transaction.ts';
+import { CONCURRENCY_CASES } from './cases-concurrency.ts';
+import { TRIGGER_CASES } from './cases-trigger.ts';
+import { ROW_CHANGE_CASES } from './cases-row-change.ts';
+
+interface CaseResult {
+  driver: DriverKey;
+  case: string;
+  status: 'PASS' | 'FAIL';
+  error?: string;
+  duration_ms: number;
+}
+
+type Pending = {
+  n: number;
+  resolve: (rows: Array<Record<string, unknown>>) => void;
+  reject: (err: Error) => void;
+  timer: NodeJS.Timeout;
+};
+
+type ReceivedBuffer = {
+  rows: Array<Record<string, unknown>>;
+  pending: Pending[];
+};
+
+export interface RunnerOptions {
+  iii: ISdk;
+  reportPath: string;
+  filterDriver?: DriverKey;
+}
+
+export class Runner {
+  private buffers: Record<DriverKey, ReceivedBuffer> = {
+    sqlite_db: { rows: [], pending: [] },
+    pg_db: { rows: [], pending: [] },
+    mysql_db: { rows: [], pending: [] },
+  };
+
+  /**
+   * Active poll-trigger handles keyed by driver. We track them so we can
+   * unregister at end-of-run; without that, every harness invocation against
+   * a long-running worker leaves a zombie polling task alive in the worker
+   * process. On the next run, the zombie keeps polling against freshly-reset
+   * tables and races with the new task that registerPollingTrigger spawns,
+   * producing "second batch is delta only" failures even though the trigger
+   * implementation itself is correct.
+   */
+  private triggers: Partial<Record<DriverKey, { unregister: () => void }>> = {};
+
+  constructor(private opts: RunnerOptions) {}
+
+  /** Sink for `iii-database::query-poll` dispatches; routes by `payload.db`. */
+  onOutboxBatch = async (payload: any): Promise<{ ack: boolean; commit_cursor?: string }> => {
+    const db = String(payload?.db ?? '') as DriverKey;
+    if (!this.buffers[db]) {
+      console.error(`[harness] unexpected db in dispatch: ${db}`);
+      return { ack: false };
+    }
+    const rows = (payload.rows ?? []) as Array<Record<string, unknown>>;
+    const buf = this.buffers[db];
+    buf.rows.push(...rows);
+
+    // Compute max id seen so the worker advances the cursor.
+    let maxId: number | undefined;
+    for (const r of buf.rows) {
+      const id = Number((r as any).id);
+      if (Number.isFinite(id)) maxId = maxId === undefined ? id : Math.max(maxId, id);
+    }
+
+    // Resolve any waiters that are now satisfied.
+    buf.pending = buf.pending.filter((p) => {
+      if (buf.rows.length >= p.n) {
+        clearTimeout(p.timer);
+        p.resolve(buf.rows.slice(0, p.n));
+        return false;
+      }
+      return true;
+    });
+
+    return { ack: true, commit_cursor: maxId !== undefined ?
String(maxId) : undefined }; + }; + + private async callOnce(functionId: string, payload: unknown): Promise { + return await this.opts.iii.trigger({ function_id: functionId, payload }); + } + + private async callWithRetry(functionId: string, payload: unknown, attempts = 10): Promise { + let lastErr: unknown; + for (let i = 0; i < attempts; i++) { + try { + return await this.callOnce(functionId, payload); + } catch (e) { + lastErr = e; + await new Promise((r) => setTimeout(r, 200)); + } + } + throw lastErr; + } + + private waitForRows(driver: DriverKey, n: number, timeoutMs: number): Promise>> { + const buf = this.buffers[driver]; + return new Promise((resolveP, rejectP) => { + if (buf.rows.length >= n) { + resolveP(buf.rows.slice(0, n)); + return; + } + const timer = setTimeout(() => { + buf.pending = buf.pending.filter((p) => p.resolve !== resolveP); + rejectP(new Error(`timeout waiting for ${n} rows on ${driver} (got ${buf.rows.length})`)); + }, timeoutMs); + buf.pending.push({ n, resolve: resolveP, reject: rejectP, timer }); + }); + } + + private resetReceived(driver: DriverKey): void { + const buf = this.buffers[driver]; + for (const p of buf.pending) clearTimeout(p.timer); + buf.pending = []; + buf.rows = []; + } + + private async runCase(driver: DriverKey, c: TestCase): Promise { + const start = Date.now(); + const ctx: CaseContext = { + driver, + dialect: dialects[driver], + call: (id, payload) => this.callOnce(id, payload), + waitForRows: (n, t) => this.waitForRows(driver, n, t), + resetReceived: () => this.resetReceived(driver), + iii: this.opts.iii, + expectError: async (fn, expectedCode) => { + try { + await fn(); + } catch (e: any) { + const msg = e?.message ?? String(e); + if (!msg.includes(expectedCode)) { + throw new Error(`expected error code "${expectedCode}", got: ${msg}`); + } + return; + } + throw new Error(`expected throw with code "${expectedCode}", but call resolved`); + }, + expectSilence: async (timeoutMs) => { + // Reset, wait the window, then assert the per-driver buffer is still empty. + // Used by trigger validation tests to prove a broken trigger never dispatches. + const buf = this.buffers[driver]; + const startLen = buf.rows.length; + await new Promise((r) => setTimeout(r, timeoutMs)); + const drift = buf.rows.length - startLen; + if (drift > 0) { + throw new Error( + `expected silence for ${timeoutMs}ms but received ${drift} rows; latest=${JSON.stringify(buf.rows.slice(-Math.min(drift, 3)))}`, + ); + } + }, + }; + try { + await c.run(ctx); + return { driver, case: c.name, status: 'PASS', duration_ms: Date.now() - start }; + } catch (e: any) { + return { + driver, + case: c.name, + status: 'FAIL', + error: e?.message ?? String(e), + duration_ms: Date.now() - start, + }; + } + } + + private async waitForDatabaseWorker(driver: DriverKey): Promise { + // Probe with a no-op query until it succeeds; tolerates worker-startup race. + await this.callWithRetry('iii-database::query', { db: driver, sql: 'SELECT 1' }); + } + + private async registerPollingTrigger(driver: DriverKey): Promise { + const ph1 = dialects[driver].placeholder(1); + const handle = this.opts.iii.registerTrigger({ + type: 'iii-database::query-poll', + function_id: 'harness::on_outbox_row', + config: { + trigger_id: `harness-poll-${driver}`, + db: driver, + sql: `SELECT id, body FROM outbox WHERE id > COALESCE(${ph1}, 0) ORDER BY id LIMIT 50`, + interval_ms: 500, + cursor_column: 'id', + }, + }); + this.triggers[driver] = handle; + } + + /** Unregister all active poll triggers. Idempotent. 
*/ + private async unregisterAllTriggers(): Promise { + for (const driver of DRIVER_KEYS) { + const t = this.triggers[driver]; + if (t) { + try { + t.unregister(); + } catch (e) { + console.error(`[harness] unregister ${driver}: ${e}`); + } + delete this.triggers[driver]; + } + } + } + + async runAll(): Promise<{ pass: number; total: number; results: CaseResult[] }> { + const drivers: DriverKey[] = this.opts.filterDriver ? [this.opts.filterDriver] : [...DRIVER_KEYS]; + // Wait for the database worker to be reachable on the first driver before kicking off. + await this.waitForDatabaseWorker(drivers[0]); + + const results: CaseResult[] = []; + const matchesDriver = (driver: DriverKey, c: TestCase) => + !c.applies || c.applies.includes(driver); + + // Stream each case result to stdout as it completes, instead of buffering + // until runAll returns. Slow tests (TTL expiry, pool exhaustion) take 5+ + // seconds individually — the user wants to see progress, not wait blind. + // + // Color the PASS/FAIL tag green/red, but only when stdout is a TTY. When + // run-tests.sh redirects stdout to a log file, isTTY is false and we + // emit plain text (otherwise ANSI escapes show up as garbage in the log). + const useColor = process.stdout.isTTY === true; + const GREEN = useColor ? '\x1b[32m' : ''; + const RED = useColor ? '\x1b[31m' : ''; + const RESET = useColor ? '\x1b[0m' : ''; + const record = (r: CaseResult): CaseResult => { + const color = r.status === 'PASS' ? GREEN : RED; + const err = r.error ? ' — ' + r.error : ''; + console.log(`[harness] ${color}${r.status}${RESET} ${r.driver} :: ${r.case} (${r.duration_ms}ms)${err}`); + results.push(r); + return r; + }; + + for (const driver of drivers) { + // Always run the schema reset; not a counted case but failures abort this driver. + const reset = record(await this.runCase(driver, SCHEMA_RESET)); + if (reset.status === 'FAIL') continue; + + // Function suite (6 cases). + for (const c of FUNCTION_CASES) { + record(await this.runCase(driver, c)); + } + + // Boundary, protocol, transaction-edge, concurrency, row-change cases. + // Each test is self-contained (creates and drops its own scratch tables + // / replication slots) so order doesn't matter. + for (const c of [ + ...BOUNDARY_CASES, + ...PROTOCOL_CASES, + ...TRANSACTION_EDGE_CASES, + ...CONCURRENCY_CASES, + ...ROW_CHANGE_CASES, + ]) { + if (!matchesDriver(driver, c)) continue; + record(await this.runCase(driver, c)); + } + + // Register the per-driver query-poll trigger before the polling case. + try { + await this.registerPollingTrigger(driver); + } catch (e: any) { + record({ + driver, + case: 'register-poll-trigger', + status: 'FAIL', + error: e?.message ?? String(e), + duration_ms: 0, + }); + continue; + } + + record(await this.runCase(driver, POLLING_CASE)); + + // Trigger validation cases (sqlite_db only). Run after the polling case + // so the long-lived polling trigger has already been registered and the + // per-driver buffer state is well-defined. Each trigger case unregisters + // its own ad-hoc trigger; the long-lived polling trigger keeps running. + // We pause the long-lived polling trigger's effect by relying on a + // unique scratch table per case (so the polling trigger's outbox SELECT + // is unaffected by trigger-test inserts), and reset the buffer at the + // start of each case via resetReceived(). 
+ for (const c of TRIGGER_CASES) { + if (!matchesDriver(driver, c)) continue; + record(await this.runCase(driver, c)); + } + } + + // Cleanup: unregister all active triggers so re-running the harness + // against the same worker process doesn't leave zombie pollers running. + await this.unregisterAllTriggers(); + + const counted = results.filter((r) => r.case !== 'schema-reset' && r.case !== 'register-poll-trigger'); + const pass = counted.filter((r) => r.status === 'PASS').length; + + mkdirSync(resolve(this.opts.reportPath, '..'), { recursive: true }); + writeFileSync( + this.opts.reportPath, + JSON.stringify({ pass, total: counted.length, results }, null, 2), + ); + + return { pass, total: counted.length, results }; + } +} diff --git a/iii-database/tests/e2e/workers/harness/src/test.ts b/iii-database/tests/e2e/workers/harness/src/test.ts new file mode 100644 index 00000000..a45789e9 --- /dev/null +++ b/iii-database/tests/e2e/workers/harness/src/test.ts @@ -0,0 +1,17 @@ +import { call } from 'iii-sdk' + +await call('iii-database::execute', { + db: 'primary', + sql: 'CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, email TEXT)' +}) + +await call('iii-database::execute', { + db: 'primary', + sql: 'INSERT INTO users (email) VALUES (?), (?)', + params: ['a@x', 'b@x'] +}) + +const { rows } = await call('iii-database::query', { + db: 'primary', + sql: 'SELECT id, email FROM users ORDER BY id' +}) \ No newline at end of file diff --git a/iii-database/tests/e2e/workers/harness/src/worker.ts b/iii-database/tests/e2e/workers/harness/src/worker.ts new file mode 100644 index 00000000..4cde2934 --- /dev/null +++ b/iii-database/tests/e2e/workers/harness/src/worker.ts @@ -0,0 +1,58 @@ +import { registerWorker, Logger } from 'iii-sdk'; +import { resolve } from 'node:path'; +import { Runner } from './runner.ts'; +import type { DriverKey } from './dialect.ts'; + +const URL = process.env.III_URL ?? 'ws://127.0.0.1:49134'; +const REPORT_PATH = resolve(process.env.HARNESS_REPORT_PATH ?? './reports/report.json'); +const FILTER = process.env.HARNESS_FILTER as DriverKey | undefined; + +const iii = registerWorker(URL); +const logger = new Logger(); +const runner = new Runner({ iii, reportPath: REPORT_PATH, filterDriver: FILTER }); + +iii.registerFunction( + 'harness::on_outbox_row', + async (payload: unknown) => runner.onOutboxBatch(payload), + { description: 'Sink for iii-database::query-poll dispatches; routes by payload.db.' }, +); + +logger.info('harness: registered, kicking off suite', { url: URL, filter: FILTER ?? 'all', reportPath: REPORT_PATH }); + +(async () => { + // ANSI colors only when stdout is a TTY — run-tests.sh redirects to a log file, + // and bash's grep for the HARNESS_DONE sentinel must see plain text. + const useColor = process.stdout.isTTY === true; + const GREEN = useColor ? '\x1b[32m' : ''; + const RED = useColor ? '\x1b[31m' : ''; + const RESET = useColor ? '\x1b[0m' : ''; + let exitCode = 1; + try { + // Per-case results stream to stdout as they complete (see runner.ts). + // Here we just wait for the run and emit the final sentinel. + const { pass, total } = await runner.runAll(); + const status = pass === total ? 'PASS' : 'FAIL'; + const color = status === 'PASS' ? GREEN : RED; + console.log(`HARNESS_DONE: ${color}${status}${RESET} ${pass}/${total}`); + exitCode = status === 'PASS' ? 0 : 1; + } catch (e: any) { + console.error('[harness] fatal:', e?.stack ?? 
e); + console.log(`HARNESS_DONE: ${RED}FAIL${RESET} 0/0`); + exitCode = 1; + } + // runAll() called runner.unregisterAllTriggers() which writes UnregisterTrigger + // messages to the websocket synchronously. The SDK's Trigger.unregister() is + // fire-and-forget — sendMessage queues bytes but doesn't await the engine ACK. + // Without this drain step, process.exit() terminates before the OS flushes + // the TCP send buffer, the database worker never sees the unregister, and its + // QueryPollTrigger tasks keep polling — causing the engine to log + // "Function not found: harness::on_outbox_row" every 500ms until the worker + // is restarted (or the next harness run evicts the zombie via trigger_id dedup). + // + // 200ms grace lets the OS flush ws bytes; iii.shutdown() then closes the ws + // and drains OTel queues. iii.shutdown() itself does NOT await the ws close + // handshake, hence the explicit delay. + await new Promise((r) => setTimeout(r, 200)); + await iii.shutdown(); + process.exit(exitCode); +})(); diff --git a/iii-database/tests/e2e/workers/harness/tsconfig.json b/iii-database/tests/e2e/workers/harness/tsconfig.json new file mode 100644 index 00000000..91c7e388 --- /dev/null +++ b/iii-database/tests/e2e/workers/harness/tsconfig.json @@ -0,0 +1,14 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "ESNext", + "moduleResolution": "bundler", + "esModuleInterop": true, + "strict": true, + "skipLibCheck": true, + "noEmit": true, + "allowImportingTsExtensions": true + }, + "include": ["src/**/*"], + "exclude": ["node_modules", "dist"] +} diff --git a/iii-database/tests/integration.rs b/iii-database/tests/integration.rs new file mode 100644 index 00000000..5be3ebda --- /dev/null +++ b/iii-database/tests/integration.rs @@ -0,0 +1,181 @@ +//! Integration: build a local AppState from a YAML config and exercise each +//! function handler end-to-end against an in-memory SQLite database. 
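+//! Everything here runs against `sqlite::memory:`, so no docker services are
+//! required; the gated postgres/mysql driver tests are the ones that need the
+//! compose stack.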
+
+use iii_database::config::WorkerConfig;
+use iii_database::handle::HandleRegistry;
+use iii_database::handlers::execute::ExecuteReq;
+use iii_database::handlers::prepare::PrepareReq;
+use iii_database::handlers::query::QueryReq;
+use iii_database::handlers::run_statement::RunReq;
+use iii_database::handlers::transaction::TxReq;
+use iii_database::handlers::{execute, prepare, query, run_statement, transaction, AppState};
+use iii_database::pool;
+use iii_sdk::RegisterFunction;
+use serde_json::json;
+use std::collections::HashMap;
+use std::sync::Arc;
+
+async fn build_state() -> AppState {
+    let yaml = "databases:\n  primary:\n    url: \"sqlite::memory:\"\n";
+    let cfg = WorkerConfig::from_yaml(yaml).unwrap();
+    let mut pools = HashMap::new();
+    for (name, db) in &cfg.databases {
+        let p = pool::build(name, db).await.unwrap();
+        pools.insert(name.clone(), p);
+    }
+    AppState {
+        pools: Arc::new(pools),
+        handles: Arc::new(HandleRegistry::new()),
+    }
+}
+
+#[tokio::test(flavor = "multi_thread")]
+async fn end_to_end_query_execute_prepare_run_transaction() {
+    let st = build_state().await;
+
+    // Schema setup via execute
+    execute::handle(
+        &st,
+        serde_json::from_value::<ExecuteReq>(json!({
+            "db": "primary",
+            "sql": "CREATE TABLE t (id INTEGER PRIMARY KEY, n INT)"
+        }))
+        .unwrap(),
+    )
+    .await
+    .unwrap();
+
+    // Insert via execute (multi-row VALUES is a single INSERT statement, OK for SQLite)
+    let r = execute::handle(
+        &st,
+        serde_json::from_value::<ExecuteReq>(json!({
+            "db": "primary",
+            "sql": "INSERT INTO t (n) VALUES (?), (?)",
+            "params": [10, 20]
+        }))
+        .unwrap(),
+    )
+    .await
+    .unwrap();
+    assert_eq!(r.affected_rows, 2);
+
+    // Read via query
+    let r = query::handle(
+        &st,
+        serde_json::from_value::<QueryReq>(json!({
+            "db": "primary",
+            "sql": "SELECT id, n FROM t ORDER BY id"
+        }))
+        .unwrap(),
+    )
+    .await
+    .unwrap();
+    assert_eq!(r.row_count, 2);
+
+    // Prepare + run
+    let p = prepare::handle(
+        &st,
+        serde_json::from_value::<PrepareReq>(json!({
+            "db": "primary",
+            "sql": "SELECT n FROM t WHERE id = ?"
+        }))
+        .unwrap(),
+    )
+    .await
+    .unwrap();
+    let id = p.handle.id.clone();
+    let r = run_statement::handle(
+        &st,
+        serde_json::from_value::<RunReq>(json!({"handle_id": id, "params": [1]})).unwrap(),
+    )
+    .await
+    .unwrap();
+    assert_eq!(r.row_count, 1);
+
+    // Transaction
+    let r = transaction::handle(
+        &st,
+        serde_json::from_value::<TxReq>(json!({
+            "db": "primary",
+            "statements": [
+                {"sql": "UPDATE t SET n = n + 1 WHERE id = ?", "params": [1]},
+                {"sql": "UPDATE t SET n = n + 1 WHERE id = ?", "params": [2]},
+            ]
+        }))
+        .unwrap(),
+    )
+    .await
+    .unwrap();
+    assert!(r.committed);
+
+    // Verify final state
+    let r = query::handle(
+        &st,
+        serde_json::from_value::<QueryReq>(json!({
+            "db": "primary",
+            "sql": "SELECT n FROM t ORDER BY id"
+        }))
+        .unwrap(),
+    )
+    .await
+    .unwrap();
+    assert_eq!(r.rows[0]["n"], 11);
+    assert_eq!(r.rows[1]["n"], 21);
+}
+
+#[test]
+fn binary_name_matches_manifest() {
+    assert_eq!(iii_database::worker_name(), "iii-database");
+}
+
+/// Regression: every RPC function must register through the typed
+/// `RegisterFunction::new_async` API so the engine receives auto-generated
+/// JSON Schemas. Without this the public API Reference shows empty schemas.
+/// If someone adds a new function via `register_function_with(...)`, this test
+/// won't catch it directly — but it locks the typed shape for the existing 5.
+#[test] +fn registered_functions_carry_request_and_response_schemas() { + fn assert_schemas(id: &str, f: F) + where + T: serde::de::DeserializeOwned + schemars::JsonSchema + Send + 'static, + F: Fn(T) -> Fut + Send + Sync + 'static, + Fut: std::future::Future> + Send + 'static, + R: serde::Serialize + schemars::JsonSchema + Send + 'static, + E: std::fmt::Display + Send + 'static, + { + let reg = RegisterFunction::new_async(id, f); + assert!( + reg.request_format().is_some(), + "{id} missing request_format — did you switch back to register_function_with?" + ); + assert!( + reg.response_format().is_some(), + "{id} missing response_format" + ); + } + + // We can't move a real AppState into these closures (it owns DB pools), + // so we just verify the schema-derivation path with the public Req/Resp + // types. Any drift in the typed contract surfaces here as a compile error. + async fn _q(_: QueryReq) -> Result { + unreachable!() + } + async fn _e(_: ExecuteReq) -> Result { + unreachable!() + } + async fn _p(_: PrepareReq) -> Result { + unreachable!() + } + async fn _r(_: RunReq) -> Result { + unreachable!() + } + async fn _t(_: TxReq) -> Result { + unreachable!() + } + + assert_schemas("iii-database::query", _q); + assert_schemas("iii-database::execute", _e); + assert_schemas("iii-database::prepareStatement", _p); + assert_schemas("iii-database::runStatement", _r); + assert_schemas("iii-database::transaction", _t); +} diff --git a/iii-database/tests/value_coercion.rs b/iii-database/tests/value_coercion.rs new file mode 100644 index 00000000..0872fd08 --- /dev/null +++ b/iii-database/tests/value_coercion.rs @@ -0,0 +1,29 @@ +//! Cross-cutting coercion tests — every JSON shape, both directions. + +use iii_database::value::{JsonParam, RowValue}; +use serde_json::json; + +#[test] +fn from_json_slice_happy_path() { + // exercises the slice helper's happy path; the InvalidParam branch is hard + // to trigger from JSON in serde_json 1.x and is left for direct unit testing. + let values = vec![json!(1), json!("ok"), json!(null)]; + let out = JsonParam::from_json_slice(&values).unwrap(); + assert_eq!(out.len(), 3); + assert_eq!(out[0], JsonParam::Int(1)); + assert_eq!(out[1], JsonParam::Text("ok".into())); + assert_eq!(out[2], JsonParam::Null); +} + +#[test] +fn row_value_round_trip_text() { + assert_eq!(RowValue::Text("hi".into()).to_json(), json!("hi")); +} + +#[test] +fn row_value_float_nan_becomes_null() { + // serde_json::Number cannot represent NaN; we surface it as JSON null + // rather than failing. + let v = RowValue::Float(f64::NAN); + assert_eq!(v.to_json(), json!(null)); +}