diff --git a/.github/Dockerfile b/.github/Dockerfile index 3d93cbb8d..550c09346 100644 --- a/.github/Dockerfile +++ b/.github/Dockerfile @@ -25,7 +25,7 @@ RUN apt-get update && export DEBIAN_FRONTEND=noninteractive && \ libxdamage1 libxext6 libxfixes3 libxi6 libxrandr2 libxrender1 \ libxss1 libxtst6 lsb-release wget xdg-utils \ # Additional tools - gh libgmp3-dev software-properties-common curl git unzip bc \ + gh libgmp3-dev curl git unzip bc \ debian-keyring debian-archive-keyring apt-transport-https \ # LLVM dependencies llvm-19 llvm-19-dev llvm-19-runtime clang-19 clang-tools-19 \ @@ -117,9 +117,18 @@ ENV PATH="$ASDF_DATA_DIR/shims:$PATH" RUN asdf plugin add scarb && asdf install scarb 2.8.2 && asdf set scarb 2.8.2 # Install caddy -RUN curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg && \ - curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | tee /etc/apt/sources.list.d/caddy-stable.list && \ - apt update && apt install caddy && rm -rf /var/lib/apt/lists/* +ARG TARGETPLATFORM +RUN if [ "$TARGETPLATFORM" = "linux/arm64" ] ; then \ + curl -L https://github.com/caddyserver/caddy/releases/download/v2.10.0/caddy_2.10.0_linux_arm64.tar.gz -o caddy.tar.gz && \ + tar -xzf caddy.tar.gz caddy && \ + mv caddy /usr/local/bin/ && \ + rm caddy.tar.gz; \ + elif [ "$TARGETPLATFORM" = "linux/amd64" ] ; then \ + curl -L https://github.com/caddyserver/caddy/releases/download/v2.10.0/caddy_2.10.0_linux_amd64.tar.gz -o caddy.tar.gz && \ + tar -xzf caddy.tar.gz caddy && \ + mv caddy /usr/local/bin/ && \ + rm caddy.tar.gz; \ + fi # Set permissions RUN chown -R root:root /usr/local/cargo && chmod -R 700 /usr/local/cargo diff --git a/.github/workflows/build-and-push-docker.yml b/.github/workflows/build-and-push-docker.yml index 97208b8ce..d8a001132 100644 --- a/.github/workflows/build-and-push-docker.yml +++ b/.github/workflows/build-and-push-docker.yml @@ -14,8 +14,8 @@ on: - 
".github/Dockerfile" env: - RUST_VERSION: 1.86.0 - CLIPPY_VERSION: nightly-2025-02-20 + RUST_VERSION: 1.89.0 + CLIPPY_VERSION: nightly-2025-06-20 jobs: # Docker tag determination based on workflow trigger: diff --git a/.github/workflows/dockerfile-build-test.yml b/.github/workflows/dockerfile-build-test.yml index 184493f2b..82275d05b 100644 --- a/.github/workflows/dockerfile-build-test.yml +++ b/.github/workflows/dockerfile-build-test.yml @@ -6,8 +6,8 @@ on: - ".github/Dockerfile" env: - RUST_VERSION: 1.86.0 - CLIPPY_VERSION: nightly-2025-02-20 + RUST_VERSION: 1.89.0 + CLIPPY_VERSION: nightly-2025-06-20 jobs: build-dev-image: diff --git a/.github/workflows/release-dispatch.yml b/.github/workflows/release-dispatch.yml index 5100cc8f9..b8ca1b6e3 100644 --- a/.github/workflows/release-dispatch.yml +++ b/.github/workflows/release-dispatch.yml @@ -71,5 +71,5 @@ jobs: title: "release(prepare): v${{ steps.version_info.outputs.version }}" commit-message: "release(prepare): v${{ steps.version_info.outputs.version }}" branch: prepare-release - base: main + base: ${{ github.ref_name }} delete-branch: true diff --git a/.github/workflows/release-tee.yml b/.github/workflows/release-tee.yml new file mode 100644 index 000000000..ca9983096 --- /dev/null +++ b/.github/workflows/release-tee.yml @@ -0,0 +1,125 @@ +name: release-tee + +on: + workflow_dispatch: + inputs: + tag: + description: "Release tag (e.g., v1.7.0). 
If empty, uses version from Cargo.toml" + type: string + required: false + +env: + CARGO_TERM_COLOR: always + +permissions: + id-token: write # OIDC token for Sigstore signing + attestations: write # Persist attestations + contents: write # Release artifact upload + +jobs: + prepare: + runs-on: ubuntu-latest + outputs: + tag_name: ${{ steps.release_info.outputs.tag_name }} + steps: + - uses: actions/checkout@v4 + - name: Get version + id: release_info + run: | + if [[ -n "${{ github.event.inputs.tag }}" ]]; then + echo "tag_name=${{ github.event.inputs.tag }}" >> $GITHUB_OUTPUT + else + cargo install cargo-get + echo "tag_name=v$(cargo get workspace.package.version)" >> $GITHUB_OUTPUT + fi + + build-contracts: + runs-on: ubuntu-latest + needs: prepare + container: + image: ghcr.io/dojoengine/katana-dev:latest + steps: + - uses: actions/checkout@v4 + - name: Build contracts + run: make contracts + - name: Upload contract artifacts + uses: actions/upload-artifact@v4 + with: + name: contract-artifacts + path: ./crates/contracts/build + retention-days: 1 + + reproducible-build: + name: Reproducible TEE Build + needs: [prepare, build-contracts] + runs-on: ubuntu-latest-8-cores + outputs: + binary-hash: ${{ steps.hash.outputs.sha384 }} + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Download contract artifacts + uses: actions/download-artifact@v4 + with: + name: contract-artifacts + path: ./crates/contracts/build + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build reproducible binary + run: | + docker build \ + -f reproducible.Dockerfile \ + -t katana-reproducible:${{ needs.prepare.outputs.tag_name }} \ + --build-arg SOURCE_DATE_EPOCH=$(git log -1 --format=%ct) \ + --no-cache \ + . 
+ + - name: Extract binary from container + run: | + docker create --name katana-extract katana-reproducible:${{ needs.prepare.outputs.tag_name }} + docker cp katana-extract:/katana ./katana-reproducible + docker rm katana-extract + + - name: Calculate binary hash + id: hash + run: | + SHA384=$(sha384sum ./katana-reproducible | cut -d ' ' -f 1) + echo "sha384=${SHA384}" >> $GITHUB_OUTPUT + echo "Binary SHA-384: ${SHA384}" + + - name: Archive reproducible binary + env: + VERSION_NAME: ${{ needs.prepare.outputs.tag_name }} + run: | + tar -czvf "katana_${VERSION_NAME}_linux_amd64_tee.tar.gz" katana-reproducible + sha384sum katana-reproducible > "katana_${VERSION_NAME}_linux_amd64_tee.sha384" + + - name: Generate build provenance attestation + id: attest + uses: actions/attest-build-provenance@v2 + with: + subject-path: ./katana-reproducible + + - name: Upload release artifacts + uses: actions/upload-artifact@v4 + with: + name: tee-release-artifacts + path: | + katana_${{ needs.prepare.outputs.tag_name }}_linux_amd64_tee.tar.gz + katana_${{ needs.prepare.outputs.tag_name }}_linux_amd64_tee.sha384 + + - name: Summary + run: | + echo "## TEE Reproducible Build Complete" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**Version:** ${{ needs.prepare.outputs.tag_name }}" >> $GITHUB_STEP_SUMMARY + echo "**SHA-384:** \`${{ steps.hash.outputs.sha384 }}\`" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "### Verify Attestation" >> $GITHUB_STEP_SUMMARY + echo "\`\`\`bash" >> $GITHUB_STEP_SUMMARY + echo "gh attestation verify ./katana-reproducible --repo ${{ github.repository }}" >> $GITHUB_STEP_SUMMARY + echo "\`\`\`" >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index a4c498d8d..15448aa71 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,6 +10,7 @@ on: types: [closed] branches: - main + - "release/**" env: RUST_VERSION: 1.86.0 @@ -98,16 +99,6 @@ jobs: 
arch: arm64 svm_target_platform: linux-aarch64 native_build: false - - os: macos-13 - platform: darwin - target: x86_64-apple-darwin - arch: amd64 - native_build: true - - os: macos-13 - platform: darwin - target: x86_64-apple-darwin - arch: amd64 - native_build: false - os: macos-latest-xlarge platform: darwin target: aarch64-apple-darwin diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 3d4303ffe..263eef797 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -4,6 +4,7 @@ on: push: branches: - main + - "release/**" paths: - "Cargo.toml" - "bin/**/*.rs" @@ -214,42 +215,43 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} files: lcov.info - snos-integration-test: - needs: [fmt, clippy] - runs-on: ubuntu-latest-32-cores - timeout-minutes: 30 - if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.event.pull_request.draft == false) - container: - image: ghcr.io/dojoengine/katana-dev:latest - env: - MLIR_SYS_190_PREFIX: /usr/lib/llvm-19/ - LLVM_SYS_191_PREFIX: /usr/lib/llvm-19/ - TABLEGEN_190_PREFIX: /usr/lib/llvm-19/ - steps: - - uses: actions/checkout@v3 - with: - submodules: recursive - # Workaround for https://github.com/actions/runner-images/issues/6775 - - run: git config --global --add safe.directory "*" - - - uses: Swatinem/rust-cache@v2 - with: - key: ci-${{ github.job }} - shared-key: katana-ci-cache - - - name: Download test artifacts - uses: actions/download-artifact@v5 - with: - name: test-artifacts - - - name: Prepare SNOS test environment - run: | - if [ ! 
-d "./tests/snos/snos/build" ]; then - make snos-artifacts - fi - - - run: | - cargo run -p snos-integration-test + # TODO: re-enable once the snos crate is added back to the workspace + # snos-integration-test: + # needs: [fmt, clippy] + # runs-on: ubuntu-latest-32-cores + # timeout-minutes: 30 + # if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.event.pull_request.draft == false) + # container: + # image: ghcr.io/dojoengine/katana-dev:latest + # env: + # MLIR_SYS_190_PREFIX: /usr/lib/llvm-19/ + # LLVM_SYS_191_PREFIX: /usr/lib/llvm-19/ + # TABLEGEN_190_PREFIX: /usr/lib/llvm-19/ + # steps: + # - uses: actions/checkout@v3 + # with: + # submodules: recursive + # # Workaround for https://github.com/actions/runner-images/issues/6775 + # - run: git config --global --add safe.directory "*" + + # - uses: Swatinem/rust-cache@v2 + # with: + # key: ci-${{ github.job }} + # shared-key: katana-ci-cache + + # - name: Download test artifacts + # uses: actions/download-artifact@v5 + # with: + # name: test-artifacts + + # - name: Prepare SNOS test environment + # run: | + # if [ ! 
-d "./tests/snos/snos/build" ]; then + # make snos-artifacts + # fi + + # - run: | + # cargo run -p snos-integration-test explorer-reverse-proxy: needs: [fmt, clippy, build-katana-binary] @@ -344,7 +346,7 @@ jobs: db-compatibility-check: needs: [fmt, clippy, build-katana-binary] - runs-on: ubuntu-latest + runs-on: ubuntu-latest-4-cores container: image: ghcr.io/dojoengine/katana-dev:latest steps: diff --git a/.gitignore b/.gitignore index d03be0160..2cdf7244e 100644 --- a/.gitignore +++ b/.gitignore @@ -37,7 +37,6 @@ tests/fixtures/db/* crates/contracts/build/ !crates/contracts/build/legacy/ -CONTEXT.md **/.claude/settings.local.json .cargo/ \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 1def5ce02..8c993b5da 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -80,9 +80,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.24" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eda689f7287f15bd3582daba6be8d1545bad3740fd1fb778f629a1fe866bb43b" +checksum = "f3dcd2b4e208ce5477de90ccdcbd4bde2c8fb06af49a443974e92bb8f2c5e93f" dependencies = [ "alloy-eips", "alloy-primitives", @@ -91,6 +91,7 @@ dependencies = [ "alloy-trie", "alloy-tx-macros", "auto_impl", + "borsh", "c-kzg", "derive_more 2.0.1", "either", @@ -99,15 +100,16 @@ dependencies = [ "rand 0.8.5", "secp256k1", "serde", + "serde_json", "serde_with", "thiserror 2.0.12", ] [[package]] name = "alloy-consensus-any" -version = "1.0.24" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b5659581e41e8fe350ecc3593cb5c9dcffddfd550896390f2b78a07af67b0fa" +checksum = "ee5655f234985f5ab1e31bef7e02ed11f0a899468cf3300e061e1b96e9e11de0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -119,9 +121,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "1.0.23" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f28074a21cd4f7c3a7ab218c4f38fae6be73944e1feae3b670c68b60bf85ca40" +checksum = "7f01b6d8e5b4f3222aaf7f18613a7292e2fbc9163fe120649cd1b078ca534349" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -141,9 +143,9 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "1.3.0" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9e8a436f0aad7df8bb47f144095fba61202265d9f5f09a70b0e3227881a668e" +checksum = "369f5707b958927176265e8a58627fc6195e5dfa5c55689396e68b241b3a72e6" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -170,32 +172,34 @@ dependencies = [ [[package]] name = "alloy-eip2930" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b82752a889170df67bbb36d42ca63c531eb16274f0d7299ae2a680facba17bd" +checksum = "9441120fa82df73e8959ae0e4ab8ade03de2aaae61be313fbf5746277847ce25" dependencies = [ "alloy-primitives", "alloy-rlp", + "borsh", "serde", ] [[package]] name = "alloy-eip7702" -version = "0.6.1" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d4769c6ffddca380b0070d71c8b7f30bed375543fe76bb2f74ec0acf4b7cd16" +checksum = "2919c5a56a1007492da313e7a3b6d45ef5edc5d33416fdec63c0d7a2702a0d20" dependencies = [ "alloy-primitives", "alloy-rlp", + "borsh", "serde", "thiserror 2.0.12", ] [[package]] name = "alloy-eips" -version = "1.0.24" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f35887da30b5fc50267109a3c61cd63e6ca1f45967983641053a40ee83468c1" +checksum = "6847d641141b92a1557094aa6c236cbe49c06fb24144d4a21fe6acb970c15888" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -204,18 +208,21 @@ dependencies = [ "alloy-rlp", "alloy-serde", "auto_impl", + "borsh", "c-kzg", "derive_more 2.0.1", "either", "serde", + "serde_with", "sha2", + "thiserror 2.0.12", ] [[package]] name = "alloy-genesis" -version = "1.0.24" +version = "1.2.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d4009efea6f403b3a80531f9c6f70fc242399498ff71196a1688cc1c901f44" +checksum = "fe3192fca2eb0b0c4b122b3c2d8254496b88a4e810558dddd3ea2f30ad9469df" dependencies = [ "alloy-eips", "alloy-primitives", @@ -239,9 +246,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "1.3.0" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "459f98c6843f208856f338bfb25e65325467f7aff35dfeb0484d0a76e059134b" +checksum = "84e3cf01219c966f95a460c95f1d4c30e12f6c18150c21a30b768af2a2a29142" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -251,9 +258,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.0.24" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "883dee3b4020fcb5667ee627b4f401e899dad82bf37b246620339dd980720ed9" +checksum = "d4ab3330e491053e9608b2a315f147357bb8acb9377a988c1203f2e8e2b296c9" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -266,9 +273,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.0.24" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd6e5b8ac1654a05c224390008e43634a2bdc74e181e02cf8ed591d8b3d4ad08" +checksum = "c1e22ff194b1e34b4defd1e257e3fe4dce0eee37451c7757a1510d6b23e7379a" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -292,9 +299,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.0.24" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80d7980333dd9391719756ac28bc2afa9baa705fc70ffd11dc86ab078dd64477" +checksum = "b8a6cbb9f431bdad294eebb5af9b293d6979e633bfe5468d1e87c1421a858265" dependencies = [ "alloy-consensus", "alloy-eips", @@ -305,9 +312,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "1.0.24" +version = "1.2.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "984c20af8aee7d123bb4bf40cf758b362b38cb9ff7160d986b6face604a1e6a9" +checksum = "7e9e5dae7d2be44904dba55bb8b538e5de89fdb9e50b3f0f163277b729285011" dependencies = [ "alloy-genesis", "alloy-hardforks", @@ -326,19 +333,18 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "1.3.0" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cfebde8c581a5d37b678d0a48a32decb51efd7a63a08ce2517ddec26db705c8" +checksum = "f6a0fb18dd5fb43ec5f0f6a20be1ce0287c79825827de5744afaa6c957737c33" dependencies = [ "alloy-rlp", "arbitrary", "bytes", "cfg-if", "const-hex", - "derive_arbitrary", "derive_more 2.0.1", - "foldhash", - "hashbrown 0.15.4", + "foldhash 0.2.0", + "hashbrown 0.16.1", "indexmap 2.10.0", "itoa", "k256", @@ -347,6 +353,7 @@ dependencies = [ "proptest", "proptest-derive", "rand 0.9.2", + "rapidhash", "ruint", "rustc-hash 2.1.1", "serde", @@ -356,9 +363,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.0.23" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59879a772ebdcde9dc4eb38b2535d32e8503d3175687cc09e763a625c5fcf32" +checksum = "3f5dde1abc3d582e53d139904fcdd8b2103f0bd03e8f2acb4292edbbaeaa7e6e" dependencies = [ "alloy-chains", "alloy-consensus", @@ -382,11 +389,10 @@ dependencies = [ "either", "futures", "futures-utils-wasm", - "http 1.3.1", "lru 0.13.0", "parking_lot", "pin-project 1.1.10", - "reqwest 0.12.22", + "reqwest", "serde", "serde_json", "thiserror 2.0.12", @@ -420,9 +426,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "1.0.23" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f060e3bb9f319eb01867a2d6d1ff9e0114e8877f5ca8f5db447724136106cae" +checksum = "5a94bdef2710322c6770be08689fee0878c2ad75615b8fc40e05d7f3c9618c0b" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -430,7 +436,7 @@ 
dependencies = [ "alloy-transport-http", "futures", "pin-project 1.1.10", - "reqwest 0.12.22", + "reqwest", "serde", "serde_json", "tokio", @@ -443,9 +449,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.0.24" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10493fa300a2757d8134f584800fef545c15905c95122bed1f6dde0b0d9dae27" +checksum = "838ca94be532a929f27961851000ec8bbbaeb06e2a2bcca44fac7855a2fe0f6f" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -455,9 +461,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.0.24" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f7eb22670a972ad6c222a6c6dac3eef905579acffe9d63ab42be24c7d158535" +checksum = "12df0b34551ca2eab8ec83b56cb709ee5da991737282180d354a659b907f00dc" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -466,9 +472,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.24" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b777b98526bbe5b7892ca22a7fd5f18ed624ff664a79f40d0f9f2bf94ba79a84" +checksum = "b7f9f130511b8632686dfe6f9909b38d7ae4c68de3ce17d28991400646a39b25" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -487,9 +493,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.0.24" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee8d2c52adebf3e6494976c8542fbdf12f10123b26e11ad56f77274c16a2a039" +checksum = "067b718d2e6ac1bb889341fcc7a250cfa49bcd3ba4f23923f1c1eb1f2b10cb7c" dependencies = [ "alloy-primitives", "serde", @@ -498,9 +504,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.0.24" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c0494d1e0f802716480aabbe25549c7f6bc2a25ff33b08fd332bbb4b7d06894" +checksum = 
"acff6b251740ef473932386d3b71657d3825daebf2217fb41a7ef676229225d4" dependencies = [ "alloy-primitives", "async-trait", @@ -513,9 +519,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.0.24" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59c2435eb8979a020763ced3fb478932071c56e5f75ea86db41f320915d325ba" +checksum = "c9129ef31975d987114c27c9930ee817cf3952355834d47f2fdf4596404507e8" dependencies = [ "alloy-consensus", "alloy-network", @@ -529,9 +535,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "1.3.0" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aedac07a10d4c2027817a43cc1f038313fc53c7ac866f7363239971fd01f9f18" +checksum = "09eb18ce0df92b4277291bbaa0ed70545d78b02948df756bbd3d6214bf39a218" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", @@ -543,9 +549,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-expander" -version = "1.3.0" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24f9a598f010f048d8b8226492b6401104f5a5c1273c2869b72af29b48bb4ba9" +checksum = "95d9fa2daf21f59aa546d549943f10b5cce1ae59986774019fbedae834ffe01b" dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", @@ -562,9 +568,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-input" -version = "1.3.0" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f494adf9d60e49aa6ce26dfd42c7417aa6d4343cf2ae621f20e4d92a5ad07d85" +checksum = "9396007fe69c26ee118a19f4dee1f5d1d6be186ea75b3881adf16d87f8444686" dependencies = [ "alloy-json-abi", "const-hex", @@ -580,9 +586,9 @@ dependencies = [ [[package]] name = "alloy-sol-type-parser" -version = "1.3.0" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52db32fbd35a9c0c0e538b58b81ebbae08a51be029e7ad60e08b60481c2ec6c3" +checksum = 
"af67a0b0dcebe14244fc92002cd8d96ecbf65db4639d479f5fcd5805755a4c27" dependencies = [ "serde", "winnow", @@ -590,9 +596,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "1.3.0" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a285b46e3e0c177887028278f04cc8262b76fd3b8e0e20e93cea0a58c35f5ac5" +checksum = "09aeea64f09a7483bdcd4193634c7e5cf9fd7775ee767585270cd8ce2d69dc95" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -602,12 +608,12 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.0.23" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f89bec2f59a41c0e259b6fe92f78dfc49862c17d10f938db9c33150d5a7f42b6" +checksum = "bec1fb08ee484e615f24867c0b154fff5722bb00176102a16868c6532b7c3623" dependencies = [ "alloy-json-rpc", - "alloy-primitives", + "auto_impl", "base64 0.22.1", "derive_more 2.0.1", "futures", @@ -625,13 +631,13 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.0.23" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d3615ec64d775fec840f4e9d5c8e1f739eb1854d8d28db093fb3d4805e0cb53" +checksum = "64b722073c76f2de7e118d546ee1921c50710f97feb32aed50db94cfa5b663e1" dependencies = [ "alloy-json-rpc", "alloy-transport", - "reqwest 0.12.22", + "reqwest", "serde_json", "tower 0.5.2", "tracing", @@ -656,12 +662,11 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.0.24" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6acb36318dfa50817154064fea7932adf2eec3f51c86680e2b37d7e8906c66bb" +checksum = "04950a13cc4209d8e9b78f306e87782466bad8538c94324702d061ff03e211c9" dependencies = [ - "alloy-primitives", - "darling 0.20.11", + "darling 0.21.3", "proc-macro2", "quote", "syn 2.0.104", @@ -745,131 +750,86 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" [[package]] -name = "apollo_config" -version = "0.0.0" -source = "git+https://github.com/dojoengine/sequencer?rev=4b1587c5e#4b1587c5e3effc4cfbbb8551cdcab6454a2dbe30" +name = "apollo_compilation_utils" +version = "0.16.0-rc.0" +source = "git+https://github.com/dojoengine/sequencer?rev=d2591bb#d2591bb07b5da4bf0feebf18953c291becd45910" dependencies = [ - "apollo_infra_utils 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "clap", - "itertools 0.12.1", + "apollo_infra_utils", + "cairo-lang-sierra", + "cairo-lang-starknet-classes", + "cairo-lang-utils", + "rlimit", "serde", "serde_json", - "strum_macros 0.25.3", + "starknet-types-core 0.2.3", + "starknet_api", + "tempfile", "thiserror 1.0.69", - "tracing", - "validator", ] [[package]] -name = "apollo_config" -version = "0.0.0" -source = "git+https://github.com/dojoengine/sequencer?rev=5d737b9c9#5d737b9c90a14bdf4483d759d1a1d4ce64aa9fd2" +name = "apollo_compile_to_native" +version = "0.16.0-rc.0" +source = "git+https://github.com/dojoengine/sequencer?rev=d2591bb#d2591bb07b5da4bf0feebf18953c291becd45910" dependencies = [ - "apollo_infra_utils 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", - "clap", - "itertools 0.12.1", - "serde", - "serde_json", - "strum_macros 0.25.3", - "thiserror 1.0.69", - "tracing", - "validator", + "apollo_compilation_utils", + "apollo_compile_to_native_types", + "apollo_infra_utils", + "cairo-lang-starknet-classes", + "cairo-native", + "tempfile", ] [[package]] -name = "apollo_infra" -version = "0.0.0" -source = "git+https://github.com/dojoengine/sequencer?rev=4b1587c5e#4b1587c5e3effc4cfbbb8551cdcab6454a2dbe30" +name = "apollo_compile_to_native_types" +version = "0.16.0-rc.0" +source = "git+https://github.com/dojoengine/sequencer?rev=d2591bb#d2591bb07b5da4bf0feebf18953c291becd45910" dependencies = [ - "apollo_config 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - 
"apollo_infra_utils 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "apollo_metrics 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "async-trait", - "hyper 0.14.32", - "rstest 0.17.0", + "apollo_config", "serde", - "serde_json", - "starknet_api 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "thiserror 1.0.69", - "time", - "tokio", - "tower 0.4.13", - "tracing", - "tracing-subscriber", "validator", ] [[package]] -name = "apollo_infra" -version = "0.0.0" -source = "git+https://github.com/dojoengine/sequencer?rev=5d737b9c9#5d737b9c90a14bdf4483d759d1a1d4ce64aa9fd2" +name = "apollo_config" +version = "0.16.0-rc.0" +source = "git+https://github.com/dojoengine/sequencer?rev=d2591bb#d2591bb07b5da4bf0feebf18953c291becd45910" dependencies = [ - "apollo_config 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", - "apollo_infra_utils 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", - "apollo_metrics 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", - "async-trait", - "hyper 0.14.32", - "rstest 0.17.0", + "apollo_infra_utils", + "clap", + "const_format", + "itertools 0.12.1", "serde", "serde_json", - "starknet_api 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", + "strum_macros 0.25.3", "thiserror 1.0.69", - "time", - "tokio", - "tower 0.4.13", "tracing", - "tracing-subscriber", + "url", "validator", ] [[package]] name = "apollo_infra_utils" -version = "0.0.0" -source = "git+https://github.com/dojoengine/sequencer?rev=4b1587c5e#4b1587c5e3effc4cfbbb8551cdcab6454a2dbe30" +version = "0.16.0-rc.0" +source = "git+https://github.com/dojoengine/sequencer?rev=d2591bb#d2591bb07b5da4bf0feebf18953c291becd45910" dependencies = [ + "apollo_proc_macros", "assert-json-diff", - "cached", - "colored 3.0.0", - "serde", - "serde_json", - "socket2 0.5.10", - "tokio", - "toml", - "tracing", -] - -[[package]] -name = "apollo_infra_utils" -version = "0.0.0" 
-source = "git+https://github.com/dojoengine/sequencer?rev=5d737b9c9#5d737b9c90a14bdf4483d759d1a1d4ce64aa9fd2" -dependencies = [ - "assert-json-diff", - "cached", "colored 3.0.0", + "num_enum", "serde", "serde_json", "socket2 0.5.10", + "tempfile", + "thiserror 1.0.69", "tokio", - "toml", "tracing", ] [[package]] name = "apollo_metrics" -version = "0.0.0" -source = "git+https://github.com/dojoengine/sequencer?rev=4b1587c5e#4b1587c5e3effc4cfbbb8551cdcab6454a2dbe30" -dependencies = [ - "indexmap 2.10.0", - "metrics 0.24.2", - "num-traits", - "paste", - "regex", -] - -[[package]] -name = "apollo_metrics" -version = "0.0.0" -source = "git+https://github.com/dojoengine/sequencer?rev=5d737b9c9#5d737b9c90a14bdf4483d759d1a1d4ce64aa9fd2" +version = "0.16.0-rc.0" +source = "git+https://github.com/dojoengine/sequencer?rev=d2591bb#d2591bb07b5da4bf0feebf18953c291becd45910" dependencies = [ "indexmap 2.10.0", "metrics 0.24.2", @@ -880,99 +840,15 @@ dependencies = [ [[package]] name = "apollo_proc_macros" -version = "0.0.0" -source = "git+https://github.com/dojoengine/sequencer?rev=4b1587c5e#4b1587c5e3effc4cfbbb8551cdcab6454a2dbe30" -dependencies = [ - "quote", - "syn 2.0.104", -] - -[[package]] -name = "apollo_proc_macros" -version = "0.0.0" -source = "git+https://github.com/dojoengine/sequencer?rev=5d737b9c9#5d737b9c90a14bdf4483d759d1a1d4ce64aa9fd2" +version = "0.16.0-rc.0" +source = "git+https://github.com/dojoengine/sequencer?rev=d2591bb#d2591bb07b5da4bf0feebf18953c291becd45910" dependencies = [ + "lazy_static", + "proc-macro2", "quote", "syn 2.0.104", ] -[[package]] -name = "apollo_sierra_multicompile" -version = "0.0.0" -source = "git+https://github.com/dojoengine/sequencer?rev=4b1587c5e#4b1587c5e3effc4cfbbb8551cdcab6454a2dbe30" -dependencies = [ - "apollo_config 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "apollo_infra 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "apollo_infra_utils 0.0.0 
(git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "apollo_sierra_multicompile_types 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "async-trait", - "cairo-lang-sierra", - "cairo-lang-starknet-classes", - "cairo-lang-utils", - "rlimit", - "serde", - "serde_json", - "starknet-types-core", - "starknet_api 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "tempfile", - "thiserror 1.0.69", - "tracing", - "validator", -] - -[[package]] -name = "apollo_sierra_multicompile" -version = "0.0.0" -source = "git+https://github.com/dojoengine/sequencer?rev=5d737b9c9#5d737b9c90a14bdf4483d759d1a1d4ce64aa9fd2" -dependencies = [ - "apollo_config 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", - "apollo_infra 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", - "apollo_infra_utils 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", - "apollo_sierra_multicompile_types 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", - "async-trait", - "cairo-lang-sierra", - "cairo-lang-starknet-classes", - "cairo-lang-utils", - "cairo-native", - "rlimit", - "serde", - "serde_json", - "starknet-types-core", - "starknet_api 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", - "tempfile", - "thiserror 1.0.69", - "tracing", - "validator", -] - -[[package]] -name = "apollo_sierra_multicompile_types" -version = "0.0.0" -source = "git+https://github.com/dojoengine/sequencer?rev=4b1587c5e#4b1587c5e3effc4cfbbb8551cdcab6454a2dbe30" -dependencies = [ - "apollo_infra 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "apollo_proc_macros 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "async-trait", - "serde", - "serde_json", - "starknet_api 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "thiserror 1.0.69", -] - -[[package]] -name = "apollo_sierra_multicompile_types" -version = "0.0.0" -source = 
"git+https://github.com/dojoengine/sequencer?rev=5d737b9c9#5d737b9c90a14bdf4483d759d1a1d4ce64aa9fd2" -dependencies = [ - "apollo_infra 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", - "apollo_proc_macros 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", - "async-trait", - "serde", - "serde_json", - "starknet_api 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", - "thiserror 1.0.69", -] - [[package]] name = "aquamarine" version = "0.6.0" @@ -1337,6 +1213,45 @@ dependencies = [ "term", ] +[[package]] +name = "asn1-rs" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5493c3bedbacf7fd7382c6346bbd66687d12bbaad3a89a2d2c303ee6cf20b048" +dependencies = [ + "asn1-rs-derive", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror 1.0.69", + "time", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "synstructure", +] + +[[package]] +name = "asn1-rs-impl" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + [[package]] name = "assert-json-diff" version = "2.0.2" @@ -1702,10 +1617,20 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36eaf5d7b090263e8150820482d5d93cd964a81e4019913c972f4edcc6edb740" dependencies = [ + "bincode_derive", "serde", "unty", ] +[[package]] +name = "bincode_derive" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf95709a440f45e986983918d0e8a1f30a9b1df04918fc828670606804ac3c09" +dependencies = [ + "virtue", +] + 
[[package]] name = "bindgen" version = "0.68.1" @@ -1818,6 +1743,26 @@ dependencies = [ "hex-conservative", ] +[[package]] +name = "bitfield" +version = "0.19.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21ba6517c6b0f2bf08be60e187ab64b038438f22dd755614d8fe4d4098c46419" +dependencies = [ + "bitfield-macros", +] + +[[package]] +name = "bitfield-macros" +version = "0.19.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f48d6ace212fdf1b45fd6b566bb40808415344642b76c3224c07c8df9da81e97" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -1838,85 +1783,53 @@ checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ "funty", "radium", - "serde", "tap", "wyz", ] [[package]] -name = "block-buffer" -version = "0.10.4" +name = "blake2" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "generic-array", + "digest 0.10.7", ] [[package]] -name = "blockifier" -version = "0.0.0" -source = "git+https://github.com/dojoengine/sequencer?rev=4b1587c5e#4b1587c5e3effc4cfbbb8551cdcab6454a2dbe30" +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "anyhow", - "apollo_config 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "apollo_infra_utils 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "apollo_sierra_multicompile 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "ark-ec 0.4.2", - "ark-ff 0.4.2", - "ark-secp256k1 0.4.0", - "ark-secp256r1 0.4.0", - "blockifier_test_utils 0.0.0 
(git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "cached", - "cairo-lang-casm", - "cairo-lang-runner", - "cairo-lang-starknet-classes", - "cairo-vm 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "derive_more 0.99.20", - "indexmap 2.10.0", - "itertools 0.12.1", - "keccak", - "log", - "mockall", - "num-bigint", - "num-integer", - "num-rational", - "num-traits", - "paste", - "phf", - "rand 0.8.5", - "rstest 0.17.0", - "rstest_reuse 0.7.0", - "semver 1.0.26", - "serde", - "serde_json", - "sha2", - "starknet-types-core", - "starknet_api 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "strum 0.25.0", - "strum_macros 0.25.3", - "thiserror 1.0.69", + "generic-array", ] [[package]] name = "blockifier" -version = "0.0.0" -source = "git+https://github.com/dojoengine/sequencer?rev=5d737b9c9#5d737b9c90a14bdf4483d759d1a1d4ce64aa9fd2" +version = "0.16.0-rc.0" +source = "git+https://github.com/dojoengine/sequencer?rev=d2591bb#d2591bb07b5da4bf0feebf18953c291becd45910" dependencies = [ "anyhow", - "apollo_config 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", - "apollo_infra_utils 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", - "apollo_sierra_multicompile 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", + "apollo_compilation_utils", + "apollo_compile_to_native", + "apollo_compile_to_native_types", + "apollo_config", + "apollo_infra_utils", + "apollo_metrics", "ark-ec 0.4.2", "ark-ff 0.4.2", "ark-secp256k1 0.4.0", "ark-secp256r1 0.4.0", - "blockifier_test_utils 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", + "blockifier_test_utils", "cached", "cairo-lang-casm", "cairo-lang-runner", "cairo-lang-starknet-classes", + "cairo-lang-utils", "cairo-native", - "cairo-vm 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "cairo-vm", + "dashmap", "derive_more 0.99.20", "indexmap 2.10.0", "itertools 0.12.1", @@ -1936,8 +1849,8 @@ dependencies = 
[ "serde", "serde_json", "sha2", - "starknet-types-core", - "starknet_api 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", + "starknet-types-core 0.2.3", + "starknet_api", "strum 0.25.0", "strum_macros 0.25.3", "thiserror 1.0.69", @@ -1945,38 +1858,17 @@ dependencies = [ [[package]] name = "blockifier_test_utils" -version = "0.0.0" -source = "git+https://github.com/dojoengine/sequencer?rev=4b1587c5e#4b1587c5e3effc4cfbbb8551cdcab6454a2dbe30" +version = "0.16.0-rc.0" +source = "git+https://github.com/dojoengine/sequencer?rev=d2591bb#d2591bb07b5da4bf0feebf18953c291becd45910" dependencies = [ - "apollo_infra_utils 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", + "apollo_infra_utils", "cairo-lang-starknet-classes", - "itertools 0.12.1", + "expect-test", "pretty_assertions", "rstest 0.17.0", "serde_json", - "starknet-types-core", - "starknet_api 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "strum 0.25.0", - "strum_macros 0.25.3", - "tempfile", - "tokio", - "tracing", - "tracing-test", -] - -[[package]] -name = "blockifier_test_utils" -version = "0.0.0" -source = "git+https://github.com/dojoengine/sequencer?rev=5d737b9c9#5d737b9c90a14bdf4483d759d1a1d4ce64aa9fd2" -dependencies = [ - "apollo_infra_utils 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", - "cairo-lang-starknet-classes", - "itertools 0.12.1", - "pretty_assertions", - "rstest 0.17.0", - "serde_json", - "starknet-types-core", - "starknet_api 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", + "starknet-types-core 0.2.3", + "starknet_api", "strum 0.25.0", "strum_macros 0.25.3", "tempfile", @@ -2000,7 +1892,7 @@ dependencies = [ [[package]] name = "bonsai-trie" version = "0.1.0" -source = "git+https://github.com/michalpalkowski/bonsai-trie-katana.git?branch=feature%2Fprovable-forking#228ee8ef3b0463b7616a028dae687c7ad5e0247f" +source = 
"git+https://github.com/michalpalkowski/bonsai-trie-katana.git?branch=feature%2Fprovable-forking#4912185fcbf9672467871e77dc1986212af672fb" dependencies = [ "bitvec", "derive_more 0.99.20", @@ -2012,7 +1904,7 @@ dependencies = [ "serde", "slotmap", "smallvec", - "starknet-types-core", + "starknet-types-core 0.2.3", "thiserror 2.0.12", ] @@ -2116,16 +2008,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bzip2" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdb116a6ef3f6c3698828873ad02c3014b3c85cadb88496095628e3ef1e347f8" -dependencies = [ - "bzip2-sys", - "libc", -] - [[package]] name = "bzip2" version = "0.5.2" @@ -2215,7 +2097,7 @@ dependencies = [ "serde", "serde_json", "starknet", - "starknet-types-core", + "starknet-types-core 0.1.8", "thiserror 2.0.12", "tracing", "tracing-subscriber", @@ -2296,9 +2178,9 @@ dependencies = [ [[package]] name = "cairo-lang-casm" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99b3c953c0321df1d7ce9101c7a94e2d4007aa8c3362ee96be54bbe77916ef60" +checksum = "7d1d84a85b59c753aa4a7f0c455a5c815e0aebb89faf0c8ab366b0d87c0bb934" dependencies = [ "cairo-lang-utils", "indoc", @@ -2310,9 +2192,9 @@ dependencies = [ [[package]] name = "cairo-lang-compiler" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8cd5bec42d0c4e9f1ac6c373c177c98c73a760fabb4066757edd32ef9db467e" +checksum = "3a5cbeb4e134cf29c63d18a235beae3f124bef2824ec45d09d6e18a0c334e509" dependencies = [ "anyhow", "cairo-lang-defs", @@ -2321,6 +2203,7 @@ dependencies = [ "cairo-lang-lowering", "cairo-lang-parser", "cairo-lang-project", + "cairo-lang-runnable-utils", "cairo-lang-semantic", "cairo-lang-sierra", "cairo-lang-sierra-generator", @@ -2336,19 +2219,20 @@ dependencies = [ [[package]] name = "cairo-lang-debug" -version = "2.11.2" +version = "2.12.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ea55d64b6e7aa9186bb65ca32f50f386d6518d467930e53fcf47658dec74a2e" +checksum = "fa5311e1c31d413f3fa34e40e48b662c19151f0fb4b10467d627a52c93eae918" dependencies = [ "cairo-lang-utils", ] [[package]] name = "cairo-lang-defs" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "093146c748e95400230a40dc6171ac192399484addd96c6ac70ec36b0a2e45b0" +checksum = "872feccf7b8f70ed5d74c40548bf974fbcc5069b2ea1ae15a9b8f1ab911c536b" dependencies = [ + "bincode 2.0.1", "cairo-lang-debug", "cairo-lang-diagnostics", "cairo-lang-filesystem", @@ -2357,14 +2241,17 @@ dependencies = [ "cairo-lang-utils", "itertools 0.14.0", "rust-analyzer-salsa", + "serde", "smol_str", + "typetag", + "xxhash-rust", ] [[package]] name = "cairo-lang-diagnostics" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c85890af7f0d0b6e0d15686e0a5169d3396e2861cb3adac3c594f82ae5ed42c" +checksum = "5d0e7c551a634708366af3003176f2f9cdea56fd4a91c834ddd802030366f6a5" dependencies = [ "cairo-lang-debug", "cairo-lang-filesystem", @@ -2374,9 +2261,9 @@ dependencies = [ [[package]] name = "cairo-lang-eq-solver" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a54937f1baca684547159af847ba5332ec8015de442878b8e4d6dbbaeec714c" +checksum = "ed04fc3f52d68157f359257c477e30f68dec36bbf568c85d567812583cd5f9c8" dependencies = [ "cairo-lang-utils", "good_lp", @@ -2384,9 +2271,9 @@ dependencies = [ [[package]] name = "cairo-lang-filesystem" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e07492644cfa43e50cfc334a80c12c9e4d8e2a4248b3d2240301c99a025010a" +checksum = "ca1835a43a00a90d5cd4ca3f6bb9178ec450d55458e8b56ac34ca1d6d0ccf58f" dependencies = [ "cairo-lang-debug", "cairo-lang-utils", @@ -2395,14 
+2282,14 @@ dependencies = [ "semver 1.0.26", "serde", "smol_str", - "toml", + "toml 0.8.23", ] [[package]] name = "cairo-lang-formatter" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c26c988d6b522c45b5f70add3808fcae13c921822d1c48724c394b450adcf7f9" +checksum = "3bd0736456004f1d334bad5b366c6933c4b856a23a5dfade96cfe0a1c5eb3ddb" dependencies = [ "anyhow", "cairo-lang-diagnostics", @@ -2413,18 +2300,18 @@ dependencies = [ "diffy", "ignore", "itertools 0.14.0", - "rust-analyzer-salsa", "serde", "thiserror 2.0.12", ] [[package]] name = "cairo-lang-lowering" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90577e4dc391e2384041763120618ed2017a8f709f20dfcaa2c1246f908dd374" +checksum = "fd2e1d66c241fba4f3dc43e42956001940298fb4ea5970acfc8b2db8bf4b6629" dependencies = [ - "bincode 1.3.3", + "assert_matches", + "bincode 2.0.1", "cairo-lang-debug", "cairo-lang-defs", "cairo-lang-diagnostics", @@ -2442,14 +2329,13 @@ dependencies = [ "num-traits", "rust-analyzer-salsa", "serde", - "smol_str", ] [[package]] name = "cairo-lang-parser" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6b7985c0ee345ead0e0f713474ec6490e3fac80c3c3889ab9e67b1588d30337" +checksum = "15c3ab263d4afd34a002dc0e37f9bacca734aa133dbbb8540651d28308977a68" dependencies = [ "cairo-lang-diagnostics", "cairo-lang-filesystem", @@ -2468,9 +2354,9 @@ dependencies = [ [[package]] name = "cairo-lang-plugins" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "697772ca0b096e36cb98cfc9b1231b115a15eebf8ac7295f7b50252f5a1e6aea" +checksum = "566059584384c12fa598ae0e0509fd3d12b3985a25872de22e37245c4bc5762c" dependencies = [ "cairo-lang-defs", "cairo-lang-diagnostics", @@ -2493,9 +2379,9 @@ checksum = 
"123ac0ecadf31bacae77436d72b88fa9caef2b8e92c89ce63a125ae911a12fae" [[package]] name = "cairo-lang-proc-macros" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23a956924e4f53cb1b69a7cee4758f0bcf50e23bbb8769f632625956a574f736" +checksum = "61599d8cac760505d1913fa5d7dddcf019f22d47f0748ff66b1b58afe1858b62" dependencies = [ "cairo-lang-debug", "quote", @@ -2504,22 +2390,22 @@ dependencies = [ [[package]] name = "cairo-lang-project" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "088f29ca8d06722bd92001d098b619a25979dcbfa5face7a6de5d8c7232f0454" +checksum = "99635e2569cebc31583110b417e6a410990a494c7d56998f2be0a169a1158456" dependencies = [ "cairo-lang-filesystem", "cairo-lang-utils", "serde", "thiserror 2.0.12", - "toml", + "toml 0.8.23", ] [[package]] name = "cairo-lang-runnable-utils" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03b63a8fe2e8f2ae6280392bcc8ab98b981db75832b16c98974b81978d3d1b26" +checksum = "f747c3d433ec5e82576e59852fd8c86a802fefe55e7bdbb9c0db61adb1a40e7b" dependencies = [ "cairo-lang-casm", "cairo-lang-sierra", @@ -2528,16 +2414,16 @@ dependencies = [ "cairo-lang-sierra-to-casm", "cairo-lang-sierra-type-size", "cairo-lang-utils", - "cairo-vm 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "cairo-vm", "itertools 0.14.0", "thiserror 2.0.12", ] [[package]] name = "cairo-lang-runner" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4db16efd33b13cecb1ca5b843a65f1e8e3eab4e18abf0c39522b04d741e51e7" +checksum = "40a9ab4bb286d641463b2253070c145c53ff7e71f29cda2a49915f79ff7db927" dependencies = [ "ark-ff 0.5.0", "ark-secp256k1 0.5.0", @@ -2550,7 +2436,7 @@ dependencies = [ "cairo-lang-sierra-to-casm", "cairo-lang-starknet", "cairo-lang-utils", - "cairo-vm 1.0.2 
(registry+https://github.com/rust-lang/crates.io-index)", + "cairo-vm", "itertools 0.14.0", "keccak", "num-bigint", @@ -2559,15 +2445,15 @@ dependencies = [ "rand 0.9.2", "sha2", "smol_str", - "starknet-types-core", + "starknet-types-core 0.2.3", "thiserror 2.0.12", ] [[package]] name = "cairo-lang-semantic" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a3d35463c096f1e3ab6830e28c762b22a7b5c3fbf0df5c2e9a265d290d22ee5" +checksum = "bf1e01333b127fa3733f2f93b3febc45219ef55b807d196f298cadea6ad8fe44" dependencies = [ "cairo-lang-debug", "cairo-lang-defs", @@ -2587,19 +2473,19 @@ dependencies = [ "rust-analyzer-salsa", "sha3", "smol_str", - "toml", + "toml 0.8.23", ] [[package]] name = "cairo-lang-sierra" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e02d4df410965f122b67967936b722352eddbfde550883b054e019dc54beeef" +checksum = "300655046f505cf806a918918e5397b20c22b579d78c2ef09bc7d4d59fd733be" dependencies = [ "anyhow", "cairo-lang-utils", "const-fnv1a-hash", - "convert_case 0.7.1", + "convert_case 0.8.0", "derivative", "itertools 0.14.0", "lalrpop", @@ -2613,15 +2499,15 @@ dependencies = [ "serde_json", "sha3", "smol_str", - "starknet-types-core", + "starknet-types-core 0.2.3", "thiserror 2.0.12", ] [[package]] name = "cairo-lang-sierra-ap-change" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c0d3be06212edb4d79be1296cd999b246d22e1541b49432db74dca16fe0c523" +checksum = "0c51190f463ac9f7d4a2ce0e0345cfc92334589811a7114eeeec84029999d7f1" dependencies = [ "cairo-lang-eq-solver", "cairo-lang-sierra", @@ -2635,9 +2521,9 @@ dependencies = [ [[package]] name = "cairo-lang-sierra-gas" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a7f246adb40ac69242231642cdf2571c83463068086a00b5ae9131f7dfc74b5" 
+checksum = "bb0d0f038acd79aedcadad4ad2ad928b0881c4e96a2d9ad0e0b3173a6111f313" dependencies = [ "cairo-lang-eq-solver", "cairo-lang-sierra", @@ -2651,9 +2537,9 @@ dependencies = [ [[package]] name = "cairo-lang-sierra-generator" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2ca1fbf8d29528d5fdf6a7e3b2ccdd4f3e9b57066057c30f9bc2c3118867571" +checksum = "8bc8d2a89273ba24529319982a4a7833f2a6c4a87752baea2bc70ceb4b3285b7" dependencies = [ "cairo-lang-debug", "cairo-lang-defs", @@ -2675,9 +2561,9 @@ dependencies = [ [[package]] name = "cairo-lang-sierra-to-casm" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efbb42e872cff7d050d2348978e2f12a94b4b29aee6f5ba5a7eca76a5294c900" +checksum = "7c852277442b2d8ca9741cdc8ccb737c6ad381d300ab4e2d982a98ba40e5f5b6" dependencies = [ "assert_matches", "cairo-lang-casm", @@ -2690,15 +2576,15 @@ dependencies = [ "itertools 0.14.0", "num-bigint", "num-traits", - "starknet-types-core", + "starknet-types-core 0.2.3", "thiserror 2.0.12", ] [[package]] name = "cairo-lang-sierra-type-size" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8217d84f5434c36c68f54b5bbac9c91ff981a3738f2e6bc51b102f5beae3fd8" +checksum = "265aa8daaa94cc4d5e135a82c0bbe7d28d2c0fbc612332903dbf1a68ed15978f" dependencies = [ "cairo-lang-sierra", "cairo-lang-utils", @@ -2706,9 +2592,9 @@ dependencies = [ [[package]] name = "cairo-lang-starknet" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70aca831fef1f41b29c5e62a464a8ddd7964ed2414d3dafb6e1530bff1ae3cbd" +checksum = "deb8bf3ccf8fe1f910291d388a2351b6f40ad32be07bdbd3a628e103387b1a48" dependencies = [ "anyhow", "cairo-lang-compiler", @@ -2716,6 +2602,7 @@ dependencies = [ "cairo-lang-diagnostics", "cairo-lang-filesystem", "cairo-lang-lowering", + 
"cairo-lang-parser", "cairo-lang-plugins", "cairo-lang-semantic", "cairo-lang-sierra", @@ -2730,21 +2617,22 @@ dependencies = [ "serde", "serde_json", "smol_str", - "starknet-types-core", + "starknet-types-core 0.2.3", "thiserror 2.0.12", + "typetag", ] [[package]] name = "cairo-lang-starknet-classes" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e9b59bb3d46e68730266b27e3f0ad524c076eebab1bcc9352256a8957d6b88" +checksum = "4839b63927954a7c3d018fd012ce0bea256db205b85ee45df27fb1e90cb10e02" dependencies = [ "cairo-lang-casm", "cairo-lang-sierra", "cairo-lang-sierra-to-casm", "cairo-lang-utils", - "convert_case 0.7.1", + "convert_case 0.8.0", "itertools 0.14.0", "num-bigint", "num-integer", @@ -2753,15 +2641,15 @@ dependencies = [ "serde_json", "sha3", "smol_str", - "starknet-types-core", + "starknet-types-core 0.2.3", "thiserror 2.0.12", ] [[package]] name = "cairo-lang-syntax" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686aea0cd9730af809010a74c0a8583b3acb99a08e5f97a07ed205f37b9e75ae" +checksum = "a1f83d5b0213ddab04090f4a10d009ff3428a0d6e289f4fea31798210d60d5cb" dependencies = [ "cairo-lang-debug", "cairo-lang-filesystem", @@ -2777,9 +2665,9 @@ dependencies = [ [[package]] name = "cairo-lang-syntax-codegen" -version = "2.11.4" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c69eb6919b69a9a7bb92f79d73c568d09ef77efa0b5d265efb763234ec979d4" +checksum = "0d00ae64466774b6e34a91c4a66202778b17ef5a844a6f668436e28d71ccb9b2" dependencies = [ "genco", "xshell", @@ -2787,9 +2675,9 @@ dependencies = [ [[package]] name = "cairo-lang-test-plugin" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1a6f34df0f3929bf8166c5a6d143c22ad28fd73937cfb5d7a994d56ca4a86c4" +checksum = 
"e1e90cf75528c423cd6b6faaab2dde0c1b23efe36103e1e57f338293552ee16f" dependencies = [ "anyhow", "cairo-lang-compiler", @@ -2797,6 +2685,7 @@ dependencies = [ "cairo-lang-defs", "cairo-lang-filesystem", "cairo-lang-lowering", + "cairo-lang-parser", "cairo-lang-semantic", "cairo-lang-sierra", "cairo-lang-sierra-generator", @@ -2809,14 +2698,14 @@ dependencies = [ "num-bigint", "num-traits", "serde", - "starknet-types-core", + "starknet-types-core 0.2.3", ] [[package]] name = "cairo-lang-test-utils" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "199eb0e7ac78ba7dfbf6551ab7eab14dd45fdcf21675fd5472ca695dc6da96f1" +checksum = "ebbd4ebcd82ab07fba3d376a6aa992aa552fcb7f051736f6b5a2122381754bdb" dependencies = [ "cairo-lang-formatter", "cairo-lang-utils", @@ -2827,9 +2716,9 @@ dependencies = [ [[package]] name = "cairo-lang-utils" -version = "2.11.2" +version = "2.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e621368454b62603ae035d04864770a70952e6ca8341b78c1ac50a0088939e3f" +checksum = "cca315cce0937801a772bee5fe92cca28b8172421bdd2f67c96e8288a0dcfb9f" dependencies = [ "hashbrown 0.15.4", "indexmap 2.10.0", @@ -2839,13 +2728,14 @@ dependencies = [ "parity-scale-codec", "schemars 0.8.22", "serde", + "smol_str", ] [[package]] name = "cairo-native" -version = "0.4.3" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67542e31b00b8f015088c06b04095aaed96b3e35b50d396289390e9fb9c9a778" +checksum = "ff8751057242ee2f1414b27079188bd750aeed8191c9a49429be8526214bf507" dependencies = [ "anyhow", "aquamarine", @@ -2862,7 +2752,6 @@ dependencies = [ "cairo-lang-sierra", "cairo-lang-sierra-ap-change", "cairo-lang-sierra-gas", - "cairo-lang-sierra-generator", "cairo-lang-sierra-to-casm", "cairo-lang-starknet", "cairo-lang-starknet-classes", @@ -2887,8 +2776,8 @@ dependencies = [ "serde", "serde_json", "sha2", - "starknet-curve 0.5.1 
(registry+https://github.com/rust-lang/crates.io-index)", - "starknet-types-core", + "starknet-curve 0.6.0", + "starknet-types-core 0.2.3", "stats_alloc", "tempfile", "thiserror 2.0.12", @@ -2897,64 +2786,20 @@ dependencies = [ "utf8_iter", ] -[[package]] -name = "cairo-type-derive" -version = "0.1.0" -source = "git+https://github.com/cartridge-gg/snos?rev=c96eb9e#c96eb9e433c89da1a12a8a8474a30e6590431662" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.104", -] - [[package]] name = "cairo-vm" -version = "1.0.2" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fa8b4b56ee66cebcade4d85128e55b2bfdf046502187aeaa8c2768a427684dc" +checksum = "c21cacdf4e290ab5f0018f24d6bf97f8d3a8809bd09568550669270e7f9ed534" dependencies = [ "anyhow", "arbitrary", "bincode 2.0.1", "bitvec", "generic-array", - "hashbrown 0.14.5", - "hex", - "keccak", - "lazy_static", - "nom", - "num-bigint", - "num-integer", - "num-prime", - "num-traits", - "rand 0.8.5", - "rust_decimal", - "serde", - "serde_json", - "sha2", - "sha3", - "starknet-crypto 0.6.2", - "starknet-types-core", - "thiserror-no-std", - "zip 0.6.6", -] - -[[package]] -name = "cairo-vm" -version = "1.0.2" -source = "git+https://github.com/kariy/cairo-vm?branch=kariy%2F1.0.2_clear-cell#0881dafa5440b304fb000578b6f17c0a6e5f0595" -dependencies = [ - "anyhow", - "ark-ff 0.4.2", - "ark-std 0.4.0", - "bincode 2.0.1", - "bitvec", - "cairo-lang-casm", - "cairo-lang-starknet", - "cairo-lang-starknet-classes", - "generic-array", - "hashbrown 0.14.5", + "hashbrown 0.15.4", "hex", + "indoc", "keccak", "lazy_static", "nom", @@ -2968,10 +2813,9 @@ dependencies = [ "serde_json", "sha2", "sha3", - "starknet-crypto 0.6.2", - "starknet-types-core", - "thiserror-no-std", - "wasm-bindgen", + "starknet-crypto 0.8.1", + "starknet-types-core 0.2.3", + "thiserror 2.0.12", "zip 0.6.6", ] @@ -3013,18 +2857,14 @@ version = "1.7.0" dependencies = [ "anyhow", "ark-ec 0.4.2", - "cainome", "katana-contracts", 
"katana-primitives", "lazy_static", "num-bigint", - "parking_lot", - "reqwest 0.12.22", + "reqwest", "serde", "serde_json", "stark-vrf", - "starknet", - "starknet-crypto 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", "thiserror 1.0.69", "tracing", "url", @@ -3045,6 +2885,25 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" +[[package]] +name = "cbindgen" +version = "0.29.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "befbfd072a8e81c02f8c507aefce431fe5e7d051f83d48a23ffc9b9fe5a11799" +dependencies = [ + "clap", + "heck 0.5.0", + "indexmap 2.10.0", + "log", + "proc-macro2", + "quote", + "serde", + "serde_json", + "syn 2.0.104", + "tempfile", + "toml 0.9.5", +] + [[package]] name = "cc" version = "1.2.31" @@ -3213,6 +3072,23 @@ dependencies = [ "thiserror 2.0.12", ] +[[package]] +name = "coco-provider" +version = "0.3.0" +source = "git+https://github.com/automata-network/coco-provider-sdk#3a832b8cf5e88ef71649ab56e4efd67067b26b7c" +dependencies = [ + "bitfield", + "cbindgen", + "iocuddle", + "libc", + "log", + "rand 0.8.5", + "serde", + "serde-big-array", + "sysinfo 0.35.2", + "uuid 1.17.0", +] + [[package]] name = "colorchoice" version = "1.0.4" @@ -3342,12 +3218,6 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "constant_time_eq" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" - [[package]] name = "constant_time_eq" version = "0.3.1" @@ -3482,6 +3352,15 @@ dependencies = [ "itertools 0.10.5", ] +[[package]] +name = "crossbeam-channel" +version = "0.5.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-deque" 
version = "0.8.6" @@ -3602,6 +3481,16 @@ dependencies = [ "darling_macro 0.20.11", ] +[[package]] +name = "darling" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" +dependencies = [ + "darling_core 0.21.3", + "darling_macro 0.21.3", +] + [[package]] name = "darling_core" version = "0.14.4" @@ -3630,6 +3519,21 @@ dependencies = [ "syn 2.0.104", ] +[[package]] +name = "darling_core" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "serde", + "strsim 0.11.1", + "syn 2.0.104", +] + [[package]] name = "darling_macro" version = "0.14.4" @@ -3652,6 +3556,17 @@ dependencies = [ "syn 2.0.104", ] +[[package]] +name = "darling_macro" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" +dependencies = [ + "darling_core 0.21.3", + "quote", + "syn 2.0.104", +] + [[package]] name = "dashmap" version = "6.1.0" @@ -3731,6 +3646,20 @@ dependencies = [ "zeroize", ] +[[package]] +name = "der-parser" +version = "9.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" +dependencies = [ + "asn1-rs", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + [[package]] name = "deranged" version = "0.4.0" @@ -3917,6 +3846,12 @@ dependencies = [ "syn 2.0.104", ] +[[package]] +name = "dissimilar" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8975ffdaa0ef3661bfe02dbdcc06c9f829dfafe6a3c474de366a8d5e44276921" + [[package]] name = "doc-comment" version = "0.3.3" @@ -3929,18 +3864,6 @@ version = "0.11.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" -[[package]] -name = "dummy" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cac124e13ae9aa56acc4241f8c8207501d93afdd8d8e62f0c1f2e12f6508c65" -dependencies = [ - "darling 0.20.11", - "proc-macro2", - "quote", - "syn 2.0.104", -] - [[package]] name = "dunce" version = "1.0.5" @@ -4048,15 +3971,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" -[[package]] -name = "encoding_rs" -version = "0.8.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" -dependencies = [ - "cfg-if", -] - [[package]] name = "endian-type" version = "0.1.2" @@ -4085,45 +3999,33 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" dependencies = [ "proc-macro2", - "quote", - "syn 2.0.104", -] - -[[package]] -name = "env_filter" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "186e05a59d4c50738528153b83b0b0194d3a29507dfec16eccd4b342903397d0" -dependencies = [ - "log", - "regex", -] - -[[package]] -name = "env_home" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7f84e12ccf0a7ddc17a6c41c93326024c42920d7ee630d04950e6926645c0fe" - -[[package]] -name = "env_logger" -version = "0.11.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c863f0904021b108aa8b2f55046443e6b1ebde8fd4a15c399893aae4fa069f" -dependencies = [ - "anstream", - "anstyle", - "env_filter", - "jiff", - "log", + "quote", + "syn 2.0.104", ] +[[package]] +name = "env_home" +version = "0.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7f84e12ccf0a7ddc17a6c41c93326024c42920d7ee630d04950e6926645c0fe" + [[package]] name = "equivalent" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" +[[package]] +name = "erased-serde" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89e8918065695684b2b0702da20382d5ae6065cf3327bc2d6436bd49a71ce9f3" +dependencies = [ + "serde", + "serde_core", + "typeid", +] + [[package]] name = "errno" version = "0.3.13" @@ -4184,15 +4086,13 @@ dependencies = [ ] [[package]] -name = "fake" -version = "2.10.0" +name = "expect-test" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d391ba4af7f1d93f01fcf7b2f29e2bc9348e109dfdbf4dcbdc51dfa38dab0b6" +checksum = "63af43ff4431e848fb47472a920f14fa71c24de13255a5692e93d4e90302acb0" dependencies = [ - "deunicode", - "dummy", - "rand 0.8.5", - "serde_json", + "dissimilar", + "once_cell", ] [[package]] @@ -4276,7 +4176,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" dependencies = [ "crc32fast", - "libz-sys", "miniz_oxide", ] @@ -4292,6 +4191,27 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" +[[package]] +name = "foldhash" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = 
"foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" version = "1.2.1" @@ -4457,7 +4377,7 @@ dependencies = [ "once_cell", "prost 0.13.5", "prost-types 0.13.5", - "reqwest 0.12.22", + "reqwest", "secret-vault-value", "serde", "serde_json", @@ -4484,10 +4404,10 @@ dependencies = [ "http 1.3.1", "http-body-util", "hyper 1.6.0", - "hyper-rustls 0.27.7", + "hyper-rustls", "hyper-util", "ring", - "rustls-pemfile 2.2.0", + "rustls-pemfile", "serde", "serde_json", "thiserror 1.0.69", @@ -4720,15 +4640,6 @@ dependencies = [ "crunchy", ] -[[package]] -name = "hash32" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47d60b12902ba28e2730cd37e95b8c9223af2808df9e902d4df49588d1470606" -dependencies = [ - "byteorder", -] - [[package]] name = "hashbrown" version = "0.12.3" @@ -4756,7 +4667,6 @@ dependencies = [ "ahash 0.8.12", "allocator-api2", "rayon", - "serde", ] [[package]] @@ -4767,8 +4677,19 @@ checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" dependencies = [ "allocator-api2", "equivalent", - "foldhash", + "foldhash 0.1.5", + "serde", +] + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" +dependencies = [ + "foldhash 0.2.0", "serde", + "serde_core", ] [[package]] @@ -4794,21 +4715,10 @@ dependencies = [ "url", "walkdir", "which 7.0.3", - "winreg 0.55.0", + "winreg", "zip 2.4.2", ] -[[package]] -name = "heapless" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bfb9eb618601c89945a70e254898da93b13be0388091d42117462b265bb3fad" -dependencies = [ - "hash32", - "serde", - "stable_deref_trait", -] - [[package]] name = "heck" version = 
"0.4.1" @@ -4976,20 +4886,6 @@ dependencies = [ "want", ] -[[package]] -name = "hyper-rustls" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" -dependencies = [ - "futures-util", - "http 0.2.12", - "hyper 0.14.32", - "rustls 0.21.12", - "tokio", - "tokio-rustls 0.24.1", -] - [[package]] name = "hyper-rustls" version = "0.27.7" @@ -5328,7 +5224,7 @@ checksum = "183b3088984b400f4cfac3620d5e076c84da5364016b4f49473de574b2586235" dependencies = [ "console", "number_prefix", - "portable-atomic 1.11.1", + "portable-atomic", "unicode-width 0.2.1", "web-time", ] @@ -5392,6 +5288,15 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "inventory" +version = "0.3.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc61209c082fbeb19919bee74b176221b27223e27b65d781eb91af24eb1fb46e" +dependencies = [ + "rustversion", +] + [[package]] name = "io-uring" version = "0.7.9" @@ -5403,6 +5308,12 @@ dependencies = [ "libc", ] +[[package]] +name = "iocuddle" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8972d5be69940353d5347a1344cb375d9b457d6809b428b05bb1ca2fb9ce007" + [[package]] name = "ipnet" version = "2.11.0" @@ -5509,30 +5420,6 @@ dependencies = [ "libc", ] -[[package]] -name = "jiff" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be1f93b8b1eb69c77f24bbb0afdf66f54b632ee39af40ca21c4365a1d7347e49" -dependencies = [ - "jiff-static", - "log", - "portable-atomic 1.11.1", - "portable-atomic-util", - "serde", -] - -[[package]] -name = "jiff-static" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.104", -] - [[package]] name = "jni" version = "0.21.1" @@ -5655,7 +5542,7 @@ 
dependencies = [ "base64 0.22.1", "http-body 1.0.1", "hyper 1.6.0", - "hyper-rustls 0.27.7", + "hyper-rustls", "hyper-util", "jsonrpsee-core", "jsonrpsee-types", @@ -5819,7 +5706,7 @@ dependencies = [ "tokio", "tracing", "url", - "vergen 9.0.6", + "vergen", "vergen-gitcl", ] @@ -5844,7 +5731,7 @@ dependencies = [ "starknet", "tempfile", "thiserror 1.0.69", - "toml", + "toml 0.8.23", "url", ] @@ -5866,6 +5753,7 @@ dependencies = [ "katana-primitives", "katana-rpc-server", "katana-slot-controller", + "katana-tee", "katana-tracing", "katana-utils", "serde", @@ -5874,7 +5762,7 @@ dependencies = [ "shellexpand", "starknet", "tokio", - "toml", + "toml 0.8.23", "tracing", "url", ] @@ -5917,8 +5805,8 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "starknet-crypto 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", - "starknet-types-core", + "starknet-crypto 0.7.4", + "starknet-types-core 0.1.8", "syn 2.0.104", ] @@ -5951,8 +5839,7 @@ dependencies = [ "rand 0.8.5", "rayon", "rstest 0.18.2", - "starknet", - "starknet-types-core", + "starknet-types-core 0.2.3", "tempfile", "thiserror 1.0.69", "tokio", @@ -5986,7 +5873,7 @@ dependencies = [ "tempfile", "thiserror 1.0.69", "tracing", - "zstd 0.13.3", + "zstd", ] [[package]] @@ -5996,10 +5883,10 @@ dependencies = [ "alloy-primitives", "anyhow", "assert_matches", - "blockifier 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", + "blockifier", "cairo-lang-starknet-classes", "cairo-native", - "cairo-vm 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "cairo-vm", "criterion", "katana-chain-spec", "katana-contracts", @@ -6019,7 +5906,7 @@ dependencies = [ "serde_json", "similar-asserts", "starknet", - "starknet_api 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", + "starknet_api", "thiserror 1.0.69", "tokio", "tracing", @@ -6034,7 +5921,7 @@ dependencies = [ "http-body 1.0.1", "jsonrpsee", "katana-runner", - "reqwest 0.12.22", + "reqwest", "rust-embed", 
"serde", "serde_json", @@ -6085,7 +5972,7 @@ dependencies = [ "katana-utils", "num-traits", "parking_lot", - "reqwest 0.12.22", + "reqwest", "starknet", "thiserror 1.0.69", "tokio", @@ -6101,7 +5988,7 @@ dependencies = [ "clap", "katana-gateway-types", "katana-primitives", - "reqwest 0.12.22", + "reqwest", "rstest 0.18.2", "serde", "serde_json", @@ -6168,7 +6055,7 @@ version = "1.7.0" dependencies = [ "anyhow", "base64 0.21.7", - "cairo-vm 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "cairo-vm", "derive_more 0.99.20", "katana-contracts", "katana-primitives", @@ -6206,11 +6093,10 @@ dependencies = [ "katana-chain-spec", "katana-pool", "katana-primitives", - "reqwest 0.12.22", + "reqwest", "serde", "serde_json", "starknet", - "starknet-crypto 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", "thiserror 1.0.69", "tokio", "tracing", @@ -6240,6 +6126,7 @@ name = "katana-node" version = "1.7.0" dependencies = [ "anyhow", + "backon", "futures", "http 1.3.1", "jsonrpsee", @@ -6264,6 +6151,7 @@ dependencies = [ "katana-rpc-types", "katana-stage", "katana-tasks", + "katana-tee", "num-traits", "parking_lot", "serde", @@ -6285,7 +6173,7 @@ dependencies = [ "serde", "serde_json", "starknet", - "starknet-types-core", + "starknet-types-core 0.2.3", "tempfile", "thiserror 1.0.69", "tokio", @@ -6349,15 +6237,15 @@ dependencies = [ "anyhow", "arbitrary", "assert_matches", - "blockifier 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", + "blockifier", "cainome-cairo-serde", "cairo-lang-sierra", "cairo-lang-starknet-classes", "cairo-lang-utils", - "cairo-vm 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "cairo-vm", "criterion", "derive_more 0.99.20", - "heapless", + "katana-primitives-macro", "lazy_static", "num-bigint", "num-traits", @@ -6369,14 +6257,23 @@ dependencies = [ "serde_json_pythonic", "similar-asserts", "starknet", - "starknet-crypto 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", - 
"starknet-types-core", - "starknet_api 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", + "starknet-types-core 0.2.3", + "starknet_api", "strum 0.25.0", "strum_macros 0.25.3", "thiserror 1.0.69", ] +[[package]] +name = "katana-primitives-macro" +version = "1.7.0" +dependencies = [ + "proc-macro2", + "quote", + "starknet-types-core 0.2.3", + "syn 2.0.104", +] + [[package]] name = "katana-provider" version = "1.7.0" @@ -6406,7 +6303,7 @@ dependencies = [ "serde_json", "similar-asserts", "starknet", - "starknet-types-core", + "starknet-types-core 0.2.3", "tempfile", "thiserror 1.0.69", "tokio", @@ -6424,7 +6321,7 @@ dependencies = [ "katana-rpc-types", "katana-trie", "starknet", - "starknet-types-core", + "starknet-types-core 0.2.3", "thiserror 1.0.69", ] @@ -6473,6 +6370,7 @@ dependencies = [ "cairo-lang-starknet-classes", "cartridge", "futures", + "hex", "http 1.3.1", "indexmap 2.10.0", "jsonrpsee", @@ -6495,6 +6393,7 @@ dependencies = [ "katana-rpc-types", "katana-rpc-types-builder", "katana-tasks", + "katana-tee", "katana-tracing", "katana-trie", "katana-utils", @@ -6507,7 +6406,7 @@ dependencies = [ "serde_json", "similar-asserts", "starknet", - "starknet-crypto 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", + "starknet-types-core 0.2.3", "tempfile", "thiserror 1.0.69", "tokio", @@ -6540,7 +6439,7 @@ dependencies = [ "serde_with", "similar-asserts", "starknet", - "starknet_api 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", + "starknet_api", "thiserror 1.0.69", ] @@ -6593,6 +6492,7 @@ dependencies = [ "backon", "futures", "katana-core", + "katana-db", "katana-executor", "katana-gateway-client", "katana-gateway-types", @@ -6602,11 +6502,12 @@ dependencies = [ "katana-provider", "katana-rpc-types", "katana-tasks", + "katana-trie", "num-traits", "rayon", "rstest 0.18.2", "starknet", - "starknet-types-core", + "starknet-types-core 0.2.3", "thiserror 1.0.69", "tokio", "tracing", @@ -6624,7 +6525,7 @@ dependencies 
= [ "alloy-rpc-types-eth", "alloy-sol-types", "anyhow", - "reqwest 0.12.22", + "reqwest", ] [[package]] @@ -6639,6 +6540,20 @@ dependencies = [ "tracing", ] +[[package]] +name = "katana-tee" +version = "1.7.0" +dependencies = [ + "bincode 1.3.3", + "clap", + "rstest 0.18.2", + "serde", + "sev-snp", + "tempfile", + "thiserror 1.0.69", + "tracing", +] + [[package]] name = "katana-tracing" version = "1.7.0" @@ -6647,6 +6562,7 @@ dependencies = [ "bytes", "chrono", "clap", + "dirs", "http 1.3.1", "http-body-util", "opentelemetry", @@ -6660,6 +6576,7 @@ dependencies = [ "thiserror 1.0.69", "tower-http", "tracing", + "tracing-appender", "tracing-log 0.1.4", "tracing-opentelemetry", "tracing-subscriber", @@ -6676,7 +6593,7 @@ dependencies = [ "serde", "slab", "starknet", - "starknet-types-core", + "starknet-types-core 0.2.3", "thiserror 1.0.69", ] @@ -6775,7 +6692,21 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbc2a4da0d9e52ccfe6306801a112e81a8fc0c76aa3e4449fefeda7fef72bb34" dependencies = [ - "lambdaworks-math", + "lambdaworks-math 0.10.0", + "serde", + "sha2", + "sha3", +] + +[[package]] +name = "lambdaworks-crypto" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58b1a1c1102a5a7fbbda117b79fb3a01e033459c738a3c1642269603484fd1c1" +dependencies = [ + "lambdaworks-math 0.13.0", + "rand 0.8.5", + "rand_chacha 0.3.1", "serde", "sha2", "sha3", @@ -6791,6 +6722,20 @@ dependencies = [ "serde_json", ] +[[package]] +name = "lambdaworks-math" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "018a95aa873eb49896a858dee0d925c33f3978d073c64b08dd4f2c9b35a017c6" +dependencies = [ + "getrandom 0.2.16", + "num-bigint", + "num-traits", + "rand 0.8.5", + "serde", + "serde_json", +] + [[package]] name = "lazy_static" version = "1.5.0" @@ -6849,17 +6794,6 @@ dependencies = [ "libc", ] -[[package]] -name = "libz-sys" -version = "1.1.22" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" -dependencies = [ - "cc", - "pkg-config", - "vcpkg", -] - [[package]] name = "linux-raw-sys" version = "0.4.15" @@ -7051,17 +6985,6 @@ dependencies = [ "libc", ] -[[package]] -name = "metrics" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b9b8653cec6897f73b519a43fba5ee3d50f62fe9af80b428accdcc093b4a849" -dependencies = [ - "ahash 0.7.8", - "metrics-macros", - "portable-atomic 0.3.20", -] - [[package]] name = "metrics" version = "0.23.1" @@ -7069,7 +6992,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3045b4193fbdc5b5681f32f11070da9be3609f189a79f3390706d42587f46bb5" dependencies = [ "ahash 0.8.12", - "portable-atomic 1.11.1", + "portable-atomic", ] [[package]] @@ -7079,7 +7002,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25dea7ac8057892855ec285c440160265225438c3c45072613c25a4b26e98ef5" dependencies = [ "ahash 0.8.12", - "portable-atomic 1.11.1", + "portable-atomic", ] [[package]] @@ -7103,7 +7026,7 @@ dependencies = [ "base64 0.22.1", "http-body-util", "hyper 1.6.0", - "hyper-rustls 0.27.7", + "hyper-rustls", "hyper-util", "indexmap 2.10.0", "ipnet", @@ -7115,17 +7038,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "metrics-macros" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "metrics-process" version = "2.4.0" @@ -7277,7 +7189,7 @@ dependencies = [ "num-complex", "num-integer", "num-traits", - "portable-atomic 1.11.1", + "portable-atomic", "portable-atomic-util", "rawpointer", ] @@ -7488,6 +7400,7 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum 
= "77e878c846a8abae00dd069496dbe8751b16ac1c3d6bd2a7283a938e8228f90d" dependencies = [ + "proc-macro-crate", "proc-macro2", "quote", "syn 2.0.104", @@ -7550,6 +7463,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "oid-registry" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8d8034d9489cdaf79228eb9f6a3b8d7bb32ba00d6645ebd48eef4077ceb5bd9" +dependencies = [ + "asn1-rs", +] + [[package]] name = "once_cell" version = "1.21.3" @@ -7574,12 +7496,60 @@ version = "11.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" +[[package]] +name = "openssl" +version = "0.10.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" +dependencies = [ + "bitflags 2.9.1", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + [[package]] name = "openssl-probe" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +[[package]] +name = "openssl-src" +version = "300.5.4+3.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a507b3792995dae9b0df8a1c1e3771e8418b7c2d9f0baeba32e6fe8b06c7cb72" +dependencies = [ + "cc", +] + +[[package]] +name = "openssl-sys" +version = "0.9.111" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" +dependencies = [ + "cc", + "libc", + "openssl-src", + "pkg-config", + "vcpkg", +] + 
[[package]] name = "opentelemetry" version = "0.30.0" @@ -7623,7 +7593,7 @@ dependencies = [ "bytes", "http 1.3.1", "opentelemetry", - "reqwest 0.12.22", + "reqwest", ] [[package]] @@ -7638,7 +7608,7 @@ dependencies = [ "opentelemetry-proto", "opentelemetry_sdk", "prost 0.13.5", - "reqwest 0.12.22", + "reqwest", "thiserror 2.0.12", "tokio", "tonic 0.13.1", @@ -7785,77 +7755,16 @@ dependencies = [ ] [[package]] -name = "password-hash" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" -dependencies = [ - "base64ct", - "rand_core 0.6.4", - "subtle", -] - -[[package]] -name = "paste" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" - -[[package]] -name = "path-clean" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17359afc20d7ab31fdb42bb844c8b3bb1dabd7dcf7e68428492da7f16966fcef" - -[[package]] -name = "pathfinder-common" -version = "0.14.3" -source = "git+https://github.com/Moonsong-Labs/pathfinder?rev=9c19d9a37be8f447ec4548456c440ccbd0e44260#9c19d9a37be8f447ec4548456c440ccbd0e44260" -dependencies = [ - "anyhow", - "bitvec", - "fake", - "metrics 0.20.1", - "num-bigint", - "paste", - "pathfinder-crypto", - "primitive-types", - "rand 0.8.5", - "serde", - "serde_json", - "serde_with", - "sha3", - "tagged", - "tagged-debug-derive", - "thiserror 1.0.69", - "vergen 8.3.2", -] - -[[package]] -name = "pathfinder-crypto" -version = "0.14.3" -source = "git+https://github.com/Moonsong-Labs/pathfinder?rev=9c19d9a37be8f447ec4548456c440ccbd0e44260#9c19d9a37be8f447ec4548456c440ccbd0e44260" -dependencies = [ - "bitvec", - "fake", - "rand 0.8.5", - "serde", -] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] -name = "pathfinder-serde" -version = "0.14.3" -source = "git+https://github.com/Moonsong-Labs/pathfinder?rev=9c19d9a37be8f447ec4548456c440ccbd0e44260#9c19d9a37be8f447ec4548456c440ccbd0e44260" -dependencies = [ - "anyhow", - "num-bigint", - "pathfinder-common", - "pathfinder-crypto", - "primitive-types", - "serde", - "serde_json", - "serde_with", -] +name = "path-clean" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17359afc20d7ab31fdb42bb844c8b3bb1dabd7dcf7e68428492da7f16966fcef" [[package]] name = "pbkdf2" @@ -7864,9 +7773,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ "digest 0.10.7", - "hmac", - "password-hash", - "sha2", ] [[package]] @@ -8085,15 +7991,6 @@ dependencies = [ "plotters-backend", ] -[[package]] -name = "portable-atomic" -version = "0.3.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e30165d31df606f5726b090ec7592c308a0eaf61721ff64c9a3018e344a8753e" -dependencies = [ - "portable-atomic 1.11.1", -] - [[package]] name = "portable-atomic" version = "1.11.1" @@ -8106,7 +8003,7 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" dependencies = [ - "portable-atomic 1.11.1", + "portable-atomic", ] [[package]] @@ -8341,9 +8238,9 @@ dependencies = [ [[package]] name = "proptest-derive" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" +checksum = "095a99f75c69734802359b682be8daaf8980296731f6470434ea2c652af1dd30" dependencies = [ "proc-macro2", "quote", @@ -8467,39 +8364,6 @@ dependencies = [ "prost 0.14.1", ] -[[package]] -name = 
"prove_block" -version = "0.1.0" -source = "git+https://github.com/cartridge-gg/snos?rev=c96eb9e#c96eb9e433c89da1a12a8a8474a30e6590431662" -dependencies = [ - "anyhow", - "blockifier 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "cairo-lang-starknet-classes", - "cairo-lang-utils", - "cairo-vm 1.0.2 (git+https://github.com/kariy/cairo-vm?branch=kariy%2F1.0.2_clear-cell)", - "clap", - "env_logger", - "futures-core", - "log", - "num-bigint", - "pathfinder-common", - "pathfinder-crypto", - "pathfinder-serde", - "reqwest 0.11.27", - "rpc-client", - "rpc-replay", - "serde", - "serde_json", - "serde_with", - "starknet", - "starknet-os", - "starknet-os-types", - "starknet-types-core", - "starknet_api 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "thiserror 1.0.69", - "tokio", -] - [[package]] name = "ptr_meta" version = "0.1.4" @@ -8719,6 +8583,15 @@ dependencies = [ "rand_core 0.9.3", ] +[[package]] +name = "rapidhash" +version = "4.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2988730ee014541157f48ce4dcc603940e00915edc3c7f9a8d78092256bb2493" +dependencies = [ + "rustversion", +] + [[package]] name = "raw-cpuid" version = "11.5.0" @@ -8859,47 +8732,6 @@ dependencies = [ "bytecheck", ] -[[package]] -name = "reqwest" -version = "0.11.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" -dependencies = [ - "base64 0.21.7", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2 0.3.27", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.32", - "hyper-rustls 0.24.2", - "ipnet", - "js-sys", - "log", - "mime", - "once_cell", - "percent-encoding", - "pin-project-lite", - "rustls 0.21.12", - "rustls-pemfile 1.0.4", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper 0.1.2", - "system-configuration", - "tokio", - "tokio-rustls 0.24.1", - "tower-service", - "url", - 
"wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "webpki-roots 0.25.4", - "winreg 0.50.0", -] - [[package]] name = "reqwest" version = "0.12.22" @@ -8909,20 +8741,17 @@ dependencies = [ "async-compression", "base64 0.22.1", "bytes", - "encoding_rs", "futures-channel", "futures-core", "futures-util", - "h2 0.4.12", "http 1.3.1", "http-body 1.0.1", "http-body-util", "hyper 1.6.0", - "hyper-rustls 0.27.7", + "hyper-rustls", "hyper-util", "js-sys", "log", - "mime", "mime_guess", "percent-encoding", "pin-project-lite", @@ -8980,7 +8809,7 @@ dependencies = [ "anyhow", "headless_chrome", "nix 0.30.1", - "reqwest 0.12.22", + "reqwest", "tokio", ] @@ -9082,39 +8911,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afab94fb28594581f62d981211a9a4d53cc8130bbcbbb89a0440d9b8e81a7746" -[[package]] -name = "rpc-client" -version = "0.1.0" -source = "git+https://github.com/cartridge-gg/snos?rev=c96eb9e#c96eb9e433c89da1a12a8a8474a30e6590431662" -dependencies = [ - "log", - "reqwest 0.11.27", - "serde", - "serde_json", - "starknet", - "starknet-crypto 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", - "starknet-os", - "starknet-types-core", - "thiserror 1.0.69", -] - -[[package]] -name = "rpc-replay" -version = "0.1.0" -source = "git+https://github.com/cartridge-gg/snos?rev=c96eb9e#c96eb9e433c89da1a12a8a8474a30e6590431662" -dependencies = [ - "blockifier 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "cairo-lang-starknet-classes", - "rpc-client", - "serde", - "serde_json", - "starknet", - "starknet-os-types", - "starknet_api 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "thiserror 1.0.69", - "tokio", -] - [[package]] name = "rsb_derive" version = "0.5.1" @@ -9359,6 +9155,15 @@ dependencies = [ "semver 1.0.26", ] +[[package]] +name = "rusticata-macros" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +dependencies = [ + "nom", +] + [[package]] name = "rustix" version = "0.38.44" @@ -9385,18 +9190,6 @@ dependencies = [ "windows-sys 0.60.2", ] -[[package]] -name = "rustls" -version = "0.21.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" -dependencies = [ - "log", - "ring", - "rustls-webpki 0.101.7", - "sct", -] - [[package]] name = "rustls" version = "0.22.4" @@ -9434,7 +9227,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" dependencies = [ "openssl-probe", - "rustls-pemfile 2.2.0", + "rustls-pemfile", "rustls-pki-types", "schannel", "security-framework 2.11.1", @@ -9452,15 +9245,6 @@ dependencies = [ "security-framework 3.3.0", ] -[[package]] -name = "rustls-pemfile" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" -dependencies = [ - "base64 0.21.7", -] - [[package]] name = "rustls-pemfile" version = "2.2.0" @@ -9507,16 +9291,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" -[[package]] -name = "rustls-webpki" -version = "0.101.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "rustls-webpki" version = "0.102.8" @@ -9678,16 +9452,6 @@ dependencies = [ "sha2", ] -[[package]] -name = "sct" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring", - "untrusted", -] - [[package]] 
name = "seahash" version = "4.1.0" @@ -9814,13 +9578,23 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ + "serde_core", "serde_derive", ] +[[package]] +name = "serde-big-array" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11fc7cc2c76d73e0f27ee52abbd64eec84d46f370c88371120433196934e4b7f" +dependencies = [ + "serde", +] + [[package]] name = "serde-utils" version = "1.7.0" @@ -9829,11 +9603,20 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", @@ -9893,6 +9676,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40734c41988f7306bb04f0ecf60ec0f3f1caa34290e4e8ea471dcd3346483b83" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -9938,26 +9730,33 @@ dependencies = [ ] [[package]] -name = "serde_yaml" -version = "0.9.34+deprecated" +name = "serdect" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" +checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" dependencies = [ - "indexmap 2.10.0", - "itoa", - "ryu", + "base16ct", "serde", - "unsafe-libyaml", ] [[package]] -name = "serdect" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" +name = "sev-snp" +version = "0.3.0" +source = "git+https://github.com/automata-network/amd-sev-snp-attestation-sdk?branch=main#07162a4dd8d692af68484084b972f8b9b286359b" dependencies = [ - "base16ct", + "asn1-rs", + "bincode 1.3.3", + "bitfield", + "cbindgen", + "coco-provider", + "hex", + "openssl", + "rand 0.8.5", "serde", + "serde-big-array", + "sysinfo 0.33.1", + "ureq 2.12.1", + "x509-parser", ] [[package]] @@ -10178,30 +9977,12 @@ dependencies = [ [[package]] name = "smol_str" -version = "0.2.2" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd538fb6910ac1099850255cf94a94df6551fbdd602454387d0adb2d1ca6dead" +checksum = "3498b0a27f93ef1402f20eefacfaa1691272ac4eca1cdc8c596cb0a245d6cbf5" dependencies = [ - "serde", -] - -[[package]] -name = "snos-integration-test" -version = "1.7.0" -dependencies = [ - "anyhow", - "c-kzg", - "cairo-vm 1.0.2 (git+https://github.com/kariy/cairo-vm?branch=kariy%2F1.0.2_clear-cell)", - "either", - "katana-chain-spec", - "katana-messaging", - "katana-node", - "katana-primitives", - "katana-provider", - "prove_block", - "starknet", - "starknet-os", - "tokio", + "borsh", + "serde_core", ] [[package]] @@ -10310,14 +10091,15 @@ dependencies = [ [[package]] name = "starknet" -version = "0.17.0-rc.2" -source = "git+https://github.com/kariy/starknet-rs?rev=2ef3088#2ef30887a2be68b6aa4514d6b56ba12fcb3988be" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f5ed01c14136e56dcdf21385d20c4a6fdd3509947cb56cca45fc765ef5809add" dependencies = [ "starknet-accounts", "starknet-contract", "starknet-core", "starknet-core-derive", - "starknet-crypto 0.7.4 (git+https://github.com/kariy/starknet-rs?rev=2ef3088)", + "starknet-crypto 0.8.1", "starknet-macros", "starknet-providers", "starknet-signers", @@ -10325,13 +10107,14 @@ dependencies = [ [[package]] name = "starknet-accounts" -version = "0.16.0-rc.2" -source = "git+https://github.com/kariy/starknet-rs?rev=2ef3088#2ef30887a2be68b6aa4514d6b56ba12fcb3988be" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f7c118729bcdcfa1610844047cbdb23090fb1d4172a36bb97a663be8d022d1a" dependencies = [ "async-trait", "auto_impl", "starknet-core", - "starknet-crypto 0.7.4 (git+https://github.com/kariy/starknet-rs?rev=2ef3088)", + "starknet-crypto 0.8.1", "starknet-providers", "starknet-signers", "thiserror 1.0.69", @@ -10339,8 +10122,9 @@ dependencies = [ [[package]] name = "starknet-contract" -version = "0.16.0-rc.2" -source = "git+https://github.com/kariy/starknet-rs?rev=2ef3088#2ef30887a2be68b6aa4514d6b56ba12fcb3988be" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccb64331b72caf51c0d8b684b62012f9a771015b4cf5e52cba9bf61be8384ad3" dependencies = [ "serde", "serde_json", @@ -10353,13 +10137,14 @@ dependencies = [ [[package]] name = "starknet-core" -version = "0.16.0-rc.2" -source = "git+https://github.com/kariy/starknet-rs?rev=2ef3088#2ef30887a2be68b6aa4514d6b56ba12fcb3988be" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efb7212226769766c1c7d79b70f9242ffbd213290a41604ecc7e78faa0ed0deb" dependencies = [ "base64 0.21.7", "crypto-bigint", "flate2", - "foldhash", + "foldhash 0.1.5", "hex", "indexmap 2.10.0", "num-traits", @@ -10369,14 +10154,15 @@ dependencies = [ "serde_with", "sha3", "starknet-core-derive", - "starknet-crypto 0.7.4 
(git+https://github.com/kariy/starknet-rs?rev=2ef3088)", - "starknet-types-core", + "starknet-crypto 0.8.1", + "starknet-types-core 0.2.3", ] [[package]] name = "starknet-core-derive" version = "0.1.0" -source = "git+https://github.com/kariy/starknet-rs?rev=2ef3088#2ef30887a2be68b6aa4514d6b56ba12fcb3988be" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b08520b7d80eda7bf1a223e8db4f9bb5779a12846f15ebf8f8d76667eca7f5ad" dependencies = [ "proc-macro2", "quote", @@ -10417,15 +10203,16 @@ dependencies = [ "num-traits", "rfc6979", "sha2", - "starknet-curve 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "starknet-types-core", + "starknet-curve 0.5.1", + "starknet-types-core 0.1.8", "zeroize", ] [[package]] name = "starknet-crypto" -version = "0.7.4" -source = "git+https://github.com/kariy/starknet-rs?rev=2ef3088#2ef30887a2be68b6aa4514d6b56ba12fcb3988be" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1004a16c25dc6113c19d4f9d0c19ff97d85804829894bba22c0d0e9e7b249812" dependencies = [ "crypto-bigint", "hex", @@ -10435,8 +10222,8 @@ dependencies = [ "num-traits", "rfc6979", "sha2", - "starknet-curve 0.5.1 (git+https://github.com/kariy/starknet-rs?rev=2ef3088)", - "starknet-types-core", + "starknet-curve 0.6.0", + "starknet-types-core 0.2.3", "zeroize", ] @@ -10466,15 +10253,16 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bcde6bd74269b8161948190ace6cf069ef20ac6e79cd2ba09b320efa7500b6de" dependencies = [ - "starknet-types-core", + "starknet-types-core 0.1.8", ] [[package]] name = "starknet-curve" -version = "0.5.1" -source = "git+https://github.com/kariy/starknet-rs?rev=2ef3088#2ef30887a2be68b6aa4514d6b56ba12fcb3988be" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22c898ae81b6409532374cf237f1bd752d068b96c6ad500af9ebbd0d9bb712f6" dependencies = [ - "starknet-types-core", + 
"starknet-types-core 0.2.3", ] [[package]] @@ -10492,113 +10280,20 @@ dependencies = [ ] [[package]] -name = "starknet-gateway-types" -version = "0.14.3" -source = "git+https://github.com/Moonsong-Labs/pathfinder?rev=9c19d9a37be8f447ec4548456c440ccbd0e44260#9c19d9a37be8f447ec4548456c440ccbd0e44260" -dependencies = [ - "anyhow", - "fake", - "pathfinder-common", - "pathfinder-crypto", - "pathfinder-serde", - "primitive-types", - "rand 0.8.5", - "reqwest 0.12.22", - "rstest 0.18.2", - "serde", - "serde_json", - "serde_with", - "sha3", - "thiserror 1.0.69", - "tokio", -] - -[[package]] -name = "starknet-macros" -version = "0.2.5-rc.2" -source = "git+https://github.com/kariy/starknet-rs?rev=2ef3088#2ef30887a2be68b6aa4514d6b56ba12fcb3988be" -dependencies = [ - "starknet-core", - "syn 2.0.104", -] - -[[package]] -name = "starknet-os" -version = "0.1.0" -source = "git+https://github.com/cartridge-gg/snos?rev=c96eb9e#c96eb9e433c89da1a12a8a8474a30e6590431662" -dependencies = [ - "anyhow", - "ark-ec 0.4.2", - "ark-ff 0.4.2", - "ark-secp256k1 0.4.0", - "ark-secp256r1 0.4.0", - "base64 0.21.7", - "bitvec", - "blockifier 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "c-kzg", - "cairo-lang-casm", - "cairo-lang-starknet", - "cairo-lang-starknet-classes", - "cairo-type-derive", - "cairo-vm 1.0.2 (git+https://github.com/kariy/cairo-vm?branch=kariy%2F1.0.2_clear-cell)", - "futures", - "futures-util", - "heck 0.4.1", - "hex", - "indexmap 2.10.0", - "indoc", - "keccak", - "lazy_static", - "log", - "num-bigint", - "num-integer", - "num-traits", - "pathfinder-common", - "pathfinder-crypto", - "reqwest 0.11.27", - "serde", - "serde_json", - "serde_with", - "serde_yaml", - "sha2", - "starknet-crypto 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", - "starknet-gateway-types", - "starknet-os-types", - "starknet-types-core", - "starknet_api 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "thiserror 1.0.69", - "tokio", - "uuid 
1.17.0", - "zip 0.6.6", -] - -[[package]] -name = "starknet-os-types" -version = "0.1.0" -source = "git+https://github.com/cartridge-gg/snos?rev=c96eb9e#c96eb9e433c89da1a12a8a8474a30e6590431662" +name = "starknet-macros" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d59e1eb22f4366385b132ba7016faa5a6457f1f23f896f737a06da626455e7b" dependencies = [ - "blockifier 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "cairo-lang-starknet-classes", - "cairo-vm 1.0.2 (git+https://github.com/kariy/cairo-vm?branch=kariy%2F1.0.2_clear-cell)", - "flate2", - "num-bigint", - "once_cell", - "serde", - "serde_json", - "serde_with", - "starknet", - "starknet-crypto 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", - "starknet-gateway-types", - "starknet-types-core", - "starknet_api 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "thiserror 1.0.69", - "tokio", + "starknet-core", + "syn 2.0.104", ] [[package]] name = "starknet-providers" -version = "0.16.0-rc.2" -source = "git+https://github.com/kariy/starknet-rs?rev=2ef3088#2ef30887a2be68b6aa4514d6b56ba12fcb3988be" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15fc3d94cc008cea64e291b261e8349065424ee7491e5dd0fa9bd688818bece1" dependencies = [ "async-trait", "auto_impl", @@ -10606,7 +10301,7 @@ dependencies = [ "flate2", "getrandom 0.2.16", "log", - "reqwest 0.12.22", + "reqwest", "serde", "serde_json", "serde_with", @@ -10617,8 +10312,9 @@ dependencies = [ [[package]] name = "starknet-signers" -version = "0.14.0-rc.2" -source = "git+https://github.com/kariy/starknet-rs?rev=2ef3088#2ef30887a2be68b6aa4514d6b56ba12fcb3988be" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d839b06d899ef3a0de11b1e9a91a14c118b1ed36830ec8e59d9fbc9a1e51976b" dependencies = [ "async-trait", "auto_impl", @@ -10627,7 +10323,7 @@ dependencies = [ 
"getrandom 0.2.16", "rand 0.8.5", "starknet-core", - "starknet-crypto 0.7.4 (git+https://github.com/kariy/starknet-rs?rev=2ef3088)", + "starknet-crypto 0.8.1", "thiserror 1.0.69", ] @@ -10636,63 +10332,50 @@ name = "starknet-types-core" version = "0.1.8" source = "git+https://github.com/kariy/types-rs?rev=0f6ae31#0f6ae31a5a19352cc99b74604fab15cd9d1bb76e" dependencies = [ - "arbitrary", - "lambdaworks-crypto", - "lambdaworks-math", - "lazy_static", + "lambdaworks-crypto 0.10.0", + "lambdaworks-math 0.10.0", "num-bigint", "num-integer", "num-traits", - "parity-scale-codec", "serde", "size-of", "zeroize", ] [[package]] -name = "starknet_api" -version = "0.0.0" -source = "git+https://github.com/dojoengine/sequencer?rev=4b1587c5e#4b1587c5e3effc4cfbbb8551cdcab6454a2dbe30" +name = "starknet-types-core" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab92594a86ac627dd4c8d3350362cc8035e55c548c27c71dfa4c9fc6b3b6ab1a" dependencies = [ - "apollo_infra_utils 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=4b1587c5e)", - "base64 0.13.1", - "bitvec", - "cached", - "cairo-lang-runner", - "cairo-lang-starknet-classes", - "derive_more 0.99.20", - "flate2", - "hex", - "indexmap 2.10.0", - "itertools 0.12.1", + "arbitrary", + "blake2", + "digest 0.10.7", + "lambdaworks-crypto 0.13.0", + "lambdaworks-math 0.13.0", + "lazy_static", "num-bigint", + "num-integer", "num-traits", - "pretty_assertions", - "primitive-types", - "rand 0.8.5", - "semver 1.0.26", + "parity-scale-codec", + "rand 0.9.2", "serde", - "serde_json", - "sha3", "size-of", - "starknet-crypto 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", - "starknet-types-core", - "strum 0.25.0", - "strum_macros 0.25.3", - "thiserror 1.0.69", + "zeroize", ] [[package]] name = "starknet_api" -version = "0.0.0" -source = "git+https://github.com/dojoengine/sequencer?rev=5d737b9c9#5d737b9c90a14bdf4483d759d1a1d4ce64aa9fd2" +version = "0.16.0-rc.0" +source = 
"git+https://github.com/dojoengine/sequencer?rev=d2591bb#d2591bb07b5da4bf0feebf18953c291becd45910" dependencies = [ - "apollo_infra_utils 0.0.0 (git+https://github.com/dojoengine/sequencer?rev=5d737b9c9)", + "apollo_infra_utils", "base64 0.13.1", "bitvec", "cached", "cairo-lang-runner", "cairo-lang-starknet-classes", + "cairo-lang-utils", "derive_more 0.99.20", "flate2", "hex", @@ -10708,11 +10391,12 @@ dependencies = [ "serde_json", "sha3", "size-of", - "starknet-crypto 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", - "starknet-types-core", + "starknet-crypto 0.8.1", + "starknet-types-core 0.2.3", "strum 0.25.0", "strum_macros 0.25.3", "thiserror 1.0.69", + "time", ] [[package]] @@ -10853,9 +10537,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "1.3.0" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a985ff4ffd7373e10e0fb048110fb11a162e5a4c47f92ddb8787a6f766b769" +checksum = "5f92d01b5de07eaf324f7fca61cc6bd3d82bbc1de5b6c963e6fe79e86f36580d" dependencies = [ "paste", "proc-macro2", @@ -10889,6 +10573,20 @@ dependencies = [ "syn 2.0.104", ] +[[package]] +name = "sysinfo" +version = "0.33.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fc858248ea01b66f19d8e8a6d55f41deaf91e9d495246fd01368d99935c6c01" +dependencies = [ + "core-foundation-sys", + "libc", + "memchr", + "ntapi", + "rayon", + "windows 0.57.0", +] + [[package]] name = "sysinfo" version = "0.34.2" @@ -10916,45 +10614,6 @@ dependencies = [ "windows 0.61.3", ] -[[package]] -name = "system-configuration" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" -dependencies = [ - "bitflags 1.3.2", - "core-foundation 0.9.4", - "system-configuration-sys", -] - -[[package]] -name = "system-configuration-sys" -version = "0.5.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "tagged" -version = "0.14.3" -source = "git+https://github.com/Moonsong-Labs/pathfinder?rev=9c19d9a37be8f447ec4548456c440ccbd0e44260#9c19d9a37be8f447ec4548456c440ccbd0e44260" -dependencies = [ - "fake", -] - -[[package]] -name = "tagged-debug-derive" -version = "0.14.3" -source = "git+https://github.com/Moonsong-Labs/pathfinder?rev=9c19d9a37be8f447ec4548456c440ccbd0e44260#9c19d9a37be8f447ec4548456c440ccbd0e44260" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "tap" version = "1.0.1" @@ -11041,26 +10700,6 @@ dependencies = [ "syn 2.0.104", ] -[[package]] -name = "thiserror-impl-no-std" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58e6318948b519ba6dc2b442a6d0b904ebfb8d411a3ad3e07843615a72249758" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "thiserror-no-std" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3ad459d94dd517257cc96add8a43190ee620011bb6e6cdc82dafd97dfafafea" -dependencies = [ - "thiserror-impl-no-std", -] - [[package]] name = "thread_local" version = "1.1.9" @@ -11197,16 +10836,6 @@ dependencies = [ "syn 2.0.104", ] -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls 0.21.12", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.25.0" @@ -11262,11 +10891,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ "serde", - "serde_spanned", - "toml_datetime", + 
"serde_spanned 0.6.9", + "toml_datetime 0.6.11", "toml_edit", ] +[[package]] +name = "toml" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75129e1dc5000bfbaa9fee9d1b21f974f9fbad9daec557a521ee6e080825f6e8" +dependencies = [ + "indexmap 2.10.0", + "serde", + "serde_spanned 1.0.0", + "toml_datetime 0.7.0", + "toml_parser", + "toml_writer", + "winnow", +] + [[package]] name = "toml_datetime" version = "0.6.11" @@ -11276,6 +10920,15 @@ dependencies = [ "serde", ] +[[package]] +name = "toml_datetime" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bade1c3e902f58d73d3f294cd7f20391c1cb2fbcb643b73566bc773971df91e3" +dependencies = [ + "serde", +] + [[package]] name = "toml_edit" version = "0.22.27" @@ -11284,18 +10937,33 @@ checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ "indexmap 2.10.0", "serde", - "serde_spanned", - "toml_datetime", + "serde_spanned 0.6.9", + "toml_datetime 0.6.11", "toml_write", "winnow", ] +[[package]] +name = "toml_parser" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b551886f449aa90d4fe2bdaa9f4a2577ad2dde302c61ecf262d80b116db95c10" +dependencies = [ + "winnow", +] + [[package]] name = "toml_write" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" +[[package]] +name = "toml_writer" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df8b2b54733674ad286d16267dcfc7a71ed5c776e4ac7aa3c3e2561f7c637bf2" + [[package]] name = "tonic" version = "0.11.0" @@ -11317,7 +10985,7 @@ dependencies = [ "pin-project 1.1.10", "prost 0.12.6", "rustls-native-certs 0.7.3", - "rustls-pemfile 2.2.0", + "rustls-pemfile", "rustls-pki-types", "tokio", "tokio-rustls 0.25.0", @@ -11469,6 +11137,18 @@ dependencies = [ 
"tracing-core", ] +[[package]] +name = "tracing-appender" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "786d480bce6247ab75f005b14ae1624ad978d3029d9113f0a22fa1ac773faeaf" +dependencies = [ + "crossbeam-channel", + "thiserror 2.0.12", + "time", + "tracing-subscriber", +] + [[package]] name = "tracing-attributes" version = "0.1.30" @@ -11636,12 +11316,42 @@ version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6af6ae20167a9ece4bcb41af5b80f8a1f1df981f6391189ce00fd257af04126a" +[[package]] +name = "typeid" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc7d623258602320d5c55d1bc22793b57daff0ec7efc270ea7d55ce1d5f5471c" + [[package]] name = "typenum" version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" +[[package]] +name = "typetag" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be2212c8a9b9bcfca32024de14998494cf9a5dfa59ea1b829de98bac374b86bf" +dependencies = [ + "erased-serde", + "inventory", + "once_cell", + "serde", + "typetag-impl", +] + +[[package]] +name = "typetag-impl" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27a7a9b72ba121f6f1f6c3632b85604cac41aedb5ddc70accbebb6cac83de846" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + [[package]] name = "ucd-trie" version = "0.1.7" @@ -11738,12 +11448,6 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7264e107f553ccae879d21fbea1d6724ac785e8c3bfc762137959b5802826ef3" -[[package]] -name = "unsafe-libyaml" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" - [[package]] name = "untrusted" version 
= "0.9.0" @@ -11795,7 +11499,7 @@ dependencies = [ "log", "percent-encoding", "rustls 0.23.31", - "rustls-pemfile 2.2.0", + "rustls-pemfile", "rustls-pki-types", "ureq-proto", "utf-8", @@ -11866,7 +11570,6 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d" dependencies = [ - "getrandom 0.3.3", "js-sys", "serde", "wasm-bindgen", @@ -11923,18 +11626,6 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" -[[package]] -name = "vergen" -version = "8.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2990d9ea5967266ea0ccf413a4aa5c42a93dbcfda9cb49a97de6931726b12566" -dependencies = [ - "anyhow", - "cfg-if", - "rustversion", - "time", -] - [[package]] name = "vergen" version = "9.0.6" @@ -11962,7 +11653,7 @@ dependencies = [ "derive_builder", "rustversion", "time", - "vergen 9.0.6", + "vergen", "vergen-lib", ] @@ -11983,6 +11674,12 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +[[package]] +name = "virtue" +version = "0.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "051eb1abcf10076295e815102942cc58f9d5e3b4560e46e53c21e8ff6f3af7b1" + [[package]] name = "wait-timeout" version = "0.2.1" @@ -12162,12 +11859,6 @@ dependencies = [ "rustls-pki-types", ] -[[package]] -name = "webpki-roots" -version = "0.25.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" - [[package]] name = "webpki-roots" version = "0.26.11" @@ -12766,16 +12457,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "winreg" -version = "0.50.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] - [[package]] name = "winreg" version = "0.55.0" @@ -12840,6 +12521,24 @@ dependencies = [ "tap", ] +[[package]] +name = "x509-parser" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" +dependencies = [ + "asn1-rs", + "data-encoding", + "der-parser", + "lazy_static", + "nom", + "oid-registry", + "ring", + "rusticata-macros", + "thiserror 1.0.69", + "time", +] + [[package]] name = "xshell" version = "0.2.7" @@ -12855,6 +12554,12 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32ac00cd3f8ec9c1d33fb3e7958a82df6989c42d747bd326c822b1d625283547" +[[package]] +name = "xxhash-rust" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdd20c5420375476fbd4394763288da7eb0cc0b8c11deed431a91562af7335d3" + [[package]] name = "xz2" version = "0.1.7" @@ -12994,18 +12699,10 @@ version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "760394e246e4c28189f19d488c058bf16f564016aefac5d32bb1f3b51d5e9261" dependencies = [ - "aes", "byteorder", - "bzip2 0.4.4", - "constant_time_eq 0.1.5", "crc32fast", "crossbeam-utils", "flate2", - "hmac", - "pbkdf2 0.11.0", - "sha1", - "time", - "zstd 0.11.2+zstd.1.5.2", ] [[package]] @@ -13016,8 +12713,8 @@ checksum = "fabe6324e908f85a1c52063ce7aa26b68dcb7eb6dbc83a2d148403c9bc3eba50" dependencies = [ "aes", "arbitrary", - "bzip2 0.5.2", - "constant_time_eq 0.3.1", + "bzip2", + "constant_time_eq", "crc32fast", "crossbeam-utils", "deflate64", @@ -13035,7 +12732,7 @@ dependencies = [ "xz2", "zeroize", "zopfli", - "zstd 0.13.3", + "zstd", ] [[package]] @@ -13050,32 +12747,13 @@ dependencies = [ "simd-adler32", ] -[[package]] -name = "zstd" -version = "0.11.2+zstd.1.5.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" -dependencies = [ - "zstd-safe 5.0.2+zstd.1.5.2", -] - [[package]] name = "zstd" version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" dependencies = [ - "zstd-safe 7.2.4", -] - -[[package]] -name = "zstd-safe" -version = "5.0.2+zstd.1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" -dependencies = [ - "libc", - "zstd-sys", + "zstd-safe", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 844f00c1c..64252fd0e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,6 +24,7 @@ members = [ "crates/pool/pool", "crates/pool/pool-api", "crates/primitives", + "crates/primitives/macro", "crates/rpc/rpc-server", "crates/rpc/rpc-api", "crates/rpc/rpc-client", @@ -42,12 +43,13 @@ members = [ "crates/sync/pipeline", "crates/sync/stage", "crates/tasks", + "crates/tee", "crates/tracing", "crates/trie", "crates/utils", "tests/db-compat", "tests/reverse-proxy", - "tests/snos", + # "tests/snos", ] [workspace.package] @@ -94,6 +96,7 @@ katana-pipeline = { path = "crates/sync/pipeline" } katana-pool = { path = "crates/pool/pool" } katana-pool-api = { path = "crates/pool/pool-api" } katana-primitives = { path = "crates/primitives" } +katana-primitives-macro = { path = "crates/primitives/macro" } katana-provider = { path = "crates/storage/provider/provider" } katana-provider-api = { path = "crates/storage/provider/provider-api" } katana-rpc-server = { path = "crates/rpc/rpc-server" } @@ -106,18 +109,19 @@ katana-slot-controller = { path = "crates/controller" } katana-stage = { path = "crates/sync/stage" } katana-starknet = { path = "crates/starknet" } katana-tasks = { path = "crates/tasks" } +katana-tee = { path = "crates/tee" } katana-tracing = { 
path = "crates/tracing" } katana-trie = { path = "crates/trie" } katana-utils = { path = "crates/utils" } # cairo-lang -cairo-lang-casm = "2.11.2" -cairo-lang-runner = "2.11.2" -cairo-lang-sierra = "2.11.2" -cairo-lang-sierra-to-casm = "2.11.2" -cairo-lang-starknet = "2.11.2" -cairo-lang-starknet-classes = "2.11.2" -cairo-lang-utils = "2.11.2" +cairo-lang-casm = "2.12.3" +cairo-lang-runner = "2.12.3" +cairo-lang-sierra = "2.12.3" +cairo-lang-sierra-to-casm = "2.12.3" +cairo-lang-starknet = "2.12.3" +cairo-lang-starknet-classes = "2.12.3" +cairo-lang-utils = "2.12.3" anyhow = "1.0.89" arbitrary = { version = "1.3.2", features = [ "derive" ] } @@ -128,6 +132,7 @@ auto_impl = "1.2.0" backon = { version = "1.5", features = [ "tokio-sleep" ] } base64 = "0.21" bigdecimal = "0.4.1" +bincode = "1.3" bitvec = "1.0.1" camino = { version = "1.1.2", features = [ "serde1" ] } chrono = { version = "0.4.24", features = [ "serde" ] } @@ -156,6 +161,7 @@ regex = "1.10.3" reqwest = { version = "0.12.15", features = [ "json", "rustls-tls" ], default-features = false } rstest = "0.18.2" rstest_reuse = "0.6.0" +sev-snp = { git = "https://github.com/automata-network/amd-sev-snp-attestation-sdk", branch = "main", default-features = false, features = ["legacy", "configfs"] } similar-asserts = "1.5.0" slot = { git = "https://github.com/cartridge-gg/slot", rev = "1298a30" } spinoff = "0.8.0" @@ -195,15 +201,15 @@ criterion = "0.5.1" pprof = { version = "0.13.0", features = [ "criterion", "flamegraph" ] } # alloys -alloy-contract = { version = "1.0", default-features = false } -alloy-network = { version = "1.0", default-features = false } -alloy-primitives = { version = "1.0", default-features = false } -alloy-provider = { version = "1.0", default-features = false } -alloy-rpc-types-eth = { version = "1.0", default-features = false } -alloy-signer = { version = "1.0", default-features = false } -alloy-sol-types = { version = "1.0", default-features = false } -alloy-transport = { version = 
"1.0", default-features = false } -alloy-transport-http = { version = "1.0", default-features = false } +alloy-contract = { version = "1.2", default-features = false } +alloy-network = { version = "1.2", default-features = false } +alloy-primitives = { version = "1.2", default-features = false } +alloy-provider = { version = "1.2", default-features = false } +alloy-rpc-types-eth = { version = "1.2", default-features = false } +alloy-signer = { version = "1.2", default-features = false } +alloy-sol-types = { version = "1.2", default-features = false } +alloy-transport = { version = "1.2", default-features = false } +alloy-transport-http = { version = "1.2", default-features = false } # macro proc-macro2 = "1.0" @@ -213,6 +219,7 @@ syn = { version = "2.0", default-features = false } # tracing/logging log = "0.4.21" tracing = { version = "0.1.38", features = [ "log" ], default-features = false } +tracing-appender = "0.2" tracing-log = "0.1.3" tracing-opentelemetry = "0.31.0" tracing-subscriber = "0.3.16" @@ -224,22 +231,21 @@ opentelemetry-http = "0.30.0" opentelemetry-stackdriver = { version = "0.27.0", features = [ "propagator" ] } # starknet -starknet = "0.17.0-rc.2" -starknet-crypto = "0.7.4" -starknet-types-core = { version = "0.1.8", features = [ "arbitrary", "hash" ] } +starknet = "0.17.0" +starknet-types-core = { version = "=0.2.3", features = [ "arbitrary", "hash" ] } # Some types that we used from cairo-vm implements the `Arbitrary` trait, # only under the `test_utils` feature. 
-cairo-vm = { version = "1.0.2", features = [ "test_utils" ] } +cairo-vm = { version = "2.5.0", features = [ "test_utils" ] } -blockifier = { git = "https://github.com/dojoengine/sequencer", rev = "5d737b9c9", default-features = false } -starknet_api = { git = "https://github.com/dojoengine/sequencer", rev = "5d737b9c9" } +# branch: blockifier/v0.16.0-rc.0 +blockifier = { git = "https://github.com/dojoengine/sequencer", rev = "d2591bb", default-features = false } +starknet_api = { git = "https://github.com/dojoengine/sequencer", rev = "d2591bb" } cainome = { git = "https://github.com/cartridge-gg/cainome", rev = "7d60de1", features = [ "abigen-rs" ] } cainome-cairo-serde = { git = "https://github.com/cartridge-gg/cainome", rev = "7d60de1" } piltover = { git = "https://github.com/kariy/piltover.git", branch = "feat/rpc0.9" } [patch.crates-io] -starknet = { git = "https://github.com/kariy/starknet-rs", rev = "2ef3088" } # NOTE: remove this patch once this PR is merged # # This patch fixes an issue where we're unable to correctly evaluate the accurate size diff --git a/Makefile b/Makefile index 66975460d..b663044ae 100644 --- a/Makefile +++ b/Makefile @@ -168,7 +168,7 @@ endif snos-deps-linux: install-pyenv @echo "Installing Python build dependencies for Linux..." sudo apt-get update - sudo apt-get install -y make build-essential libssl-dev libgmp-dev libbz2-dev libreadline-dev libsqlite3-dev liblzma-dev + sudo apt-get install -y make build-essential libssl-dev libgmp-dev libbz2-dev libreadline-dev libsqlite3-dev liblzma-dev zlib1g-dev @echo "Linux SNOS dependencies installed successfully." @echo "NOTE: You may need to restart your shell or run 'source ~/.bashrc' before using pyenv." 
diff --git a/bin/katana/src/cli/config.rs b/bin/katana/src/cli/config.rs index 137f2bce9..8ed34d03d 100644 --- a/bin/katana/src/cli/config.rs +++ b/bin/katana/src/cli/config.rs @@ -1,8 +1,8 @@ use anyhow::Result; use clap::Args; use katana_chain_spec::rollup::LocalChainConfigDir; +use katana_primitives::cairo::ShortString; use katana_primitives::chain::ChainId; -use starknet::core::utils::parse_cairo_short_string; #[derive(Debug, Args)] #[cfg_attr(test, derive(PartialEq, Eq))] @@ -30,7 +30,7 @@ impl ConfigArgs { // returned by `list` will be of the `ChainId::Id` variant and thus // will display in hex form. But for now, it's fine to assume that because we // only limit valid ASCII string in the `katana init` flow. - let name = parse_cairo_short_string(&chain.id())?; + let name = ShortString::try_from(chain.id())?; println!("{name}"); } } diff --git a/bin/katana/src/cli/db/prune.rs b/bin/katana/src/cli/db/prune.rs index 5133bf323..3cf3269fc 100644 --- a/bin/katana/src/cli/db/prune.rs +++ b/bin/katana/src/cli/db/prune.rs @@ -108,8 +108,8 @@ fn prune_database(db_path: &str, mode: PruneMode) -> Result<()> { if blocks > latest_block { eprintln!( - "Warning: Requested to keep {} blocks, but only {} blocks exist", - blocks, latest_block + "Warning: Requested to keep {blocks} blocks, but only {latest_block} blocks \ + exist" ); return Ok(()); } @@ -123,7 +123,7 @@ fn prune_database(db_path: &str, mode: PruneMode) -> Result<()> { } prune_keep_last_n(&tx, cutoff_block)?; - println!("Pruned historical data for blocks 0 to {}", cutoff_block); + println!("Pruned historical data for blocks 0 to {cutoff_block}"); } } @@ -456,7 +456,7 @@ fn show_confirmation_prompt(stats: &PruningStats, mode: &PruneMode) -> Result { - println!("- Action: Keep only the last {} blocks of historical data", blocks); + println!("- Action: Keep only the last {blocks} blocks of historical data"); } } diff --git a/bin/katana/src/cli/init/deployment.rs b/bin/katana/src/cli/init/deployment.rs index 
e428e11aa..fe1a020a8 100644 --- a/bin/katana/src/cli/init/deployment.rs +++ b/bin/katana/src/cli/init/deployment.rs @@ -3,6 +3,7 @@ use std::str::FromStr; use anyhow::{anyhow, Result}; use cainome::cairo_serde; use katana_primitives::block::{BlockHash, BlockNumber}; +use katana_primitives::cairo::ShortString; use katana_primitives::class::{ CompiledClassHash, ComputeClassHashError, ContractClass, ContractClassCompilationError, ContractClassFromStrError, @@ -17,7 +18,6 @@ use starknet::accounts::{Account, AccountError, ConnectedAccount, SingleOwnerAcc use starknet::contract::ContractFactory; use starknet::core::crypto::compute_hash_on_elements; use starknet::core::types::{BlockId, BlockTag, FlattenedSierraClass, StarknetError}; -use starknet::macros::short_string; use starknet::providers::{Provider, ProviderError}; use starknet::signers::LocalWallet; use thiserror::Error; @@ -126,6 +126,8 @@ pub async fn deploy_settlement_contract( sp.update_text("Deploying contract..."); let salt = Felt::from(rand::random::()); + + #[allow(deprecated)] let factory = ContractFactory::new(class_hash, &account); const INITIAL_STATE_ROOT: Felt = Felt::ZERO; @@ -410,10 +412,10 @@ fn compute_starknet_os_config_hash( fee_token: Felt, ) -> Felt { // A constant representing the StarkNet OS config version. 
- const STARKNET_OS_CONFIG_VERSION: Felt = short_string!("StarknetOsConfig2"); + const STARKNET_OS_CONFIG_VERSION: ShortString = ShortString::from_ascii("StarknetOsConfig2"); compute_hash_on_elements(&[ - STARKNET_OS_CONFIG_VERSION, + STARKNET_OS_CONFIG_VERSION.into(), chain_id, deprecated_fee_token, fee_token, diff --git a/bin/katana/src/cli/init/mod.rs b/bin/katana/src/cli/init/mod.rs index 65a29aed7..f69783f32 100644 --- a/bin/katana/src/cli/init/mod.rs +++ b/bin/katana/src/cli/init/mod.rs @@ -59,20 +59,20 @@ use std::path::PathBuf; use std::str::FromStr; use anyhow::Context; -use clap::builder::NonEmptyStringValueParser; use clap::{Args, Subcommand}; use deployment::DeploymentOutcome; use katana_chain_spec::rollup::{ChainConfigDir, DEFAULT_APPCHAIN_FEE_TOKEN_ADDRESS}; use katana_chain_spec::{rollup, FeeContracts, SettlementLayer}; +use katana_cli::utils::ShortStringValueParser; use katana_genesis::allocation::DevAllocationsGenerator; use katana_genesis::constant::DEFAULT_PREFUNDED_ACCOUNT_BALANCE; use katana_genesis::Genesis; use katana_primitives::block::BlockNumber; +use katana_primitives::cairo::ShortString; use katana_primitives::chain::ChainId; use katana_primitives::{ContractAddress, Felt, U256}; use settlement::SettlementChainProvider; use starknet::accounts::{ExecutionEncoding, SingleOwnerAccount}; -use starknet::core::utils::{cairo_short_string_to_felt, parse_cairo_short_string}; use starknet::providers::Provider; use starknet::signers::SigningKey; use url::Url; @@ -118,9 +118,9 @@ pub struct RollupArgs { /// An empty `Id` is not a allowed, since the chain id must be /// a valid ASCII string. 
#[arg(long)] - #[arg(value_parser = NonEmptyStringValueParser::new())] + #[arg(value_parser = ShortStringValueParser)] #[arg(requires_all = ["settlement_chain", "settlement_account", "settlement_account_private_key"])] - id: Option, + id: Option, #[arg( long = "settlement-chain", @@ -193,8 +193,8 @@ pub struct SovereignArgs { /// An empty `Id` is not a allowed, since the chain id must be /// a valid ASCII string. #[arg(long)] - #[arg(value_parser = NonEmptyStringValueParser::new())] - id: Option, + #[arg(value_parser = ShortStringValueParser)] + id: Option, /// Specify the path of the directory where the configuration files will be stored at. #[arg(long)] @@ -266,7 +266,7 @@ impl RollupArgs { } async fn configure_from_args(&self) -> Option> { - if let Some(id) = self.id.clone() { + if let Some(id) = self.id { // Check if all required settlement args are provided let Some(settlement_chain) = self.settlement_chain.clone() else { return None; // Fall back to prompting @@ -313,12 +313,7 @@ impl RollupArgs { Err(err) => return Some(Err(err)), }; - let chain_id = match cairo_short_string_to_felt(&id) - .with_context(|| format!("invalid chain id: {id}")) - { - Ok(id) => id, - Err(err) => return Some(Err(err)), - }; + let chain_id = Felt::from(id); let deployment_outcome = if let Some(contract) = self.settlement_contract { match deployment::check_program_info(chain_id, contract, &settlement_provider) @@ -360,7 +355,7 @@ impl RollupArgs { deployment_outcome, rpc_url: settlement_provider.url().clone(), account: settlement_account_address, - settlement_id: parse_cairo_short_string(&l1_chain_id).unwrap(), + settlement_id: ShortString::try_from(l1_chain_id).unwrap(), #[cfg(feature = "init-slot")] slot_paymasters: self.slot.paymaster_accounts.clone(), })) @@ -409,7 +404,7 @@ impl SovereignArgs { } fn configure_from_args(&self) -> Option { - self.id.clone().map(|id| SovereignOutcome { + self.id.map(|id| SovereignOutcome { id, #[cfg(feature = "init-slot")] slot_paymasters: 
self.slot.paymaster_accounts.clone(), @@ -420,7 +415,7 @@ impl SovereignArgs { #[derive(Debug)] struct SovereignOutcome { /// The id of the new chain to be initialized. - pub id: String, + pub id: ShortString, #[cfg(feature = "init-slot")] pub slot_paymasters: Option>, @@ -433,10 +428,10 @@ struct PersistentOutcome { pub account: ContractAddress, // the id of the new chain to be initialized. - pub id: String, + pub id: ShortString, // the chain id of the settlement layer. - pub settlement_id: String, + pub settlement_id: ShortString, // the rpc url for the settlement layer. pub rpc_url: Url, @@ -586,7 +581,7 @@ mod tests { let result = Cli::parse_from(["init", "sovereign", "--id", "bruh"]); assert_matches!(result.args.mode, InitMode::Sovereign(config) => { - assert_eq!(config.id, Some("bruh".to_string())); + assert_eq!(config.id, Some(ShortString::from_ascii("bruh"))); }); } diff --git a/bin/katana/src/cli/init/prompt.rs b/bin/katana/src/cli/init/prompt.rs index 6d4f60cc8..7fa64563e 100644 --- a/bin/katana/src/cli/init/prompt.rs +++ b/bin/katana/src/cli/init/prompt.rs @@ -6,10 +6,10 @@ use anyhow::{Context, Result}; use inquire::validator::{ErrorMessage, Validation}; use inquire::{Confirm, CustomType, Select}; use katana_primitives::block::BlockNumber; +use katana_primitives::cairo::ShortString; use katana_primitives::{ContractAddress, Felt}; use starknet::accounts::{ExecutionEncoding, SingleOwnerAccount}; use starknet::core::types::{BlockId, BlockTag}; -use starknet::core::utils::{cairo_short_string_to_felt, parse_cairo_short_string}; use starknet::providers::Provider; use starknet::signers::{LocalWallet, SigningKey}; use tokio::runtime::Handle; @@ -20,12 +20,12 @@ use crate::cli::init::settlement::SettlementChainProvider; use crate::cli::init::slot::{self, PaymasterAccountArgs}; pub async fn prompt_rollup() -> Result { - let chain_id = CustomType::::new("Id") + let chain_id = CustomType::::new("Id") .with_help_message("This will be the id of your rollup chain.") 
// checks that the input is a valid ascii string. .with_parser(&|input| { if !input.is_empty() && input.is_ascii() { - Ok(input.to_string()) + Ok(ShortString::from_str(input).unwrap()) } else { Err(()) } @@ -128,31 +128,32 @@ pub async fn prompt_rollup() -> Result { // The core settlement contract on L1c. // Prompt the user whether to deploy the settlement contract or not. - let deployment_outcome = - if Confirm::new("Deploy settlement contract?").with_default(true).prompt()? { - let chain_id = cairo_short_string_to_felt(&chain_id)?; - deployment::deploy_settlement_contract(account, chain_id).await? - } - // If denied, prompt the user for an already deployed contract. - else { - let address = CustomType::::new("Settlement contract") - .with_parser(contract_exist_parser) - .prompt()?; + let deployment_outcome = if Confirm::new("Deploy settlement contract?") + .with_default(true) + .prompt()? + { + deployment::deploy_settlement_contract(account, chain_id.into()).await? + } + // If denied, prompt the user for an already deployed contract. + else { + let address = CustomType::::new("Settlement contract") + .with_parser(contract_exist_parser) + .prompt()?; - // Check that the settlement contract has been initialized with the correct program - // info. - let chain_id = cairo_short_string_to_felt(&chain_id)?; - deployment::check_program_info(chain_id, address, &settlement_provider).await.context( + // Check that the settlement contract has been initialized with the correct program + // info. + deployment::check_program_info(chain_id.into(), address, &settlement_provider) + .await + .context( "Invalid settlement contract. 
The contract might have been configured incorrectly.", )?; - let block_number = - CustomType::::new("Settlement contract deployment block") - .with_help_message("The block at which the settlement contract was deployed") - .prompt()?; + let block_number = CustomType::::new("Settlement contract deployment block") + .with_help_message("The block at which the settlement contract was deployed") + .prompt()?; - DeploymentOutcome { contract_address: address, block_number } - }; + DeploymentOutcome { contract_address: address, block_number } + }; let slot_paymasters = prompt_slot_paymasters()?; @@ -161,19 +162,19 @@ pub async fn prompt_rollup() -> Result { deployment_outcome, rpc_url: settlement_provider.url().clone(), account: account_address, - settlement_id: parse_cairo_short_string(&l1_chain_id)?, + settlement_id: ShortString::try_from(l1_chain_id)?, #[cfg(feature = "init-slot")] slot_paymasters, }) } pub async fn prompt_sovereign() -> Result { - let chain_id = CustomType::::new("Id") + let chain_id = CustomType::::new("Id") .with_help_message("This will be the id of your sovereign chain.") // checks that the input is a valid ascii string. .with_parser(&|input| { if !input.is_empty() && input.is_ascii() { - Ok(input.to_string()) + Ok(ShortString::from_str(input).unwrap()) } else { Err(()) } @@ -198,7 +199,7 @@ fn prompt_slot_paymasters() -> Result>> { // Prompt for slot paymaster accounts while Confirm::new("Add Slot paymaster account?").with_default(true).prompt()? 
{ - let pubkey_prompt_text = format!("Paymaster #{} public key", paymaster_count); + let pubkey_prompt_text = format!("Paymaster #{paymaster_count} public key"); let public_key = CustomType::::new(&pubkey_prompt_text) .with_formatter(&|input: Felt| format!("{input:#x}")) .prompt()?; @@ -222,7 +223,7 @@ fn prompt_slot_paymasters() -> Result>> { } }; - let salt_prompt_text = format!("Paymaster #{} salt", paymaster_count); + let salt_prompt_text = format!("Paymaster #{paymaster_count} salt"); let salt = CustomType::::new(&salt_prompt_text) .with_formatter(&|input: Felt| format!("{input:#x}")) .with_validator(unique_salt_validator) diff --git a/bin/katana/src/cli/rpc/starknet.rs b/bin/katana/src/cli/rpc/starknet.rs index 9f45fabb7..f088c76e2 100644 --- a/bin/katana/src/cli/rpc/starknet.rs +++ b/bin/katana/src/cli/rpc/starknet.rs @@ -478,7 +478,7 @@ impl StarknetCommands { .enumerate() .map(|(i, s)| { s.trim().parse::().with_context(|| { - format!("Invalid class hash at position {}: '{}'", i, s) + format!("Invalid class hash at position {i}: '{s}'") }) }) .collect::>>()?, @@ -498,7 +498,7 @@ impl StarknetCommands { .enumerate() .map(|(i, s)| { s.trim().parse::().with_context(|| { - format!("Invalid contract address at position {}: '{}'", i, s) + format!("Invalid contract address at position {i}: '{s}'") }) }) .collect::>>()?, @@ -613,7 +613,7 @@ fn parse_event_keys(keys: &[String]) -> Result>> { .map(|s| { s.trim() .parse::() - .with_context(|| format!("invalid felt in key group {}: '{}'", i, s)) + .with_context(|| format!("invalid felt in key group {i}: '{s}'")) }) .collect::>>() }) diff --git a/bin/katana/src/cli/stage/checkpoint.rs b/bin/katana/src/cli/stage/checkpoint.rs index 15022fef1..5439eb908 100644 --- a/bin/katana/src/cli/stage/checkpoint.rs +++ b/bin/katana/src/cli/stage/checkpoint.rs @@ -1,7 +1,7 @@ use anyhow::Result; use clap::{Args, Subcommand}; use katana_db::abstraction::{Database, DbTx, DbTxMut}; -use katana_db::models::stage::StageCheckpoint; +use 
katana_db::models::stage::ExecutionCheckpoint; use katana_db::tables; use katana_primitives::block::BlockNumber; @@ -64,7 +64,7 @@ impl CheckpointArgs { impl GetArgs { fn execute(self) -> Result<()> { let result = db::open_db_ro(&self.path)? - .view(|tx| tx.get::(self.stage_id.clone()))??; + .view(|tx| tx.get::(self.stage_id.clone()))??; match result { Some(checkpoint) => { @@ -82,8 +82,8 @@ impl GetArgs { impl SetArgs { fn execute(self) -> Result<()> { db::open_db_rw(&self.path)?.update(|tx| { - let checkpoint = StageCheckpoint { block: self.block_number }; - tx.put::(self.stage_id.clone(), checkpoint) + let checkpoint = ExecutionCheckpoint { block: self.block_number }; + tx.put::(self.stage_id.clone(), checkpoint) })??; println!("set checkpoint for stage '{}' to block {}", self.stage_id, self.block_number); diff --git a/bin/katana/src/cli/version.rs b/bin/katana/src/cli/version.rs index 5c26d73a4..872ef0bb1 100644 --- a/bin/katana/src/cli/version.rs +++ b/bin/katana/src/cli/version.rs @@ -28,7 +28,7 @@ pub fn generate_long() -> String { writeln!(out, "{}", generate_short()).unwrap(); writeln!(out).unwrap(); writeln!(out, "features: {}", features().join(",")).unwrap(); - write!(out, "built on: {}", VERGEN_BUILD_TIMESTAMP).unwrap(); + write!(out, "built on: {VERGEN_BUILD_TIMESTAMP}").unwrap(); out } diff --git a/bin/katana/tests/fixtures.rs b/bin/katana/tests/fixtures.rs index 232a72d98..733de811e 100644 --- a/bin/katana/tests/fixtures.rs +++ b/bin/katana/tests/fixtures.rs @@ -78,6 +78,13 @@ fn populate_db(db: &TempDb) { classes.insert(hash, ContractClass::Legacy(Default::default())); } + let mut migrated_compiled_classes = BTreeMap::new(); + for _ in 0..10 { + let hash = arbitrary!(ClassHash); + let compiled_class_hash = arbitrary!(ClassHash); + migrated_compiled_classes.insert(hash, compiled_class_hash); + } + let mut nonce_updates = BTreeMap::new(); for _ in 0..10 { nonce_updates.insert(arbitrary!(ContractAddress), arbitrary!(Nonce)); @@ -110,9 +117,15 @@ fn 
populate_db(db: &TempDb) { replaced_classes, deployed_contracts, deprecated_declared_classes, + migrated_compiled_classes, }; - provider.trie_insert_declared_classes(num, &state_updates.declared_classes).unwrap(); + provider + .trie_insert_declared_classes( + num, + state_updates.declared_classes.clone().into_iter().collect(), + ) + .unwrap(); provider.trie_insert_contract_updates(num, &state_updates).unwrap(); let mut block = Block::default(); diff --git a/crates/cartridge/Cargo.toml b/crates/cartridge/Cargo.toml index 78f7cdecd..dabaa7015 100644 --- a/crates/cartridge/Cargo.toml +++ b/crates/cartridge/Cargo.toml @@ -12,15 +12,11 @@ katana-primitives.workspace = true anyhow.workspace = true ark-ec = "0.4" -cainome.workspace = true lazy_static.workspace = true num-bigint.workspace = true -parking_lot.workspace = true reqwest.workspace = true serde.workspace = true serde_json.workspace = true -starknet.workspace = true -starknet-crypto.workspace = true thiserror.workspace = true tracing.workspace = true url.workspace = true diff --git a/crates/cartridge/build.rs b/crates/cartridge/build.rs index 9c9e0e187..4d9a552a0 100644 --- a/crates/cartridge/build.rs +++ b/crates/cartridge/build.rs @@ -56,9 +56,9 @@ fn main() { let struct_name = filename_to_struct_name(&file_name); generated_code.push_str(&format!( - "::katana_contracts::contract!(\n {},\n \ - \"{{CARGO_MANIFEST_DIR}}/controller/account_sdk/artifacts/classes/{}.json\"\n);\n", - struct_name, file_name + "::katana_contracts::contract!(\n {struct_name},\n \ + \"{{CARGO_MANIFEST_DIR}}/controller/account_sdk/artifacts/classes/{file_name}.\ + json\"\n);\n" )); } } diff --git a/crates/cartridge/src/client.rs b/crates/cartridge/src/client.rs index 1e7dcdb4e..ca1b8b474 100644 --- a/crates/cartridge/src/client.rs +++ b/crates/cartridge/src/client.rs @@ -74,7 +74,7 @@ pub struct GetAccountCalldataResponse { #[cfg(test)] mod tests { - use katana_primitives::{address, ContractAddress}; + use katana_primitives::address; use 
super::*; diff --git a/crates/cartridge/src/lib.rs b/crates/cartridge/src/lib.rs index d743e8bb8..7d834d330 100644 --- a/crates/cartridge/src/lib.rs +++ b/crates/cartridge/src/lib.rs @@ -1,3 +1,5 @@ +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + pub mod client; pub mod vrf; diff --git a/crates/cartridge/src/vrf.rs b/crates/cartridge/src/vrf.rs index 365855fc2..004f1992b 100644 --- a/crates/cartridge/src/vrf.rs +++ b/crates/cartridge/src/vrf.rs @@ -21,11 +21,11 @@ use std::str::FromStr; use ark_ec::short_weierstrass::Affine; +use katana_primitives::cairo::ShortString; use katana_primitives::utils::get_contract_address; -use katana_primitives::{ContractAddress, Felt}; +use katana_primitives::{felt, ContractAddress, Felt}; use num_bigint::BigInt; use stark_vrf::{generate_public_key, BaseField, StarkCurve, StarkVRF}; -use starknet::macros::{felt, short_string}; use tracing::trace; // Class hash of the VRF provider contract (fee estimation code commented, since currently Katana @@ -34,7 +34,7 @@ use tracing::trace; // `crates/controller/artifacts/cartridge_vrf_VrfProvider.contract_class.json` pub const CARTRIDGE_VRF_CLASS_HASH: Felt = felt!("0x07007ea60938ff539f1c0772a9e0f39b4314cfea276d2c22c29a8b64f2a87a58"); -pub const CARTRIDGE_VRF_SALT: Felt = short_string!("cartridge_vrf"); +pub const CARTRIDGE_VRF_SALT: ShortString = ShortString::from_ascii("cartridge_vrf"); pub const CARTRIDGE_VRF_DEFAULT_PRIVATE_KEY: Felt = felt!("0x1"); #[derive(Debug, Default, Clone)] @@ -125,7 +125,7 @@ fn compute_vrf_address( public_key_y: Felt, ) -> ContractAddress { get_contract_address( - CARTRIDGE_VRF_SALT, + CARTRIDGE_VRF_SALT.into(), CARTRIDGE_VRF_CLASS_HASH, &[*provider_addrss, public_key_x, public_key_y], Felt::ZERO, diff --git a/crates/chain-spec/src/dev.rs b/crates/chain-spec/src/dev.rs index f764dc4df..6fb1439fa 100644 --- a/crates/chain-spec/src/dev.rs +++ b/crates/chain-spec/src/dev.rs @@ -1,4 +1,5 @@ use std::collections::BTreeMap; +use std::str::FromStr; use 
alloy_primitives::U256; use katana_contracts::contracts; @@ -11,6 +12,7 @@ use katana_genesis::constant::{ }; use katana_genesis::Genesis; use katana_primitives::block::{ExecutableBlock, GasPrices, PartialHeader}; +use katana_primitives::cairo::ShortString; use katana_primitives::chain::ChainId; use katana_primitives::class::ClassHash; use katana_primitives::contract::ContractAddress; @@ -20,7 +22,6 @@ use katana_primitives::utils::split_u256; use katana_primitives::version::StarknetVersion; use katana_primitives::Felt; use lazy_static::lazy_static; -use starknet::core::utils::cairo_short_string_to_felt; use crate::{FeeContracts, SettlementLayer}; @@ -206,13 +207,13 @@ fn add_fee_token( // --- ERC20 metadata - let name = cairo_short_string_to_felt(name).unwrap(); - let symbol = cairo_short_string_to_felt(symbol).unwrap(); + let name = ShortString::from_str(name).expect("valid ERC20 name"); + let symbol = ShortString::from_str(symbol).expect("valid ERC20 symbol"); let decimals = decimals.into(); let (total_supply_low, total_supply_high) = split_u256(total_supply); - storage.insert(ERC20_NAME_STORAGE_SLOT, name); - storage.insert(ERC20_SYMBOL_STORAGE_SLOT, symbol); + storage.insert(ERC20_NAME_STORAGE_SLOT, name.into()); + storage.insert(ERC20_SYMBOL_STORAGE_SLOT, symbol.into()); storage.insert(ERC20_DECIMAL_STORAGE_SLOT, decimals); storage.insert(ERC20_TOTAL_SUPPLY_STORAGE_SLOT, total_supply_low); storage.insert(ERC20_TOTAL_SUPPLY_STORAGE_SLOT + Felt::ONE, total_supply_high); @@ -501,8 +502,8 @@ mod tests { let (total_supply_low, total_supply_high) = split_u256(U256::from_str("0x1a784379d99db42000000").unwrap()); - let name = cairo_short_string_to_felt("Ether").unwrap(); - let symbol = cairo_short_string_to_felt("ETH").unwrap(); + let name = ShortString::from_ascii("Ether"); + let symbol = ShortString::from_ascii("ETH"); let decimals = Felt::from(18); let eth_fee_token_storage = actual_state_updates @@ -511,8 +512,8 @@ mod tests { .get(&DEFAULT_ETH_FEE_TOKEN_ADDRESS) 
.unwrap(); - assert_eq!(eth_fee_token_storage.get(&ERC20_NAME_STORAGE_SLOT), Some(&name)); - assert_eq!(eth_fee_token_storage.get(&ERC20_SYMBOL_STORAGE_SLOT), Some(&symbol)); + assert_eq!(eth_fee_token_storage.get(&ERC20_NAME_STORAGE_SLOT), Some(&name.into())); + assert_eq!(eth_fee_token_storage.get(&ERC20_SYMBOL_STORAGE_SLOT), Some(&symbol.into())); assert_eq!(eth_fee_token_storage.get(&ERC20_DECIMAL_STORAGE_SLOT), Some(&decimals)); assert_eq!( eth_fee_token_storage.get(&ERC20_TOTAL_SUPPLY_STORAGE_SLOT), @@ -525,8 +526,8 @@ mod tests { // check STRK fee token contract storage - let strk_name = cairo_short_string_to_felt("Starknet Token").unwrap(); - let strk_symbol = cairo_short_string_to_felt("STRK").unwrap(); + let strk_name = ShortString::from_ascii("Starknet Token"); + let strk_symbol = ShortString::from_ascii("STRK"); let strk_decimals = Felt::from(18); let strk_fee_token_storage = actual_state_updates @@ -535,8 +536,11 @@ mod tests { .get(&DEFAULT_STRK_FEE_TOKEN_ADDRESS) .unwrap(); - assert_eq!(strk_fee_token_storage.get(&ERC20_NAME_STORAGE_SLOT), Some(&strk_name)); - assert_eq!(strk_fee_token_storage.get(&ERC20_SYMBOL_STORAGE_SLOT), Some(&strk_symbol)); + assert_eq!(strk_fee_token_storage.get(&ERC20_NAME_STORAGE_SLOT), Some(&strk_name.into())); + assert_eq!( + strk_fee_token_storage.get(&ERC20_SYMBOL_STORAGE_SLOT), + Some(&strk_symbol.into()) + ); assert_eq!(strk_fee_token_storage.get(&ERC20_DECIMAL_STORAGE_SLOT), Some(&strk_decimals)); assert_eq!( strk_fee_token_storage.get(&ERC20_TOTAL_SUPPLY_STORAGE_SLOT), diff --git a/crates/chain-spec/src/rollup/utils.rs b/crates/chain-spec/src/rollup/utils.rs index 86f066f07..bc0f20ace 100644 --- a/crates/chain-spec/src/rollup/utils.rs +++ b/crates/chain-spec/src/rollup/utils.rs @@ -5,18 +5,18 @@ use std::sync::Arc; use alloy_primitives::U256; use katana_contracts::contracts; use katana_genesis::allocation::{DevGenesisAccount, GenesisAccountAlloc}; +use katana_primitives::cairo::ShortString; use 
katana_primitives::class::{ClassHash, ContractClass}; use katana_primitives::contract::{ContractAddress, Nonce}; use katana_primitives::transaction::{ DeclareTx, DeclareTxV0, DeclareTxV2, DeclareTxWithClass, DeployAccountTx, DeployAccountTxV1, ExecutableTx, ExecutableTxWithHash, InvokeTx, InvokeTxV1, }; -use katana_primitives::utils::split_u256; use katana_primitives::utils::transaction::compute_deploy_account_v1_tx_hash; +use katana_primitives::utils::{get_contract_address, split_u256}; use katana_primitives::{felt, Felt}; use num_traits::FromPrimitive; -use starknet::core::utils::{get_contract_address, get_selector_from_name}; -use starknet::macros::short_string; +use starknet::core::utils::get_selector_from_name; use starknet::signers::SigningKey; use crate::rollup::ChainSpec; @@ -270,9 +270,9 @@ impl<'c> GenesisTransactionsBuilder<'c> { let master_address = *self.master_address.get().expect("must be initialized first"); - let ctor_args = vec![ - short_string!("Starknet Token"), - short_string!("STRK"), + let ctor_args: Vec = vec![ + ShortString::from_ascii("Starknet Token").into(), + ShortString::from_ascii("STRK").into(), felt!("0x12"), Felt::from_u128(u128::MAX).unwrap(), Felt::from_u128(u128::MAX).unwrap(), diff --git a/crates/cli/Cargo.toml b/crates/cli/Cargo.toml index 525b8e52c..415c8a838 100644 --- a/crates/cli/Cargo.toml +++ b/crates/cli/Cargo.toml @@ -14,6 +14,7 @@ katana-genesis.workspace = true katana-primitives.workspace = true katana-rpc-server.workspace = true katana-slot-controller = { workspace = true, optional = true } +katana-tee = { workspace = true, optional = true } katana-tracing.workspace = true katana-utils.workspace = true serde-utils.workspace = true @@ -41,7 +42,8 @@ cartridge = [ "katana-node/cartridge", "katana-rpc-server/cartridge", ] -default = [ "cartridge", "server" ] -explorer = [ "katana-node/explorer", "katana-utils/explorer" ] -native = [ "katana-node/native" ] -server = [ ] +default = ["cartridge", "server", "tee"] 
+explorer = ["katana-node/explorer", "katana-utils/explorer"] +native = ["katana-node/native"] +server = [] +tee = ["dep:katana-tee", "katana-node/tee", "katana-node/tee-snp", "katana-tee/snp"] diff --git a/crates/cli/src/args.rs b/crates/cli/src/args.rs index 8e11f0f2b..98ef6ad87 100644 --- a/crates/cli/src/args.rs +++ b/crates/cli/src/args.rs @@ -27,6 +27,8 @@ use katana_node::config::rpc::RpcConfig; #[cfg(feature = "server")] use katana_node::config::rpc::{RpcModuleKind, RpcModulesList}; use katana_node::config::sequencing::SequencingConfig; +#[cfg(feature = "tee")] +use katana_node::config::tee::TeeConfig; use katana_node::config::Config; use katana_node::Node; use serde::{Deserialize, Serialize}; @@ -129,13 +131,25 @@ pub struct SequencerNodeArgs { #[cfg(feature = "cartridge")] #[command(flatten)] pub cartridge: CartridgeOptions, + + #[cfg(feature = "tee")] + #[command(flatten)] + pub tee: TeeOptions, } impl SequencerNodeArgs { pub async fn execute(&self) -> Result<()> { - // Initialize logging with tracer - let tracer_config = self.tracer_config(); - katana_tracing::init(self.logging.log_format, tracer_config).await?; + let logging = katana_tracing::LoggingConfig { + stdout_format: self.logging.stdout.stdout_format, + stdout_color: self.logging.stdout.color, + file_enabled: self.logging.file.enabled, + file_format: self.logging.file.file_format, + file_directory: self.logging.file.directory.clone(), + file_max_files: self.logging.file.max_files, + }; + + katana_tracing::init(logging, self.tracer_config()).await?; + self.start_node().await } @@ -204,37 +218,21 @@ impl SequencerNodeArgs { // the messagign config will eventually be removed slowly. 
let messaging = if cs_messaging.is_some() { cs_messaging } else { self.messaging.clone() }; - #[cfg(feature = "cartridge")] - { - let paymaster = self.cartridge_config(); - - Ok(Config { - db, - dev, - rpc, - chain, - metrics, - gateway, - forking, - execution, - messaging, - paymaster, - sequencing, - }) - } - - #[cfg(not(feature = "cartridge"))] Ok(Config { - metrics, db, dev, rpc, chain, - feeder_gateway, + metrics, + gateway, + forking, execution, - sequencing, messaging, - forking, + sequencing, + #[cfg(feature = "cartridge")] + paymaster: self.cartridge_config(), + #[cfg(feature = "tee")] + tee: self.tee_config(), }) } @@ -283,6 +281,14 @@ impl SequencerNodeArgs { modules.add(RpcModuleKind::Cartridge); } + // The TEE rpc must be enabled if a TEE provider is specified. + // We put it here so that even when the individual api are explicitly specified + // (ie `--rpc.api`) we guarantee that the tee rpc is enabled. + #[cfg(feature = "tee")] + if self.tee.tee_provider.is_some() { + modules.add(RpcModuleKind::Tee); + } + let cors_origins = self.server.http_cors_origins.clone(); Ok(RpcConfig { @@ -451,6 +457,11 @@ impl SequencerNodeArgs { } } + #[cfg(feature = "tee")] + fn tee_config(&self) -> Option { + self.tee.tee_provider.map(|provider_type| TeeConfig { provider_type }) + } + /// Parse the node config from the command line arguments and the config file, /// and merge them together prioritizing the command line arguments. 
pub fn with_config_file(mut self) -> Result { @@ -517,6 +528,15 @@ impl SequencerNodeArgs { self.cartridge.merge(config.cartridge.as_ref()); } + #[cfg(feature = "explorer")] + { + if !self.explorer.explorer { + if let Some(explorer) = &config.explorer { + self.explorer.explorer = explorer.explorer; + } + } + } + Ok(self) } @@ -539,7 +559,7 @@ mod test { }; use katana_node::config::rpc::RpcModuleKind; use katana_primitives::chain::ChainId; - use katana_primitives::{address, felt, ContractAddress, Felt}; + use katana_primitives::{address, felt, Felt}; use super::*; @@ -721,6 +741,9 @@ total_accounts = 20 validate_max_steps = 500 invoke_max_steps = 9988 chain_id.Named = "Mainnet" + +[explorer] +explorer = true "#; let path = std::env::temp_dir().join("katana-config.json"); std::fs::write(&path, content).unwrap(); @@ -764,6 +787,9 @@ chain_id.Named = "Mainnet" assert_eq!(config.chain.genesis().gas_prices.eth.get(), 9999); assert_eq!(config.chain.genesis().gas_prices.strk.get(), 8888); assert_eq!(config.chain.id(), ChainId::Id(Felt::from_str("0x123").unwrap())); + + #[cfg(feature = "explorer")] + assert!(config.rpc.explorer); } #[test] diff --git a/crates/cli/src/file.rs b/crates/cli/src/file.rs index 61cec5508..defbb08cb 100644 --- a/crates/cli/src/file.rs +++ b/crates/cli/src/file.rs @@ -27,6 +27,8 @@ pub struct NodeArgsConfig { pub metrics: Option, #[cfg(feature = "cartridge")] pub cartridge: Option, + #[cfg(feature = "explorer")] + pub explorer: Option, } impl NodeArgsConfig { @@ -82,6 +84,15 @@ impl TryFrom for NodeArgsConfig { }; } + #[cfg(feature = "explorer")] + { + node_config.explorer = if args.explorer == ExplorerOptions::default() { + None + } else { + Some(args.explorer) + }; + } + Ok(node_config) } } diff --git a/crates/cli/src/full.rs b/crates/cli/src/full.rs index 89a2fb655..3c57b215e 100644 --- a/crates/cli/src/full.rs +++ b/crates/cli/src/full.rs @@ -54,13 +54,24 @@ pub struct FullNodeArgs { #[cfg(feature = "explorer")] #[command(flatten)] pub 
explorer: ExplorerOptions, + + #[command(flatten)] + pub pruning: PruningOptions, } impl FullNodeArgs { pub async fn execute(&self) -> Result<()> { - // Initialize logging with tracer - let tracer_config = self.tracer_config(); - katana_tracing::init(self.logging.log_format, tracer_config).await?; + let logging = katana_tracing::LoggingConfig { + stdout_format: self.logging.stdout.stdout_format, + stdout_color: self.logging.stdout.color, + file_enabled: self.logging.file.enabled, + file_format: self.logging.file.file_format, + file_directory: self.logging.file.directory.clone(), + file_max_files: self.logging.file.max_files, + }; + + katana_tracing::init(logging, self.tracer_config()).await?; + self.start_node().await } @@ -95,16 +106,30 @@ impl FullNodeArgs { let db = self.db_config(); let rpc = self.rpc_config()?; let metrics = self.metrics_config(); + let pruning = self.pruning_config(); Ok(full::Config { db, rpc, metrics, + pruning, network: self.network, gateway_api_key: self.gateway_api_key.clone(), }) } + fn pruning_config(&self) -> full::PruningConfig { + use crate::options::PruningMode; + + // Translate CLI pruning mode to distance from tip + let distance = match self.pruning.mode { + PruningMode::Archive => None, + PruningMode::Full(n) => Some(n), + }; + + full::PruningConfig { distance } + } + fn db_config(&self) -> DbConfig { DbConfig { dir: Some(self.db_dir.clone()) } } diff --git a/crates/cli/src/options.rs b/crates/cli/src/options.rs index cd730af9e..e36fd0cc0 100644 --- a/crates/cli/src/options.rs +++ b/crates/cli/src/options.rs @@ -10,6 +10,7 @@ #[cfg(feature = "server")] use std::net::IpAddr; use std::num::NonZeroU128; +use std::path::PathBuf; use clap::Args; use katana_genesis::Genesis; @@ -30,7 +31,7 @@ use katana_primitives::block::{BlockHashOrNumber, GasPrice}; use katana_primitives::chain::ChainId; #[cfg(feature = "server")] use katana_rpc_server::cors::HeaderValue; -use katana_tracing::{gcloud, otlp, LogFormat, TracerConfig}; +use 
katana_tracing::{default_log_file_directory, gcloud, otlp, LogColor, LogFormat, TracerConfig}; use serde::{Deserialize, Serialize}; use serde_utils::serialize_opt_as_hex; use url::Url; @@ -41,6 +42,7 @@ use crate::utils::{parse_block_hash_or_number, parse_genesis}; const DEFAULT_DEV_SEED: &str = "0"; const DEFAULT_DEV_ACCOUNTS: u16 = 10; +const DEFAULT_LOG_FILE_MAX_FILES: usize = 7; #[cfg(feature = "server")] #[derive(Debug, Args, Clone, Serialize, Deserialize, PartialEq)] @@ -433,11 +435,59 @@ pub struct ForkingOptions { #[derive(Debug, Args, Clone, Serialize, Deserialize, Default, PartialEq)] #[command(next_help_heading = "Logging options")] pub struct LoggingOptions { - /// Log format to use - #[arg(long = "log.format", value_name = "FORMAT")] + #[command(flatten)] + pub stdout: StdoutLoggingOptions, + + #[command(flatten)] + pub file: FileLoggingOptions, +} + +#[derive(Debug, Args, Clone, Serialize, Deserialize, Default, PartialEq)] +pub struct StdoutLoggingOptions { + #[arg(long = "log.stdout.format", value_name = "FORMAT")] + #[arg(default_value_t = LogFormat::Full)] + pub stdout_format: LogFormat, + + /// Sets whether or not the formatter emits ANSI terminal escape codes for colors and other + /// text formatting + /// + /// Possible values: + /// - always: Colors on + /// - auto: Auto-detect + /// - never: Colors off + #[arg(long = "color", value_name = "COLOR")] + #[arg(default_value_t = LogColor::Always)] + pub color: LogColor, +} + +#[derive(Debug, Args, Clone, Serialize, Deserialize, Default, PartialEq)] +pub struct FileLoggingOptions { + /// Enable writing logs to files. 
+ #[arg(long = "log.file")] + #[serde(default)] + pub enabled: bool, + + #[arg(requires = "enabled")] + #[arg(long = "log.file.format", value_name = "FORMAT")] #[arg(default_value_t = LogFormat::Full)] - pub log_format: LogFormat, + pub file_format: LogFormat, + + /// The path to put log files in + #[arg(requires = "enabled")] + #[arg(long = "log.file.directory", value_name = "PATH")] + #[arg(default_value_os_t = default_log_file_directory())] + #[serde(default = "default_log_file_directory")] + pub directory: PathBuf, + + /// Maximum number of daily log files to keep. + /// + /// If `0` is supplied, no files are deleted (unlimited retention). + #[arg(requires = "enabled")] + #[arg(long = "log.file.max-files", value_name = "COUNT")] + #[arg(default_value_t = DEFAULT_LOG_FILE_MAX_FILES)] + pub max_files: usize, } + #[derive(Debug, Args, Default, Clone, Serialize, Deserialize, PartialEq)] #[command(next_help_heading = "Gas Price Oracle Options")] pub struct GasPriceOracleOptions { @@ -719,3 +769,69 @@ impl TracerOptions { self } } + +#[derive(Debug, Args, Clone, Serialize, Deserialize, PartialEq)] +#[command(next_help_heading = "Pruning options")] +pub struct PruningOptions { + /// State pruning mode + /// + /// Determines how much historical state to retain: + /// - 'archive': Keep all historical state (no pruning, default) + /// - 'full:N': Keep last N blocks of historical state + #[arg(long = "prune.mode", value_name = "MODE", default_value = "archive")] + #[arg(value_parser = parse_pruning_mode)] + pub mode: PruningMode, +} + +impl Default for PruningOptions { + fn default() -> Self { + Self { mode: PruningMode::Archive } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +#[serde(rename_all = "lowercase")] +pub enum PruningMode { + Archive, + Full(u64), +} + +fn parse_pruning_mode(s: &str) -> Result { + match s.to_lowercase().as_str() { + "archive" => Ok(PruningMode::Archive), + s if s.starts_with("full:") => { + let n = + 
s.strip_prefix("full:").and_then(|n| n.parse::().ok()).ok_or_else(|| { + "Invalid full format. Use 'full:N' where N is the number of blocks to keep" + .to_string() + })?; + Ok(PruningMode::Full(n)) + } + _ => Err(format!("Invalid pruning mode '{s}'. Valid modes are: 'archive', 'full:N'")), + } +} + +#[cfg(feature = "tee")] +#[derive(Debug, Args, Clone, Serialize, Deserialize, Default, PartialEq)] +#[command(next_help_heading = "TEE options")] +pub struct TeeOptions { + /// Enable TEE attestation support with AMD SEV-SNP. + /// + /// When enabled, the TEE RPC API becomes available for generating + /// hardware-backed attestation quotes. Requires running in an SEV-SNP VM + /// with /dev/sev-guest available. + #[arg(long = "tee.provider", value_name = "PROVIDER")] + #[serde(default)] + pub tee_provider: Option, +} + +#[cfg(feature = "tee")] +impl TeeOptions { + pub fn merge(&mut self, other: Option<&Self>) { + if let Some(other) = other { + if self.tee_provider.is_none() { + self.tee_provider = other.tee_provider; + } + } + } +} diff --git a/crates/cli/src/utils.rs b/crates/cli/src/utils.rs index 5004bd772..b51db2000 100644 --- a/crates/cli/src/utils.rs +++ b/crates/cli/src/utils.rs @@ -11,6 +11,7 @@ use katana_genesis::constant::{ use katana_genesis::json::GenesisJson; use katana_genesis::Genesis; use katana_primitives::block::{BlockHash, BlockHashOrNumber, BlockNumber}; +use katana_primitives::cairo::ShortString; use katana_primitives::chain::ChainId; use katana_primitives::class::ClassHash; use katana_primitives::contract::ContractAddress; @@ -56,7 +57,7 @@ pub fn print_intro(args: &SequencerNodeArgs, chain: &ChainSpec) { let account_class_hash = accounts.peek().map(|e| e.1.class_hash()); let seed = &args.development.seed; - if args.logging.log_format == LogFormat::Json { + if args.logging.stdout.stdout_format == LogFormat::Json { info!( target: LOG_TARGET, "{}", @@ -137,9 +138,8 @@ PREDEPLOYED CONTRACTS println!( r" | Contract | Universal Deployer -| Address | {} 
-| Class Hash | {:#064x}", - DEFAULT_UDC_ADDRESS, DEFAULT_LEGACY_UDC_CLASS_HASH +| Address | {DEFAULT_UDC_ADDRESS} +| Class Hash | {DEFAULT_LEGACY_UDC_CLASS_HASH:#064x}" ); if let Some(hash) = account_class_hash { @@ -229,6 +229,49 @@ pub fn parse_chain_config_dir(value: &str) -> Result { } } +/// A clap value parser for [`ShortString`] that ensures the string is non-empty. +/// +/// This is the `ShortString` equivalent of clap's `NonEmptyStringValueParser`. +#[derive(Clone, Debug)] +pub struct ShortStringValueParser; + +impl clap::builder::TypedValueParser for ShortStringValueParser { + type Value = ShortString; + + fn parse_ref( + &self, + cmd: &clap::Command, + arg: Option<&clap::Arg>, + value: &std::ffi::OsStr, + ) -> Result { + use core::str::FromStr; + + use clap::error::{ContextKind, ContextValue, ErrorKind}; + + let value = + value.to_str().ok_or_else(|| clap::Error::new(ErrorKind::InvalidUtf8).with_cmd(cmd))?; + + if value.is_empty() { + let mut err = clap::Error::new(ErrorKind::InvalidValue).with_cmd(cmd); + if let Some(arg) = arg { + err.insert(ContextKind::InvalidArg, ContextValue::String(arg.to_string())); + } + err.insert(ContextKind::InvalidValue, ContextValue::String(value.to_string())); + return Err(err); + } + + ShortString::from_str(value).map_err(|e| { + let mut err = clap::Error::new(ErrorKind::InvalidValue).with_cmd(cmd); + if let Some(arg) = arg { + err.insert(ContextKind::InvalidArg, ContextValue::String(arg.to_string())); + } + err.insert(ContextKind::InvalidValue, ContextValue::String(value.to_string())); + err.insert(ContextKind::Custom, ContextValue::String(e.to_string())); + err + }) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/contracts/build.rs b/crates/contracts/build.rs index 12eb90338..5ea42e3fc 100644 --- a/crates/contracts/build.rs +++ b/crates/contracts/build.rs @@ -57,20 +57,19 @@ fn main() { panic!( "Contract compilation build script failed. 
Below are the last 50 lines of `scarb \ - build` output:\n\n{}", - last_n_lines + build` output:\n\n{last_n_lines}" ); } // Create build directory if it doesn't exist if let Err(e) = fs::create_dir_all(build_dir) { - panic!("Failed to create build directory: {}", e); + panic!("Failed to create build directory: {e}"); } // Copy artifacts from target/dev to build directory if target_dir.exists() { if let Err(e) = copy_dir_contents(&target_dir, build_dir) { - panic!("Failed to copy contract artifacts: {}", e); + panic!("Failed to copy contract artifacts: {e}"); } println!("cargo:warning=Contract artifacts copied to build directory"); } else { diff --git a/crates/contracts/macro/src/lib.rs b/crates/contracts/macro/src/lib.rs index 5c03c531e..a311e7b1a 100644 --- a/crates/contracts/macro/src/lib.rs +++ b/crates/contracts/macro/src/lib.rs @@ -111,14 +111,14 @@ fn generate_contract_impl(input: &ContractInput) -> Result { + let genesis_block = chain_spec.block(); + let mut genesis_state_updates = chain_spec.state_updates(); + + // commit the block but compute the trie using volatile storage so that it won't + // overwrite the existing trie this is very hacky and we should find for a + // much elegant solution. 
+ let committed_block = commit_genesis_block( + GenesisTrieWriter, + genesis_block.header.clone(), + Vec::new(), + &[], + &mut genesis_state_updates.state_updates, + )?; + + // check genesis should be the same + if local_hash != committed_block.hash { + return Err(anyhow!( + "Genesis block hash mismatch: expected {:#x}, got {local_hash:#x}", + committed_block.hash + )); + } - // check genesis should be the same - if local_hash != committed_block.hash { - return Err(anyhow!( - "Genesis block hash mismatch: expected {:#x}, got {local_hash:#x}", - committed_block.hash - )); + info!(genesis_hash = %local_hash, "Genesis has already been initialized"); } - info!(genesis_hash = %local_hash, "Genesis has already been initialized"); - } else if !is_forking { - // Initialize the dev genesis block (only for non-forked instances) - let block = chain_spec.block(); - let states = chain_spec.state_updates(); - - let outcome = self.do_mine_block( - &BlockEnv { - number: block.header.number, - timestamp: block.header.timestamp, - l2_gas_prices: block.header.l2_gas_prices, - l1_gas_prices: block.header.l1_gas_prices, - l1_data_gas_prices: block.header.l1_data_gas_prices, - sequencer_address: block.header.sequencer_address, - starknet_version: block.header.starknet_version, - }, - ExecutionOutput { states, ..Default::default() }, - )?; + // No genesis yet and we're NOT forking → initialize + (None, false) => { + let block = chain_spec.block(); + let states = chain_spec.state_updates(); + + let outcome = self.do_mine_block( + &BlockEnv { + number: block.header.number, + timestamp: block.header.timestamp, + l2_gas_prices: block.header.l2_gas_prices, + l1_gas_prices: block.header.l1_gas_prices, + l1_data_gas_prices: block.header.l1_data_gas_prices, + sequencer_address: block.header.sequencer_address, + starknet_version: block.header.starknet_version, + }, + ExecutionOutput { states, ..Default::default() }, + )?; + + info!(genesis_hash = %outcome.block_hash, "Genesis initialized"); + } - 
info!(genesis_hash = %outcome.block_hash, "Genesis initialized"); + // Forking mode → NEVER touch genesis + (_, true) => { + info!("Forking mode enabled — skipping dev genesis initialization"); + } } - // For forked instances, genesis is not created (fork already has its state) Ok(()) } @@ -522,9 +527,24 @@ impl<'a, P: TrieWriter> UncommittedBlock<'a, P> { // state_commitment = hPos("STARKNET_STATE_V0", contract_trie_root, class_trie_root) fn compute_new_state_root(&self) -> Felt { - self.provider - .compute_state_root(self.header.number, self.state_updates) - .expect("failed to compute state root") + let class_trie_root = self + .provider + .trie_insert_declared_classes( + self.header.number, + self.state_updates.declared_classes.clone().into_iter().collect(), + ) + .expect("failed to update class trie"); + + let contract_trie_root = self + .provider + .trie_insert_contract_updates(self.header.number, self.state_updates) + .expect("failed to update contract trie"); + + hash::Poseidon::hash_array(&[ + ShortString::from_ascii("STARKNET_STATE_V0").into(), + contract_trie_root, + class_trie_root, + ]) } } @@ -538,8 +558,7 @@ fn store_block( // Validate that all declared classes have their corresponding class artifacts if let Err(missing) = states.validate_classes() { return Err(BlockProductionError::InconsistentState(format!( - "missing class artifacts for declared classes: {:#?}", - missing, + "missing class artifacts for declared classes: {missing:#?}" ))); } @@ -643,7 +662,18 @@ impl TrieWriter for GenesisTrieWriter { contract_leafs .into_iter() .map(|(address, leaf)| { - let class_hash = leaf.class_hash.unwrap(); + let class_hash = if let Some(class_hash) = leaf.class_hash { + class_hash + } else { + // TODO: there's must be a better way to handle this + assert!( + address == address!("0x1") || address == address!("0x2"), + "Only special contracts may have unspecified class hash." 
+ ); + + ClassHash::ZERO + }; + let nonce = leaf.nonce.unwrap_or_default(); let storage_root = leaf.storage_root.unwrap_or_default(); let leaf_hash = compute_contract_state_hash(&class_hash, &storage_root, &nonce); @@ -663,12 +693,12 @@ impl TrieWriter for GenesisTrieWriter { fn trie_insert_declared_classes( &self, block_number: BlockNumber, - updates: &BTreeMap, + classes: Vec<(ClassHash, CompiledClassHash)>, ) -> katana_provider::ProviderResult { let mut trie = ClassesTrie::new(HashMapDb::default()); - for (class_hash, compiled_hash) in updates { - trie.insert(*class_hash, *compiled_hash); + for (class_hash, compiled_hash) in classes { + trie.insert(class_hash, compiled_hash); } trie.commit(block_number); diff --git a/crates/core/src/constants.rs b/crates/core/src/constants.rs index 5044bc6d0..a02381dfd 100644 --- a/crates/core/src/constants.rs +++ b/crates/core/src/constants.rs @@ -1,6 +1,6 @@ use katana_primitives::contract::ContractAddress; +use katana_primitives::felt; use lazy_static::lazy_static; -use starknet::macros::felt; lazy_static! 
{ diff --git a/crates/executor/Cargo.toml b/crates/executor/Cargo.toml index 789481b8f..3864d967e 100644 --- a/crates/executor/Cargo.toml +++ b/crates/executor/Cargo.toml @@ -14,13 +14,12 @@ katana-provider.workspace = true blockifier = { workspace = true, features = [ "testing" ] } num-traits.workspace = true quick_cache = "0.6.10" -starknet.workspace = true thiserror.workspace = true tracing.workspace = true # cairo-native cairo-lang-starknet-classes = { workspace = true, optional = true } -cairo-native = { version = "0.4.1", optional = true } +cairo-native = { version = "0.6.2", optional = true } cairo-vm.workspace = true parking_lot.workspace = true rayon = { workspace = true, optional = true } @@ -34,6 +33,7 @@ katana-provider = { workspace = true, features = [ "test-utils" ] } katana-rpc-types.workspace = true katana-utils.workspace = true +starknet.workspace = true alloy-primitives.workspace = true anyhow.workspace = true assert_matches.workspace = true diff --git a/crates/executor/benches/utils.rs b/crates/executor/benches/utils.rs index c47cd5fc2..181eaa35d 100644 --- a/crates/executor/benches/utils.rs +++ b/crates/executor/benches/utils.rs @@ -2,8 +2,8 @@ use katana_genesis::constant::DEFAULT_ETH_FEE_TOKEN_ADDRESS; use katana_primitives::block::GasPrices; use katana_primitives::env::BlockEnv; use katana_primitives::transaction::{ExecutableTxWithHash, InvokeTx, InvokeTxV1}; -use katana_primitives::Felt; -use starknet::macros::{felt, selector}; +use katana_primitives::{felt, Felt}; +use starknet::macros::selector; pub fn tx() -> ExecutableTxWithHash { let invoke = InvokeTx::V1(InvokeTxV1 { diff --git a/crates/executor/src/abstraction/executor.rs b/crates/executor/src/abstraction/executor.rs index 8ea0af9b1..16136711f 100644 --- a/crates/executor/src/abstraction/executor.rs +++ b/crates/executor/src/abstraction/executor.rs @@ -1,4 +1,5 @@ use katana_primitives::block::ExecutableBlock; +use katana_primitives::contract::{ContractAddress, StorageKey, 
StorageValue}; use katana_primitives::env::{BlockEnv, VersionedConstantsOverrides}; use katana_primitives::transaction::{ExecutableTxWithHash, TxWithHash}; use katana_provider::api::state::StateProvider; @@ -50,4 +51,17 @@ pub trait BlockExecutor<'a>: Send + Sync + core::fmt::Debug { /// Returns the current block environment of the executor. fn block_env(&self) -> BlockEnv; + + // TEMP: This is primarily for `dev_setStorageAt` dev endpoint. To make sure the updated storage + // value is reflected in the pending state. This functionality should prolly be moved to the + // pending state level instead of the executor. + // + /// Sets the storage value for the given contract address and key. + /// This is used for dev purposes to manipulate state directly. + fn set_storage_at( + &self, + address: ContractAddress, + key: StorageKey, + value: StorageValue, + ) -> ExecutorResult<()>; } diff --git a/crates/executor/src/implementation/blockifier/cache.rs b/crates/executor/src/implementation/blockifier/cache.rs index b99a5351d..fb36d14ed 100644 --- a/crates/executor/src/implementation/blockifier/cache.rs +++ b/crates/executor/src/implementation/blockifier/cache.rs @@ -277,7 +277,7 @@ impl ClassCache { let _span = span.enter(); let executor = - AotContractExecutor::new(&program, &entry_points, version.into(), OptLevel::Default) + AotContractExecutor::new(&program, &entry_points, version.into(), OptLevel::Default, None) .inspect_err(|error| tracing::error!(target: "class_cache", %error, "Failed to compile native class")) .unwrap(); diff --git a/crates/executor/src/implementation/blockifier/call.rs b/crates/executor/src/implementation/blockifier/call.rs index 4f860bbb0..eb0519e32 100644 --- a/crates/executor/src/implementation/blockifier/call.rs +++ b/crates/executor/src/implementation/blockifier/call.rs @@ -2,7 +2,7 @@ use std::str::FromStr; use std::sync::Arc; use blockifier::blockifier_versioned_constants::VersionedConstants; -use blockifier::bouncer::n_steps_to_sierra_gas; 
+use blockifier::bouncer::n_steps_to_gas; use blockifier::context::{BlockContext, TransactionContext}; use blockifier::execution::call_info::CallInfo; use blockifier::execution::entry_point::{ @@ -138,9 +138,7 @@ pub fn get_call_sierra_gas_consumed( // steps to sierra gas. // // https://github.com/dojoengine/sequencer/blob/5d737b9c90a14bdf4483d759d1a1d4ce64aa9fd2/crates/blockifier/src/execution/entry_point_execution.rs#L475-L479 - TrackedResource::CairoSteps => { - n_steps_to_sierra_gas(info.resources.n_steps, versioned_constant).0 - } + TrackedResource::CairoSteps => n_steps_to_gas(info.resources.n_steps, versioned_constant).0, TrackedResource::SierraGas => info.execution.gas_consumed, } @@ -156,7 +154,7 @@ mod tests { use blockifier::state::cached_state::{self}; use katana_primitives::class::ContractClass; use katana_primitives::execution::FunctionCall; - use katana_primitives::{address, felt, ContractAddress}; + use katana_primitives::{address, felt}; use katana_provider::api::contract::ContractClassWriter; use katana_provider::api::state::{StateFactoryProvider, StateWriter}; use katana_provider::{test_utils, ProviderFactory}; diff --git a/crates/executor/src/implementation/blockifier/error.rs b/crates/executor/src/implementation/blockifier/error.rs index 688e3f182..bd870f73d 100644 --- a/crates/executor/src/implementation/blockifier/error.rs +++ b/crates/executor/src/implementation/blockifier/error.rs @@ -46,6 +46,12 @@ impl From for ExecutionError { } } +impl From> for ExecutionError { + fn from(error: Box) -> Self { + Self::from(*error) + } +} + impl From for ExecutionError { fn from(error: PreExecutionError) -> Self { match error { @@ -77,6 +83,12 @@ impl From for ExecutionError { } } +impl From> for ExecutionError { + fn from(error: Box) -> Self { + Self::from(*error) + } +} + impl From for ExecutionError { fn from(error: TransactionFeeError) -> Self { match error { @@ -92,6 +104,12 @@ impl From for ExecutionError { } } +impl From> for ExecutionError { + fn 
from(error: Box) -> Self { + Self::from(*error) + } +} + impl From for ExecutionError { fn from(error: StateError) -> Self { match error { diff --git a/crates/executor/src/implementation/blockifier/mod.rs b/crates/executor/src/implementation/blockifier/mod.rs index f56dc5999..afc12e315 100644 --- a/crates/executor/src/implementation/blockifier/mod.rs +++ b/crates/executor/src/implementation/blockifier/mod.rs @@ -3,7 +3,7 @@ use std::sync::Arc; // Re-export the blockifier crate. pub use blockifier; use blockifier::blockifier_versioned_constants::VersionedConstants; -use blockifier::bouncer::{n_steps_to_sierra_gas, Bouncer, BouncerConfig, BouncerWeights}; +use blockifier::bouncer::{n_steps_to_gas, Bouncer, BouncerConfig, BouncerWeights}; pub mod cache; pub mod call; @@ -144,9 +144,9 @@ impl<'a> StarknetVMProcessor<'a> { // // To learn more about the L2 gas, refer to . block_max_capacity.sierra_gas = - n_steps_to_sierra_gas(limits.cairo_steps as usize, block_context.versioned_constants()); + n_steps_to_gas(limits.cairo_steps as usize, block_context.versioned_constants()); - let bouncer = Bouncer::new(BouncerConfig { block_max_capacity }); + let bouncer = Bouncer::new(BouncerConfig { block_max_capacity, ..Default::default() }); Self { cfg_env, @@ -258,9 +258,10 @@ impl<'a> BlockExecutor<'a> for StarknetVMProcessor<'a> { Ok(exec_result) => { match &exec_result { ExecutionResult::Success { receipt, trace } => { - self.stats.l1_gas_used += receipt.resources_used().gas.l1_gas as u128; + self.stats.l1_gas_used += + receipt.resources_used().total_gas_consumed.l1_gas as u128; self.stats.cairo_steps_used += - receipt.resources_used().computation_resources.n_steps as u128; + receipt.resources_used().vm_resources.n_steps as u128; if let Some(reason) = receipt.revert_reason() { info!(target: LOG_TARGET, hash = format!("{hash:#x}"), %reason, "Transaction reverted."); @@ -291,7 +292,7 @@ impl<'a> BlockExecutor<'a> for StarknetVMProcessor<'a> { } fn take_execution_output(&mut self) 
-> ExecutorResult { - let states = utils::state_update_from_cached_state(&self.state); + let states = utils::state_update_from_cached_state(&self.state, true); let transactions = std::mem::take(&mut self.transactions); let stats = std::mem::take(&mut self.stats); Ok(ExecutionOutput { stats, states, transactions }) @@ -343,4 +344,23 @@ impl<'a> BlockExecutor<'a> for StarknetVMProcessor<'a> { sequencer_address: utils::to_address(self.block_context.block_info().sequencer_address), } } + + fn set_storage_at( + &self, + address: katana_primitives::contract::ContractAddress, + key: katana_primitives::contract::StorageKey, + value: katana_primitives::contract::StorageValue, + ) -> crate::ExecutorResult<()> { + use blockifier::state::state_api::State; + + let blk_address = utils::to_blk_address(address); + let storage_key = starknet_api::state::StorageKey(key.try_into().unwrap()); + + self.state + .inner + .lock() + .cached_state + .set_storage_at(blk_address, storage_key, value) + .map_err(|e| crate::ExecutorError::Other(e.to_string().into())) + } } diff --git a/crates/executor/src/implementation/blockifier/utils.rs b/crates/executor/src/implementation/blockifier/utils.rs index 0f3152273..11b0fbda0 100644 --- a/crates/executor/src/implementation/blockifier/utils.rs +++ b/crates/executor/src/implementation/blockifier/utils.rs @@ -11,7 +11,7 @@ use blockifier::execution::contract_class::{ use blockifier::fee::fee_utils::get_fee_by_gas_vector; use blockifier::state::cached_state::{self, TransactionalState}; use blockifier::state::state_api::{StateReader, UpdatableState}; -use blockifier::state::stateful_compression::{allocate_aliases_in_storage, compress}; +use blockifier::state::stateful_compression::allocate_aliases_in_storage; use blockifier::transaction::account_transaction::{ AccountTransaction, ExecutionFlags as BlockifierExecutionFlags, }; @@ -20,6 +20,7 @@ use blockifier::transaction::transaction_execution::Transaction; use 
blockifier::transaction::transactions::ExecutableTransaction; use cairo_vm::types::errors::program_errors::ProgramError; use katana_chain_spec::ChainSpec; +use katana_primitives::cairo::ShortString; use katana_primitives::chain::NamedChainId; use katana_primitives::env::{BlockEnv, VersionedConstantsOverrides}; use katana_primitives::fee::{FeeInfo, PriceUnit, ResourceBoundsMapping}; @@ -30,7 +31,6 @@ use katana_primitives::transaction::{ use katana_primitives::{class, fee}; use katana_provider::api::contract::ContractClassProvider; use num_traits::Zero; -use starknet::core::utils::parse_cairo_short_string; use starknet_api::block::{ BlockInfo, BlockNumber, BlockTimestamp, FeeType, GasPriceVector, GasPrices, NonzeroGasPrice, StarknetVersion, @@ -56,6 +56,8 @@ use starknet_api::transaction::{ TransactionVersion, }; +const ALIAS_CONTRACT_ADDRESS: &str = "0x2"; + use super::state::CachedState; use crate::abstraction::ExecutionFlags; use crate::utils::build_receipt; @@ -139,6 +141,7 @@ pub fn transact( &tx_state, &tx_state_changes_keys, &info.summarize(versioned_constants), + &info.summarize_builtins(), &info.receipt.resources, versioned_constants, )?; @@ -203,7 +206,7 @@ pub fn to_executor_tx(mut tx: ExecutableTxWithHash, mut flags: ExecutionFlags) - tx: ApiInvokeTransaction::V0(starknet_api::transaction::InvokeTransactionV0 { entry_point_selector: EntryPointSelector(tx.entry_point_selector), contract_address: to_blk_address(tx.contract_address), - signature: TransactionSignature(signature), + signature: TransactionSignature(signature.into()), calldata: Calldata(Arc::new(calldata)), max_fee: Fee(tx.max_fee), }), @@ -225,7 +228,7 @@ pub fn to_executor_tx(mut tx: ExecutableTxWithHash, mut flags: ExecutionFlags) - max_fee: Fee(tx.max_fee), nonce: Nonce(tx.nonce), sender_address: to_blk_address(tx.sender_address), - signature: TransactionSignature(signature), + signature: TransactionSignature(signature.into()), calldata: Calldata(Arc::new(calldata)), }), tx_hash: 
TransactionHash(hash), @@ -251,7 +254,7 @@ pub fn to_executor_tx(mut tx: ExecutableTxWithHash, mut flags: ExecutionFlags) - tip: Tip(tx.tip), nonce: Nonce(tx.nonce), sender_address: to_blk_address(tx.sender_address), - signature: TransactionSignature(signature), + signature: TransactionSignature(signature.into()), calldata: Calldata(Arc::new(calldata)), paymaster_data: PaymasterData(paymaster_data), account_deployment_data: AccountDeploymentData(account_deploy_data), @@ -281,7 +284,7 @@ pub fn to_executor_tx(mut tx: ExecutableTxWithHash, mut flags: ExecutionFlags) - tx: ApiDeployAccountTransaction::V1(DeployAccountTransactionV1 { max_fee: Fee(tx.max_fee), nonce: Nonce(tx.nonce), - signature: TransactionSignature(signature), + signature: TransactionSignature(signature.into()), class_hash: ClassHash(tx.class_hash), constructor_calldata: Calldata(Arc::new(calldata)), contract_address_salt: salt, @@ -309,7 +312,7 @@ pub fn to_executor_tx(mut tx: ExecutableTxWithHash, mut flags: ExecutionFlags) - tx: ApiDeployAccountTransaction::V3(DeployAccountTransactionV3 { tip: Tip(tx.tip), nonce: Nonce(tx.nonce), - signature: TransactionSignature(signature), + signature: TransactionSignature(signature.into()), class_hash: ClassHash(tx.class_hash), constructor_calldata: Calldata(Arc::new(calldata)), contract_address_salt: salt, @@ -336,7 +339,7 @@ pub fn to_executor_tx(mut tx: ExecutableTxWithHash, mut flags: ExecutionFlags) - max_fee: Fee(tx.max_fee), nonce: Nonce::default(), sender_address: to_blk_address(tx.sender_address), - signature: TransactionSignature(tx.signature), + signature: TransactionSignature(tx.signature.into()), class_hash: ClassHash(tx.class_hash), }), @@ -344,7 +347,7 @@ pub fn to_executor_tx(mut tx: ExecutableTxWithHash, mut flags: ExecutionFlags) - max_fee: Fee(tx.max_fee), nonce: Nonce(tx.nonce), sender_address: to_blk_address(tx.sender_address), - signature: TransactionSignature(tx.signature), + signature: TransactionSignature(tx.signature.into()), 
class_hash: ClassHash(tx.class_hash), }), @@ -355,7 +358,7 @@ pub fn to_executor_tx(mut tx: ExecutableTxWithHash, mut flags: ExecutionFlags) - max_fee: Fee(tx.max_fee), nonce: Nonce(tx.nonce), sender_address: to_blk_address(tx.sender_address), - signature: TransactionSignature(signature), + signature: TransactionSignature(signature.into()), class_hash: ClassHash(tx.class_hash), compiled_class_hash: CompiledClassHash(tx.compiled_class_hash), }) @@ -374,7 +377,7 @@ pub fn to_executor_tx(mut tx: ExecutableTxWithHash, mut flags: ExecutionFlags) - tip: Tip(tx.tip), nonce: Nonce(tx.nonce), sender_address: to_blk_address(tx.sender_address), - signature: TransactionSignature(signature), + signature: TransactionSignature(signature.into()), class_hash: ClassHash(tx.class_hash), account_deployment_data: AccountDeploymentData(account_deploy_data), compiled_class_hash: CompiledClassHash(tx.compiled_class_hash), @@ -479,7 +482,8 @@ pub fn block_context_from_envs( use_kzg_da: false, }; - let chain_info = ChainInfo { fee_token_addresses, chain_id: to_blk_chain_id(chain_spec.id()) }; + let chain_info = + ChainInfo { fee_token_addresses, chain_id: to_blk_chain_id(chain_spec.id()), is_l3: false }; // IMPORTANT: // @@ -497,19 +501,33 @@ pub fn block_context_from_envs( BlockContext::new(block_info, chain_info, versioned_constants, BouncerConfig::max()) } -pub(super) fn state_update_from_cached_state(state: &CachedState<'_>) -> StateUpdatesWithClasses { - let alias_contract_address = contract_address!("0x2"); +pub(super) fn state_update_from_cached_state( + state: &CachedState<'_>, + stateful_compression: bool, +) -> StateUpdatesWithClasses { + let state_diff = if stateful_compression { + let mut state_lock = state.inner.lock(); + + let alias_contract_address = contract_address!(ALIAS_CONTRACT_ADDRESS); + allocate_aliases_in_storage(&mut state_lock.cached_state, alias_contract_address) + .expect("failed to allocated aliases"); - allocate_aliases_in_storage(&mut 
state.inner.lock().cached_state, alias_contract_address) - .unwrap(); + #[cfg(debug_assertions)] + { + use blockifier::state::stateful_compression::compress; - let state_diff = state.inner.lock().cached_state.to_state_diff().unwrap().state_maps; - let state_diff = - compress(&state_diff, &state.inner.lock().cached_state, alias_contract_address); + let state_diff = state_lock.cached_state.to_state_diff().unwrap().state_maps; + let compressed_state_diff = + compress(&state_diff, &state_lock.cached_state, alias_contract_address); - assert!(state_diff.is_ok(), "failed to compress state diff"); + debug_assert!(compressed_state_diff.is_ok(), "failed to compress state diff"); + } + + state_lock.cached_state.to_state_diff().unwrap().state_maps + } else { + state.inner.lock().cached_state.to_state_diff().unwrap().state_maps + }; - let state_diff = state.inner.lock().cached_state.to_state_diff().unwrap().state_maps; let mut declared_contract_classes: BTreeMap< katana_primitives::class::ClassHash, katana_primitives::class::ContractClass, @@ -574,6 +592,7 @@ pub(super) fn state_update_from_cached_state(state: &CachedState<'_>) -> StateUp deployed_contracts, deprecated_declared_classes, replaced_classes: BTreeMap::default(), + migrated_compiled_classes: BTreeMap::default(), }, } } @@ -636,8 +655,8 @@ pub fn to_blk_chain_id(chain_id: katana_primitives::chain::ChainId) -> ChainId { katana_primitives::chain::ChainId::Named(NamedChainId::Sepolia) => ChainId::Sepolia, katana_primitives::chain::ChainId::Named(named) => ChainId::Other(named.to_string()), katana_primitives::chain::ChainId::Id(id) => { - let id = parse_cairo_short_string(&id).expect("valid cairo string"); - ChainId::Other(id) + let id = ShortString::try_from(id).expect("valid cairo string"); + ChainId::Other(id.to_string()) } } } diff --git a/crates/executor/src/implementation/noop.rs b/crates/executor/src/implementation/noop.rs index 013a12e43..59ba6f874 100644 --- a/crates/executor/src/implementation/noop.rs +++ 
b/crates/executor/src/implementation/noop.rs @@ -90,6 +90,15 @@ impl<'a> BlockExecutor<'a> for NoopExecutor { fn block_env(&self) -> BlockEnv { self.block_env.clone() } + + fn set_storage_at( + &self, + _address: ContractAddress, + _key: StorageKey, + _value: StorageValue, + ) -> ExecutorResult<()> { + Ok(()) + } } #[derive(Debug)] diff --git a/crates/executor/src/utils.rs b/crates/executor/src/utils.rs index 3ca19eadb..c990c6c9a 100644 --- a/crates/executor/src/utils.rs +++ b/crates/executor/src/utils.rs @@ -13,15 +13,17 @@ pub(crate) const LOG_TARGET: &str = "executor"; pub fn log_resources(resources: &TransactionResources) { let mut mapped_strings = Vec::new(); - for (builtin, count) in &resources.computation.vm_resources.builtin_instance_counter { + for (builtin, count) in &resources.computation.tx_vm_resources.builtin_instance_counter { mapped_strings.push(format!("{builtin}: {count}")); } // Sort the strings alphabetically mapped_strings.sort(); - mapped_strings.insert(0, format!("steps: {}", resources.computation.vm_resources.n_steps)); - mapped_strings - .insert(1, format!("memory holes: {}", resources.computation.vm_resources.n_memory_holes)); + mapped_strings.insert(0, format!("steps: {}", resources.computation.tx_vm_resources.n_steps)); + mapped_strings.insert( + 1, + format!("memory holes: {}", resources.computation.tx_vm_resources.n_memory_holes), + ); trace!(target: LOG_TARGET, usage = mapped_strings.join(" | "), "Transaction resource usage."); } @@ -74,7 +76,7 @@ pub(crate) fn build_receipt( } fn get_receipt_resources(receipt: &TransactionReceipt) -> receipt::ExecutionResources { - let computation_resources = receipt.resources.computation.vm_resources.clone(); + let computation_resources = receipt.resources.computation.tx_vm_resources.clone(); let gas = GasUsed { l2_gas: receipt.gas.l2_gas.0, @@ -87,7 +89,11 @@ fn get_receipt_resources(receipt: &TransactionReceipt) -> receipt::ExecutionReso l1_data_gas: receipt.da_gas.l1_data_gas.0, }; - 
receipt::ExecutionResources { da_resources, computation_resources, gas } + receipt::ExecutionResources { + data_availability: da_resources, + vm_resources: computation_resources, + total_gas_consumed: gas, + } } fn events_from_exec_info(info: &TransactionExecutionInfo) -> Vec { diff --git a/crates/executor/tests/executor.rs b/crates/executor/tests/executor.rs index 690911e38..cc735cd2b 100644 --- a/crates/executor/tests/executor.rs +++ b/crates/executor/tests/executor.rs @@ -9,7 +9,6 @@ use katana_genesis::constant::{ DEFAULT_ETH_FEE_TOKEN_ADDRESS, DEFAULT_PREFUNDED_ACCOUNT_BALANCE, DEFAULT_UDC_ADDRESS, }; use katana_primitives::block::ExecutableBlock; -use katana_primitives::contract::ContractAddress; use katana_primitives::transaction::TxWithHash; use katana_primitives::{address, Felt}; use katana_provider::api::contract::ContractClassProviderExt; @@ -273,10 +272,10 @@ fn test_executor_with_valid_blocks_impl( .map(|(tx, res)| { if let Some(receipt) = res.receipt() { let resources = receipt.resources_used(); - actual_total_gas += resources.gas.l1_gas as u128; + actual_total_gas += resources.total_gas_consumed.l1_gas as u128; } if let Some(rec) = res.receipt() { - actual_total_steps += rec.resources_used().computation_resources.n_steps as u128; + actual_total_steps += rec.resources_used().vm_resources.n_steps as u128; } tx.clone() }) @@ -308,7 +307,12 @@ fn test_executor_with_valid_blocks_impl( // TODO: asserts the storage updates let actual_storage_updates = states.state_updates.storage_updates; - assert_eq!(actual_storage_updates.len(), 3, "only 3 contracts whose storage should be updated"); + assert_eq!( + actual_storage_updates.len(), + 4, + "only 4 ( 3 normal contract + 1 for special alias contract '0x2') contracts whose storage \ + should be updated" + ); assert!( actual_storage_updates.contains_key(&DEFAULT_ETH_FEE_TOKEN_ADDRESS), "fee token storage must get updated" @@ -321,6 +325,10 @@ fn test_executor_with_valid_blocks_impl( 
actual_storage_updates.contains_key(&new_acc), "newly deployed account storage must get updated" ); + assert!( + actual_storage_updates.contains_key(&address!("0x2")), + "alias contract must be allocated" + ); } use fixtures::factory; diff --git a/crates/executor/tests/fixtures/mod.rs b/crates/executor/tests/fixtures/mod.rs index 80d9ac34a..dd83b6e3f 100644 --- a/crates/executor/tests/fixtures/mod.rs +++ b/crates/executor/tests/fixtures/mod.rs @@ -21,11 +21,10 @@ use katana_primitives::transaction::{ }; use katana_primitives::utils::class::{parse_compiled_class, parse_sierra_class}; use katana_primitives::version::CURRENT_STARKNET_VERSION; -use katana_primitives::{address, Felt}; +use katana_primitives::{address, felt, Felt}; use katana_provider::api::block::BlockWriter; use katana_provider::api::state::{StateFactoryProvider, StateProvider}; use katana_provider::{DbProviderFactory, MutableProvider, ProviderFactory}; -use starknet::macros::felt; // TODO: remove support for legacy contract declaration #[allow(unused)] diff --git a/crates/executor/tests/fixtures/transaction.rs b/crates/executor/tests/fixtures/transaction.rs index 0cffee41e..0610156cb 100644 --- a/crates/executor/tests/fixtures/transaction.rs +++ b/crates/executor/tests/fixtures/transaction.rs @@ -7,12 +7,12 @@ use katana_primitives::da::DataAvailabilityMode; use katana_primitives::fee::{AllResourceBoundsMapping, ResourceBounds, ResourceBoundsMapping}; use katana_primitives::transaction::ExecutableTxWithHash; use katana_primitives::utils::transaction::compute_invoke_v3_tx_hash; -use katana_primitives::Felt; +use katana_primitives::{felt, Felt}; use katana_rpc_types::broadcasted::BroadcastedInvokeTx; use katana_rpc_types::{BroadcastedTx, BroadcastedTxWithChainId}; use starknet::accounts::{Account, ExecutionEncoder, ExecutionEncoding, SingleOwnerAccount}; use starknet::core::types::{BlockId, BlockTag, Call}; -use starknet::macros::{felt, selector}; +use starknet::macros::selector; use 
starknet::providers::jsonrpc::HttpTransport; use starknet::providers::{JsonRpcClient, Url}; use starknet::signers::{LocalWallet, Signer, SigningKey}; diff --git a/crates/explorer/build.rs b/crates/explorer/build.rs index f1788549d..47cfbfb6f 100644 --- a/crates/explorer/build.rs +++ b/crates/explorer/build.rs @@ -84,7 +84,7 @@ fn build_ui_assets(ui_dir: &Path) { return; } Err(e) => { - eprintln!("Warning: Failed to run bun install: {}", e); + eprintln!("Warning: Failed to run bun install: {e}"); return; } } @@ -105,7 +105,7 @@ fn build_ui_assets(ui_dir: &Path) { eprintln!("Warning: Failed to build UI in {}", ui_dir.display()); } Err(e) => { - eprintln!("Warning: Failed to run bun build: {}", e); + eprintln!("Warning: Failed to run bun build: {e}"); } } } diff --git a/crates/explorer/src/lib.rs b/crates/explorer/src/lib.rs index 3b975f85b..4e9eab560 100644 --- a/crates/explorer/src/lib.rs +++ b/crates/explorer/src/lib.rs @@ -564,7 +564,7 @@ impl ExplorerService { }; if let Some(asset) = EmbeddedAssets::get(asset_path) { - let content_type = Self::get_content_type(&format!("/{}", asset_path)); + let content_type = Self::get_content_type(&format!("/{asset_path}")); let content = if content_type == "text/html" { let html = String::from_utf8_lossy(&asset.data); let injected = Self::inject_environment(config, &html); @@ -640,9 +640,9 @@ impl ExplorerService { if let Some(head_pos) = html.find("") { let (start, end) = html.split_at(head_pos + 6); - format!("{}{}{}", start, script, end) + format!("{start}{script}{end}") } else { - format!("{}\n{}", script, html) + format!("{script}\n{html}") } } diff --git a/crates/explorer/ui b/crates/explorer/ui index 5d03c18ce..2be6bfc5e 160000 --- a/crates/explorer/ui +++ b/crates/explorer/ui @@ -1 +1 @@ -Subproject commit 5d03c18ce96a4e0aa0c2773a948285dbdd6c1acb +Subproject commit 2be6bfc5e6530756b20381257b1043ac599c396f diff --git a/crates/gateway/gateway-client/tests/fixtures/0.14.1/state_update/mainnet_4130000.json 
b/crates/gateway/gateway-client/tests/fixtures/0.14.1/state_update/mainnet_4130000.json new file mode 100644 index 000000000..dcc18d21a --- /dev/null +++ b/crates/gateway/gateway-client/tests/fixtures/0.14.1/state_update/mainnet_4130000.json @@ -0,0 +1,330 @@ +{ + "block_hash": "0x1935ec0e5c7758fdc11a78ed9d4cadd4225eab826aabd98fe2d04b45ca4c150", + "new_root": "0x7e72ca880e4fa1f4987257d90b2642860a4574a03b79ac830f6fb5968520977", + "old_root": "0x484d8010568613b1878e03085989536d9112d89e2979297f0fbd741a3f73138", + "state_diff": { + "storage_diffs": { + "0x1": [ + { + "key": "0x3f04c6", + "value": "0x3e53fff98965235e64b766ad713b1d9730027c6cf41ffb2ace60c1979b02bc7" + } + ], + "0x2": [ + { + "key": "0x0", + "value": "0x78b0fb7" + }, + { + "key": "0xd58a4bdab51dbf1e5c9178814cc4adf01ebe0c872d0ed61b4e54922cc1b996", + "value": "0x78b0fb1" + }, + { + "key": "0x37a3f18631ffdfbc04751ecf46226bcca4194f7ee3743da23d45503e78a8031", + "value": "0x78b0fb4" + }, + { + "key": "0x37a3f18631ffdfbc04751ecf46226bcca4194f7ee3743da23d45503e78a8032", + "value": "0x78b0fb5" + }, + { + "key": "0x3ee4ba0f59886159d92a35f96ded219dd7f69c30953f9b68d333f10a27e312b", + "value": "0x78b0fb2" + }, + { + "key": "0x51434bb3f996080c738de8a0ef10304fa880084fd55cb896c136e4873854f0b", + "value": "0x78b0fad" + }, + { + "key": "0x51434bb3f996080c738de8a0ef10304fa880084fd55cb896c136e4873854f0c", + "value": "0x78b0fae" + }, + { + "key": "0x51434bb3f996080c738de8a0ef10304fa880084fd55cb896c136e4873854f0d", + "value": "0x78b0faf" + }, + { + "key": "0x51434bb3f996080c738de8a0ef10304fa880084fd55cb896c136e4873854f0e", + "value": "0x78b0fb0" + }, + { + "key": "0x5e4542324cd70f0da676f0418afaecd12405976d2356fec26293a460a48bf13", + "value": "0x78b0fb6" + }, + { + "key": "0x67e2b7df13c81d03441781ccd8f64bfdb0fce02f5ddc22cb35c8144c59c02f6", + "value": "0x78b0fb3" + }, + { + "key": "0x6d410d47be5497b0dafef14e24c8767731a6e50126ff8fa99f25a0d0ee02788", + "value": "0x78b0fac" + } + ], + 
"0x3df887ee9fcdbde0d5cee8bd63fa66dee1662a1f44b075f744952cc99b6b13": [ + { + "key": "0xfc7fce0137ef0fdf47d2297ba791a15d3f3f1801dc7ef0b2aa311ad5d4034c", + "value": "0x155d5ef96092800c8f58" + }, + { + "key": "0x29904f220cd5d7c3928339c9760e0459aa13e8f35214b3e510f51042a8e027d", + "value": "0x744d8b7db6286804a" + }, + { + "key": "0x29904f220cd5d7c3928339c9760e0459aa13e8f35214b3e510f51042a8e027f", + "value": "0xeddb83" + }, + { + "key": "0x29904f220cd5d7c3928339c9760e0459aa13e8f35214b3e510f51042a8e0281", + "value": "0x69393984" + }, + { + "key": "0x29904f220cd5d7c3928339c9760e0459aa13e8f35214b3e510f51042a8e0284", + "value": "0xd09a27d0809e1caf7" + } + ], + "0x12f173ebf374db69c33c4176b4c2bd4cde1bca1ceb7ff88b52dc42e7c5f66f1": [ + { + "key": "0x3342fa999fea16067b1f01baf96673f31a25f2b1443e6754d93fc40b57e8df2", + "value": "0x1682fb66a5adc2bb" + }, + { + "key": "0x3342fa999fea16067b1f01baf96673f31a25f2b1443e6754d93fc40b57e8df6", + "value": "0x69393984" + } + ], + "0x18469ed2d40a016a602371173c7287e25f85cb6abb6fc0866d3c444e2837603": [ + { + "key": "0x6d410d47be5497b0dafef14e24c8767731a6e50126ff8fa99f25a0d0ee02788", + "value": "0x1" + } + ], + "0x1b14326182638866e10d804d7a9e9fd51a522c8ac59ab9b1b11975d21fae9c7": [ + { + "key": "0xefb0884a0332bee3218e5114a1f5a8b94b7f3a0aa4b620ecd81bc37c64598f", + "value": "0x5880301" + }, + { + "key": "0x329c7ad716328e6d50f9ca0db199b7680edd1f9888de9e870e256b4d829dd57", + "value": "0x75585" + }, + { + "key": "0x51434bb3f996080c738de8a0ef10304fa880084fd55cb896c136e4873854f0b", + "value": "0xa90224" + }, + { + "key": "0x51434bb3f996080c738de8a0ef10304fa880084fd55cb896c136e4873854f0c", + "value": "0x3f04d0" + }, + { + "key": "0x51434bb3f996080c738de8a0ef10304fa880084fd55cb896c136e4873854f0d", + "value": "0x69393933" + }, + { + "key": "0x51434bb3f996080c738de8a0ef10304fa880084fd55cb896c136e4873854f0e", + "value": "0x69393984" + } + ], + "0x223e2f1b09f495eb4784de5da08751eef6339294e3e02f525c49b3c4df38880": [ + { + "key": 
"0x324fce85c2297d7ff9c265222108145baf05cde91f2da32b3877560d78916ac", + "value": "0xdff7d" + } + ], + "0x2ef591697f0fd9adc0ba9dbe0ca04dabad80cf95f08ba02e435d9cb6698a28a": [ + { + "key": "0x1379c436357371c42e43fedcbd051f6fe836d50df1d57bd62bedc69f7a9ca03", + "value": "0xe00000921c0000db955581216d5f5521415f20c1210e100000c02647b" + } + ], + "0x377c2d65debb3978ea81904e7d59740da1f07412e30d01c5ded1c5d6f1ddc43": [ + { + "key": "0xd58a4bdab51dbf1e5c9178814cc4adf01ebe0c872d0ed61b4e54922cc1b996", + "value": "0x69dcb48a56745cdc27194572d04f1e5a95781371c95244bd4588669488a6ca0" + }, + { + "key": "0x3ee4ba0f59886159d92a35f96ded219dd7f69c30953f9b68d333f10a27e312b", + "value": "0x18469ed2d40a016a602371173c7287e25f85cb6abb6fc0866d3c444e2837603" + }, + { + "key": "0x484b46148d37383593029fa3b4c09a5e0e3cb66bbcf5fc66529fa452ccc6e34", + "value": "0x8" + }, + { + "key": "0x6e1a3e69a0abb3927c8349aee9a5d839de8053af580fdfa7ad5f22c0fe663fb", + "value": "0xae" + } + ], + "0x3c4b9713e7d408681f8f541b999cfba9d0a85cd4152140e75d97353d5ecc8f0": [ + { + "key": "0x2208eb7142b20a00788438f9ba35fdef173c7680ba652fbfa239fef3addf2b1", + "value": "0x1e52d71" + }, + { + "key": "0x33043a6019faaaacd896299801314dabcc91bc7d39ce3e8d557f431280a456b", + "value": "0x3c557401365e136ba378b" + }, + { + "key": "0x3fc73af821c877aa6ced977a0bc34de21bb584b2954c82c06c80d3bc0cce026", + "value": "0x8139fd44b2b34049169b3c5b5da" + } + ], + "0x4718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d": [ + { + "key": "0x129226399a3e8e88b4c9f04e61229c79b33bcd6e6204fce73b383cf5c93fc33", + "value": "0x2e8c2f263c8319c36b" + }, + { + "key": "0x232431de833901e6d877a0ac4bfd9e1d4a2ccafee5f9dfd8b9e1ba5575a48db", + "value": "0x56a660814c34e396" + }, + { + "key": "0x32d1ea410b5bc194a70d11ed61ed4ccd46fb14c08014c3dae9d0b43eefa5426", + "value": "0x168dc46f10f91510" + }, + { + "key": "0x341b8d18ee008343d6a5ca5b846c332e31416d9a531473330016d5a4bedcd88", + "value": "0x744d8b7db6286804a" + }, + { + "key": 
"0x3d7ccfb9e9c5944c7a3226921716da0d42239892dbde7b12c1e5597d31c4124", + "value": "0x3138ac97ca17d45c7515" + }, + { + "key": "0x428b8bf52f845d23614a1144ecb19fec6156cdb86ed63f4ffa89d467c9d56f0", + "value": "0x9e18c2326c4c9ec30c" + }, + { + "key": "0x5496768776e3db30053404f18067d81a6e06f5a2b0de326e21298fd9d569a9a", + "value": "0x131fb601e0da1c14012ed" + }, + { + "key": "0x54c222c4c7612af283391cc175e9ba95b706989f0ebf07502f9ac5ed89f1d6c", + "value": "0x2aca9acc8480ec7ac" + }, + { + "key": "0x69156455e9666009c6db030647b6779b5137551d6f5b16c4ea53a1b3a425561", + "value": "0xc2a36c4dd1c70ace0" + }, + { + "key": "0x71b83ef0f4cb2ccaa36ef634622714589223ed6e67ad6eae365827a5f829161", + "value": "0x9c5d62bbf9a9466b88" + }, + { + "key": "0x7baedaf13ef4d370383337a289792e2ae5747607cf278d8ceeb30274355c1e1", + "value": "0x9bc76445c15ef04128" + } + ], + "0x49d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7": [ + { + "key": "0x3d7ccfb9e9c5944c7a3226921716da0d42239892dbde7b12c1e5597d31c4124", + "value": "0x6ba79cb95c262a20" + }, + { + "key": "0x4ce9e044cc4ea3671f19777c34fbb4e6c42f40b39b409fe794602732b423e35", + "value": "0x7584268fc4b3fe2" + } + ], + "0x53c91253bc9682c04929ca02ed00b3e423f6710d2ee7e0d5ebb06f3ecf368a8": [ + { + "key": "0x28635d134a5c3c8534adbf4b3fcf41028eac53e6f04a6835290b78cbef31807", + "value": "0xba" + }, + { + "key": "0x32d1ea410b5bc194a70d11ed61ed4ccd46fb14c08014c3dae9d0b43eefa5426", + "value": "0x0" + }, + { + "key": "0x38e0503b87211df933ca34a5a6f1677f88d16890ec6fb843e975e273e80467e", + "value": "0x2078dcd414" + }, + { + "key": "0x4ce9e044cc4ea3671f19777c34fbb4e6c42f40b39b409fe794602732b423e35", + "value": "0x8d29f2be" + }, + { + "key": "0x4e1c1355394243f72d1536f721b2b63aed57a67a5b60817cde5e3af9faa194f", + "value": "0x5bb128bb54c5" + } + ], + "0x5a7a86d6113c8860f90f96ea1c8e70a747333feabb40b0584c3936fa6f86717": [ + { + "key": "0x67e2b7df13c81d03441781ccd8f64bfdb0fce02f5ddc22cb35c8144c59c02f6", + "value": "0x1" + } + ], + 
"0x62da0780fae50d68cecaa5a051606dc21217ba290969b302db4dd99d2e9b470": [ + { + "key": "0x37a3f18631ffdfbc04751ecf46226bcca4194f7ee3743da23d45503e78a8031", + "value": "0x3" + }, + { + "key": "0x37a3f18631ffdfbc04751ecf46226bcca4194f7ee3743da23d45503e78a8032", + "value": "0x69393984" + } + ], + "0x68400056dccee818caa7e8a2c305f9a60d255145bac22d6c5c9bf9e2e046b71": [ + { + "key": "0x3e9df762c67f04c3d19de6f877d7906e3a52e992c3f97013dc2450ab7851c9", + "value": "0x6ba79cb95c262a20" + }, + { + "key": "0x1f5dba4f0e386fe3e03022985e50076614214c29faad4f1a66fd553c39c47ed", + "value": "0x3138ac97ca17d45c7515" + } + ], + "0x68f5c6a61780768455de69077e07e89787839bf8166decfbf92b645209c0fb8": [ + { + "key": "0x32d1ea410b5bc194a70d11ed61ed4ccd46fb14c08014c3dae9d0b43eefa5426", + "value": "0x85a2e" + }, + { + "key": "0x341b8d18ee008343d6a5ca5b846c332e31416d9a531473330016d5a4bedcd88", + "value": "0xeddb83" + }, + { + "key": "0x493875a3926558b908441a8fd6642a9f5b85f7fc5e39289c3a83b72b2eca837", + "value": "0x2ad3b8411" + } + ], + "0x69dcb48a56745cdc27194572d04f1e5a95781371c95244bd4588669488a6ca0": [ + { + "key": "0x5e4542324cd70f0da676f0418afaecd12405976d2356fec26293a460a48bf13", + "value": "0x1" + } + ], + "0x7229d1454093674673a530cd0d37beef3fc0f1b3116d95c62c5c032f1827d87": [ + { + "key": "0x38c22f06d1a1a3c7432edc69ea247f27fe02c8463b142118b802c9791bbc2ba", + "value": "0x11237d6a" + }, + { + "key": "0x52239ed1307efbb7852382b35cd1328f3ae408fc01d99ca882db67a61ee1407", + "value": "0x0" + }, + { + "key": "0x52239ed1307efbb7852382b35cd1328f3ae408fc01d99ca882db67a61ee1408", + "value": "0x0" + } + ] + }, + "deployed_contracts": [], + "old_declared_contracts": [], + "declared_classes": [], + "nonces": { + "0x338e702f015198672d2e087c93b403b35e7c90c2ba436f8397a03a661830fe": "0x51b3f", + "0x10aeeaa11d863f2e53373106e6807cec1fc6e8ed95a4e8b710665b7068dd8e6": "0xc268", + "0x2d0356738e30a3ce3d7ec6368e64d286ef71fa444990676ef1e083f68edd266": "0x2ac53", + 
"0x662776dac110a170767d83da4f1d8fae022df7aa8a78252eb9c501c68d49604": "0x1bb63", + "0x70366dd8425e129d7dbd7f6db2bb948bba8616563f83f043507a20c11e0d187": "0x1bb7f", + "0x7c0b8a20b433194608a907c6666ecd532991ac3d90a571d38547b62ebc5e21a": "0x3b", + "0x7c183208cf2fc08503ed1edb44694295a07d0adc25bb6dad1b40f4540a427fa": "0x1baee" + }, + "replaced_classes": [], + "migrated_compiled_classes": [ + { + "class_hash": "0x4ac055f14361bb6f7bf4b9af6e96ca68825e6037e9bdf87ea0b2c641dea73ae", + "compiled_class_hash": "0x17f3b8f7225a160ec0542ea5c44ee876f2b132e7dee00ec36f2422d8155a4e4" + } + ] + } +} diff --git a/crates/gateway/gateway-client/tests/mainnet.rs b/crates/gateway/gateway-client/tests/mainnet.rs index 7e3467ecd..790a92a58 100644 --- a/crates/gateway/gateway-client/tests/mainnet.rs +++ b/crates/gateway/gateway-client/tests/mainnet.rs @@ -32,6 +32,7 @@ async fn get_block(gateway: Client, #[case] block_number: BlockNumber, #[case] e #[case::v0_11_1(65000, test_data("0.11.1/state_update/mainnet_65000.json"))] #[case::v0_12_2(350000, test_data("0.12.2/state_update/mainnet_350000.json"))] #[case::v0_13_0(550000, test_data("0.13.0/state_update/mainnet_550000.json"))] +#[case::v0_14_1(4130000, test_data("0.14.1/state_update/mainnet_4130000.json"))] #[tokio::test] async fn get_state_update( gateway: Client, diff --git a/crates/gateway/gateway-server/src/handlers.rs b/crates/gateway/gateway-server/src/handlers.rs index 8a7901aac..2f9661584 100644 --- a/crates/gateway/gateway-server/src/handlers.rs +++ b/crates/gateway/gateway-server/src/handlers.rs @@ -195,7 +195,7 @@ where } /// The state update type returns by `/get_state_update` endpoint. 
-#[allow(clippy::enum_variant_names)] +#[allow(clippy::enum_variant_names, clippy::large_enum_variant)] #[derive(Debug, PartialEq, Eq, Serialize)] #[serde(untagged)] pub enum GetStateUpdateResponse { diff --git a/crates/gateway/gateway-types/src/conversion.rs b/crates/gateway/gateway-types/src/conversion.rs index e4a12f26c..530c1eede 100644 --- a/crates/gateway/gateway-types/src/conversion.rs +++ b/crates/gateway/gateway-types/src/conversion.rs @@ -12,7 +12,9 @@ use crate::{ impl From for StateUpdate { fn from(value: katana_rpc_types::StateUpdate) -> Self { match value { - katana_rpc_types::StateUpdate::Update(update) => StateUpdate::Confirmed(update.into()), + katana_rpc_types::StateUpdate::Confirmed(update) => { + StateUpdate::Confirmed(update.into()) + } katana_rpc_types::StateUpdate::PreConfirmed(pre_confirmed) => { StateUpdate::PreConfirmed(pre_confirmed.into()) } @@ -64,6 +66,16 @@ impl From for StateDiff { }) .collect(); + let migrated_compiled_classes = value + .migrated_compiled_classes + .unwrap_or_default() + .into_iter() + .map(|(class_hash, compiled_class_hash)| DeclaredContract { + class_hash, + compiled_class_hash, + }) + .collect(); + let replaced_classes = value .replaced_classes .into_iter() @@ -77,6 +89,7 @@ impl From for StateDiff { declared_classes, nonces: value.nonces, replaced_classes, + migrated_compiled_classes, } } } @@ -253,21 +266,67 @@ impl From for ReceiptBody { Some(ExecutionStatus::Succeeded) }; - let execution_resources = Some(ExecutionResources { - vm_resources: receipt.resources_used().computation_resources.clone(), - data_availability: Some(receipt.resources_used().da_resources.clone()), - total_gas_consumed: Some(receipt.resources_used().gas.clone()), - }); + match receipt { + katana_primitives::receipt::Receipt::Deploy(receipt) => { + Self { + execution_resources: Some(receipt.execution_resources.into()), + // This would need to be populated from transaction context + l1_to_l2_consumed_message: None, + l2_to_l1_messages: 
receipt.messages_sent, + events: receipt.events, + actual_fee: receipt.fee.overall_fee.into(), + execution_status, + revert_error: receipt.revert_error, + } + } - Self { - execution_resources, - l1_to_l2_consumed_message: None, /* This would need to be populated from transaction - * context */ - l2_to_l1_messages: receipt.messages_sent().to_vec(), - events: receipt.events().to_vec(), - actual_fee: receipt.fee().overall_fee.into(), - execution_status, - revert_error: receipt.revert_reason().map(|s| s.to_string()), + katana_primitives::receipt::Receipt::Invoke(receipt) => { + Self { + execution_resources: Some(receipt.execution_resources.into()), + // This would need to be populated from transaction context + l1_to_l2_consumed_message: None, + l2_to_l1_messages: receipt.messages_sent, + events: receipt.events, + actual_fee: receipt.fee.overall_fee.into(), + execution_status, + revert_error: receipt.revert_error, + } + } + + katana_primitives::receipt::Receipt::Declare(receipt) => Self { + execution_resources: Some(receipt.execution_resources.into()), + // This would need to be populated from transaction context + l1_to_l2_consumed_message: None, + l2_to_l1_messages: receipt.messages_sent, + events: receipt.events, + actual_fee: receipt.fee.overall_fee.into(), + execution_status, + revert_error: receipt.revert_error, + }, + + katana_primitives::receipt::Receipt::DeployAccount(receipt) => { + Self { + execution_resources: Some(receipt.execution_resources.into()), + // This would need to be populated from transaction context + l1_to_l2_consumed_message: None, + l2_to_l1_messages: receipt.messages_sent, + events: receipt.events, + actual_fee: receipt.fee.overall_fee.into(), + execution_status, + revert_error: receipt.revert_error, + } + } + + katana_primitives::receipt::Receipt::L1Handler(receipt) => Self { + execution_resources: Some(receipt.execution_resources.into()), + // This would need to be populated from transaction context + l1_to_l2_consumed_message: None, + 
l2_to_l1_messages: receipt.messages_sent, + events: receipt.events, + actual_fee: receipt.fee.overall_fee.into(), + execution_status, + revert_error: receipt.revert_error, + }, } } } @@ -305,6 +364,12 @@ impl From for katana_primitives::state::StateUpdates { .map(|contract| (contract.class_hash, contract.compiled_class_hash)) .collect(); + let migrated_compiled_classes = value + .migrated_compiled_classes + .into_iter() + .map(|contract| (contract.class_hash, contract.compiled_class_hash)) + .collect(); + let replaced_classes = value .replaced_classes .into_iter() @@ -318,6 +383,27 @@ impl From for katana_primitives::state::StateUpdates { deployed_contracts, nonce_updates: value.nonces, deprecated_declared_classes: BTreeSet::from_iter(value.old_declared_contracts), + migrated_compiled_classes, + } + } +} + +impl From for ExecutionResources { + fn from(value: katana_primitives::receipt::ExecutionResources) -> Self { + Self { + vm_resources: value.vm_resources, + data_availability: Some(value.data_availability), + total_gas_consumed: Some(value.total_gas_consumed), + } + } +} + +impl From for katana_primitives::receipt::ExecutionResources { + fn from(value: ExecutionResources) -> Self { + Self { + vm_resources: value.vm_resources, + data_availability: value.data_availability.unwrap_or_default(), + total_gas_consumed: value.total_gas_consumed.unwrap_or_default(), } } } @@ -325,7 +411,7 @@ impl From for katana_primitives::state::StateUpdates { #[cfg(test)] mod from_primitives_test { use katana_primitives::transaction::TxWithHash; - use katana_primitives::{address, felt, ContractAddress}; + use katana_primitives::{address, felt}; use katana_utils::arbitrary; use super::*; @@ -624,6 +710,7 @@ mod from_primitives_test { }], nonces: BTreeMap::new(), replaced_classes: vec![], + migrated_compiled_classes: Vec::new(), }; let converted: katana_primitives::state::StateUpdates = state_diff.into(); @@ -650,10 +737,9 @@ mod from_primitives_test { mod from_rpc_test { use 
std::collections::{BTreeMap, BTreeSet}; - use katana_primitives::contract::ContractAddress; use katana_primitives::{address, felt}; - use crate::StateDiff; + use crate::{DeclaredContract, StateDiff}; #[test] fn state_diff_conversion() { @@ -674,6 +760,10 @@ mod from_rpc_test { declared_classes, nonces: BTreeMap::new(), replaced_classes: BTreeMap::new(), + migrated_compiled_classes: Some(BTreeMap::from_iter([ + (felt!("0xa1"), felt!("0xb1")), + (felt!("0xa2"), felt!("0xb2")), + ])), }; let converted: StateDiff = rpc_state_diff.into(); @@ -698,5 +788,15 @@ mod from_rpc_test { // Verify deprecated declared classes assert_eq!(converted.old_declared_contracts.len(), 1); assert!(converted.old_declared_contracts.contains(&felt!("0x4"))); + + // Verify migrated class hashes + assert_eq!(converted.migrated_compiled_classes.len(), 2); + assert_eq!( + converted.migrated_compiled_classes, + vec![ + DeclaredContract { class_hash: felt!("0xa1"), compiled_class_hash: felt!("0xb1") }, + DeclaredContract { class_hash: felt!("0xa2"), compiled_class_hash: felt!("0xb2") } + ] + ); } } diff --git a/crates/gateway/gateway-types/src/lib.rs b/crates/gateway/gateway-types/src/lib.rs index 052a76c6c..cd5a7d657 100644 --- a/crates/gateway/gateway-types/src/lib.rs +++ b/crates/gateway/gateway-types/src/lib.rs @@ -19,12 +19,8 @@ //! - [`DeployAccountTxV3`]: Uses the custom DA mode and resource bounds //! 
- [`L1HandlerTx`]: Optional `nonce` field -use std::collections::{BTreeMap, BTreeSet}; - use katana_primitives::block::{BlockHash, BlockNumber}; pub use katana_primitives::class::CasmContractClass; -use katana_primitives::class::{ClassHash, CompiledClassHash}; -use katana_primitives::contract::{Nonce, StorageKey, StorageValue}; use katana_primitives::da::L1DataAvailabilityMode; use katana_primitives::{ContractAddress, Felt}; pub use katana_rpc_types::class::RpcSierraContractClass; @@ -34,10 +30,12 @@ use starknet::core::types::ResourcePrice; mod conversion; mod error; mod receipt; +mod state_update; mod transaction; pub use error::*; pub use receipt::*; +pub use state_update::*; pub use transaction::*; /// The contract class type returns by `/get_class_by_hash` endpoint. @@ -142,249 +140,6 @@ pub struct Block { pub transactions: Vec, } -/// The state update type returns by `/get_state_update` endpoint, with `includeBlock=true`. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct StateUpdateWithBlock { - pub state_update: StateUpdate, - pub block: Block, -} - -// The main reason why aren't using the state update RPC types is because the state diff -// serialization is different. -#[derive(Debug, Clone, PartialEq, Eq, Serialize)] -#[serde(untagged)] -pub enum StateUpdate { - Confirmed(ConfirmedStateUpdate), - PreConfirmed(PreConfirmedStateUpdate), -} - -/// State update of a pre-confirmed block. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct PreConfirmedStateUpdate { - /// The previous global state root - pub old_root: Felt, - /// State diff - pub state_diff: StateDiff, -} - -/// State update of a confirmed block. 
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct ConfirmedStateUpdate { - /// Block hash - pub block_hash: BlockHash, - /// The new global state root - pub new_root: Felt, - /// The previous global state root - pub old_root: Felt, - /// State diff - pub state_diff: StateDiff, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct StorageDiff { - pub key: StorageKey, - pub value: StorageValue, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct DeployedContract { - pub address: ContractAddress, - pub class_hash: ClassHash, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct DeclaredContract { - pub class_hash: ClassHash, - pub compiled_class_hash: CompiledClassHash, -} - fn default_l2_gas_price() -> ResourcePrice { ResourcePrice { price_in_fri: Felt::from(1), price_in_wei: Felt::from(1) } } - -// todo(kariy): merge the serialization of gateway into the rpc types -#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] -pub struct StateDiff { - pub storage_diffs: BTreeMap>, - pub deployed_contracts: Vec, - pub old_declared_contracts: Vec, - pub declared_classes: Vec, - pub nonces: BTreeMap, - pub replaced_classes: Vec, -} - -impl StateDiff { - /// Returns a new [`StateDiff`] that contains all updates from `self` and `other`, - /// preferring the values from `other` when both diffs touch the same entry. 
- pub fn merge(mut self, other: StateDiff) -> StateDiff { - let StateDiff { - storage_diffs, - deployed_contracts, - old_declared_contracts, - declared_classes, - nonces, - replaced_classes, - } = other; - - Self::merge_storage_diffs(&mut self.storage_diffs, storage_diffs); - Self::merge_deployed_contracts(&mut self.deployed_contracts, deployed_contracts); - Self::merge_deployed_contracts(&mut self.replaced_classes, replaced_classes); - Self::merge_declared_classes(&mut self.declared_classes, declared_classes); - Self::merge_old_declared_contracts( - &mut self.old_declared_contracts, - old_declared_contracts, - ); - self.nonces.extend(nonces); - - self - } - - fn merge_storage_diffs( - target: &mut BTreeMap>, - updates: BTreeMap>, - ) { - for (address, diffs) in updates { - let entry = target.entry(address).or_default(); - let mut index_by_key: BTreeMap = - entry.iter().enumerate().map(|(idx, diff)| (diff.key, idx)).collect(); - - for diff in diffs { - if let Some(idx) = index_by_key.get(&diff.key).copied() { - entry[idx] = diff; - } else { - index_by_key.insert(diff.key, entry.len()); - entry.push(diff); - } - } - } - } - - fn merge_deployed_contracts( - target: &mut Vec, - incoming: Vec, - ) { - let mut index_by_address: BTreeMap = - target.iter().enumerate().map(|(idx, contract)| (contract.address, idx)).collect(); - - for contract in incoming { - if let Some(idx) = index_by_address.get(&contract.address).copied() { - target[idx] = contract; - } else { - index_by_address.insert(contract.address, target.len()); - target.push(contract); - } - } - } - - fn merge_declared_classes(target: &mut Vec, incoming: Vec) { - let mut index_by_hash: BTreeMap = - target.iter().enumerate().map(|(idx, contract)| (contract.class_hash, idx)).collect(); - - for declared in incoming { - if let Some(idx) = index_by_hash.get(&declared.class_hash).copied() { - target[idx] = declared; - } else { - index_by_hash.insert(declared.class_hash, target.len()); - target.push(declared); - } - } - 
} - - fn merge_old_declared_contracts(target: &mut Vec, incoming: Vec) { - let mut seen: BTreeSet = target.iter().copied().collect(); - - for class_hash in incoming { - if seen.insert(class_hash) { - target.push(class_hash); - } - } - } -} - -impl<'de> Deserialize<'de> for StateUpdate { - fn deserialize>(deserializer: D) -> Result { - struct __Visitor; - - impl<'de> serde::de::Visitor<'de> for __Visitor { - type Value = StateUpdate; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("a state update response") - } - - fn visit_map>( - self, - mut map: A, - ) -> Result { - let mut block_hash: Option = None; - let mut new_root: Option = None; - let mut old_root: Option = None; - let mut state_diff: Option = None; - - while let Some(key) = map.next_key::()? { - match key.as_str() { - "block_hash" => { - if block_hash.is_some() { - return Err(serde::de::Error::duplicate_field("block_hash")); - } - block_hash = Some(map.next_value()?); - } - - "new_root" => { - if new_root.is_some() { - return Err(serde::de::Error::duplicate_field("new_root")); - } - new_root = Some(map.next_value()?); - } - - "old_root" => { - if old_root.is_some() { - return Err(serde::de::Error::duplicate_field("old_root")); - } - old_root = Some(map.next_value()?); - } - - "state_diff" => { - if state_diff.is_some() { - return Err(serde::de::Error::duplicate_field("state_diff")); - } - state_diff = Some(map.next_value()?); - } - - _ => { - let _ = map.next_value::()?; - } - } - } - - let old_root = - old_root.ok_or_else(|| serde::de::Error::missing_field("old_root"))?; - let state_diff = - state_diff.ok_or_else(|| serde::de::Error::missing_field("state_diff"))?; - - // If block_hash and new_root are not present, deserialize as - // PreConfirmedStateUpdate - match (block_hash, new_root) { - (None, None) => Ok(StateUpdate::PreConfirmed(PreConfirmedStateUpdate { - old_root, - state_diff, - })), - - (Some(block_hash), Some(new_root)) => { - 
Ok(StateUpdate::Confirmed(ConfirmedStateUpdate { - block_hash, - new_root, - old_root, - state_diff, - })) - } - - (None, Some(_)) => Err(serde::de::Error::missing_field("block_hash")), - (Some(_), None) => Err(serde::de::Error::missing_field("new_root")), - } - } - } - - deserializer.deserialize_map(__Visitor) - } -} diff --git a/crates/gateway/gateway-types/src/state_update.rs b/crates/gateway/gateway-types/src/state_update.rs new file mode 100644 index 000000000..8ab542e3a --- /dev/null +++ b/crates/gateway/gateway-types/src/state_update.rs @@ -0,0 +1,306 @@ +use std::collections::{BTreeMap, BTreeSet}; + +use katana_primitives::block::BlockHash; +use katana_primitives::class::{ClassHash, CompiledClassHash}; +use katana_primitives::contract::{Nonce, StorageKey, StorageValue}; +use katana_primitives::{ContractAddress, Felt}; +use serde::{Deserialize, Serialize}; + +use crate::Block; + +/// The state update type returns by `/get_state_update` endpoint, with `includeBlock=true`. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct StateUpdateWithBlock { + pub state_update: StateUpdate, + pub block: Block, +} + +// The main reason why aren't using the state update RPC types is because the state diff +// serialization is different. +#[derive(Debug, Clone, PartialEq, Eq, Serialize)] +#[serde(untagged)] +pub enum StateUpdate { + Confirmed(ConfirmedStateUpdate), + PreConfirmed(PreConfirmedStateUpdate), +} + +/// State update of a pre-confirmed block. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct PreConfirmedStateUpdate { + /// The previous global state root + pub old_root: Option, + /// State diff + pub state_diff: StateDiff, +} + +/// State update of a confirmed block. 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct ConfirmedStateUpdate { + /// Block hash + pub block_hash: BlockHash, + /// The new global state root + pub new_root: Felt, + /// The previous global state root + pub old_root: Felt, + /// State diff + pub state_diff: StateDiff, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct StorageDiff { + pub key: StorageKey, + pub value: StorageValue, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct DeployedContract { + pub address: ContractAddress, + pub class_hash: ClassHash, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct DeclaredContract { + pub class_hash: ClassHash, + pub compiled_class_hash: CompiledClassHash, +} + +/// State diff returned by the feeder gateway API. +/// +/// This type has a different serialization format compared to the JSON-RPC +/// [`StateDiff`](katana_rpc_types::state_update::StateDiff). +/// +/// ## Serialization Differences from the RPC Format +/// +/// - **`nonces`** +/// +/// Serialized as a map object `{ "0x123": "0x1" }`, whereas RPC uses an array `[ { +/// "contract_address": "0x123", "nonce": "0x1" } ]`. +/// +/// - **`storage_diffs`** +/// +/// Serialized as a map with contract address as key `{ "0x123": [ { "key": "0x1", "value": "0x2" } +/// ] }`, whereas RPC uses an array `[ { "address": "0x123", "storage_entries": [...] } ]`. +/// +/// - **`replaced_classes`** +/// +/// Uses `"address"` field name, whereas RPC uses `"contract_address"`. +/// +/// - **`old_declared_contracts`** +/// +/// This field name differs from RPC which uses `"deprecated_declared_classes"`. 
+#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] +pub struct StateDiff { + pub storage_diffs: BTreeMap>, + pub deployed_contracts: Vec, + pub old_declared_contracts: Vec, + pub declared_classes: Vec, + pub nonces: BTreeMap, + pub replaced_classes: Vec, + /// Classes whose compiled class hash (CASM hash) has been recomputed using the Blake2s hash + /// function. + /// + /// Introduced in [SNIP34] (Starknet 0.14.1) as part of the S-Two proving system optimization. + /// Blake2s hashes are ~3x cheaper to prove compared to Poseidon, significantly reducing the + /// cost of proving correct Cairo instruction execution within each block. + /// + /// [SNIP34]: https://community.starknet.io/t/snip-34-more-efficient-casm-hashes/115979 + /// + /// ## Migration Mechanism + /// + /// Existing classes are migrated gradually - a class hash is migrated the first time SNOS + /// executes an instruction within a contract using that class. Specifically: + /// + /// - **Successful transactions**: All classes used throughout the transaction are migrated. + /// - **Reverted transactions**: Classes proven before the reversion point are migrated, + /// including: + /// - The account contract's class (since `validate` must run successfully) + /// - Classes where a non-existent entry point invocation was proven + /// + /// The compiled class hash updates are applied at the end of each block. Since the execution + /// environment only references class hashes (never compiled class hashes directly), these + /// migrations do not affect execution - only the resulting state root. + #[serde(default)] + pub migrated_compiled_classes: Vec, +} + +impl StateDiff { + /// Returns a new [`StateDiff`] that contains all updates from `self` and `other`, + /// preferring the values from `other` when both diffs touch the same entry. 
+ pub fn merge(mut self, other: StateDiff) -> StateDiff { + let StateDiff { + storage_diffs, + deployed_contracts, + old_declared_contracts, + declared_classes, + nonces, + replaced_classes, + migrated_compiled_classes, + } = other; + + Self::merge_storage_diffs(&mut self.storage_diffs, storage_diffs); + Self::merge_deployed_contracts(&mut self.deployed_contracts, deployed_contracts); + Self::merge_deployed_contracts(&mut self.replaced_classes, replaced_classes); + Self::merge_declared_classes(&mut self.declared_classes, declared_classes); + Self::merge_declared_classes( + &mut self.migrated_compiled_classes, + migrated_compiled_classes, + ); + Self::merge_old_declared_contracts( + &mut self.old_declared_contracts, + old_declared_contracts, + ); + self.nonces.extend(nonces); + + self + } + + fn merge_storage_diffs( + target: &mut BTreeMap>, + updates: BTreeMap>, + ) { + for (address, diffs) in updates { + let entry = target.entry(address).or_default(); + let mut index_by_key: BTreeMap = + entry.iter().enumerate().map(|(idx, diff)| (diff.key, idx)).collect(); + + for diff in diffs { + if let Some(idx) = index_by_key.get(&diff.key).copied() { + entry[idx] = diff; + } else { + index_by_key.insert(diff.key, entry.len()); + entry.push(diff); + } + } + } + } + + fn merge_deployed_contracts( + target: &mut Vec, + incoming: Vec, + ) { + let mut index_by_address: BTreeMap = + target.iter().enumerate().map(|(idx, contract)| (contract.address, idx)).collect(); + + for contract in incoming { + if let Some(idx) = index_by_address.get(&contract.address).copied() { + target[idx] = contract; + } else { + index_by_address.insert(contract.address, target.len()); + target.push(contract); + } + } + } + + fn merge_declared_classes(target: &mut Vec, incoming: Vec) { + let mut index_by_hash: BTreeMap = + target.iter().enumerate().map(|(idx, contract)| (contract.class_hash, idx)).collect(); + + for declared in incoming { + if let Some(idx) = 
index_by_hash.get(&declared.class_hash).copied() { + target[idx] = declared; + } else { + index_by_hash.insert(declared.class_hash, target.len()); + target.push(declared); + } + } + } + + fn merge_old_declared_contracts(target: &mut Vec, incoming: Vec) { + let mut seen: BTreeSet = target.iter().copied().collect(); + + for class_hash in incoming { + if seen.insert(class_hash) { + target.push(class_hash); + } + } + } +} + +impl<'de> Deserialize<'de> for StateUpdate { + fn deserialize>(deserializer: D) -> Result { + struct __Visitor; + + impl<'de> serde::de::Visitor<'de> for __Visitor { + type Value = StateUpdate; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("a state update response") + } + + fn visit_map>( + self, + mut map: A, + ) -> Result { + let mut block_hash: Option = None; + let mut new_root: Option = None; + let mut old_root: Option = None; + let mut state_diff: Option = None; + + while let Some(key) = map.next_key::()? { + match key.as_str() { + "block_hash" => { + if block_hash.is_some() { + return Err(serde::de::Error::duplicate_field("block_hash")); + } + block_hash = Some(map.next_value()?); + } + + "new_root" => { + if new_root.is_some() { + return Err(serde::de::Error::duplicate_field("new_root")); + } + new_root = Some(map.next_value()?); + } + + "old_root" => { + if old_root.is_some() { + return Err(serde::de::Error::duplicate_field("old_root")); + } + old_root = Some(map.next_value()?); + } + + "state_diff" => { + if state_diff.is_some() { + return Err(serde::de::Error::duplicate_field("state_diff")); + } + state_diff = Some(map.next_value()?); + } + + _ => { + let _ = map.next_value::()?; + } + } + } + + let state_diff = + state_diff.ok_or_else(|| serde::de::Error::missing_field("state_diff"))?; + + // If block_hash and new_root are not present, deserialize as + // PreConfirmedStateUpdate + match (block_hash, new_root) { + (None, None, ..) 
=> Ok(StateUpdate::PreConfirmed(PreConfirmedStateUpdate { + old_root, + state_diff, + })), + + (Some(block_hash), Some(new_root)) => { + let old_root = + old_root.ok_or_else(|| serde::de::Error::missing_field("old_root"))?; + + Ok(StateUpdate::Confirmed(ConfirmedStateUpdate { + block_hash, + new_root, + old_root, + state_diff, + })) + } + + (None, Some(_)) => Err(serde::de::Error::missing_field("block_hash")), + (Some(_), None) => Err(serde::de::Error::missing_field("new_root")), + } + } + } + + deserializer.deserialize_map(__Visitor) + } +} diff --git a/crates/gateway/gateway-types/tests/types.rs b/crates/gateway/gateway-types/tests/types.rs index 8f5194104..e34eaf81c 100644 --- a/crates/gateway/gateway-types/tests/types.rs +++ b/crates/gateway/gateway-types/tests/types.rs @@ -5,7 +5,7 @@ use katana_gateway_types::{ StateDiff, StorageDiff, }; use katana_primitives::execution::BuiltinName; -use katana_primitives::{address, eth_address, felt, ContractAddress}; +use katana_primitives::{address, eth_address, felt}; use serde_json::json; #[test] @@ -39,6 +39,11 @@ fn state_diff_to_state_updates_conversion() { let replaced_classes = vec![DeployedContract { address: address!("0x320"), class_hash: felt!("0x384") }]; + let migrated_compiled_classes = vec![ + DeclaredContract { class_hash: felt!("0xaa1"), compiled_class_hash: felt!("0xbb1") }, + DeclaredContract { class_hash: felt!("0xaa2"), compiled_class_hash: felt!("0xbb2") }, + ]; + let state_diff = StateDiff { storage_diffs, deployed_contracts, @@ -46,6 +51,7 @@ fn state_diff_to_state_updates_conversion() { declared_classes, nonces, replaced_classes, + migrated_compiled_classes, }; let state_updates: katana_primitives::state::StateUpdates = state_diff.into(); @@ -80,6 +86,17 @@ fn state_diff_to_state_updates_conversion() { // replaced classes assert_eq!(state_updates.replaced_classes.len(), 1); assert_eq!(state_updates.replaced_classes.get(&address!("0x320")).unwrap(), &felt!("0x384")); + + // migrated class hashes + 
assert_eq!(state_updates.migrated_compiled_classes.len(), 2); + assert_eq!( + state_updates.migrated_compiled_classes.get(&felt!("0xaa1")).unwrap(), + &felt!("0xbb1") + ); + assert_eq!( + state_updates.migrated_compiled_classes.get(&felt!("0xaa2")).unwrap(), + &felt!("0xbb2") + ); } #[test] @@ -123,6 +140,10 @@ fn state_diff_merge_merges_entries() { address: address!("0x350"), class_hash: felt!("0x900"), }], + migrated_compiled_classes: vec![DeclaredContract { + class_hash: felt!("0x666"), + compiled_class_hash: felt!("0x999"), + }], }; let mut other_storage = BTreeMap::new(); @@ -152,6 +173,10 @@ fn state_diff_merge_merges_entries() { DeployedContract { address: address!("0x350"), class_hash: felt!("0x901") }, DeployedContract { address: address!("0x351"), class_hash: felt!("0x902") }, ], + migrated_compiled_classes: vec![DeclaredContract { + class_hash: felt!("0x777"), + compiled_class_hash: felt!("0x888"), + }], }; let merged = base.merge(other); @@ -192,6 +217,16 @@ fn state_diff_merge_merges_entries() { // nonces override and extend assert_eq!(merged.nonces.get(&contract_a), Some(&felt!("0x5"))); assert_eq!(merged.nonces.get(&contract_b), Some(&felt!("0x6"))); + + // migrated class hashes merged + let migrated_compiled_classes_by_hash: BTreeMap<_, _> = merged + .migrated_compiled_classes + .iter() + .map(|c| (c.class_hash, c.compiled_class_hash)) + .collect(); + + assert_eq!(migrated_compiled_classes_by_hash.get(&felt!("0x666")), Some(&felt!("0x999"))); + assert_eq!(migrated_compiled_classes_by_hash.get(&felt!("0x777")), Some(&felt!("0x888"))); } #[test] diff --git a/crates/genesis/src/constant.rs b/crates/genesis/src/constant.rs index 1fe8040a7..79a17fd39 100644 --- a/crates/genesis/src/constant.rs +++ b/crates/genesis/src/constant.rs @@ -2,8 +2,7 @@ use katana_contracts::contracts::{Account, LegacyERC20, UniversalDeployer}; use katana_primitives::class::ClassHash; use katana_primitives::contract::{ContractAddress, StorageKey}; use 
katana_primitives::utils::get_storage_var_address; -use katana_primitives::Felt; -use starknet::macros::felt; +use katana_primitives::{felt, Felt}; /// The default universal deployer contract address. pub const DEFAULT_UDC_ADDRESS: ContractAddress = @@ -12,12 +11,12 @@ pub const DEFAULT_UDC_ADDRESS: ContractAddress = /// The default ETH fee token contract address. /// See https://github.com/starknet-io/starknet-addresses/blob/master/bridged_tokens/mainnet.json pub const DEFAULT_ETH_FEE_TOKEN_ADDRESS: ContractAddress = - ContractAddress(felt!("0x2e7442625bab778683501c0eadbc1ea17b3535da040a12ac7d281066e915eea")); + ContractAddress(felt!("0x049d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7")); /// The default STRK fee token contract address. /// See https://github.com/starknet-io/starknet-addresses/blob/master/bridged_tokens/mainnet.json pub const DEFAULT_STRK_FEE_TOKEN_ADDRESS: ContractAddress = - ContractAddress(felt!("0x2e7442625bab778683501c0eadbc1ea17b3535da040a12ac7d281066e915eea")); + ContractAddress(felt!("0x04718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d")); /// The standard storage address for `public key` in the default account class. 
/// Corresponds to keccak("Account_public_key") diff --git a/crates/genesis/src/json.rs b/crates/genesis/src/json.rs index f944a3bff..397421137 100644 --- a/crates/genesis/src/json.rs +++ b/crates/genesis/src/json.rs @@ -589,8 +589,7 @@ fn class_artifact_at_path( #[cfg(test)] mod tests { use katana_contracts::contracts::{Account, LegacyERC20, UniversalDeployer}; - use katana_primitives::address; - use starknet::macros::felt; + use katana_primitives::{address, felt}; use super::*; use crate::constant::{DEFAULT_LEGACY_ERC20_CLASS_HASH, DEFAULT_LEGACY_UDC_CLASS_HASH}; diff --git a/crates/messaging/Cargo.toml b/crates/messaging/Cargo.toml index e4ee3f296..837ab55d0 100644 --- a/crates/messaging/Cargo.toml +++ b/crates/messaging/Cargo.toml @@ -24,7 +24,6 @@ url.workspace = true alloy-primitives = { workspace = true, features = [ "serde" ] } alloy-sol-types = { workspace = true, default-features = false, features = [ "json" ] } -starknet-crypto.workspace = true alloy-contract = { workspace = true, default-features = false } alloy-network = { workspace = true, default-features = false } diff --git a/crates/messaging/src/ethereum.rs b/crates/messaging/src/ethereum.rs index 37ae0b2e6..e85a36833 100644 --- a/crates/messaging/src/ethereum.rs +++ b/crates/messaging/src/ethereum.rs @@ -207,7 +207,7 @@ fn parse_messages(messages: &[MessageToL1]) -> Vec { } fn felt_from_u256(v: U256) -> Felt { - Felt::from_str(format!("{:#064x}", v).as_str()).unwrap() + Felt::from_str(format!("{v:#064x}").as_str()).unwrap() } #[cfg(test)] @@ -215,8 +215,9 @@ mod tests { use alloy_primitives::{address, b256, LogData, U256}; use katana_primitives::chain::{ChainId, NamedChainId}; + use katana_primitives::felt; use katana_primitives::utils::transaction::compute_l1_to_l2_message_hash; - use starknet::macros::{felt, selector}; + use starknet::macros::selector; use super::*; diff --git a/crates/messaging/src/lib.rs b/crates/messaging/src/lib.rs index 5e8fe4c8c..6c7b69368 100644 --- 
a/crates/messaging/src/lib.rs +++ b/crates/messaging/src/lib.rs @@ -170,6 +170,7 @@ pub trait Messenger { ) -> MessengerResult<(u64, Vec)>; } +#[allow(clippy::large_enum_variant)] #[derive(Debug)] pub enum MessengerMode { Ethereum(EthereumMessaging), diff --git a/crates/messaging/src/starknet.rs b/crates/messaging/src/starknet.rs index cfd6ea26c..f18107f72 100644 --- a/crates/messaging/src/starknet.rs +++ b/crates/messaging/src/starknet.rs @@ -2,8 +2,9 @@ use alloy_primitives::B256; use anyhow::Result; use async_trait::async_trait; use katana_primitives::chain::ChainId; +use katana_primitives::hash::StarkHash; use katana_primitives::transaction::L1HandlerTx; -use katana_primitives::Felt; +use katana_primitives::{hash, Felt}; use starknet::core::types::{BlockId, EmittedEvent, EventFilter}; use starknet::macros::selector; use starknet::providers::jsonrpc::HttpTransport; @@ -197,11 +198,12 @@ fn compute_starknet_to_appchain_message_hash( ) -> Felt { let mut buf: Vec = vec![from_address, to_address, nonce, entry_point_selector, Felt::from(payload.len())]; + for p in payload { buf.push(*p); } - starknet_crypto::poseidon_hash_many(&buf) + hash::Poseidon::hash_array(&buf) } #[cfg(test)] diff --git a/crates/node-bindings/src/lib.rs b/crates/node-bindings/src/lib.rs index 6601fb821..3e964f485 100644 --- a/crates/node-bindings/src/lib.rs +++ b/crates/node-bindings/src/lib.rs @@ -510,7 +510,7 @@ impl Katana { } if self.json_log { - cmd.args(["--log.format", "json"]); + cmd.args(["--log.stdout.format", "json"]); } if let Some(fork_block_number) = self.fork_block_number { diff --git a/crates/node/Cargo.toml b/crates/node/Cargo.toml index 00ed23a57..2d85c1a64 100644 --- a/crates/node/Cargo.toml +++ b/crates/node/Cargo.toml @@ -27,8 +27,10 @@ katana-rpc-client.workspace = true katana-rpc-types.workspace = true katana-stage.workspace = true katana-tasks.workspace = true +katana-tee = { workspace = true, optional = true } anyhow.workspace = true +backon.workspace = true 
num-traits.workspace = true futures.workspace = true http.workspace = true @@ -45,6 +47,8 @@ strum_macros.workspace = true tokio = { workspace = true, features = [ "time" ] } [features] -cartridge = [ "katana-rpc-api/cartridge", "katana-rpc-server/cartridge" ] -explorer = [ "katana-rpc-server/explorer" ] -native = [ "katana-executor/native" ] +cartridge = ["katana-rpc-api/cartridge", "katana-rpc-server/cartridge"] +explorer = ["katana-rpc-server/explorer"] +native = ["katana-executor/native"] +tee = ["dep:katana-tee", "katana-rpc-api/tee", "katana-rpc-server/tee"] +tee-snp = ["tee", "katana-tee/snp"] diff --git a/crates/node/src/config/mod.rs b/crates/node/src/config/mod.rs index 15d5314a3..8e03bc9c4 100644 --- a/crates/node/src/config/mod.rs +++ b/crates/node/src/config/mod.rs @@ -10,6 +10,8 @@ pub mod metrics; pub mod paymaster; pub mod rpc; pub mod sequencing; +#[cfg(feature = "tee")] +pub mod tee; use db::DbConfig; use dev::DevConfig; @@ -60,4 +62,8 @@ pub struct Config { /// Cartridge paymaster options. #[cfg(feature = "cartridge")] pub paymaster: Option, + + /// TEE attestation options. + #[cfg(feature = "tee")] + pub tee: Option, } diff --git a/crates/node/src/config/rpc.rs b/crates/node/src/config/rpc.rs index 4a97d119e..2c5f9fb62 100644 --- a/crates/node/src/config/rpc.rs +++ b/crates/node/src/config/rpc.rs @@ -34,6 +34,8 @@ pub enum RpcModuleKind { Dev, #[cfg(feature = "cartridge")] Cartridge, + #[cfg(feature = "tee")] + Tee, } /// Configuration for the RPC server. @@ -104,6 +106,8 @@ impl RpcModulesList { RpcModuleKind::Dev, #[cfg(feature = "cartridge")] RpcModuleKind::Cartridge, + #[cfg(feature = "tee")] + RpcModuleKind::Tee, ])) } diff --git a/crates/node/src/config/tee.rs b/crates/node/src/config/tee.rs new file mode 100644 index 000000000..ff18038ab --- /dev/null +++ b/crates/node/src/config/tee.rs @@ -0,0 +1,8 @@ +use katana_tee::TeeProviderType; + +/// TEE configuration options. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub struct TeeConfig { + /// The type of TEE provider to use for attestation. + pub provider_type: TeeProviderType, +} diff --git a/crates/node/src/full/mod.rs b/crates/node/src/full/mod.rs index 04a2a550c..21e8d4a0a 100644 --- a/crates/node/src/full/mod.rs +++ b/crates/node/src/full/mod.rs @@ -58,10 +58,13 @@ pub enum Network { Sepolia, } +pub use katana_pipeline::PruningConfig; + #[derive(Debug)] pub struct Config { pub db: DbConfig, pub rpc: RpcConfig, + pub pruning: PruningConfig, pub metrics: Option, pub gateway_api_key: Option, pub network: Network, @@ -74,7 +77,7 @@ pub struct Node { pub pool: FullNodePool, pub config: Arc, pub task_manager: TaskManager, - pub pipeline: Pipeline, + pub pipeline: Pipeline, pub rpc_server: RpcServer, pub gateway_client: SequencerGateway, pub metrics_server: Option>, @@ -124,10 +127,14 @@ impl Node { // --- build pipeline let (mut pipeline, pipeline_handle) = Pipeline::new(storage_provider.clone(), 256); + + // Configure pruning + pipeline.set_pruning_config(config.pruning.clone()); + let block_downloader = BatchBlockDownloader::new_gateway(gateway_client.clone(), 20); pipeline.add_stage(Blocks::new(storage_provider.clone(), block_downloader)); pipeline.add_stage(Classes::new(storage_provider.clone(), gateway_client.clone(), 20)); - pipeline.add_stage(StateTrie::new(storage_provider.clone())); + pipeline.add_stage(StateTrie::new(storage_provider.clone(), task_spawner.clone())); // -- build chain tip watcher using gateway client @@ -257,11 +264,14 @@ impl Node { None }; - let pipeline_handle = self.pipeline.handle(); + let chain_tip_watcher = self.chain_tip_watcher; + let mut tip_subscription = chain_tip_watcher.subscribe(); - let mut tip_subscription = self.chain_tip_watcher.subscribe(); + let pipeline_handle = self.pipeline.handle(); let pipeline_handle_clone = pipeline_handle.clone(); + // -- start syncing pipeline task + self.task_manager .task_spawner() .build_task() @@ -269,20 
+279,29 @@ impl Node { .name("Pipeline") .spawn(self.pipeline.into_future()); + // -- start chain tip watcher task + self.task_manager .task_spawner() .build_task() .graceful_shutdown() .name("Chain tip watcher") - .spawn(self.chain_tip_watcher.into_future()); + .spawn(async move { + loop { + if let Err(error) = chain_tip_watcher.run().await { + error!(%error, "Tip watcher failed. Restarting task."); + } + } + }); + + // -- start a task for updating the pipeline's tip based on chain tip changes - // spawn a task for updating the pipeline's tip based on chain tip changes self.task_manager.task_spawner().spawn(async move { loop { match tip_subscription.changed().await { Ok(new_tip) => pipeline_handle_clone.set_tip(new_tip), - Err(err) => { - error!(error = ?err, "Error updating pipeline tip."); + Err(error) => { + error!(?error, "Error updating pipeline tip."); break; } } diff --git a/crates/node/src/full/pending/mod.rs b/crates/node/src/full/pending/mod.rs index 0c25829fa..26725b459 100644 --- a/crates/node/src/full/pending/mod.rs +++ b/crates/node/src/full/pending/mod.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use std::time::Duration; +use anyhow::{anyhow, Result}; use katana_gateway_client::Client; use katana_gateway_types::{ConfirmedTransaction, ErrorCode, PreConfirmedBlock, StateDiff}; use katana_pipeline::PipelineBlockSubscription; @@ -45,7 +46,13 @@ impl PreconfStateFactory { shared_preconf_block: shared_preconf_block.clone(), }; - tokio::spawn(async move { worker.run().await }); + tokio::spawn(async move { + loop { + if let Err(error) = worker.run().await { + error!(%error, "PreconfBlockWatcher returned with an error."); + } + } + }); Self { gateway_client, latest_synced_block, shared_preconf_block, storage_provider } } @@ -75,12 +82,12 @@ impl PreconfStateFactory { .map(|preconf_data| preconf_data.preconf_state_updates.clone()) } - pub fn block(&self) -> Option { + pub fn block(&self) -> Option<(BlockNumber, PreConfirmedBlock)> { self.shared_preconf_block .inner 
.lock() .as_ref() - .map(|preconf_data| preconf_data.preconf_block.clone()) + .map(|preconf_data| (preconf_data.preconf_block_id, preconf_data.preconf_block.clone())) } pub fn transactions(&self) -> Option> { @@ -104,8 +111,9 @@ struct PreconfBlockData { preconf_state_updates: StateUpdates, } -const DEFAULT_INTERVAL: Duration = Duration::from_millis(500); +const DEFAULT_INTERVAL: Duration = Duration::from_millis(1000); +#[derive(Debug)] struct PreconfBlockWatcher { interval: Duration, gateway_client: Client, @@ -120,7 +128,7 @@ struct PreconfBlockWatcher { } impl PreconfBlockWatcher { - async fn run(&mut self) { + async fn run(&mut self) -> Result<()> { let mut current_preconf_block_num = self.latest_synced_block.block().map(|b| b + 1).unwrap_or(0); @@ -160,32 +168,28 @@ impl PreconfBlockWatcher { // chain's tip, in which case we just skip to the next // iteration. Err(katana_gateway_client::Error::Sequencer(error)) - if error.code == ErrorCode::BlockNotFound => - { - continue - } + if error.code == ErrorCode::BlockNotFound => {} - Err(err) => panic!("{err}"), + Err(err) => return Err(anyhow!(err)), + } + } else { + if let Err(err) = self.latest_synced_block.changed().await { + error!(error = ?err, "Error receiving latest block number."); + break; } - } - - tokio::select! 
{ - biased; - res = self.latest_synced_block.changed() => { - if let Err(err) = res { - error!(error = ?err, "Error receiving latest block number."); - break; - } + // reset preconf state + *self.shared_preconf_block.inner.lock() = None; - let latest_synced_block_num = self.latest_synced_block.block().unwrap(); - current_preconf_block_num = latest_synced_block_num + 1; - } + let latest_synced_block_num = self.latest_synced_block.block().unwrap_or(0); + current_preconf_block_num = latest_synced_block_num + 1; - _ = tokio::time::sleep(self.interval) => { - current_preconf_block_num += 1; - } + continue; } + + tokio::time::sleep(self.interval).await } + + Ok(()) } } diff --git a/crates/node/src/full/pending/provider.rs b/crates/node/src/full/pending/provider.rs index a7aee2e69..f9e476819 100644 --- a/crates/node/src/full/pending/provider.rs +++ b/crates/node/src/full/pending/provider.rs @@ -2,7 +2,6 @@ use katana_gateway_types::TxTryFromError; use katana_primitives::block::FinalityStatus; use katana_primitives::fee::PriceUnit; use katana_primitives::transaction::{TxHash, TxNumber, TxType, TxWithHash}; -use katana_primitives::Felt; use katana_provider::api::state::StateProvider; use katana_rpc_server::starknet::{PendingBlockProvider, StarknetApiResult}; use katana_rpc_types::{ @@ -15,7 +14,7 @@ impl PendingBlockProvider for PreconfStateFactory { fn get_pending_block_with_txs( &self, ) -> StarknetApiResult> { - if let Some(block) = self.block() { + if let Some((block_number, block)) = self.block() { let transactions = block .transactions .clone() @@ -26,7 +25,7 @@ impl PendingBlockProvider for PreconfStateFactory { Ok(Some(katana_rpc_types::PreConfirmedBlockWithTxs { transactions, - block_number: 0, + block_number, l1_da_mode: block.l1_da_mode, l1_gas_price: block.l1_gas_price, l2_gas_price: block.l2_gas_price, @@ -43,10 +42,10 @@ impl PendingBlockProvider for PreconfStateFactory { fn get_pending_block_with_receipts( &self, ) -> StarknetApiResult> { - if let Some(block) 
= self.block() { + if let Some((block_number, block)) = self.block() { Ok(Some(katana_rpc_types::PreConfirmedBlockWithReceipts { transactions: Vec::new(), - block_number: 0, + block_number, l1_da_mode: block.l1_da_mode, l1_gas_price: block.l1_gas_price, l2_gas_price: block.l2_gas_price, @@ -63,7 +62,7 @@ impl PendingBlockProvider for PreconfStateFactory { fn get_pending_block_with_tx_hashes( &self, ) -> StarknetApiResult> { - if let Some(block) = self.block() { + if let Some((block_number, block)) = self.block() { let transactions = block .transactions .clone() @@ -73,7 +72,7 @@ impl PendingBlockProvider for PreconfStateFactory { Ok(Some(katana_rpc_types::PreConfirmedBlockWithTxHashes { transactions, - block_number: 0, + block_number, l1_da_mode: block.l1_da_mode, l1_gas_price: block.l1_gas_price, l2_gas_price: block.l2_gas_price, @@ -91,7 +90,7 @@ impl PendingBlockProvider for PreconfStateFactory { &self, hash: TxHash, ) -> StarknetApiResult> { - if let Some(preconf_block) = self.block() { + if let Some((block_number, preconf_block)) = self.block() { let receipt = preconf_block .transaction_receipts .iter() @@ -109,7 +108,7 @@ impl PendingBlockProvider for PreconfStateFactory { let status = FinalityStatus::PreConfirmed; let transaction_hash = receipt.transaction_hash; - let block = ReceiptBlockInfo::PreConfirmed { block_number: 0 }; + let block = ReceiptBlockInfo::PreConfirmed { block_number }; let receipt = match r#type { TxType::Invoke => { @@ -151,10 +150,7 @@ impl PendingBlockProvider for PreconfStateFactory { &self, ) -> StarknetApiResult> { if let Some(state_diff) = self.state_updates() { - Ok(Some(PreConfirmedStateUpdate { - old_root: Felt::ZERO, - state_diff: state_diff.into(), - })) + Ok(Some(PreConfirmedStateUpdate { old_root: None, state_diff: state_diff.into() })) } else { Ok(None) } diff --git a/crates/node/src/full/tip_watcher.rs b/crates/node/src/full/tip_watcher.rs index 3ebcb38c1..c425a9684 100644 --- a/crates/node/src/full/tip_watcher.rs +++ 
b/crates/node/src/full/tip_watcher.rs @@ -1,12 +1,12 @@ -use std::future::IntoFuture; use std::time::Duration; use anyhow::Result; +use backon::{ExponentialBuilder, Retryable}; use futures::future::BoxFuture; use katana_gateway_types::BlockId; use katana_primitives::block::BlockNumber; use tokio::sync::watch; -use tracing::{error, info}; +use tracing::{info, warn}; pub type TipWatcherFut = BoxFuture<'static, Result<()>>; @@ -78,19 +78,6 @@ impl ChainTipWatcher

{ } } -impl IntoFuture for ChainTipWatcher

{ - type Output = Result<()>; - type IntoFuture = TipWatcherFut; - - fn into_future(self) -> Self::IntoFuture { - Box::pin(async move { - self.run().await.inspect_err(|error| { - error!(target: "node", %error, "Tip watcher failed."); - }) - }) - } -} - impl

std::fmt::Debug for ChainTipWatcher

{ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("ChainTipWatcher") @@ -131,7 +118,13 @@ impl std::fmt::Debug for TipSubscription { impl ChainTipProvider for katana_gateway_client::Client { fn latest_number(&self) -> BoxFuture<'_, Result> { Box::pin(async move { - let block = self.get_block(BlockId::Latest).await?; + let block = (|| async { self.get_block(BlockId::Latest).await }) + .retry(ExponentialBuilder::default()) + .notify(|error, dur| { + warn!(%error, "Failed to fetch latest block, retrying in {}s...", dur.as_secs()); + }) + .await?; + block.block_number.ok_or_else(|| anyhow::anyhow!("Block number not available")) }) } diff --git a/crates/node/src/lib.rs b/crates/node/src/lib.rs index f7daae7a2..2a46b5311 100644 --- a/crates/node/src/lib.rs +++ b/crates/node/src/lib.rs @@ -39,6 +39,8 @@ use katana_rpc_api::dev::DevApiServer; use katana_rpc_api::starknet::{StarknetApiServer, StarknetTraceApiServer, StarknetWriteApiServer}; #[cfg(feature = "explorer")] use katana_rpc_api::starknet_ext::StarknetApiExtServer; +#[cfg(feature = "tee")] +use katana_rpc_api::tee::TeeApiServer; use katana_rpc_client::starknet::Client as StarknetClient; #[cfg(feature = "cartridge")] use katana_rpc_server::cartridge::CartridgeApi; @@ -47,6 +49,8 @@ use katana_rpc_server::dev::DevApi; #[cfg(feature = "cartridge")] use katana_rpc_server::starknet::PaymasterConfig; use katana_rpc_server::starknet::{StarknetApi, StarknetApiConfig}; +#[cfg(feature = "tee")] +use katana_rpc_server::tee::TeeApi; use katana_rpc_server::{RpcServer, RpcServerHandle}; use katana_rpc_types::GetBlockWithTxHashesResponse; use katana_stage::Sequencing; @@ -115,17 +119,29 @@ where .with_fee(config.dev.fee); let executor_factory = { - #[allow(unused_mut)] - let mut class_cache = ClassCache::builder(); - - #[cfg(feature = "native")] - { - info!(enabled = config.execution.compile_native, "Cairo native compilation"); - class_cache = 
class_cache.compile_native(config.execution.compile_native); - } - - let global_class_cache = class_cache.build_global()?; - // let global_class_cache = ClassCache::new()?; + // Try to use existing global cache if already initialized (useful for tests with + // multiple nodes) Otherwise, build and initialize a new global cache + let global_class_cache = match ClassCache::try_global() { + Ok(cache) => { + info!("Using existing global class cache"); + cache + } + Err(_) => { + #[allow(unused_mut)] + let mut class_cache = ClassCache::builder(); + + #[cfg(feature = "native")] + { + info!( + enabled = config.execution.compile_native, + "Cairo native compilation" + ); + class_cache = class_cache.compile_native(config.execution.compile_native); + } + + class_cache.build_global()? + } + }; let factory = BlockifierFactory::new( overrides, @@ -270,6 +286,37 @@ where rpc_modules.merge(DevApiServer::into_rpc(api))?; } + // --- build tee api (if configured) + #[cfg(feature = "tee")] + if config.rpc.apis.contains(&RpcModuleKind::Tee) { + if let Some(ref tee_config) = config.tee { + use katana_tee::{TeeProvider, TeeProviderType}; + + let tee_provider: Arc = match tee_config.provider_type { + TeeProviderType::SevSnp => { + #[cfg(feature = "tee-snp")] + { + Arc::new( + katana_tee::SevSnpProvider::new() + .context("Failed to initialize SEV-SNP provider")?, + ) + } + #[cfg(not(feature = "tee-snp"))] + { + anyhow::bail!( + "SEV-SNP TEE provider requires the 'tee-snp' feature to be enabled" + ); + } + } + }; + + let api = TeeApi::new(provider.clone(), tee_provider); + rpc_modules.merge(TeeApiServer::into_rpc(api))?; + + info!(target: "node", provider = ?tee_config.provider_type, "TEE API enabled"); + } + } + #[allow(unused_mut)] let mut rpc_server = RpcServer::new().metrics(true).health_check(true).cors(cors).module(rpc_modules)?; @@ -344,10 +391,12 @@ where impl Node { pub fn build(config: Config) -> Result { let (provider, db) = if let Some(path) = &config.db.dir { + info!(target: 
"node", path = %path.display(), "Initializing database."); let db = katana_db::Db::new(path)?; let factory = DbProviderFactory::new(db.clone()); (factory, db) } else { + info!(target: "node", "Initializing in-memory database."); let factory = DbProviderFactory::new_in_memory(); let db = factory.db().clone(); (factory, db) @@ -370,6 +419,7 @@ impl Node { return Err(anyhow::anyhow!("Forking is only supported in dev mode for now")); }; + info!(target: "node", "Initializing in-memory database."); let db = katana_db::Db::in_memory()?; let client = StarknetClient::new(cfg.url.clone()); diff --git a/crates/pool/pool/src/validation/stateful.rs b/crates/pool/pool/src/validation/stateful.rs index 467ee8dcf..35806c30a 100644 --- a/crates/pool/pool/src/validation/stateful.rs +++ b/crates/pool/pool/src/validation/stateful.rs @@ -271,7 +271,7 @@ fn map_fee_err( } TransactionFeeError::InsufficientResourceBounds { errors } => { - let error = errors.iter().map(|e| format!("{}", e)).collect::>().join("\n"); + let error = errors.iter().map(|e| format!("{e}")).collect::>().join("\n"); Ok(InvalidTransactionError::InsufficientIntrinsicFee( InsufficientIntrinsicFeeError::InsufficientResourceBounds { error }, )) @@ -286,9 +286,9 @@ fn map_executor_err( ) -> Result> { match err { TransactionExecutorError::TransactionExecutionError(e) => match e { - TransactionExecutionError::TransactionFeeError(e) => map_fee_err(e), + TransactionExecutionError::TransactionFeeError(e) => map_fee_err(*e), TransactionExecutionError::TransactionPreValidationError(e) => { - map_pre_validation_err(e) + map_pre_validation_err(*e) } _ => Err(Box::new(e)), @@ -330,7 +330,7 @@ fn map_pre_validation_err( err: TransactionPreValidationError, ) -> Result> { match err { - TransactionPreValidationError::TransactionFeeError(err) => map_fee_err(err), + TransactionPreValidationError::TransactionFeeError(err) => map_fee_err(*err), TransactionPreValidationError::StateError(err) => Err(Box::new(err)), 
TransactionPreValidationError::InvalidNonce { address, diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 33cd162ca..e28d32eaa 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -7,6 +7,8 @@ version.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +katana-primitives-macro.workspace = true + cairo-vm.workspace = true cairo-lang-sierra.workspace = true cairo-lang-utils.workspace = true @@ -17,13 +19,11 @@ arbitrary = { workspace = true, optional = true } blockifier = { workspace = true, features = [ "testing" ] } # some Clone derives are gated behind 'testing' feature cainome-cairo-serde.workspace = true derive_more.workspace = true -heapless = { version = "0.8.0", features = [ "serde" ] } lazy_static.workspace = true num-traits.workspace = true serde.workspace = true serde_json.workspace = true starknet.workspace = true -starknet-crypto.workspace = true starknet-types-core.workspace = true starknet_api.workspace = true thiserror.workspace = true diff --git a/crates/primitives/macro/Cargo.toml b/crates/primitives/macro/Cargo.toml new file mode 100644 index 000000000..cd9bc5068 --- /dev/null +++ b/crates/primitives/macro/Cargo.toml @@ -0,0 +1,15 @@ +[package] +description = "Procedural macros for katana-primitives" +edition.workspace = true +name = "katana-primitives-macro" +version.workspace = true + +[lib] +proc-macro = true + +[dependencies] +proc-macro2.workspace = true +quote.workspace = true +syn = { workspace = true, features = ["parsing", "proc-macro"] } + +starknet-types-core.workspace = true diff --git a/crates/primitives/macro/src/lib.rs b/crates/primitives/macro/src/lib.rs new file mode 100644 index 000000000..e0a83ed03 --- /dev/null +++ b/crates/primitives/macro/src/lib.rs @@ -0,0 +1,123 @@ +#![deny(missing_docs)] + +//! Procedural macros for the `katana-primitives` crate. 
+ +use proc_macro::TokenStream; +use proc_macro2::TokenStream as TokenStream2; +use starknet_types_core::felt::{Felt, NonZeroFelt}; +use syn::parse::{Parse, ParseStream}; +use syn::{parse_macro_input, LitStr, Token}; + +/// 2 ** 251 - 256 +/// +/// Valid storage addresses should satisfy `address + offset < 2**251` where `offset < +/// 256` and `address < ADDR_BOUND`. +const ADDR_BOUND: NonZeroFelt = NonZeroFelt::from_raw([ + 576459263475590224, + 18446744073709255680, + 160989183, + 18446743986131443745, +]); + +const DEFAULT_CRATE_PATH: &str = "::katana_primitives"; + +/// Input for the `felt!` and `address!` macros. +/// +/// Supports two forms: +/// - `felt!("0x1234")` - uses default crate path `::katana_primitives` +/// - `felt!("0x1234", crate)` - uses custom crate path +struct MacroInput { + value: LitStr, + crate_path: String, +} + +impl Parse for MacroInput { + fn parse(input: ParseStream<'_>) -> syn::Result { + let value: LitStr = input.parse()?; + + let crate_path = if input.peek(Token![,]) { + input.parse::()?; + // Parse remaining tokens as the crate path + input.parse::()?.to_string() + } else { + DEFAULT_CRATE_PATH.to_string() + }; + + Ok(MacroInput { value, crate_path }) + } +} + +fn parse_felt(s: &str) -> Felt { + if s.starts_with("0x") || s.starts_with("0X") { + Felt::from_hex(s).expect("invalid Felt hex value") + } else { + Felt::from_dec_str(s).expect("invalid Felt decimal value") + } +} + +/// Defines a compile-time constant for a field element from its decimal or hexadecimal +/// representation. 
+/// +/// # Examples +/// +/// ```ignore +/// use katana_primitives::felt; +/// +/// // From hexadecimal (uses default crate path) +/// let hex_felt = felt!("0x1234"); +/// +/// // From decimal +/// let dec_felt = felt!("42"); +/// +/// // With custom crate path (for use inside katana-primitives itself) +/// let internal_felt = felt!("0x1234", crate); +/// ``` +#[proc_macro] +pub fn felt(input: TokenStream) -> TokenStream { + let MacroInput { value, crate_path } = parse_macro_input!(input as MacroInput); + let felt_value = parse_felt(&value.value()); + let felt_raw = felt_value.to_raw(); + + format!( + "{}::Felt::from_raw([{}, {}, {}, {}])", + crate_path, felt_raw[0], felt_raw[1], felt_raw[2], felt_raw[3], + ) + .parse() + .unwrap() +} + +/// Defines a compile-time constant for a contract address from its decimal or hexadecimal +/// representation. +/// +/// The address is normalized (i.e., `address % ADDR_BOUND`) at compile time. +/// +/// # Examples +/// +/// ```ignore +/// use katana_primitives::address; +/// +/// // From hexadecimal (uses default crate path) +/// const MY_CONTRACT: ContractAddress = address!("0x1234"); +/// +/// // From decimal +/// const OTHER_CONTRACT: ContractAddress = address!("42"); +/// +/// // With custom crate path (for use inside katana-primitives itself) +/// const INTERNAL: ContractAddress = address!("0x1234", crate); +/// ``` +#[proc_macro] +pub fn address(input: TokenStream) -> TokenStream { + let MacroInput { value, crate_path } = parse_macro_input!(input as MacroInput); + let felt_value = parse_felt(&value.value()); + + // Normalize the address: address % ADDR_BOUND + let normalized = felt_value.mod_floor(&ADDR_BOUND); + let felt_raw = normalized.to_raw(); + + format!( + "{}::ContractAddress::from_raw([{}, {}, {}, {}])", + crate_path, felt_raw[0], felt_raw[1], felt_raw[2], felt_raw[3], + ) + .parse() + .unwrap() +} diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 4ff3e8f8e..7c03b756f 100644 --- 
a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -5,8 +5,8 @@ use std::str::FromStr; use num_traits::ToPrimitive; use starknet::core::types::ResourcePrice; use starknet::core::utils::cairo_short_string_to_felt; -use starknet::macros::short_string; +use crate::cairo::ShortString; use crate::contract::ContractAddress; use crate::da::L1DataAvailabilityMode; use crate::transaction::{ExecutableTxWithHash, TxHash, TxWithHash}; @@ -327,6 +327,8 @@ impl Header { pub fn compute_hash(&self) -> Felt { use starknet_types_core::hash::{Poseidon, StarkHash}; + const BLOCK_HASH_VERSION: ShortString = ShortString::from_ascii("STARKNET_BLOCK_HASH0"); + let concant = Self::concat_counts( self.transaction_count, self.events_count, @@ -335,7 +337,7 @@ impl Header { ); Poseidon::hash_array(&[ - short_string!("STARKNET_BLOCK_HASH0"), + BLOCK_HASH_VERSION.into(), self.number.into(), self.state_root, self.sequencer_address.into(), @@ -684,15 +686,15 @@ mod tests { #[test] fn header_concat_counts() { - let expected = felt!("0x6400000000000000c8000000000000012c0000000000000000"); + let expected = felt!("0x6400000000000000c8000000000000012c0000000000000000", crate); let actual = Header::concat_counts(100, 200, 300, L1DataAvailabilityMode::Calldata); assert_eq!(actual, expected); - let expected = felt!("0x1000000000000000200000000000000038000000000000000"); + let expected = felt!("0x1000000000000000200000000000000038000000000000000", crate); let actual = Header::concat_counts(1, 2, 3, L1DataAvailabilityMode::Blob); assert_eq!(actual, expected); - let expected = felt!("0xffffffff000000000000000000000000000000000000000000000000"); + let expected = felt!("0xffffffff000000000000000000000000000000000000000000000000", crate); let actual = Header::concat_counts(0xFFFFFFFF, 0, 0, L1DataAvailabilityMode::Calldata); assert_eq!(actual, expected); } diff --git a/crates/primitives/src/cairo.rs b/crates/primitives/src/cairo.rs index 617621f6c..496abcd6a 100644 --- 
a/crates/primitives/src/cairo.rs +++ b/crates/primitives/src/cairo.rs @@ -1,81 +1,193 @@ use crate::Felt; /// A Cairo short string. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct ShortString(heapless::String<31>); +/// +/// This is a stack-allocated string type that can hold up to 31 ASCII bytes, +/// which is the maximum length for a Cairo short string. +/// +/// It supports const construction via [`ShortString::from_ascii`]. +#[derive(Clone, PartialEq, Eq, Hash, Default, Copy)] +pub struct ShortString { + data: [u8; 31], + len: u8, +} impl ShortString { /// Creates a new empty short string. pub const fn new() -> Self { - Self(heapless::String::new()) + Self { data: [0; 31], len: 0 } + } + + /// Creates a new short string from an ASCII string literal at compile time. + /// + /// # Panics + /// + /// Panics at compile time if the string is longer than 31 bytes or contains + /// non-ASCII characters. + /// + /// # Examples + /// + /// ``` + /// use katana_primitives::cairo::ShortString; + /// + /// const HELLO: ShortString = ShortString::from_ascii("hello"); + /// assert_eq!(HELLO.as_str(), "hello"); + /// ``` + pub const fn from_ascii(s: &str) -> Self { + let bytes = s.as_bytes(); + let len = bytes.len(); + + assert!(len <= 31, "string is too long to be a Cairo short string"); + + let mut data = [0u8; 31]; + let mut i = 0; + while i < len { + let b = bytes[i]; + assert!(b.is_ascii(), "invalid ASCII character in string"); + data[i] = b; + i += 1; + } + + Self { data, len: len as u8 } } - pub fn as_str(&self) -> &str { - self.0.as_str() + pub const fn as_str(&self) -> &str { + // SAFETY: We only store valid ASCII bytes, which are valid UTF-8 + unsafe { core::str::from_utf8_unchecked(self.as_bytes()) } } - pub fn len(&self) -> usize { - self.0.len() + /// Returns the bytes of the short string as a slice. 
+ pub const fn as_bytes(&self) -> &[u8] { + // Use a manual slice since `&self.data[..self.len as usize]` is not const-stable + unsafe { core::slice::from_raw_parts(self.data.as_ptr(), self.len as usize) } } - pub fn is_empty(&self) -> bool { - self.0.is_empty() + pub const fn len(&self) -> usize { + self.len as usize + } + + pub const fn is_empty(&self) -> bool { + self.len == 0 } #[inline] - fn push(&mut self, c: char) -> Result<(), ()> { - self.0.push(c) + fn push(&mut self, c: char) -> Result<(), ShortStringError> { + if !c.is_ascii() { + return Err(ShortStringError::InvalidAscii); + } + + if self.len >= 31 { + return Err(ShortStringError::ExceedsCapacity); + } + + self.data[self.len as usize] = c as u8; + self.len += 1; + + Ok(()) } #[inline] - fn push_str(&mut self, string: &str) -> Result<(), ()> { - self.0.push_str(string) + fn push_str(&mut self, string: &str) -> Result<(), ShortStringError> { + let bytes = string.as_bytes(); + + if bytes.len() + self.len as usize > 31 { + return Err(ShortStringError::ExceedsCapacity); + } + + for &b in bytes { + if !b.is_ascii() { + return Err(ShortStringError::InvalidAscii); + } + + self.data[self.len as usize] = b; + self.len += 1; + } + + Ok(()) } } -impl Default for ShortString { - fn default() -> Self { - Self::new() - } +/// Error returned when constructing or modifying a [`ShortString`] fails. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, thiserror::Error)] +pub enum ShortStringError { + #[error("string is longer than 31 bytes")] + ExceedsCapacity, + + #[error("invalid ASCII character")] + InvalidAscii, + + #[error("unexpected null terminator")] + UnexpectedNullTerminator, } impl core::ops::Deref for ShortString { type Target = str; fn deref(&self) -> &Self::Target { - self.0.as_str() + self.as_str() } } impl AsRef<str> for ShortString { fn as_ref(&self) -> &str { - self.0.as_str() + self.as_str() + } +} + +impl PartialEq<Felt> for ShortString { + fn eq(&self, other: &Felt) -> bool { + Felt::from(self) == *other + } +} + +impl core::fmt::Debug for ShortString { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_tuple("ShortString").field(&self.as_str()).finish() } } -#[derive(Debug, thiserror::Error)] -pub enum ShortStringTryFromStrError { - #[error("String is too long to be a Cairo short string")] - StringTooLong, - #[error("Invalid ASCII character in string")] - InvalidAsciiString, +#[cfg(feature = "serde")] +impl serde::Serialize for ShortString { + fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> { + serializer.serialize_str(self.as_str()) + } +} + +#[cfg(feature = "serde")] +impl<'de> serde::Deserialize<'de> for ShortString { + fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> { + struct ShortStringVisitor; + + impl serde::de::Visitor<'_> for ShortStringVisitor { + type Value = ShortString; + + fn expecting(&self, formatter: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + formatter.write_str("a string up to 31 ASCII characters") + } + + fn visit_str<E: serde::de::Error>(self, v: &str) -> Result<Self::Value, E> { + v.parse().map_err(serde::de::Error::custom) + } + } + + deserializer.deserialize_str(ShortStringVisitor) + } } impl core::str::FromStr for ShortString { - type Err = ShortStringTryFromStrError; + type Err = ShortStringError; fn from_str(s: &str) -> Result<Self, Self::Err> { if !s.is_ascii() { - return Err(ShortStringTryFromStrError::InvalidAsciiString); + return 
Err(ShortStringError::InvalidAscii); } if s.len() > 31 { - return Err(ShortStringTryFromStrError::StringTooLong); + return Err(ShortStringError::ExceedsCapacity); } let mut string = Self::new(); - string.push_str(s).expect("length already checked"); + string.push_str(s).expect("qed; length already checked"); Ok(string) } @@ -83,7 +195,7 @@ impl core::str::FromStr for ShortString { impl From<ShortString> for String { fn from(string: ShortString) -> Self { - string.0.to_string() + string.as_str().to_string() } } @@ -95,22 +207,12 @@ impl From<ShortString> for Felt { impl From<&ShortString> for Felt { fn from(string: &ShortString) -> Self { - Felt::from_bytes_be_slice(string.0.as_bytes()) + Felt::from_bytes_be_slice(string.as_bytes()) } } -#[derive(Debug, thiserror::Error)] -pub enum ShortStringFromFeltError { - #[error("Unexpected null terminator in string")] - UnexpectedNullTerminator, - #[error("String exceeds maximum length for Cairo short strings")] - StringTooLong, - #[error("Non-ASCII character found")] - NonAsciiCharacter, -} - impl TryFrom<Felt> for ShortString { - type Error = ShortStringFromFeltError; + type Error = ShortStringError; fn try_from(value: Felt) -> Result<Self, Self::Error> { if value == Felt::ZERO { @@ -121,7 +223,7 @@ impl TryFrom<Felt> for ShortString { // First byte must be zero because the string must only be 31 bytes. 
if bytes[0] > 0 { - return Err(ShortStringFromFeltError::StringTooLong); + return Err(ShortStringError::ExceedsCapacity); } let mut string = ShortString::new(); @@ -129,12 +231,12 @@ for byte in bytes { if byte == 0u8 { if !string.is_empty() { - return Err(ShortStringFromFeltError::UnexpectedNullTerminator); + return Err(ShortStringError::UnexpectedNullTerminator); } } else if byte.is_ascii() { string.push(byte as char).expect("qed; should fit"); } else { - return Err(ShortStringFromFeltError::NonAsciiCharacter); + return Err(ShortStringError::InvalidAscii); } } @@ -143,7 +245,7 @@ } impl TryFrom<&Felt> for ShortString { - type Error = ShortStringFromFeltError; + type Error = ShortStringError; fn try_from(value: &Felt) -> Result<Self, Self::Error> { Self::try_from(*value) @@ -152,23 +254,22 @@ impl core::fmt::Display for ShortString { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "{}", self.0) + write!(f, "{}", self.as_str()) } } #[cfg(feature = "arbitrary")] impl<'a> arbitrary::Arbitrary<'a> for ShortString { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> { - let mut raw_bytes = heapless::Vec::<u8, 31>::new(); - let length = u.int_in_range(0..=31)?; + let length: u8 = u.int_in_range(0..=31)?; + let mut data = [0u8; 31]; - for _ in 0..length { - let char = u.int_in_range(0..=127)?; // ASCII range - raw_bytes.push(char).expect("shouldn't be full"); + for item in data.iter_mut().take(length as usize) { + // ASCII printable range (32-126) to avoid control characters + *item = u.int_in_range(32..=126)?; } - let str = heapless::String::<31>::from_utf8(raw_bytes).expect("should be valid utf8"); - Ok(Self(str)) + Ok(Self { data, len: length }) } } @@ -179,7 +280,7 @@ mod tests { use assert_matches::assert_matches; use super::ShortString; - use crate::cairo::{ShortStringFromFeltError, ShortStringTryFromStrError}; + use 
crate::cairo::ShortStringError; use crate::Felt; #[test] @@ -190,6 +291,19 @@ mod tests { assert_eq!(s.as_str(), ""); } + #[test] + fn const_from_ascii() { + const HELLO: ShortString = ShortString::from_ascii("hello"); + assert_eq!(HELLO.as_str(), "hello"); + assert_eq!(HELLO.len(), 5); + + const EMPTY: ShortString = ShortString::from_ascii(""); + assert!(EMPTY.is_empty()); + + const MAX_LEN: ShortString = ShortString::from_ascii("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); + assert_eq!(MAX_LEN.len(), 31); + } + #[test] fn try_from_str() { let s = ShortString::from_str("hello").unwrap(); @@ -212,13 +326,22 @@ mod tests { assert_eq!(original, converted); } + #[test] + fn eq_felt() { + let s = ShortString::from_str("hello").unwrap(); + let felt = Felt::from(&s); + + assert!(s == felt); + assert!(s != Felt::from(123u64)); + } + #[test] fn felt_with_non_zero_first_byte() { // Create felt with non-zero first byte let mut bytes = [0u8; 32]; bytes[0] = 1; let felt = Felt::from_bytes_be(&bytes); - assert_matches!(ShortString::try_from(felt), Err(ShortStringFromFeltError::StringTooLong)); + assert_matches!(ShortString::try_from(felt), Err(ShortStringError::ExceedsCapacity)); } #[test] @@ -274,16 +397,70 @@ mod tests { let felt = Felt::from_bytes_be(&bytes); assert!(matches!( ShortString::try_from(felt), - Err(ShortStringFromFeltError::UnexpectedNullTerminator) + Err(ShortStringError::UnexpectedNullTerminator) )); } #[test] fn try_from_non_ascii_str() { - assert_matches!( - ShortString::from_str("café"), - Err(ShortStringTryFromStrError::InvalidAsciiString) - ); + assert_matches!(ShortString::from_str("café"), Err(ShortStringError::InvalidAscii)); + } + + #[test] + fn push_non_ascii_char() { + let mut s = ShortString::new(); + assert_matches!(s.push('é'), Err(ShortStringError::InvalidAscii)); + // String should remain unchanged + assert!(s.is_empty()); + } + + #[test] + fn push_exceeds_capacity() { + // Create a string with 31 characters (max capacity) + let mut s = 
ShortString::from_str(&"a".repeat(31)).unwrap(); + assert_eq!(s.len(), 31); + + // Pushing another character should fail + assert_matches!(s.push('b'), Err(ShortStringError::ExceedsCapacity)); + // String should remain unchanged + assert_eq!(s.len(), 31); + } + + #[test] + fn push_str_non_ascii() { + let mut s = ShortString::new(); + assert_matches!(s.push_str("café"), Err(ShortStringError::InvalidAscii)); + // Note: push_str doesn't rollback on error, so "caf" is added before encountering "é" + assert_eq!(s.as_str(), "caf"); + + // Test with non-ASCII at the start - string should remain empty + let mut s = ShortString::new(); + assert_matches!(s.push_str("éfoo"), Err(ShortStringError::InvalidAscii)); + assert!(s.is_empty()); + } + + #[test] + fn push_str_exceeds_capacity() { + // Create a string with 30 characters + let mut s = ShortString::from_str(&"a".repeat(30)).unwrap(); + assert_eq!(s.len(), 30); + + // Pushing a 2-character string should fail (30 + 2 > 31) + assert_matches!(s.push_str("bb"), Err(ShortStringError::ExceedsCapacity)); + // String should remain unchanged + assert_eq!(s.len(), 30); + } + + #[test] + fn push_str_exactly_fills_capacity() { + // Create a string with 30 characters + let mut s = ShortString::from_str(&"a".repeat(30)).unwrap(); + assert_eq!(s.len(), 30); + + // Pushing a 1-character string should succeed (30 + 1 = 31) + assert!(s.push_str("b").is_ok()); + assert_eq!(s.len(), 31); + assert!(s.as_str().ends_with('b')); } #[cfg(feature = "arbitrary")] @@ -297,7 +474,8 @@ mod tests { for _ in 0..100 { let s = ShortString::arbitrary(&mut u).unwrap(); assert!(s.len() <= 31); - assert!(String::from(s).into_bytes().into_iter().all(|b| b <= 127)); + // Verify all characters are ASCII printable (32-126) + assert!(s.as_bytes().iter().all(|&b| (32..=126).contains(&b))); } } } diff --git a/crates/primitives/src/chain.rs b/crates/primitives/src/chain.rs index 4190440f4..1161571e1 100644 --- a/crates/primitives/src/chain.rs +++ 
b/crates/primitives/src/chain.rs @@ -1,6 +1,6 @@ -use starknet::core::utils::{cairo_short_string_to_felt, CairoShortStringToFeltError}; -use starknet::macros::short_string; +use std::str::FromStr; +use crate::cairo::{ShortString, ShortStringError}; use crate::{Felt, FromStrError}; /// Known chain ids that has been assigned a name. @@ -15,22 +15,22 @@ pub enum NamedChainId { impl NamedChainId { /// `SN_MAIN` in ASCII - pub const SN_MAIN: Felt = short_string!("SN_MAIN"); + pub const SN_MAIN: ShortString = ShortString::from_ascii("SN_MAIN"); /// `SN_GOERLI` in ASCII - pub const SN_GOERLI: Felt = short_string!("SN_GOERLI"); + pub const SN_GOERLI: ShortString = ShortString::from_ascii("SN_GOERLI"); /// `SN_SEPOLIA` in ASCII - pub const SN_SEPOLIA: Felt = short_string!("SN_SEPOLIA"); + pub const SN_SEPOLIA: ShortString = ShortString::from_ascii("SN_SEPOLIA"); /// Returns the id of the chain. It is the ASCII representation of a predefined string /// constants. #[inline] - pub const fn id(&self) -> Felt { + pub fn id(&self) -> Felt { match self { - NamedChainId::Mainnet => Self::SN_MAIN, - NamedChainId::Goerli => Self::SN_GOERLI, - NamedChainId::Sepolia => Self::SN_SEPOLIA, + NamedChainId::Mainnet => Self::SN_MAIN.into(), + NamedChainId::Goerli => Self::SN_GOERLI.into(), + NamedChainId::Sepolia => Self::SN_SEPOLIA.into(), } } @@ -58,12 +58,13 @@ pub struct NamedChainTryFromError(Felt); impl TryFrom<Felt> for NamedChainId { type Error = NamedChainTryFromError; + fn try_from(value: Felt) -> Result<Self, Self::Error> { - if value == Self::SN_MAIN { + if Self::SN_MAIN == value { Ok(Self::Mainnet) - } else if value == Self::SN_GOERLI { + } else if Self::SN_GOERLI == value { Ok(Self::Goerli) - } else if value == Self::SN_SEPOLIA { + } else if Self::SN_SEPOLIA == value { Ok(Self::Sepolia) } else { Err(NamedChainTryFromError(value)) @@ -85,8 +86,9 @@ pub enum ChainId { pub enum ParseChainIdError { #[error(transparent)] FromStr(#[from] FromStrError), - #[error(transparent)] - CairoShortStringToFelt(#[from] 
CairoShortStringToFeltError), + + #[error("invalid short string: {0}")] + InvalidShortString(#[from] ShortStringError), } impl ChainId { @@ -102,13 +104,17 @@ /// If the `str` starts with `0x` it is parsed as a hex string, otherwise it is parsed as a /// Cairo short string. pub fn parse(s: &str) -> Result<Self, ParseChainIdError> { - let id = - if s.starts_with("0x") { Felt::from_hex(s)? } else { cairo_short_string_to_felt(s)? }; + let id = if s.starts_with("0x") { + Felt::from_hex(s)? + } else { + Felt::from(ShortString::from_str(s)?) + }; + Ok(ChainId::from(id)) } /// Returns the chain id value. - pub const fn id(&self) -> Felt { + pub fn id(&self) -> Felt { match self { ChainId::Named(name) => name.id(), ChainId::Id(id) => *id, @@ -156,17 +162,16 @@ mod tests { use std::convert::TryFrom; - use starknet::core::utils::cairo_short_string_to_felt; - use starknet::macros::felt; - use super::ChainId; + use crate::cairo::ShortString; use crate::chain::NamedChainId; + use crate::{felt, Felt}; #[test] fn named_chain_id() { - let mainnet_id = cairo_short_string_to_felt("SN_MAIN").unwrap(); - let goerli_id = cairo_short_string_to_felt("SN_GOERLI").unwrap(); - let sepolia_id = cairo_short_string_to_felt("SN_SEPOLIA").unwrap(); + let mainnet_id = Felt::from(ShortString::from_ascii("SN_MAIN")); + let goerli_id = Felt::from(ShortString::from_ascii("SN_GOERLI")); + let sepolia_id = Felt::from(ShortString::from_ascii("SN_SEPOLIA")); assert_eq!(NamedChainId::Mainnet.id(), mainnet_id); assert_eq!(NamedChainId::Goerli.id(), goerli_id); @@ -175,14 +180,14 @@ assert_eq!(NamedChainId::try_from(mainnet_id).unwrap(), NamedChainId::Mainnet); assert_eq!(NamedChainId::try_from(goerli_id).unwrap(), NamedChainId::Goerli); assert_eq!(NamedChainId::try_from(sepolia_id).unwrap(), NamedChainId::Sepolia); - assert!(NamedChainId::try_from(felt!("0x1337")).is_err()); + assert!(NamedChainId::try_from(felt!("0x1337", crate)).is_err()); } #[test] fn chain_id() { - let mainnet_id 
= cairo_short_string_to_felt("SN_MAIN").unwrap(); - let goerli_id = cairo_short_string_to_felt("SN_GOERLI").unwrap(); - let sepolia_id = cairo_short_string_to_felt("SN_SEPOLIA").unwrap(); + let mainnet_id = Felt::from(ShortString::from_ascii("SN_MAIN")); + let goerli_id = Felt::from(ShortString::from_ascii("SN_GOERLI")); + let sepolia_id = Felt::from(ShortString::from_ascii("SN_SEPOLIA")); assert_eq!(ChainId::MAINNET.id(), NamedChainId::Mainnet.id()); assert_eq!(ChainId::GOERLI.id(), NamedChainId::Goerli.id()); @@ -191,18 +196,18 @@ mod tests { assert_eq!(ChainId::from(mainnet_id), ChainId::MAINNET); assert_eq!(ChainId::from(goerli_id), ChainId::GOERLI); assert_eq!(ChainId::from(sepolia_id), ChainId::SEPOLIA); - assert_eq!(ChainId::from(felt!("0x1337")), ChainId::Id(felt!("0x1337"))); + assert_eq!(ChainId::from(felt!("0x1337", crate)), ChainId::Id(felt!("0x1337", crate))); assert_eq!(ChainId::MAINNET.to_string(), "SN_MAIN"); assert_eq!(ChainId::GOERLI.to_string(), "SN_GOERLI"); assert_eq!(ChainId::SEPOLIA.to_string(), "SN_SEPOLIA"); - assert_eq!(ChainId::Id(felt!("0x1337")).to_string(), "0x1337"); + assert_eq!(ChainId::Id(felt!("0x1337", crate)).to_string(), "0x1337"); } #[test] fn parse_chain_id() { - let mainnet_id = cairo_short_string_to_felt("SN_MAIN").unwrap(); - let custom_id = cairo_short_string_to_felt("KATANA").unwrap(); + let mainnet_id = Felt::from(ShortString::from_ascii("SN_MAIN")); + let custom_id = Felt::from(ShortString::from_ascii("KATANA")); assert_eq!(ChainId::parse("SN_MAIN").unwrap(), ChainId::MAINNET); assert_eq!(ChainId::parse("KATANA").unwrap(), ChainId::Id(custom_id)); diff --git a/crates/primitives/src/class.rs b/crates/primitives/src/class.rs index 0a1888aa6..6fbad5a83 100644 --- a/crates/primitives/src/class.rs +++ b/crates/primitives/src/class.rs @@ -6,10 +6,10 @@ use cairo_lang_starknet_classes::contract_class::{ }; use cairo_lang_utils::bigint::BigUintAsHex; use serde_json_pythonic::to_string_pythonic; -use 
starknet::macros::short_string; use starknet_api::contract_class::SierraVersion; -use starknet_types_core::hash::{Poseidon, StarkHash}; +use starknet_types_core::hash::{self, Poseidon, StarkHash}; +use crate::cairo::ShortString; use crate::utils::{normalize_address, starknet_keccak}; use crate::Felt; @@ -39,9 +39,9 @@ impl std::fmt::Display for MaybeInvalidSierraContractAbi { match self { MaybeInvalidSierraContractAbi::Valid(abi) => { let s = to_string_pythonic(abi).expect("failed to serialize abi"); - write!(f, "{}", s) + write!(f, "{s}") } - MaybeInvalidSierraContractAbi::Invalid(abi) => write!(f, "{}", abi), + MaybeInvalidSierraContractAbi::Invalid(abi) => write!(f, "{abi}"), } } } @@ -309,19 +309,21 @@ pub fn compute_sierra_class_hash( entry_points_by_type: &ContractEntryPoints, sierra_program: &[Felt], ) -> Felt { - let mut hasher = starknet_crypto::PoseidonHasher::new(); - hasher.update(short_string!("CONTRACT_CLASS_V0.1.0")); - - // Hashes entry points - hasher.update(entrypoints_hash(&entry_points_by_type.external)); - hasher.update(entrypoints_hash(&entry_points_by_type.l1_handler)); - hasher.update(entrypoints_hash(&entry_points_by_type.constructor)); - // Hashes ABI - hasher.update(starknet_keccak(abi.as_bytes())); - // Hashes Sierra program - hasher.update(Poseidon::hash_array(sierra_program)); - - normalize_address(hasher.finalize()) + const CONTRACT_CLASS_VERSION: ShortString = ShortString::from_ascii("CONTRACT_CLASS_V0.1.0"); + + let hash = hash::Poseidon::hash_array(&[ + CONTRACT_CLASS_VERSION.into(), + // Hashes entry points + entrypoints_hash(&entry_points_by_type.external), + entrypoints_hash(&entry_points_by_type.l1_handler), + entrypoints_hash(&entry_points_by_type.constructor), + // Hashes ABI + starknet_keccak(abi.as_bytes()), + // Hashes Sierra program + Poseidon::hash_array(sierra_program), + ]); + + normalize_address(hash) } /// Computes the hash of a legacy contract class. 
@@ -342,14 +344,14 @@ pub fn compute_legacy_class_hash( } fn entrypoints_hash(entrypoints: &[ContractEntryPoint]) -> Felt { - let mut hasher = starknet_crypto::PoseidonHasher::new(); - - for entry in entrypoints { - hasher.update(entry.selector.clone().into()); - hasher.update(entry.function_idx.into()); - } - - hasher.finalize() + let initial = Vec::with_capacity(entrypoints.len() * 2); + let felts = entrypoints.iter().fold(initial, |mut acc, entry| { + acc.push(entry.selector.clone().into()); + acc.push(entry.function_idx.into()); + acc + }); + + hash::Poseidon::hash_array(&felts) } #[cfg(test)] diff --git a/crates/primitives/src/contract.rs b/crates/primitives/src/contract.rs index 3325c64bf..9beb3cc08 100644 --- a/crates/primitives/src/contract.rs +++ b/crates/primitives/src/contract.rs @@ -30,6 +30,13 @@ impl ContractAddress { pub fn new(address: Felt) -> Self { ContractAddress(normalize_address(address)) } + + /// Creates a new [`ContractAddress`] from its raw internal representation. + /// + /// See [`Felt::from_raw`] to understand how it works under the hood. + pub const fn from_raw(value: [u64; 4]) -> Self { + ContractAddress(Felt::from_raw(value)) + } } impl core::ops::Deref for ContractAddress { @@ -117,13 +124,6 @@ impl cainome_cairo_serde::CairoSerde for ContractAddress { } } -#[macro_export] -macro_rules! address { - ($value:expr) => { - ContractAddress::new($crate::felt!($value)) - }; -} - /// Represents a generic contract instance information. 
#[derive(Debug, Copy, Clone, Default, PartialEq, Eq)] #[cfg_attr(feature = "arbitrary", derive(::arbitrary::Arbitrary))] diff --git a/crates/primitives/src/da/blob.rs b/crates/primitives/src/da/blob.rs index 76e703350..5f0e3b995 100644 --- a/crates/primitives/src/da/blob.rs +++ b/crates/primitives/src/da/blob.rs @@ -19,7 +19,7 @@ use super::math::{fft, ifft}; pub fn recover(data: Vec<BigUint>) -> Vec<BigUint> { let xs: Vec<BigUint> = (0..BLOB_LEN) .map(|i| { - let bin = format!("{:012b}", i); + let bin = format!("{i:012b}"); let bin_rev = bin.chars().rev().collect::<String>(); GENERATOR.modpow(&BigUint::from_str_radix(&bin_rev, 2).unwrap(), &BLS_MODULUS) }) @@ -31,7 +31,7 @@ pub fn transform(data: Vec<BigUint>) -> Vec<BigUint> { let xs: Vec<BigUint> = (0..BLOB_LEN) .map(|i| { - let bin = format!("{:012b}", i); + let bin = format!("{i:012b}"); let bin_rev = bin.chars().rev().collect::<String>(); GENERATOR.modpow(&BigUint::from_str_radix(&bin_rev, 2).unwrap(), &BLS_MODULUS) }) diff --git a/crates/primitives/src/da/encoding.rs b/crates/primitives/src/da/encoding.rs index 832aebdd1..2ab6c6e27 100644 --- a/crates/primitives/src/da/encoding.rs +++ b/crates/primitives/src/da/encoding.rs @@ -305,10 +305,8 @@ impl ContractUpdate { mod tests { use std::str::FromStr; - use starknet::macros::felt; - use super::*; - use crate::address; + use crate::{address, felt}; macro_rules! 
biguint { ($s:expr) => { @@ -318,7 +316,7 @@ mod tests { #[test] fn rt_metadata_encoding() { - let metadata = felt!("0x10000000000000001").to_biguint(); + let metadata = felt!("0x10000000000000001", crate).to_biguint(); let encoded = Metadata::decode(&metadata).unwrap(); assert!(!encoded.class_information_flag); @@ -354,19 +352,26 @@ mod tests { assert_eq!(state_updates.deployed_contracts.len(), 0); let address = address!( - "2019172390095051323869047481075102003731246132997057518965927979101413600827" + "2019172390095051323869047481075102003731246132997057518965927979101413600827", + crate ); assert_eq!(state_updates.nonce_updates.get(&address), Some(&Felt::ONE)); let storage_updates = state_updates.storage_updates.get(&address).unwrap(); assert_eq!(storage_updates.len(), 1); - assert_eq!(storage_updates.get(&felt!("0x64")), Some(&felt!("0xc8"))); + assert_eq!(storage_updates.get(&felt!("0x64", crate)), Some(&felt!("0xc8", crate))); + + let class_hash = felt!( + "1351148242645005540004162531550805076995747746087542030095186557536641755046", + crate + ); + + let compiled_class_hash = felt!( + "558404273560404778508455254030458021013656352466216690688595011803280448032", + crate + ); - let class_hash = - felt!("1351148242645005540004162531550805076995747746087542030095186557536641755046"); - let compiled_class_hash = - felt!("558404273560404778508455254030458021013656352466216690688595011803280448032"); assert_eq!(state_updates.declared_classes.get(&class_hash), Some(&compiled_class_hash)); let encoded = encode_state_updates(state_updates); diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index a59952d1c..edac94959 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -22,7 +22,7 @@ pub mod utils; pub use alloy_primitives::{B256, U256}; pub use contract::ContractAddress; pub use eth::address as eth_address; -pub use starknet::macros::felt; +pub use katana_primitives_macro::{address, felt}; pub use 
starknet_types_core::felt::{Felt, FromStrError}; pub use starknet_types_core::hash; diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 1babb3b08..bf96957e2 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -107,6 +107,25 @@ pub struct DeployAccountTxReceipt { pub contract_address: ContractAddress, } +/// Receipt for a `Deploy` transaction. +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub struct DeployTxReceipt { + /// Information about the transaction fee. + pub fee: FeeInfo, + /// Events emitted by contracts. + pub events: Vec<Event>, + /// Messages sent to L1. + pub messages_sent: Vec<MessageToL1>, + /// Revert error message if the transaction execution failed. + pub revert_error: Option<String>, + /// The execution resources used by the transaction. + pub execution_resources: ExecutionResources, + /// Contract address of the deployed contract. + pub contract_address: ContractAddress, +} + /// The receipt of a transaction containing the outputs of its execution. #[derive(Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] @@ -116,6 +135,7 @@ pub enum Receipt { Declare(DeclareTxReceipt), L1Handler(L1HandlerTxReceipt), DeployAccount(DeployAccountTxReceipt), + Deploy(DeployTxReceipt), } impl Receipt { @@ -129,6 +149,7 @@ /// Returns the revert reason if the transaction is reverted. pub fn revert_reason(&self) -> Option<&str> { match self { + Receipt::Deploy(rct) => rct.revert_error.as_deref(), Receipt::Invoke(rct) => rct.revert_error.as_deref(), Receipt::Declare(rct) => rct.revert_error.as_deref(), Receipt::L1Handler(rct) => rct.revert_error.as_deref(), @@ -139,6 +160,7 @@ /// Returns the L1 messages sent. 
pub fn messages_sent(&self) -> &[MessageToL1] { match self { + Receipt::Deploy(rct) => &rct.messages_sent, Receipt::Invoke(rct) => &rct.messages_sent, Receipt::Declare(rct) => &rct.messages_sent, Receipt::L1Handler(rct) => &rct.messages_sent, @@ -149,6 +171,7 @@ impl Receipt { /// Returns the events emitted. pub fn events(&self) -> &[Event] { match self { + Receipt::Deploy(rct) => &rct.events, Receipt::Invoke(rct) => &rct.events, Receipt::Declare(rct) => &rct.events, Receipt::L1Handler(rct) => &rct.events, @@ -159,6 +182,7 @@ impl Receipt { /// Returns the execution resources used. pub fn resources_used(&self) -> &ExecutionResources { match self { + Receipt::Deploy(rct) => &rct.execution_resources, Receipt::Invoke(rct) => &rct.execution_resources, Receipt::Declare(rct) => &rct.execution_resources, Receipt::L1Handler(rct) => &rct.execution_resources, @@ -168,6 +192,7 @@ impl Receipt { pub fn fee(&self) -> &FeeInfo { match self { + Receipt::Deploy(rct) => &rct.fee, Receipt::Invoke(rct) => &rct.fee, Receipt::Declare(rct) => &rct.fee, Receipt::L1Handler(rct) => &rct.fee, @@ -178,6 +203,7 @@ impl Receipt { /// Returns the transaction tyoe of the receipt. 
pub fn r#type(&self) -> TxType { match self { + Receipt::Deploy(_) => TxType::Deploy, Receipt::Invoke(_) => TxType::Invoke, Receipt::Declare(_) => TxType::Declare, Receipt::L1Handler(_) => TxType::L1Handler, @@ -224,9 +250,9 @@ impl ReceiptWithTxHash { pub fn compute_hash(&self) -> Felt { let resources_used = self.resources_used(); let gas_uasge = hash::Poseidon::hash_array(&[ - resources_used.gas.l2_gas.into(), - resources_used.gas.l1_gas.into(), - resources_used.gas.l1_data_gas.into(), + resources_used.total_gas_consumed.l2_gas.into(), + resources_used.total_gas_consumed.l1_gas.into(), + resources_used.total_gas_consumed.l1_data_gas.into(), ]); let messages_hash = self.compute_messages_to_l1_hash(); @@ -277,10 +303,10 @@ impl ReceiptWithTxHash { #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct ExecutionResources { /// The total gas used by the transaction execution. - pub gas: GasUsed, + pub total_gas_consumed: GasUsed, /// Computation resources if the transaction is executed on the CairoVM. 
- pub computation_resources: VmResources, - pub da_resources: DataAvailabilityResources, + pub vm_resources: VmResources, + pub data_availability: DataAvailabilityResources, } #[derive(Debug, Default, Clone, PartialEq, Eq)] @@ -325,6 +351,10 @@ impl<'a> arbitrary::Arbitrary<'a> for ExecutionResources { let gas = u.arbitrary::<GasUsed>()?; let da_resources = u.arbitrary::<DataAvailabilityResources>()?; - Ok(Self { da_resources, computation_resources, gas }) + Ok(Self { + data_availability: da_resources, + vm_resources: computation_resources, + total_gas_consumed: gas, + }) } } diff --git a/crates/primitives/src/state.rs b/crates/primitives/src/state.rs index 3a149d6b3..0ee53d439 100644 --- a/crates/primitives/src/state.rs +++ b/crates/primitives/src/state.rs @@ -1,9 +1,9 @@ use std::collections::{BTreeMap, BTreeSet}; use std::iter; -use starknet::macros::short_string; use starknet_types_core::hash::{self, StarkHash}; +use crate::cairo::ShortString; use crate::class::{ClassHash, CompiledClassHash, ContractClass}; use crate::contract::{ContractAddress, Nonce, StorageKey, StorageValue}; use crate::Felt; @@ -28,6 +28,8 @@ pub struct StateUpdates { /// A mapping of replaced contract addresses to their new class hashes ie using `replace_class` /// syscall. pub replaced_classes: BTreeMap<ContractAddress, ClassHash>, + /// A list of classes whose compiled class hashes have been migrated. 
+ pub migrated_compiled_classes: BTreeMap, } impl StateUpdates { @@ -40,6 +42,7 @@ impl StateUpdates { len += self.declared_classes.len(); len += self.deprecated_declared_classes.len(); len += self.nonce_updates.len(); + len += self.migrated_compiled_classes.len(); for updates in self.storage_updates.values() { len += updates.len(); @@ -122,8 +125,8 @@ pub fn compute_state_diff_hash(states: StateUpdates) -> Felt { let nonces_len = Felt::from(nonce_updates.len()); let nonce_updates = nonce_updates.into_iter().flat_map(|nonce| vec![nonce.0.into(), nonce.1]); - let magic = short_string!("STARKNET_STATE_DIFF0"); - let elements: Vec = iter::once(magic) + let magic = ShortString::from_ascii("STARKNET_STATE_DIFF0"); + let elements: Vec = iter::once(Felt::from(magic)) .chain(iter::once(updated_contracts_len)) .chain(updated_contracts) .chain(iter::once(declared_classes_len)) diff --git a/crates/primitives/src/utils/transaction.rs b/crates/primitives/src/utils/transaction.rs index d3de5dc51..de0d94e6d 100644 --- a/crates/primitives/src/utils/transaction.rs +++ b/crates/primitives/src/utils/transaction.rs @@ -1,7 +1,6 @@ use alloy_primitives::{Keccak256, B256}; -use starknet::core::crypto::compute_hash_on_elements; use starknet::core::types::MsgToL1; -use starknet_crypto::poseidon_hash_many; +use starknet_types_core::hash::{Pedersen, Poseidon, StarkHash}; use crate::da::DataAvailabilityMode; use crate::eth::Address as EthAddress; @@ -58,12 +57,12 @@ pub fn compute_deploy_account_v1_tx_hash( ) -> Felt { let calldata_to_hash = [&[class_hash, contract_address_salt], constructor_calldata].concat(); - compute_hash_on_elements(&[ + Pedersen::hash_array(&[ PREFIX_DEPLOY_ACCOUNT, if is_query { QUERY_VERSION_OFFSET + Felt::ONE } else { Felt::ONE }, // version sender_address, Felt::ZERO, // entry_point_selector - compute_hash_on_elements(&calldata_to_hash), + Pedersen::hash_array(&calldata_to_hash), max_fee.into(), chain_id, nonce, @@ -88,16 +87,16 @@ pub fn 
compute_deploy_account_v3_tx_hash( fee_da_mode: &DataAvailabilityMode, is_query: bool, ) -> Felt { - poseidon_hash_many(&[ + Poseidon::hash_array(&[ PREFIX_DEPLOY_ACCOUNT, if is_query { QUERY_VERSION_OFFSET + Felt::THREE } else { Felt::THREE }, // version contract_address, hash_fee_fields(tip, l1_gas_bounds, l2_gas_bounds, l1_data_gas_bounds), - poseidon_hash_many(paymaster_data), + Poseidon::hash_array(paymaster_data), chain_id, nonce, encode_da_mode(nonce_da_mode, fee_da_mode), - poseidon_hash_many(constructor_calldata), + Poseidon::hash_array(constructor_calldata), class_hash, contract_address_salt, ]) @@ -113,12 +112,12 @@ pub fn compute_declare_v0_tx_hash( chain_id: Felt, is_query: bool, ) -> Felt { - compute_hash_on_elements(&[ + Pedersen::hash_array(&[ PREFIX_DECLARE, if is_query { QUERY_VERSION_OFFSET + Felt::ZERO } else { Felt::ZERO }, // version sender_address, Felt::ZERO, // entry_point_selector - compute_hash_on_elements(&[]), + Pedersen::hash_array(&[]), max_fee.into(), chain_id, class_hash, @@ -134,12 +133,12 @@ pub fn compute_declare_v1_tx_hash( nonce: Felt, is_query: bool, ) -> Felt { - compute_hash_on_elements(&[ + Pedersen::hash_array(&[ PREFIX_DECLARE, if is_query { QUERY_VERSION_OFFSET + Felt::ONE } else { Felt::ONE }, // version sender_address, Felt::ZERO, // entry_point_selector - compute_hash_on_elements(&[class_hash]), + Pedersen::hash_array(&[class_hash]), max_fee.into(), chain_id, nonce, @@ -156,12 +155,12 @@ pub fn compute_declare_v2_tx_hash( compiled_class_hash: Felt, is_query: bool, ) -> Felt { - compute_hash_on_elements(&[ + Pedersen::hash_array(&[ PREFIX_DECLARE, if is_query { QUERY_VERSION_OFFSET + Felt::TWO } else { Felt::TWO }, // version sender_address, Felt::ZERO, // entry_point_selector - compute_hash_on_elements(&[class_hash]), + Pedersen::hash_array(&[class_hash]), max_fee.into(), chain_id, nonce, @@ -187,16 +186,16 @@ pub fn compute_declare_v3_tx_hash( account_deployment_data: &[Felt], is_query: bool, ) -> Felt { - 
poseidon_hash_many(&[ + Poseidon::hash_array(&[ PREFIX_DECLARE, if is_query { QUERY_VERSION_OFFSET + Felt::THREE } else { Felt::THREE }, // version sender_address, hash_fee_fields(tip, l1_gas_bounds, l2_gas_bounds, l1_data_gas_bounds), - poseidon_hash_many(paymaster_data), + Poseidon::hash_array(paymaster_data), chain_id, nonce, encode_da_mode(nonce_da_mode, fee_da_mode), - poseidon_hash_many(account_deployment_data), + Poseidon::hash_array(account_deployment_data), class_hash, compiled_class_hash, ]) @@ -211,12 +210,12 @@ pub fn compute_invoke_v1_tx_hash( nonce: Felt, is_query: bool, ) -> Felt { - compute_hash_on_elements(&[ + Pedersen::hash_array(&[ PREFIX_INVOKE, if is_query { QUERY_VERSION_OFFSET + Felt::ONE } else { Felt::ONE }, // version sender_address, Felt::ZERO, // entry_point_selector - compute_hash_on_elements(calldata), + Pedersen::hash_array(calldata), max_fee.into(), chain_id, nonce, @@ -240,17 +239,17 @@ pub fn compute_invoke_v3_tx_hash( account_deployment_data: &[Felt], is_query: bool, ) -> Felt { - poseidon_hash_many(&[ + Poseidon::hash_array(&[ PREFIX_INVOKE, if is_query { QUERY_VERSION_OFFSET + Felt::THREE } else { Felt::THREE }, // version sender_address, hash_fee_fields(tip, l1_gas_bounds, l2_gas_bounds, l1_data_gas_bounds), - poseidon_hash_many(paymaster_data), + Poseidon::hash_array(paymaster_data), chain_id, nonce, encode_da_mode(nonce_da_mode, fee_da_mode), - poseidon_hash_many(account_deployment_data), - poseidon_hash_many(calldata), + Poseidon::hash_array(account_deployment_data), + Poseidon::hash_array(calldata), ]) } @@ -274,12 +273,12 @@ pub fn compute_l1_handler_tx_hash( chain_id: Felt, nonce: Felt, ) -> Felt { - compute_hash_on_elements(&[ + Pedersen::hash_array(&[ PREFIX_L1_HANDLER, version, contract_address.into(), entry_point_selector, - compute_hash_on_elements(calldata), + Pedersen::hash_array(calldata), Felt::ZERO, // No fee on L2 for L1 handler tx chain_id, nonce, @@ -392,14 +391,14 @@ fn hash_fee_fields( l1_data_gas_bounds: 
Option<&ResourceBounds>, ) -> Felt { if let Some(data_gas_bounds) = l1_data_gas_bounds { - poseidon_hash_many(&[ + Poseidon::hash_array(&[ tip.into(), encode_gas_bound(b"L1_GAS", l1_gas_bounds), encode_gas_bound(b"L2_GAS", l2_gas_bounds), encode_gas_bound(b"L1_DATA", data_gas_bounds), ]) } else { - poseidon_hash_many(&[ + Poseidon::hash_array(&[ tip.into(), encode_gas_bound(b"L1_GAS", l1_gas_bounds), encode_gas_bound(b"L2_GAS", l2_gas_bounds), @@ -419,10 +418,11 @@ fn encode_da_mode( #[cfg(test)] mod tests { use num_traits::ToPrimitive; - use starknet::macros::{felt, short_string}; + use starknet::macros::felt; use super::*; use crate::address; + use crate::cairo::ShortString; use crate::chain::ChainId; #[test] @@ -433,10 +433,10 @@ mod tests { #[test] fn test_prefix_constants() { - assert_eq!(PREFIX_INVOKE, short_string!("invoke")); - assert_eq!(PREFIX_DECLARE, short_string!("declare")); - assert_eq!(PREFIX_DEPLOY_ACCOUNT, short_string!("deploy_account")); - assert_eq!(PREFIX_L1_HANDLER, short_string!("l1_handler")); + assert_eq!(PREFIX_INVOKE, ShortString::from_ascii("invoke").into()); + assert_eq!(PREFIX_DECLARE, ShortString::from_ascii("declare").into()); + assert_eq!(PREFIX_DEPLOY_ACCOUNT, ShortString::from_ascii("deploy_account").into()); + assert_eq!(PREFIX_L1_HANDLER, ShortString::from_ascii("l1_handler").into()); } #[test] @@ -817,7 +817,7 @@ mod tests { let version = felt!("0x0"); let contract_address = - address!("0x73314940630fd6dcda0d772d4c972c4e0a9946bef9dabf4ef84eda8ef542b82"); + address!("0x73314940630fd6dcda0d772d4c972c4e0a9946bef9dabf4ef84eda8ef542b82", crate); let entry_point_selector = felt!("0x1b64b1b3b690b43b9b514fb81377518f4039cd3e4f4914d8a6bdf01d679fb19"); let calldata = vec![ @@ -859,7 +859,7 @@ mod tests { EthAddress::from_slice(&hex!("f6080d9fbeebcd44d89affbfd42f098cbff92816")); let to_address = - address!("0x5cd48fccbfd8aa2773fe22c217e808319ffcc1c5a6a463f7d8fa2da48218196"); + 
address!("0x5cd48fccbfd8aa2773fe22c217e808319ffcc1c5a6a463f7d8fa2da48218196", crate); let entry_point_selector = felt!("0x1b64b1b3b690b43b9b514fb81377518f4039cd3e4f4914d8a6bdf01d679fb19"); diff --git a/crates/primitives/src/version.rs b/crates/primitives/src/version.rs index d8dd26b36..7930e5389 100644 --- a/crates/primitives/src/version.rs +++ b/crates/primitives/src/version.rs @@ -148,6 +148,7 @@ impl TryFrom for starknet_api::block::StarknetVersion { [0, 13, 4, 0] => Ok(Self::V0_13_4), [0, 13, 5, 0] => Ok(Self::V0_13_5), [0, 14, 0, 0] => Ok(Self::V0_14_0), + [0, 14, 1, 0] => Ok(Self::V0_14_1), _ => Err(InvalidVersionError(version)), } } diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml index a93d26e90..458b919e3 100644 --- a/crates/rpc/rpc-api/Cargo.toml +++ b/crates/rpc/rpc-api/Cargo.toml @@ -22,5 +22,6 @@ thiserror.workspace = true rstest.workspace = true [features] -cartridge = [ ] -client = [ "jsonrpsee/client" ] +cartridge = [] +client = ["jsonrpsee/client"] +tee = [] diff --git a/crates/rpc/rpc-api/src/dev.rs b/crates/rpc/rpc-api/src/dev.rs index 7e7dc1707..1747c82a8 100644 --- a/crates/rpc/rpc-api/src/dev.rs +++ b/crates/rpc/rpc-api/src/dev.rs @@ -1,6 +1,7 @@ use jsonrpsee::core::RpcResult; use jsonrpsee::proc_macros::rpc; -use katana_primitives::Felt; +use katana_primitives::contract::{StorageKey, StorageValue}; +use katana_primitives::ContractAddress; use katana_rpc_types::account::Account; #[cfg_attr(not(feature = "client"), rpc(server, namespace = "dev"))] @@ -19,8 +20,12 @@ pub trait DevApi { async fn increase_next_block_timestamp(&self, timestamp: u64) -> RpcResult<()>; #[method(name = "setStorageAt")] - async fn set_storage_at(&self, contract_address: Felt, key: Felt, value: Felt) - -> RpcResult<()>; + async fn set_storage_at( + &self, + contract_address: ContractAddress, + key: StorageKey, + value: StorageValue, + ) -> RpcResult<()>; #[method(name = "predeployedAccounts")] async fn predeployed_accounts(&self) -> 
RpcResult>; diff --git a/crates/rpc/rpc-api/src/error/dev.rs b/crates/rpc/rpc-api/src/error/dev.rs index e91feb28d..02c993898 100644 --- a/crates/rpc/rpc-api/src/error/dev.rs +++ b/crates/rpc/rpc-api/src/error/dev.rs @@ -1,14 +1,36 @@ use jsonrpsee::types::ErrorObjectOwned; +use serde::{Deserialize, Serialize}; -#[derive(thiserror::Error, Clone, Copy, Debug)] +#[derive(thiserror::Error, Clone, Debug)] #[allow(clippy::enum_variant_names)] pub enum DevApiError { #[error("Wait for pending transactions.")] PendingTransactions, + #[error("An unexpected error occurred: {}", .0.reason)] + UnexpectedError(UnexpectedErrorData), +} + +impl DevApiError { + pub fn unexpected_error(reason: T) -> Self { + DevApiError::UnexpectedError(UnexpectedErrorData { reason: reason.to_string() }) + } +} + +/// Data for the [`DevApiError::UnexpectedError`] error. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct UnexpectedErrorData { + pub reason: String, } impl From for ErrorObjectOwned { fn from(err: DevApiError) -> Self { - ErrorObjectOwned::owned(err as i32, err.to_string(), None::<()>) + match &err { + DevApiError::PendingTransactions => { + ErrorObjectOwned::owned(1, err.to_string(), None::<()>) + } + DevApiError::UnexpectedError(data) => { + ErrorObjectOwned::owned(2, err.to_string(), Some(data)) + } + } } } diff --git a/crates/rpc/rpc-api/src/error/mod.rs b/crates/rpc/rpc-api/src/error/mod.rs index 90b37a310..2979fa8e6 100644 --- a/crates/rpc/rpc-api/src/error/mod.rs +++ b/crates/rpc/rpc-api/src/error/mod.rs @@ -1,3 +1,6 @@ pub mod dev; pub mod katana; pub mod starknet; + +#[cfg(feature = "tee")] +pub mod tee; diff --git a/crates/rpc/rpc-api/src/error/tee.rs b/crates/rpc/rpc-api/src/error/tee.rs new file mode 100644 index 000000000..854fa93e6 --- /dev/null +++ b/crates/rpc/rpc-api/src/error/tee.rs @@ -0,0 +1,29 @@ +use jsonrpsee::types::ErrorObjectOwned; + +/// Error codes for TEE API (starting at 100 to avoid conflicts). 
+const TEE_NOT_AVAILABLE: i32 = 100; +const TEE_QUOTE_GENERATION_FAILED: i32 = 101; +const TEE_PROVIDER_ERROR: i32 = 102; + +#[derive(thiserror::Error, Clone, Debug)] +pub enum TeeApiError { + #[error("TEE not available: {0}")] + NotAvailable(String), + + #[error("Quote generation failed: {0}")] + QuoteGenerationFailed(String), + + #[error("Provider error: {0}")] + ProviderError(String), +} + +impl From for ErrorObjectOwned { + fn from(err: TeeApiError) -> Self { + let code = match &err { + TeeApiError::NotAvailable(_) => TEE_NOT_AVAILABLE, + TeeApiError::QuoteGenerationFailed(_) => TEE_QUOTE_GENERATION_FAILED, + TeeApiError::ProviderError(_) => TEE_PROVIDER_ERROR, + }; + ErrorObjectOwned::owned(code, err.to_string(), None::<()>) + } +} diff --git a/crates/rpc/rpc-api/src/lib.rs b/crates/rpc/rpc-api/src/lib.rs index 3aca9b454..d4c76f140 100644 --- a/crates/rpc/rpc-api/src/lib.rs +++ b/crates/rpc/rpc-api/src/lib.rs @@ -7,3 +7,6 @@ pub mod starknet_ext; #[cfg(feature = "cartridge")] pub mod cartridge; + +#[cfg(feature = "tee")] +pub mod tee; diff --git a/crates/rpc/rpc-api/src/tee.rs b/crates/rpc/rpc-api/src/tee.rs new file mode 100644 index 000000000..55e7f4036 --- /dev/null +++ b/crates/rpc/rpc-api/src/tee.rs @@ -0,0 +1,45 @@ +use jsonrpsee::core::RpcResult; +use jsonrpsee::proc_macros::rpc; +use katana_primitives::block::{BlockHash, BlockNumber}; +use katana_primitives::Felt; +use serde::{Deserialize, Serialize}; + +/// Response type for TEE quote generation. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TeeQuoteResponse { + /// The raw attestation quote bytes (hex-encoded). + pub quote: String, + + /// The state root at the attested block. + pub state_root: Felt, + + /// The hash of the attested block. + pub block_hash: BlockHash, + + /// The number of the attested block. + pub block_number: BlockNumber, +} + +/// TEE API for generating hardware attestation quotes. 
+/// +/// This API allows clients to request attestation quotes that +/// cryptographically bind the current blockchain state to a +/// hardware-backed measurement. +#[cfg_attr(not(feature = "client"), rpc(server, namespace = "tee"))] +#[cfg_attr(feature = "client", rpc(client, server, namespace = "tee"))] +pub trait TeeApi { + /// Generate a TEE attestation quote for the current blockchain state. + /// + /// The quote includes a commitment to the latest block's state root + /// and block hash, allowing verifiers to cryptographically verify + /// that the state was attested from within a trusted execution environment. + /// + /// # Returns + /// - `TeeQuoteResponse` containing the quote and the attested state information. + /// + /// # Errors + /// - Returns an error if TEE quote generation fails or TEE is not available. + #[method(name = "generateQuote")] + async fn generate_quote(&self) -> RpcResult; +} diff --git a/crates/rpc/rpc-server/Cargo.toml b/crates/rpc/rpc-server/Cargo.toml index 3fcbce106..1d4c7ce03 100644 --- a/crates/rpc/rpc-server/Cargo.toml +++ b/crates/rpc/rpc-server/Cargo.toml @@ -39,9 +39,12 @@ tracing.workspace = true cainome = { workspace = true, optional = true } cartridge = { workspace = true, optional = true } -starknet-crypto = { workspace = true, optional = true } url = { workspace = true, optional = true } +katana-tee = { workspace = true, optional = true } +hex = { workspace = true, optional = true } +starknet-types-core = { workspace = true, optional = true } + [dev-dependencies] katana-chain-spec.workspace = true katana-contracts.workspace = true @@ -85,8 +88,13 @@ cartridge = [ "dep:cartridge", "dep:katana-genesis", "dep:starknet", - "dep:starknet-crypto", "dep:url", "katana-rpc-api/cartridge", ] -explorer = [ "dep:katana-explorer" ] +explorer = ["dep:katana-explorer"] +tee = [ + "dep:katana-tee", + "dep:hex", + "dep:starknet-types-core", + "katana-rpc-api/tee", +] diff --git a/crates/rpc/rpc-server/src/cartridge/mod.rs 
b/crates/rpc/rpc-server/src/cartridge/mod.rs index 59725f146..f64bcea52 100644 --- a/crates/rpc/rpc-server/src/cartridge/mod.rs +++ b/crates/rpc/rpc-server/src/cartridge/mod.rs @@ -45,6 +45,7 @@ use katana_primitives::chain::ChainId; use katana_primitives::contract::Nonce; use katana_primitives::da::DataAvailabilityMode; use katana_primitives::fee::{AllResourceBoundsMapping, ResourceBoundsMapping}; +use katana_primitives::hash::{Pedersen, Poseidon, StarkHash}; use katana_primitives::transaction::{ExecutableTx, ExecutableTxWithHash, InvokeTx, InvokeTxV3}; use katana_primitives::{ContractAddress, Felt}; use katana_provider::api::state::{StateFactoryProvider, StateProvider}; @@ -59,7 +60,6 @@ use katana_rpc_types::FunctionCall; use katana_tasks::{Result as TaskResult, TaskSpawner}; use starknet::macros::selector; use starknet::signers::{LocalWallet, Signer, SigningKey}; -use starknet_crypto::pedersen_hash; use tracing::{debug, info}; use url::Url; @@ -487,12 +487,13 @@ async fn handle_vrf_calls( // compute storage key of the VRF contract storage member VrfProvider_nonces: // Map let address = salt_or_nonce; - let key = pedersen_hash(&selector!("VrfProvider_nonces"), &address); + let key = Pedersen::hash(&selector!("VrfProvider_nonces"), &address); + let nonce = state.storage(vrf_ctx.address(), key).unwrap_or_default().unwrap_or_default(); - starknet_crypto::poseidon_hash_many(vec![&nonce, &caller, &chain_id.id()]) + Poseidon::hash_array(&[nonce, caller, chain_id.id()]) } else if salt_or_nonce_selector == Felt::ONE { let salt = salt_or_nonce; - starknet_crypto::poseidon_hash_many(vec![&salt, &caller, &chain_id.id()]) + Poseidon::hash_array(&[salt, caller, chain_id.id()]) } else { anyhow::bail!( "Invalid salt or nonce for VRF request, expecting 0 or 1, got {}", @@ -535,7 +536,7 @@ pub async fn craft_deploy_cartridge_vrf_tx( ) -> anyhow::Result { let calldata = vec![ CARTRIDGE_VRF_CLASS_HASH, - CARTRIDGE_VRF_SALT, + CARTRIDGE_VRF_SALT.into(), // from zero Felt::ZERO, // 
Calldata len diff --git a/crates/rpc/rpc-server/src/cors.rs b/crates/rpc/rpc-server/src/cors.rs index 9d7c2a5f4..937215ca1 100644 --- a/crates/rpc/rpc-server/src/cors.rs +++ b/crates/rpc/rpc-server/src/cors.rs @@ -94,7 +94,7 @@ impl AllowOrigins { I: IntoIterator, { let origins = origins.into_iter().collect::>(); - if origins.iter().any(|o| o == WILDCARD) { + if origins.contains(&WILDCARD) { Self(cors::AllowOrigin::any()) } else { Self(cors::AllowOrigin::list(origins)) diff --git a/crates/rpc/rpc-server/src/dev.rs b/crates/rpc/rpc-server/src/dev.rs index e027f3fc5..8b59031c9 100644 --- a/crates/rpc/rpc-server/src/dev.rs +++ b/crates/rpc/rpc-server/src/dev.rs @@ -5,8 +5,9 @@ use katana_core::backend::storage::{ProviderRO, ProviderRW}; use katana_core::backend::Backend; use katana_core::service::block_producer::{BlockProducer, BlockProducerMode, PendingExecutor}; use katana_executor::ExecutorFactory; -use katana_primitives::Felt; -use katana_provider::ProviderFactory; +use katana_primitives::contract::{ContractAddress, StorageKey, StorageValue}; +use katana_provider::api::state::StateWriter; +use katana_provider::{MutableProvider, ProviderFactory}; use katana_rpc_api::dev::DevApiServer; use katana_rpc_api::error::dev::DevApiError; use katana_rpc_types::account::Account; @@ -69,6 +70,35 @@ where Ok(()) } + + pub fn set_storage_at( + &self, + contract_address: ContractAddress, + key: StorageKey, + value: StorageValue, + ) -> Result<(), DevApiError> { + // If there's a pending executor (interval mining mode), update the pending state + // so that the change is visible to the pending block. + if let Some(pending_executor) = self.pending_executor() { + // Leaky-leaky abstraction: + // The logic here might seem counterintuitive because we're taking a non-mutable + // reference (ie read lock) but we're allowed to update the pending state. 
+ pending_executor + .read() + .set_storage_at(contract_address, key, value) + .map_err(DevApiError::unexpected_error)?; + } else { + let provider = self.backend.storage.provider_mut(); + + provider + .set_storage(contract_address, key, value) + .map_err(DevApiError::unexpected_error)?; + + provider.commit().map_err(DevApiError::unexpected_error)?; + } + + Ok(()) + } } #[async_trait] @@ -99,15 +129,11 @@ where async fn set_storage_at( &self, - _contract_address: Felt, - _key: Felt, - _value: Felt, + contract_address: ContractAddress, + key: StorageKey, + value: StorageValue, ) -> RpcResult<()> { - // self.sequencer - // .set_storage_at(contract_address.into(), key, value) - // .await - // .map_err(|_| Error::from(KatanaApiError::FailedToUpdateStorage)) - Ok(()) + Ok(self.set_storage_at(contract_address, key, value)?) } async fn predeployed_accounts(&self) -> RpcResult> { diff --git a/crates/rpc/rpc-server/src/lib.rs b/crates/rpc/rpc-server/src/lib.rs index cc40a516a..dfb078eba 100644 --- a/crates/rpc/rpc-server/src/lib.rs +++ b/crates/rpc/rpc-server/src/lib.rs @@ -18,6 +18,9 @@ use tracing::info; #[cfg(feature = "cartridge")] pub mod cartridge; +#[cfg(feature = "tee")] +pub mod tee; + pub mod cors; pub mod dev; pub mod health; diff --git a/crates/rpc/rpc-server/src/starknet/blockifier.rs b/crates/rpc/rpc-server/src/starknet/blockifier.rs index 9cb74e15d..852dc4a1e 100644 --- a/crates/rpc/rpc-server/src/starknet/blockifier.rs +++ b/crates/rpc/rpc-server/src/starknet/blockifier.rs @@ -90,10 +90,10 @@ pub fn estimate_fees( overall_fee: fee.overall_fee, l2_gas_price: fee.l2_gas_price, l1_gas_price: fee.l1_gas_price, - l2_gas_consumed: resources.gas.l2_gas, - l1_gas_consumed: resources.gas.l1_gas, + l2_gas_consumed: resources.total_gas_consumed.l2_gas, + l1_gas_consumed: resources.total_gas_consumed.l1_gas, l1_data_gas_price: fee.l1_data_gas_price, - l1_data_gas_consumed: resources.gas.l1_data_gas, + l1_data_gas_consumed: resources.total_gas_consumed.l1_data_gas, }); } 
} @@ -151,8 +151,8 @@ fn to_api_error(error: ExecutionError) -> StarknetApiError { mod tests { use katana_chain_spec::ChainSpec; + use katana_primitives::address; use katana_primitives::env::BlockEnv; - use katana_primitives::{address, ContractAddress}; use katana_provider::api::state::StateFactoryProvider; use katana_provider::test_utils::test_provider; use katana_provider::ProviderFactory; diff --git a/crates/rpc/rpc-server/src/starknet/mod.rs b/crates/rpc/rpc-server/src/starknet/mod.rs index 9450afceb..77f8f64c3 100644 --- a/crates/rpc/rpc-server/src/starknet/mod.rs +++ b/crates/rpc/rpc-server/src/starknet/mod.rs @@ -369,9 +369,12 @@ where contract_address: ContractAddress, ) -> StarknetApiResult { self.on_io_blocking_task(move |this| { - // Contract address 0x1 is special system contract and does not - // have a class. See https://docs.starknet.io/architecture-and-concepts/network-architecture/starknet-state/#address_0x1. - if contract_address.0 == Felt::ONE { + // Contract address 0x1 and 0x2 are special system contracts and does not + // have a class. + // + // See https://docs.starknet.io/architecture-and-concepts/network-architecture/starknet-state/#address_0x1. + if contract_address == ContractAddress::ONE || contract_address == ContractAddress::TWO + { return Ok(ClassHash::ZERO); } @@ -424,11 +427,10 @@ where // Check that contract exist by checking the class hash of the contract, // unless its address 0x1 or 0x2 which are special system contracts and does not // have a class. - // See: - // https://docs.starknet.io/learn/protocol/state#address-0x1. - // https://docs.starknet.io/learn/protocol/data-availability#v0-13-4 - if contract_address.0 != Felt::ONE - && contract_address.0 != Felt::TWO + // + // See https://docs.starknet.io/architecture-and-concepts/network-architecture/starknet-state/#address_0x1. 
+ if contract_address != ContractAddress::ONE + && contract_address != ContractAddress::TWO && state.class_hash_of_contract(contract_address)?.is_none() { return Err(StarknetApiError::ContractNotFound); @@ -769,7 +771,7 @@ where let state_update = katana_rpc_types_builder::StateUpdateBuilder::new(block_id, provider) .build()? - .map(StateUpdate::Update); + .map(StateUpdate::Confirmed); StarknetApiResult::Ok(state_update) }) diff --git a/crates/rpc/rpc-server/src/starknet/trace.rs b/crates/rpc/rpc-server/src/starknet/trace.rs index 01f9bc448..b2626679c 100644 --- a/crates/rpc/rpc-server/src/starknet/trace.rs +++ b/crates/rpc/rpc-server/src/starknet/trace.rs @@ -5,8 +5,8 @@ use katana_pool::TransactionPool; use katana_primitives::block::{BlockHashOrNumber, BlockIdOrTag, ConfirmedBlockIdOrTag}; use katana_primitives::execution::TypedTransactionExecutionInfo; use katana_primitives::transaction::{ExecutableTx, ExecutableTxWithHash, TxHash}; -use katana_provider::api::block::{BlockNumberProvider, BlockProvider}; -use katana_provider::api::transaction::{TransactionTraceProvider, TransactionsProviderExt}; +use katana_provider::api::block::BlockNumberProvider; +use katana_provider::api::transaction::TransactionTraceProvider; use katana_provider::ProviderFactory; use katana_rpc_api::error::starknet::StarknetApiError; use katana_rpc_api::starknet::StarknetTraceApiServer; diff --git a/crates/rpc/rpc-server/src/tee.rs b/crates/rpc/rpc-server/src/tee.rs new file mode 100644 index 000000000..5a28e06fa --- /dev/null +++ b/crates/rpc/rpc-server/src/tee.rs @@ -0,0 +1,122 @@ +//! TEE RPC API implementation. 
+ +use std::sync::Arc; + +use jsonrpsee::core::{async_trait, RpcResult}; +use katana_primitives::Felt; +use katana_provider::api::block::{BlockHashProvider, BlockNumberProvider, HeaderProvider}; +use katana_provider::ProviderFactory; +use katana_rpc_api::error::tee::TeeApiError; +use katana_rpc_api::tee::{TeeApiServer, TeeQuoteResponse}; +use katana_tee::TeeProvider; +use starknet_types_core::hash::{Poseidon, StarkHash}; +use tracing::{debug, info}; + +/// TEE API implementation. +#[allow(missing_debug_implementations)] +pub struct TeeApi +where + PF: ProviderFactory, +{ + /// Storage provider factory for accessing blockchain state. + provider_factory: PF, + /// TEE provider for generating attestation quotes. + tee_provider: Arc, +} + +impl TeeApi +where + PF: ProviderFactory, +{ + /// Create a new TEE API instance. + pub fn new(provider_factory: PF, tee_provider: Arc) -> Self { + info!( + target: "rpc::tee", + provider_type = tee_provider.provider_type(), + "TEE API initialized" + ); + Self { provider_factory, tee_provider } + } + + /// Compute the 64-byte report data for attestation. + /// + /// The report data is: Poseidon(state_root, block_hash) padded to 64 bytes. 
+ fn compute_report_data(&self, state_root: Felt, block_hash: Felt) -> [u8; 64] { + // Compute Poseidon hash of state_root and block_hash + let commitment = Poseidon::hash(&state_root, &block_hash); + + // Convert Felt to bytes (32 bytes) and pad to 64 bytes + let commitment_bytes = commitment.to_bytes_be(); + + let mut report_data = [0u8; 64]; + // Place the 32-byte hash in the first half + report_data[..32].copy_from_slice(&commitment_bytes); + // Second half remains zeros (or could include additional metadata) + + debug!( + target: "rpc::tee", + %state_root, + %block_hash, + %commitment, + "Computed report data for attestation" + ); + + report_data + } +} + +#[async_trait] +impl TeeApiServer for TeeApi +where + PF: ProviderFactory + Send + Sync + 'static, + ::Provider: + BlockHashProvider + BlockNumberProvider + HeaderProvider + Send + Sync, +{ + async fn generate_quote(&self) -> RpcResult { + debug!(target: "rpc::tee", "Generating TEE attestation quote"); + + // Get the latest blockchain state + let provider = self.provider_factory.provider(); + + // Get latest block information + let block_number = + provider.latest_number().map_err(|e| TeeApiError::ProviderError(e.to_string()))?; + + let block_hash = + provider.latest_hash().map_err(|e| TeeApiError::ProviderError(e.to_string()))?; + + // Get the header to retrieve state_root + let header = provider + .header_by_number(block_number) + .map_err(|e| TeeApiError::ProviderError(e.to_string()))? 
+ .ok_or_else(|| { + TeeApiError::ProviderError(format!("Header not found for block {block_number}")) + })?; + + let state_root = header.state_root; + + // Compute report data: Poseidon(state_root, block_hash) + let report_data = self.compute_report_data(state_root, block_hash); + + // Generate the attestation quote + let quote = self + .tee_provider + .generate_quote(&report_data) + .map_err(|e| TeeApiError::QuoteGenerationFailed(e.to_string()))?; + + info!( + target: "rpc::tee", + block_number, + %block_hash, + quote_size = quote.len(), + "Generated TEE attestation quote" + ); + + Ok(TeeQuoteResponse { + quote: format!("0x{}", hex::encode("e)), + state_root, + block_hash, + block_number, + }) + } +} diff --git a/crates/rpc/rpc-server/src/utils/events.rs b/crates/rpc/rpc-server/src/utils/events.rs index 0fa07c54b..061e344e4 100644 --- a/crates/rpc/rpc-server/src/utils/events.rs +++ b/crates/rpc/rpc-server/src/utils/events.rs @@ -97,7 +97,7 @@ pub fn fetch_pending_events( // process individual transactions in the block. // the iterator will start with txn index == cursor.txn.idx - for (tx_idx, (tx_hash, events)) in pending_block + for (tx_idx, (tx_hash, tx_events)) in pending_block .transactions .iter() .map(|receipt| (receipt.receipt.transaction_hash, receipt.receipt.receipt.events())) @@ -105,7 +105,7 @@ pub fn fetch_pending_events( .skip(cursor.txn.idx) { if tx_idx == cursor.txn.idx { - match events.len().cmp(&cursor.txn.event) { + match tx_events.len().cmp(&cursor.txn.event) { Ordering::Equal | Ordering::Greater => {} Ordering::Less => continue, } @@ -119,7 +119,7 @@ pub fn fetch_pending_events( None, tx_idx, tx_hash, - events, + tx_events, filter, chunk_size as usize, buffer, @@ -222,24 +222,24 @@ pub fn fetch_events_at_blocks( Ok(None) } -/// An iterator that yields events that match the given filters. +/// An iterator that yields events (with their original indices) that match the given filters. 
#[derive(Debug)] -struct FilteredEvents<'a, I: Iterator> { +struct FilteredEvents<'a, I: Iterator> { iter: I, filter: &'a Filter, } -impl<'a, I: Iterator> FilteredEvents<'a, I> { +impl<'a, I: Iterator> FilteredEvents<'a, I> { fn new(iter: I, filter: &'a Filter) -> Self { Self { iter, filter } } } -impl<'a, I: Iterator> Iterator for FilteredEvents<'a, I> { - type Item = &'a Event; +impl<'a, I: Iterator> Iterator for FilteredEvents<'a, I> { + type Item = (usize, &'a Event); fn next(&mut self) -> Option { - for event in self.iter.by_ref() { + for (idx, event) in self.iter.by_ref() { // Skip this event if there is an address filter but doesn't match the address of the // event. if self.filter.address.is_some_and(|addr| addr != event.from_address) { @@ -271,7 +271,7 @@ impl<'a, I: Iterator> Iterator for FilteredEvents<'a, I> { }; if is_matched { - return Some(event); + return Some((idx, event)); } } @@ -303,7 +303,7 @@ fn fetch_tx_events( block_hash: Option, tx_idx: usize, tx_hash: TxHash, - events: &[Event], + tx_events: &[Event], filter: &Filter, chunk_size: usize, buffer: &mut Vec, @@ -312,18 +312,24 @@ fn fetch_tx_events( // number of events we have taken. let total_can_take = chunk_size.saturating_sub(buffer.len()); + // Enumerate events first to preserve original indices, then filter. // skip events according to the continuation token. 
- let filtered = FilteredEvents::new(events.iter(), filter) - .map(|e| EmittedEvent { - block_hash, - block_number, - keys: e.keys.clone(), - data: e.data.clone(), - transaction_hash: tx_hash, - from_address: e.from_address, + let filtered = FilteredEvents::new(tx_events.iter().enumerate(), filter) + .map(|(event_idx, e)| { + ( + event_idx, + EmittedEvent { + block_hash, + block_number, + keys: e.keys.clone(), + data: e.data.clone(), + transaction_hash: tx_hash, + from_address: e.from_address, + transaction_index: Some(tx_idx as u64), + event_index: Some(event_idx as u64), + }, + ) }) - // enumerate so that we can keep track of the event's index in the transaction - .enumerate() .skip(next_event_idx) .take(total_can_take) .collect::>(); @@ -349,7 +355,7 @@ fn fetch_tx_events( }; // if there are still more events that we haven't fetched yet for this tx. - if new_last_event < events.len() { + if new_last_event < tx_events.len() { return Ok(Some(PartialCursor { idx: tx_idx, event: new_last_event })); } } diff --git a/crates/rpc/rpc-server/tests/common/mod.rs b/crates/rpc/rpc-server/tests/common/mod.rs index c1e790018..07bfb00d9 100644 --- a/crates/rpc/rpc-server/tests/common/mod.rs +++ b/crates/rpc/rpc-server/tests/common/mod.rs @@ -8,8 +8,9 @@ use cainome::rs::abigen_legacy; use cairo_lang_starknet_classes::casm_contract_class::CasmContractClass; use cairo_lang_starknet_classes::contract_class::ContractClass; use katana_primitives::class::CompiledClass; +use katana_primitives::Felt; use starknet::core::types::contract::SierraClass; -use starknet::core::types::{Call, Felt, FlattenedSierraClass}; +use starknet::core::types::{Call, FlattenedSierraClass}; use starknet::core::utils::get_selector_from_name; abigen_legacy!(Erc20Contract, "crates/contracts/build/legacy/erc20.json", derives(Clone)); diff --git a/crates/rpc/rpc-server/tests/dev.rs b/crates/rpc/rpc-server/tests/dev.rs index f259888bb..ba7e2c8d9 100644 --- a/crates/rpc/rpc-server/tests/dev.rs +++ 
b/crates/rpc/rpc-server/tests/dev.rs @@ -1,5 +1,8 @@ +use katana_primitives::contract::ContractAddress; +use katana_primitives::Felt; use katana_provider::api::block::{BlockNumberProvider, BlockProvider}; use katana_provider::api::env::BlockEnvProvider; +use katana_provider::api::state::StateFactoryProvider; use katana_provider::ProviderFactory; use katana_rpc_server::api::dev::DevApiClient; use katana_utils::TestNode; @@ -126,26 +129,65 @@ async fn test_dev_api_enabled() { assert!(!accounts.is_empty(), "predeployed accounts should not be empty"); } -// #[tokio::test] -// async fn test_set_storage_at_on_instant_mode() { -// let sequencer = create_test_sequencer().await; -// sequencer.backend().mine_empty_block(); - -// let contract_address = ContractAddress(patricia_key!("0x1337")); -// let key = StorageKey(patricia_key!("0x20")); -// let val = stark_felt!("0xABC"); - -// { -// let mut state = sequencer.backend().state.write().await; -// let read_val = state.get_storage_at(contract_address, key).unwrap(); -// assert_eq!(stark_felt!("0x0"), read_val, "latest storage value should be 0"); -// } - -// sequencer.set_storage_at(contract_address, key, val).await.unwrap(); - -// { -// let mut state = sequencer.backend().state.write().await; -// let read_val = state.get_storage_at(contract_address, key).unwrap(); -// assert_eq!(val, read_val, "latest storage value incorrect after generate"); -// } -// } +/// Test set_storage_at in instant mining mode (no pending block) +#[tokio::test] +async fn test_set_storage_at() { + let sequencer = TestNode::new().await; + let backend = sequencer.backend(); + let client = sequencer.rpc_http_client(); + + let contract_address = ContractAddress(Felt::from(0x1337u64)); + let key = Felt::from(0x20u64); + let value = Felt::from(0xABCu64); + + // Check that storage is initially None/zero + { + let provider = backend.storage.provider(); + let state = provider.latest().unwrap(); + let read_val = state.storage(contract_address, key).unwrap(); + 
assert!(read_val.is_none(), "initial storage value should be None"); + } + + // Set the storage value via RPC + client.set_storage_at(contract_address, key, value).await.unwrap(); + + // Verify the storage value was set correctly + { + let provider = backend.storage.provider(); + let state = provider.latest().unwrap(); + let read_val = state.storage(contract_address, key).unwrap(); + assert_eq!(read_val, Some(value), "storage value should be set correctly"); + } +} + +/// Test set_storage_at in interval mining mode (with pending block) +/// This verifies that the storage update is visible in the pending state and persists after mining. +#[tokio::test] +async fn test_set_storage_at_with_pending_block() { + // Create a node with interval mining (block time of 10 seconds - long enough that we can test + // before the block is mined) + let sequencer = TestNode::new_with_block_time(10000).await; + let backend = sequencer.backend(); + let client = sequencer.rpc_http_client(); + + let contract_address = ContractAddress(Felt::from(0x1337u64)); + let key = Felt::from(0x20u64); + let value = Felt::from(0xABCu64); + + // Set the storage value via RPC - this updates the pending state + client.set_storage_at(contract_address, key, value).await.unwrap(); + + // In interval mode, the storage is updated in the pending executor's state, not the database. + // The database will be updated when the block is mined. 
+ + // Force mine a block to close the pending block and persist the changes + client.generate_block().await.unwrap(); + + // Verify the storage value was persisted to the database after the block was mined + { + let provider = backend.storage.provider(); + let state = provider.latest().unwrap(); + let read_val = state.storage(contract_address, key).unwrap(); + assert_eq!(read_val, Some(value), "storage value should persist after block is mined"); + } +} diff --git a/crates/rpc/rpc-server/tests/estimate_fee_rate_limit.rs b/crates/rpc/rpc-server/tests/estimate_fee_rate_limit.rs index dd09ff11f..a347e3301 100644 --- a/crates/rpc/rpc-server/tests/estimate_fee_rate_limit.rs +++ b/crates/rpc/rpc-server/tests/estimate_fee_rate_limit.rs @@ -4,9 +4,8 @@ use std::time::Instant; use anyhow::Result; use cainome::rs::abigen_legacy; use katana_genesis::constant::DEFAULT_ETH_FEE_TOKEN_ADDRESS; +use katana_primitives::{felt, Felt}; use katana_utils::TestNode; -use starknet::core::types::Felt; -use starknet::macros::felt; use tokio::sync::Mutex; mod common; diff --git a/crates/rpc/rpc-server/tests/forking.rs b/crates/rpc/rpc-server/tests/forking.rs index 264abdedb..112d59654 100644 --- a/crates/rpc/rpc-server/tests/forking.rs +++ b/crates/rpc/rpc-server/tests/forking.rs @@ -20,8 +20,7 @@ use url::Url; mod common; -const SEPOLIA_CHAIN_ID: Felt = NamedChainId::SN_SEPOLIA; -const SEPOLIA_URL: &str = "https://api.cartridge.gg/x/starknet/sepolia"; +const SEPOLIA_URL: &str = "https://api.cartridge.gg/x/starknet/sepolia/rpc/v0_10"; const FORK_BLOCK_NUMBER: BlockNumber = 268_471; const FORK_BLOCK_HASH: BlockHash = felt!("0x208950cfcbba73ecbda1c14e4d58d66a8d60655ea1b9dcf07c16014ae8a93cd"); @@ -105,7 +104,7 @@ async fn can_fork() -> Result<()> { let BlockNumberResponse { block_number } = provider.block_number().await?; let chain = provider.chain_id().await?; - assert_eq!(chain, SEPOLIA_CHAIN_ID); + assert_eq!(NamedChainId::SN_SEPOLIA, chain); assert_eq!(block_number, FORK_BLOCK_NUMBER + 
11); // fork block + genesis + 10 blocks Ok(()) @@ -661,33 +660,32 @@ async fn get_events_with_invalid_block_hash(#[case] hash: BlockHash) { #[cfg(test)] mod tests { + use std::collections::{BTreeMap, BTreeSet}; + use std::sync::Arc; + use katana_chain_spec::dev::DEV_UNALLOCATED; use katana_chain_spec::{dev, ChainSpec}; use katana_core::service::block_producer::IntervalBlockProducer; use katana_db::Db; use katana_node::config::fork::ForkingConfig; use katana_primitives::block::{ - Block, BlockNumber, FinalityStatus, Header, SealedBlockWithStatus, + BlockHash, BlockNumber, FinalityStatus, Header, SealedBlock, SealedBlockWithStatus, }; use katana_primitives::chain::ChainId; use katana_primitives::class::ClassHash; use katana_primitives::state::{StateUpdates, StateUpdatesWithClasses}; - use katana_primitives::ContractAddress; - use katana_primitives::Felt; - use katana_provider::api::block::BlockNumberProvider; - use katana_provider::api::block::BlockWriter; + use katana_primitives::{ContractAddress, Felt}; + use katana_provider::api::block::{BlockNumberProvider, BlockWriter}; use katana_provider::api::trie::TrieWriter; - use katana_provider::MutableProvider; - use katana_provider::{ForkProviderFactory, ProviderFactory}; + use katana_provider::{ForkProviderFactory, MutableProvider, ProviderFactory}; + use katana_utils::node::ForkTestNode; use katana_utils::TestNode; use proptest::arbitrary::any; use proptest::prelude::{Just, ProptestConfig, Strategy}; - use proptest::prop_assert_eq; - use proptest::proptest; + use proptest::{prop_assert_eq, proptest}; use rand::{thread_rng, Rng}; - use std::collections::{BTreeMap, BTreeSet}; - use std::sync::Arc; - use url::Url; + + use crate::Url; #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_commit_new_state_root_mainnet_blockchain_and_forked_provider() { @@ -700,10 +698,8 @@ mod tests { let block_number = provider.latest_number().unwrap(); - // Generate random state updates let state_updates = 
setup_mainnet_updates_randomized(5); - //init first state for mainnet provider_mut.compute_state_root(block_number, &state_updates).unwrap(); provider_mut.commit().unwrap(); @@ -734,8 +730,8 @@ mod tests { // Second iteration with new random updates let state_updates = setup_mainnet_updates_randomized(5); - //IT's important here to compute state root for forked network first, then for mainnet - //otherwise it will be different roots because it's like double computation of same changes + // IT's important here to compute state root for forked network first, then for mainnet + // otherwise it will be different roots because it's like double computation of same changes let fork_state_root = { let forked_provider = fork_factory.provider_mut(); let root = forked_provider.compute_state_root(block_number, &state_updates).unwrap(); @@ -806,69 +802,247 @@ mod tests { _state_updates: StateUpdates, ) -> SealedBlockWithStatus { SealedBlockWithStatus { - block: Block { - header: Header { number: block_number, ..Default::default() }, - body: Vec::new(), - } - .seal(), status: FinalityStatus::AcceptedOnL2, + block: SealedBlock { + hash: BlockHash::from(block_number), + header: Header { number: block_number, ..Default::default() }, + body: Default::default(), + }, } } - /// To run this test you need to comment out global cache part in Node::build() "let global_class_cache = class_cache.build_global()?"; + fn arb_felt() -> impl Strategy { + any::<[u8; 32]>().prop_map(|bytes| Felt::from_bytes_be(&bytes)) + } + + fn arb_class_hash() -> impl Strategy { + arb_felt().prop_map(ClassHash::from) + } + + fn arb_contract_address() -> impl Strategy { + arb_felt().prop_map(ContractAddress::from) + } + + fn arb_storage() -> impl Strategy> { + proptest::collection::btree_map(arb_felt(), arb_felt(), 0..3) + } + + fn arb_state_updates() -> impl Strategy { + proptest::collection::btree_map( + arb_contract_address(), + (arb_class_hash(), arb_storage(), arb_felt()), + 1..6, + ) + 
.prop_flat_map(|contracts| { + let mut deployed_contracts = BTreeMap::new(); + let mut storage_updates = BTreeMap::new(); + let mut nonce_updates = BTreeMap::new(); + let mut declared_classes = BTreeMap::new(); + let replaced_classes = BTreeMap::new(); + let deprecated_declared_classes = BTreeSet::new(); + + for (address, (class_hash, storage, nonce)) in &contracts { + deployed_contracts.insert(*address, *class_hash); + storage_updates.insert(*address, storage.clone()); + nonce_updates.insert(*address, *nonce); + declared_classes.insert(*class_hash, Felt::from(1u8)); + } + + Just(StateUpdates { + deployed_contracts, + storage_updates, + nonce_updates, + declared_classes, + replaced_classes, + deprecated_declared_classes, + ..Default::default() + }) + }) + } + + // Deterministic test - no workaround required + #[test] + fn test_minimal_failing_input_regression() { + let rt = tokio::runtime::Runtime::new().unwrap(); + + rt.block_on(async { + let sequencer = TestNode::new().await; + let backend = sequencer.backend(); + let provider = backend.storage.provider(); + let mut block_number = provider.latest_number().unwrap(); + let mut producer = IntervalBlockProducer::new(backend.clone(), None); + + // state_updates_vec[0] - the initial state from minimal failing input + let initial_state = StateUpdates { + nonce_updates: BTreeMap::from([( + ContractAddress::from(Felt::from_hex_unchecked( + "0x475cedf016783eb3d5d0a8ae58102641303e400ac71dee1107990c4144a0aa4", + )), + Felt::from_hex_unchecked( + "0x1629f837c6a0d07ade7a8925a6843adb39e48dc808c67bae82961f6bef896e1", + ), + )]), + storage_updates: BTreeMap::from([]), + deployed_contracts: BTreeMap::from([]), + declared_classes: BTreeMap::from([]), + deprecated_declared_classes: BTreeSet::new(), + replaced_classes: BTreeMap::new(), + migrated_compiled_classes: BTreeMap::new(), + }; + + let fork_minimal_updates_vec = vec![ + StateUpdates { + nonce_updates: BTreeMap::from([( + ContractAddress::from(Felt::from_hex_unchecked( + 
"0x5e6f1fa63556682aaee138df20080a70a803cc2d6711f271dc910635b9d66d7", + )), + Felt::from_hex_unchecked( + "0x20755f5ad5fcdfe23fc74d6fb617d82a107a994b0653a6952ec3ef1fc0b2de5", + ), + )]), + storage_updates: BTreeMap::from([]), + deployed_contracts: BTreeMap::from([]), + declared_classes: BTreeMap::from([]), + deprecated_declared_classes: BTreeSet::new(), + replaced_classes: BTreeMap::new(), + migrated_compiled_classes: BTreeMap::new(), + }, + StateUpdates { + nonce_updates: BTreeMap::from([( + ContractAddress::from(Felt::from_hex_unchecked( + "0x44a7b4f76c2fe9cb6367d7a7f0c4a5188b3c02c6038706546b516f527470d51", + )), + Felt::from_hex_unchecked( + "0x4c2cb13bd093da7cbead27adef8b2ab02d36f2b8c47eeeee4759709b96847ee", + ), + )]), + storage_updates: BTreeMap::from([]), + deployed_contracts: BTreeMap::from([]), + declared_classes: BTreeMap::from([]), + deprecated_declared_classes: BTreeSet::new(), + replaced_classes: BTreeMap::new(), + migrated_compiled_classes: BTreeMap::new(), + }, + ]; + let num_iters = 2; + + let provider_mut = backend.storage.provider_mut(); + provider_mut.compute_state_root(block_number + 1, &initial_state).unwrap(); + provider_mut.commit().unwrap(); + producer.force_mine(); + let provider = backend.storage.provider(); + block_number = provider.latest_number().unwrap(); + + let db = Db::in_memory().unwrap(); + let starknet_rpc_client = sequencer.starknet_rpc_client(); + let fork_factory = ForkProviderFactory::new(db, block_number, starknet_rpc_client); + + for i in 0..num_iters { + let fork_minimal_updates = &fork_minimal_updates_vec[i]; + + let fork_root = { + let forked_provider = fork_factory.provider_mut(); + let root = forked_provider + .compute_state_root(block_number, fork_minimal_updates) + .unwrap(); + forked_provider.commit().unwrap(); + root + }; + + let provider_mut = backend.storage.provider_mut(); + let mainnet_root = + provider_mut.compute_state_root(block_number, fork_minimal_updates).unwrap(); + provider_mut.commit().unwrap(); + + 
assert_eq!( + fork_root, mainnet_root, + "State roots do not match at iteration {}: fork={:?}, mainnet={:?}", + i, fork_root, mainnet_root + ); + + producer.force_mine(); + // Create fresh provider to see the new block after force_mine + let provider = backend.storage.provider(); + block_number = provider.latest_number().unwrap(); + } + }); + } + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_commit_new_state_root_two_katana_instances() { - let sequencer = TestNode::new().await; - let backend = sequencer.backend(); - let provider = backend.storage.provider(); - let url = format!("http://{}", sequencer.rpc_addr()); + // Setup: Create main instance and fork instance + + let main_instance = TestNode::new().await; + let backend_main_instance = main_instance.backend(); + let url = format!("http://{}", main_instance.rpc_addr()); + + // Initialize state with random updates and mine at least one block before starting the fork + let initial_state_updates = setup_mainnet_updates_randomized(5); + let main_provider_mut = backend_main_instance.storage.provider_mut(); + let initial_block_number = main_provider_mut.latest_number().unwrap_or(0); + main_provider_mut + .compute_state_root(initial_block_number + 1, &initial_state_updates) + .unwrap(); + let initial_block = create_test_block_with_state_updates( + initial_block_number + 1, + initial_state_updates.clone(), + ); + main_provider_mut + .insert_block_with_states_and_receipts( + initial_block, + StateUpdatesWithClasses { + state_updates: initial_state_updates, + ..Default::default() + }, + vec![], + vec![], + ) + .unwrap(); + main_provider_mut.commit().unwrap(); - // initialize state and mine at least one block before starting the fork - let mut producer = IntervalBlockProducer::new(backend.clone(), None); - producer.force_mine(); + let main_provider = backend_main_instance.storage.provider(); + let fork_block_number = main_provider.latest_number().unwrap(); - let fork_from_block = 
provider.latest_number().unwrap(); assert!( - fork_from_block > 0, + fork_block_number > 0, "mainnet provider must produce at least one block before forking" ); + + // --- Fork Instance Setup --- let fork_url = Url::parse(&url).unwrap(); let mut fork_config = katana_utils::node::test_config(); - let mut fork_chain_spec = (*DEV_UNALLOCATED).clone(); + + let mut fork_chain_spec = DEV_UNALLOCATED.clone(); fork_chain_spec.id = ChainId::SEPOLIA; fork_chain_spec.genesis.sequencer_address = dev::ChainSpec::default().genesis.sequencer_address; + fork_config.chain = Arc::new(ChainSpec::Dev(fork_chain_spec)); - let fork_block = katana_primitives::block::BlockHashOrNumber::Num(fork_from_block); + let fork_block = katana_primitives::block::BlockHashOrNumber::Num(fork_block_number); fork_config.forking = Some(ForkingConfig { url: fork_url, block: Some(fork_block) }); - let fork_sequencer = TestNode::new_with_config(fork_config).await; - let fork_backend = fork_sequencer.backend(); - let fork_provider = fork_backend.storage.provider(); - let mut fork_producer = IntervalBlockProducer::new(fork_backend.clone(), None); + let fork_node = ForkTestNode::new_forked_with_config(fork_config).await; + let fork_backend = fork_node.backend(); - let block_number = provider.latest_number().unwrap(); - let fork_block_number = fork_provider.latest_number().unwrap(); + // Iteration 1: Insert block with state updates - let fork_minimal_updates = setup_mainnet_updates_randomized(5); + let state_updates = setup_mainnet_updates_randomized(5); + let main_block_number = main_provider.latest_number().unwrap(); + let fork_provider = fork_backend.storage.provider(); + let fork_block_number = fork_provider.latest_number().unwrap(); - // Insert block with state updates on fork + // Fork Instance: Insert block let fork_provider_mut = fork_backend.storage.provider_mut(); let new_fork_block_number = fork_block_number + 1; - let fork_state_root = fork_provider_mut - .compute_state_root(new_fork_block_number, 
&fork_minimal_updates) - .unwrap(); - - // Create and insert block with the state updates - let fork_block = create_test_block_with_state_updates( - new_fork_block_number, - fork_minimal_updates.clone(), - ); + let fork_state_root = + fork_provider_mut.compute_state_root(new_fork_block_number, &state_updates).unwrap(); + let fork_block = + create_test_block_with_state_updates(new_fork_block_number, state_updates.clone()); fork_provider_mut .insert_block_with_states_and_receipts( fork_block, StateUpdatesWithClasses { - state_updates: fork_minimal_updates.clone(), + state_updates: state_updates.clone(), ..Default::default() }, vec![], @@ -877,26 +1051,25 @@ mod tests { .unwrap(); fork_provider_mut.commit().unwrap(); - // Insert block with same state updates on mainnet - let provider_mut = backend.storage.provider_mut(); - let new_block_number = block_number + 1; + // Main Instance: Insert block with same state updates + let main_provider_mut = backend_main_instance.storage.provider_mut(); + let new_main_block_number = main_block_number + 1; let mainnet_state_root = - provider_mut.compute_state_root(new_block_number, &fork_minimal_updates).unwrap(); - + main_provider_mut.compute_state_root(new_main_block_number, &state_updates).unwrap(); let mainnet_block = - create_test_block_with_state_updates(new_block_number, fork_minimal_updates.clone()); - provider_mut + create_test_block_with_state_updates(new_main_block_number, state_updates.clone()); + main_provider_mut .insert_block_with_states_and_receipts( mainnet_block, StateUpdatesWithClasses { - state_updates: fork_minimal_updates.clone(), + state_updates: state_updates.clone(), ..Default::default() }, vec![], vec![], ) .unwrap(); - provider_mut.commit().unwrap(); + main_provider_mut.commit().unwrap(); assert_eq!( fork_state_root, mainnet_state_root, @@ -904,22 +1077,25 @@ mod tests { fork_state_root, mainnet_state_root ); - let block_number = provider.latest_number().unwrap(); - let fork_block_number = 
fork_provider.latest_number().unwrap(); + // Iteration 2: Insert another block with new state updates + let state_updates = setup_mainnet_updates_randomized(5); + let main_block_number = main_provider.latest_number().unwrap(); + let fork_block_number = fork_provider.latest_number().unwrap(); + + // Fork Instance: Insert block let fork_provider_mut = fork_backend.storage.provider_mut(); let new_fork_block_number = fork_block_number + 1; let fork_state_root = fork_provider_mut.compute_state_root(new_fork_block_number, &state_updates).unwrap(); fork_provider_mut.commit().unwrap(); - let provider_mut = backend.storage.provider_mut(); - let new_block_number = block_number + 1; - let mainnet_state_root = - provider_mut.compute_state_root(new_block_number, &state_updates).unwrap(); - provider_mut.commit().unwrap(); - producer.force_mine(); - fork_producer.force_mine(); + // Main Instance: Insert block + let main_provider_mut = backend_main_instance.storage.provider_mut(); + let new_main_block_number = main_block_number + 1; + let mainnet_state_root = + main_provider_mut.compute_state_root(new_main_block_number, &state_updates).unwrap(); + main_provider_mut.commit().unwrap(); assert_eq!( fork_state_root, mainnet_state_root, @@ -927,84 +1103,199 @@ mod tests { fork_state_root, mainnet_state_root ); - let block_number = provider.latest_number().unwrap(); + // Iteration 3: Insert block after force_mine + + // Create fresh providers to see new blocks after force_mine + let main_provider = backend_main_instance.storage.provider(); + let fork_provider = fork_backend.storage.provider(); + let main_block_number = main_provider.latest_number().unwrap(); let fork_block_number = fork_provider.latest_number().unwrap(); let state_updates = setup_mainnet_updates_randomized(5); + + // Fork Instance: Insert block let fork_provider_mut = fork_backend.storage.provider_mut(); let new_fork_block_number = fork_block_number + 1; let fork_state_root = 
fork_provider_mut.compute_state_root(new_fork_block_number, &state_updates).unwrap(); fork_provider_mut.commit().unwrap(); - let provider_mut = backend.storage.provider_mut(); - let new_block_number = block_number + 1; + + // Main Instance: Insert block + let main_provider_mut = backend_main_instance.storage.provider_mut(); + let new_main_block_number = main_block_number + 1; let mainnet_state_root = - provider_mut.compute_state_root(new_block_number, &state_updates).unwrap(); - provider_mut.commit().unwrap(); + main_provider_mut.compute_state_root(new_main_block_number, &state_updates).unwrap(); + main_provider_mut.commit().unwrap(); assert_eq!( fork_state_root, mainnet_state_root, "State roots do not match on third run: fork={:?}, mainnet={:?}", fork_state_root, mainnet_state_root ); - - producer.force_mine(); - fork_producer.force_mine(); } - fn arb_felt() -> impl Strategy { - any::<[u8; 32]>().prop_map(|bytes| Felt::from_bytes_be(&bytes)) - } + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_e2e_state_roots_with_real_transactions() { + use katana_primitives::block::BlockHashOrNumber; + use katana_provider::api::block::{BlockNumberProvider, HeaderProvider}; + + use crate::{abigen_legacy, DEFAULT_STRK_FEE_TOKEN_ADDRESS}; + + // Setup: Create main instance and fork instance + + // Main Instance Setup + let main_instance = TestNode::new().await; + let backend_main_instance = main_instance.backend(); + let url = format!("http://{}", main_instance.rpc_addr()); + + // Initialize state with real transactions - mine a block with ERC20 transfers + abigen_legacy!(Erc20Contract, "crates/contracts/build/legacy/erc20.json", derives(Clone)); + let main_provider = main_instance.starknet_rpc_client(); + let main_account = main_instance.account(); + let main_contract = + Erc20Contract::new(DEFAULT_STRK_FEE_TOKEN_ADDRESS.into(), &main_account); + + // Setup: Create initial state with different transactions to different recipients + let setup_recipients 
= vec![ + Felt::from_hex("0x111").unwrap(), + Felt::from_hex("0x222").unwrap(), + Felt::from_hex("0x333").unwrap(), + ]; + let setup_amounts = vec![ + Uint256 { low: Felt::from_hex("0x1000").unwrap(), high: Felt::ZERO }, + Uint256 { low: Felt::from_hex("0x2000").unwrap(), high: Felt::ZERO }, + Uint256 { low: Felt::from_hex("0x3000").unwrap(), high: Felt::ZERO }, + ]; + + for (recipient, amount) in setup_recipients.iter().zip(setup_amounts.iter()) { + let res = main_contract.transfer(recipient, amount).send().await.unwrap(); + katana_utils::TxWaiter::new(res.transaction_hash, &main_provider).await.unwrap(); + } - fn arb_class_hash() -> impl Strategy { - arb_felt().prop_map(ClassHash::from) - } + let main_provider_db = backend_main_instance.storage.provider(); + let fork_block_number = main_provider_db.latest_number().unwrap(); - fn arb_contract_address() -> impl Strategy { - arb_felt().prop_map(ContractAddress::from) - } + assert!(fork_block_number == 3, "mainnet should have 3 blocks at this point"); - fn arb_storage() -> impl Strategy> { - proptest::collection::btree_map(arb_felt(), arb_felt(), 0..3) - } + // Fork Instance Setup + let fork_url: Url = Url::parse(&url).unwrap(); + let mut fork_config = katana_utils::node::test_config(); + let fork_block = katana_primitives::block::BlockHashOrNumber::Num(fork_block_number); + fork_config.forking = Some(ForkingConfig { url: fork_url, block: Some(fork_block) }); - fn arb_state_updates() -> impl Strategy { - proptest::collection::btree_map( - arb_contract_address(), - (arb_class_hash(), arb_storage(), arb_felt()), - 1..6, - ) - .prop_flat_map(|contracts| { - // Rozbij na odpowiednie pola - let mut deployed_contracts = BTreeMap::new(); - let mut storage_updates = BTreeMap::new(); - let mut nonce_updates = BTreeMap::new(); - let mut declared_classes = BTreeMap::new(); - let replaced_classes = BTreeMap::new(); - let deprecated_declared_classes = BTreeSet::new(); + let fork_node = 
ForkTestNode::new_forked_with_config(fork_config).await; + let fork_backend = fork_node.backend(); + let fork_provider = fork_node.starknet_rpc_client(); + let fork_account = fork_node.account(); + let fork_contract = + Erc20Contract::new(DEFAULT_STRK_FEE_TOKEN_ADDRESS.into(), &fork_account); + + // Iteration 1: Execute transactions on both instances and compare state roots + + let recipient1 = Felt::from_hex("0x456").unwrap(); + let amount1 = Uint256 { low: Felt::from_hex("0x2000").unwrap(), high: Felt::ZERO }; + + // Main Instance: Execute transaction + let main_tx1 = main_contract.transfer(&recipient1, &amount1).send().await.unwrap(); + katana_utils::TxWaiter::new(main_tx1.transaction_hash, &main_provider).await.unwrap(); + + let main_provider_db = backend_main_instance.storage.provider(); + let main_block_num = main_provider_db.latest_number().unwrap(); + let main_state_root_1 = main_provider_db + .header(BlockHashOrNumber::Num(main_block_num)) + .unwrap() + .unwrap() + .state_root; + + // Fork Instance: Execute same transaction + let fork_tx1 = fork_contract.transfer(&recipient1, &amount1).send().await.unwrap(); + katana_utils::TxWaiter::new(fork_tx1.transaction_hash, &fork_provider).await.unwrap(); + + let fork_provider_db = fork_backend.storage.provider(); + let fork_block_num = fork_provider_db.latest_number().unwrap(); + let fork_state_root_1 = fork_provider_db + .header(BlockHashOrNumber::Num(fork_block_num)) + .unwrap() + .unwrap() + .state_root; - for (address, (class_hash, storage, nonce)) in &contracts { - deployed_contracts.insert(*address, *class_hash); - storage_updates.insert(*address, storage.clone()); - nonce_updates.insert(*address, *nonce); - declared_classes.insert(*class_hash, Felt::from(1u8)); - } + assert_eq!( + fork_state_root_1, main_state_root_1, + "State roots do not match after first transaction: fork={:?}, mainnet={:?}", + fork_state_root_1, main_state_root_1 + ); - Just(StateUpdates { - deployed_contracts, - storage_updates, - 
nonce_updates, - declared_classes, - replaced_classes, - deprecated_declared_classes, - ..Default::default() - }) - }) + // Iteration 2: Execute another transaction and compare + + let recipient2 = Felt::from_hex("0x789").unwrap(); + let amount2 = Uint256 { low: Felt::from_hex("0x3000").unwrap(), high: Felt::ZERO }; + + // Main Instance: Execute transaction + let main_tx2 = main_contract.transfer(&recipient2, &amount2).send().await.unwrap(); + katana_utils::TxWaiter::new(main_tx2.transaction_hash, &main_provider).await.unwrap(); + + let main_provider_db = backend_main_instance.storage.provider(); + let main_block_num = main_provider_db.latest_number().unwrap(); + let main_state_root_2 = main_provider_db + .header(BlockHashOrNumber::Num(main_block_num)) + .unwrap() + .unwrap() + .state_root; + + // Fork Instance: Execute same transaction + let fork_tx2 = fork_contract.transfer(&recipient2, &amount2).send().await.unwrap(); + katana_utils::TxWaiter::new(fork_tx2.transaction_hash, &fork_provider).await.unwrap(); + + let fork_provider_db = fork_backend.storage.provider(); + let fork_block_num = fork_provider_db.latest_number().unwrap(); + let fork_state_root_2 = fork_provider_db + .header(BlockHashOrNumber::Num(fork_block_num)) + .unwrap() + .unwrap() + .state_root; + + assert_eq!( + fork_state_root_2, main_state_root_2, + "State roots do not match after second transaction: fork={:?}, mainnet={:?}", + fork_state_root_2, main_state_root_2 + ); + + // Iteration 3: Execute one more transaction and compare + + let recipient3 = Felt::from_hex("0xabc").unwrap(); + let amount3 = Uint256 { low: Felt::from_hex("0x4000").unwrap(), high: Felt::ZERO }; + + // Main Instance: Execute transaction + let main_tx3 = main_contract.transfer(&recipient3, &amount3).send().await.unwrap(); + katana_utils::TxWaiter::new(main_tx3.transaction_hash, &main_provider).await.unwrap(); + + let main_provider_db = backend_main_instance.storage.provider(); + let main_block_num = 
main_provider_db.latest_number().unwrap(); + let main_state_root_3 = main_provider_db + .header(BlockHashOrNumber::Num(main_block_num)) + .unwrap() + .unwrap() + .state_root; + + // Fork Instance: Execute same transaction + let fork_tx3 = fork_contract.transfer(&recipient3, &amount3).send().await.unwrap(); + katana_utils::TxWaiter::new(fork_tx3.transaction_hash, &fork_provider).await.unwrap(); + + let fork_provider_db = fork_backend.storage.provider(); + let fork_block_num = fork_provider_db.latest_number().unwrap(); + let fork_state_root_3 = fork_provider_db + .header(BlockHashOrNumber::Num(fork_block_num)) + .unwrap() + .unwrap() + .state_root; + + assert_eq!( + fork_state_root_3, main_state_root_3, + "State roots do not match after third transaction: fork={:?}, mainnet={:?}", + fork_state_root_3, main_state_root_3 + ); } - // These tests require walkaround to work - // We need to comment out "let global_class_cache = class_cache.build_global()?;" - // in Node::build() proptest! { #![proptest_config(ProptestConfig { cases: 50, @@ -1027,9 +1318,12 @@ mod tests { let initial_state = &state_updates_vec[0]; let provider_mut = backend.storage.provider_mut(); - provider_mut.compute_state_root(block_number, initial_state).unwrap(); + // IT's really important here to compute state root for the next block + provider_mut.compute_state_root(block_number +1, initial_state).unwrap(); provider_mut.commit().unwrap(); producer.force_mine(); + // Create fresh provider to see the new block after force_mine + let provider = backend.storage.provider(); block_number = provider.latest_number().unwrap(); let db = Db::in_memory().unwrap(); @@ -1041,17 +1335,19 @@ mod tests { let fork_root = { let forked_provider = fork_factory.provider_mut(); - let root = forked_provider.compute_state_root(block_number, fork_minimal_updates).unwrap(); + let root = forked_provider.compute_state_root(block_number + 1, fork_minimal_updates).unwrap(); forked_provider.commit().unwrap(); root }; let 
provider_mut = backend.storage.provider_mut(); - let mainnet_root = provider_mut.compute_state_root(block_number, fork_minimal_updates).unwrap(); + let mainnet_root = provider_mut.compute_state_root(block_number + 1, fork_minimal_updates).unwrap(); provider_mut.commit().unwrap(); prop_assert_eq!(fork_root, mainnet_root, "State roots do not match at iteration {}", i); producer.force_mine(); + // Create fresh provider to see the new block after force_mine + let provider = backend.storage.provider(); block_number = provider.latest_number().unwrap(); } Ok(()) diff --git a/crates/rpc/rpc-server/tests/messaging.rs b/crates/rpc/rpc-server/tests/messaging.rs index b47ad128d..3584b1e84 100644 --- a/crates/rpc/rpc-server/tests/messaging.rs +++ b/crates/rpc/rpc-server/tests/messaging.rs @@ -11,13 +11,13 @@ use katana_primitives::block::BlockIdOrTag; use katana_primitives::utils::transaction::{ compute_l1_handler_tx_hash, compute_l1_to_l2_message_hash, }; -use katana_primitives::{eth_address, felt, ContractAddress}; +use katana_primitives::{eth_address, felt, ContractAddress, Felt}; use katana_rpc_types::{Class, MsgFromL1}; use katana_utils::{TestNode, TxWaiter}; use rand::Rng; use starknet::accounts::{Account, ConnectedAccount}; use starknet::contract::ContractFactory; -use starknet::core::types::{Felt, Hash256, ReceiptBlock, Transaction, TransactionReceipt}; +use starknet::core::types::{Hash256, ReceiptBlock, Transaction, TransactionReceipt}; use starknet::core::utils::get_contract_address; use starknet::macros::selector; use starknet::providers::Provider; @@ -95,6 +95,7 @@ async fn test_messaging() { let address = get_contract_address(Felt::ZERO, class_hash, &[], Felt::ZERO); // Deploy the contract using UDC + #[allow(deprecated)] let res = ContractFactory::new(class_hash, &katana_account) .deploy_v3(Vec::new(), Felt::ZERO, false) .send() @@ -239,6 +240,7 @@ async fn estimate_message_fee() -> Result<()> { TxWaiter::new(res.transaction_hash, &rpc_client).await?; // Deploy 
the contract using UDC + #[allow(deprecated)] let res = ContractFactory::new(class_hash, &account) .deploy_v3(Vec::new(), Felt::ZERO, false) .send() diff --git a/crates/rpc/rpc-server/tests/simulate.rs b/crates/rpc/rpc-server/tests/simulate.rs index f0c09292c..49badffab 100644 --- a/crates/rpc/rpc-server/tests/simulate.rs +++ b/crates/rpc/rpc-server/tests/simulate.rs @@ -1,10 +1,9 @@ use cainome::rs::abigen_legacy; use katana_genesis::constant::DEFAULT_ETH_FEE_TOKEN_ADDRESS; use katana_primitives::block::BlockIdOrTag; +use katana_primitives::{felt, Felt}; use katana_utils::TestNode; use starknet::accounts::{Account, ExecutionEncoding, SingleOwnerAccount}; -use starknet::core::types::Felt; -use starknet::macros::felt; use starknet::providers::Provider; use starknet::signers::{LocalWallet, SigningKey}; diff --git a/crates/rpc/rpc-server/tests/starknet.rs b/crates/rpc/rpc-server/tests/starknet.rs index bd7759e52..5be27fd75 100644 --- a/crates/rpc/rpc-server/tests/starknet.rs +++ b/crates/rpc/rpc-server/tests/starknet.rs @@ -13,7 +13,7 @@ use katana_genesis::constant::{ }; use katana_primitives::block::{BlockIdOrTag, ConfirmedBlockIdOrTag}; use katana_primitives::event::ContinuationToken; -use katana_primitives::Felt; +use katana_primitives::{felt, Felt}; use katana_rpc_api::dev::DevApiClient; use katana_rpc_types::state_update::StateUpdate; use katana_rpc_types::trace::TxTrace; @@ -31,7 +31,7 @@ use starknet::accounts::{ }; use starknet::core::types::Call; use starknet::core::utils::get_contract_address; -use starknet::macros::{felt, selector}; +use starknet::macros::selector; use starknet::providers::{Provider, ProviderError}; use starknet::signers::{LocalWallet, SigningKey}; use tokio::sync::Mutex; @@ -62,7 +62,7 @@ async fn declare_and_deploy_contract() { // check state update includes class in declared_classes let state_update = provider.get_state_update(BlockIdOrTag::Latest).await.unwrap(); match state_update { - StateUpdate::Update(update) => { + 
StateUpdate::Confirmed(update) => { similar_asserts::assert_eq!( update.state_diff.declared_classes, BTreeMap::from_iter([(class_hash, compiled_class_hash)]) @@ -1340,3 +1340,45 @@ async fn simulate_should_skip_strict_nonce_check(#[case] nonce: Felt, #[case] sh let res = contract.transfer(&recipient, &amount).nonce(nonce).simulate(false, false).await; assert_eq!(res.is_ok(), should_ok) } + +/// Test that special system contract addresses (0x1 and 0x2) return ClassHash::ZERO +/// for `get_class_hash_at` calls, as they don't have an associated class. +/// +/// See https://docs.starknet.io/architecture-and-concepts/network-architecture/starknet-state/#address_0x1 +#[tokio::test] +async fn special_system_contract_class_hash() { + let sequencer = TestNode::new().await; + let provider = sequencer.starknet_rpc_client(); + + // Address 0x1 should return ClassHash::ZERO + let class_hash = + provider.get_class_hash_at(BlockIdOrTag::PreConfirmed, felt!("0x1").into()).await.unwrap(); + assert_eq!(class_hash, Felt::ZERO, "Address 0x1 should return ClassHash::ZERO"); + + // Address 0x2 should return ClassHash::ZERO + let class_hash = + provider.get_class_hash_at(BlockIdOrTag::PreConfirmed, felt!("0x2").into()).await.unwrap(); + assert_eq!(class_hash, Felt::ZERO, "Address 0x2 should return ClassHash::ZERO"); +} + +/// Test that `get_storage_at` works for special system contract addresses (0x1 and 0x2) +/// without returning ContractNotFound error. 
+/// +/// See https://docs.starknet.io/architecture-and-concepts/network-architecture/starknet-state/#address_0x1 +#[tokio::test] +async fn special_system_contract_storage() { + let sequencer = TestNode::new().await; + let provider = sequencer.starknet_rpc_client(); + + let storage_key = felt!("0x0"); + + // Address 0x1 should not return ContractNotFound + let result = + provider.get_storage_at(felt!("0x1").into(), storage_key, BlockIdOrTag::PreConfirmed).await; + assert!(result.is_ok(), "get_storage_at for address 0x1 should not fail"); + + // Address 0x2 should not return ContractNotFound + let result = + provider.get_storage_at(felt!("0x2").into(), storage_key, BlockIdOrTag::PreConfirmed).await; + assert!(result.is_ok(), "get_storage_at for address 0x2 should not fail"); +} diff --git a/crates/rpc/rpc-types/src/broadcasted.rs b/crates/rpc/rpc-types/src/broadcasted.rs index cac6f3424..1ee963df5 100644 --- a/crates/rpc/rpc-types/src/broadcasted.rs +++ b/crates/rpc/rpc-types/src/broadcasted.rs @@ -735,7 +735,7 @@ mod tests { use assert_matches::assert_matches; use katana_primitives::fee::ResourceBoundsMapping; use katana_primitives::transaction::TxType; - use katana_primitives::{address, felt, ContractAddress, Felt}; + use katana_primitives::{address, felt, Felt}; use serde_json::json; use super::*; diff --git a/crates/rpc/rpc-types/src/event.rs b/crates/rpc/rpc-types/src/event.rs index d5fadd1a3..9abc8ab21 100644 --- a/crates/rpc/rpc-types/src/event.rs +++ b/crates/rpc/rpc-types/src/event.rs @@ -27,6 +27,9 @@ pub struct EventFilter { #[serde(skip_serializing_if = "Option::is_none")] pub address: Option, /// The keys to filter over + /// + /// Per key (by position), designate the possible values to be matched for events to be + /// returned. Empty array designates 'any' value #[serde(skip_serializing_if = "Option::is_none")] pub keys: Option>>, } @@ -63,12 +66,22 @@ pub struct GetEventsResponse { /// of transaction execution. 
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct EmittedEvent { - pub from_address: ContractAddress, - pub keys: Vec, - pub data: Vec, + /// The hash of the block in which the event was emitted. #[serde(skip_serializing_if = "Option::is_none")] pub block_hash: Option, + /// The number of the block in which the event was emitted. #[serde(skip_serializing_if = "Option::is_none")] pub block_number: Option, + /// The hash of the transaction where the event was emitted. pub transaction_hash: TxHash, + /// The index of the transaction in the block. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub transaction_index: Option, + /// The index of the event within the transaction. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub event_index: Option, + /// The address of the contract that emitted the event. + pub from_address: ContractAddress, + pub keys: Vec, + pub data: Vec, } diff --git a/crates/rpc/rpc-types/src/lib.rs b/crates/rpc/rpc-types/src/lib.rs index 14b0a7751..81119a626 100644 --- a/crates/rpc/rpc-types/src/lib.rs +++ b/crates/rpc/rpc-types/src/lib.rs @@ -141,26 +141,36 @@ impl Serialize for SyncingResponse { impl<'de> Deserialize<'de> for SyncingResponse { fn deserialize>(deserializer: D) -> Result { - use serde::Deserialize; - use serde::__private::de::{Content, ContentRefDeserializer}; - - let content = as Deserialize>::deserialize(deserializer)?; - let deserializer = ContentRefDeserializer::::new(&content); - - if let Ok(bool) = ::deserialize(deserializer) { - // The only valid boolean value is `false` which indicates that the node is not syncing. 
- if !bool { - return Ok(Self::NotSyncing); - }; - } else if let Ok(value) = ::deserialize(deserializer) { - return Ok(Self::Syncing(value)); + use serde::de::{self, MapAccess, Visitor}; + + struct SyncingResponseVisitor; + + impl<'de> Visitor<'de> for SyncingResponseVisitor { + type Value = SyncingResponse; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str( + "either `false` or an object with fields: starting_block_hash, \ + starting_block_num, current_block_hash, current_block_num, \ + highest_block_hash, highest_block_num", + ) + } + + fn visit_bool(self, value: bool) -> Result { + if !value { + Ok(SyncingResponse::NotSyncing) + } else { + Err(E::custom("expected `false` for not syncing state")) + } + } + + fn visit_map>(self, map: A) -> Result { + let status = SyncStatus::deserialize(de::value::MapAccessDeserializer::new(map))?; + Ok(SyncingResponse::Syncing(status)) + } } - Err(serde::de::Error::custom( - "expected either `false` or an object with fields: starting_block_hash, \ - starting_block_num, current_block_hash, current_block_num, highest_block_hash, \ - highest_block_num", - )) + deserializer.deserialize_any(SyncingResponseVisitor) } } diff --git a/crates/rpc/rpc-types/src/outside_execution.rs b/crates/rpc/rpc-types/src/outside_execution.rs index 58ec81999..b75b7d3c6 100644 --- a/crates/rpc/rpc-types/src/outside_execution.rs +++ b/crates/rpc/rpc-types/src/outside_execution.rs @@ -91,7 +91,7 @@ impl OutsideExecution { #[cfg(test)] mod tests { - use katana_primitives::{address, felt, ContractAddress, Felt}; + use katana_primitives::{address, felt, Felt}; use serde_json::json; use starknet::macros::selector; diff --git a/crates/rpc/rpc-types/src/receipt.rs b/crates/rpc/rpc-types/src/receipt.rs index c05f3f0df..2a4f87d8e 100644 --- a/crates/rpc/rpc-types/src/receipt.rs +++ b/crates/rpc/rpc-types/src/receipt.rs @@ -175,6 +175,25 @@ pub struct RpcDeployAccountTxReceipt { impl RpcTxReceipt { fn 
new(receipt: Receipt, finality_status: FinalityStatus) -> Self { match receipt { + Receipt::Deploy(rct) => { + let messages_sent = rct.messages_sent; + let events = rct.events; + + RpcTxReceipt::Deploy(RpcDeployTxReceipt { + events, + messages_sent, + finality_status, + actual_fee: rct.fee.into(), + contract_address: rct.contract_address, + execution_resources: rct.execution_resources.into(), + execution_result: if let Some(reason) = rct.revert_error { + ExecutionResult::Reverted { reason } + } else { + ExecutionResult::Succeeded + }, + }) + } + Receipt::Invoke(rct) => { let messages_sent = rct.messages_sent; let events = rct.events; @@ -360,9 +379,9 @@ pub struct ExecutionResources { impl From for ExecutionResources { fn from(resources: receipt::ExecutionResources) -> Self { ExecutionResources { - l2_gas: resources.gas.l2_gas, - l1_gas: resources.gas.l1_gas, - l1_data_gas: resources.gas.l1_data_gas, + l2_gas: resources.total_gas_consumed.l2_gas, + l1_gas: resources.total_gas_consumed.l1_gas, + l1_data_gas: resources.total_gas_consumed.l1_data_gas, } } } @@ -398,18 +417,18 @@ impl From for receipt::ExecutionResources { use std::collections::HashMap; receipt::ExecutionResources { - gas: receipt::GasUsed { + total_gas_consumed: receipt::GasUsed { l2_gas: resources.l2_gas, l1_gas: resources.l1_gas, l1_data_gas: resources.l1_data_gas, }, // VM resources are not available in RPC types, use defaults - computation_resources: VmResources { + vm_resources: VmResources { n_steps: 0, n_memory_holes: 0, builtin_instance_counter: HashMap::new(), }, - da_resources: receipt::DataAvailabilityResources { + data_availability: receipt::DataAvailabilityResources { l1_gas: resources.l1_gas, l1_data_gas: resources.l1_data_gas, }, diff --git a/crates/rpc/rpc-types/src/state_update.rs b/crates/rpc/rpc-types/src/state_update.rs index 5bc9996c5..35c42957d 100644 --- a/crates/rpc/rpc-types/src/state_update.rs +++ b/crates/rpc/rpc-types/src/state_update.rs @@ -9,7 +9,7 @@ use 
serde::{Deserialize, Deserializer, Serialize, Serializer}; #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum StateUpdate { - Update(ConfirmedStateUpdate), + Confirmed(ConfirmedStateUpdate), PreConfirmed(PreConfirmedStateUpdate), } @@ -17,7 +17,7 @@ pub enum StateUpdate { #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct PreConfirmedStateUpdate { /// The previous global state root - pub old_root: Felt, + pub old_root: Option, /// State diff pub state_diff: StateDiff, } @@ -48,6 +48,7 @@ pub struct StateDiff { pub declared_classes: BTreeMap, pub deprecated_declared_classes: BTreeSet, pub replaced_classes: BTreeMap, + pub migrated_compiled_classes: Option>, } impl Serialize for StateDiff { @@ -251,6 +252,39 @@ impl Serialize for StateDiff { } } + /// Serializes `migrated_compiled_classes` as an array of objects with the following + /// structure: + /// + /// ```json + /// [ + /// { + /// "class_hash": "0x123", + /// "compiled_class_hash": "0x456" + /// }, + /// ... 
+ /// ] + /// ``` + struct MigratedCompiledClassesSer<'a>(&'a BTreeMap); + + impl Serialize for MigratedCompiledClassesSer<'_> { + fn serialize(&self, serializer: S) -> Result { + #[derive(Debug, Serialize)] + struct MigratedCompiledClass { + class_hash: ClassHash, + compiled_class_hash: CompiledClassHash, + } + + let mut seq = serializer.serialize_seq(Some(self.0.len()))?; + for (class_hash, compiled_class_hash) in self.0 { + seq.serialize_element(&MigratedCompiledClass { + class_hash: *class_hash, + compiled_class_hash: *compiled_class_hash, + })?; + } + seq.end() + } + } + let nonces = NoncesSer(&self.nonces); let storage_diffs = StorageDiffsSer(&self.storage_diffs); let replaced_classes = ReplacedClassesSer(&self.replaced_classes); @@ -258,13 +292,22 @@ impl Serialize for StateDiff { let deployed_contracts = DeployedContractsSer(&self.deployed_contracts); let deprecated_declared_classes = DepDeclaredClassesSer(&self.deprecated_declared_classes); - let mut map = serializer.serialize_map(Some(6))?; + let len = 6 + self.migrated_compiled_classes.is_some() as usize; + let mut map = serializer.serialize_map(Some(len))?; + map.serialize_entry("nonces", &nonces)?; map.serialize_entry("storage_diffs", &storage_diffs)?; map.serialize_entry("declared_classes", &declared_classes)?; map.serialize_entry("replaced_classes", &replaced_classes)?; map.serialize_entry("deployed_contracts", &deployed_contracts)?; map.serialize_entry("deprecated_declared_classes", &deprecated_declared_classes)?; + if let Some(ref migrated) = self.migrated_compiled_classes { + map.serialize_entry( + "migrated_compiled_classes", + &MigratedCompiledClassesSer(migrated), + )?; + } + map.end() } } @@ -289,6 +332,7 @@ impl<'de> Deserialize<'de> for StateDiff { let mut declared_classes = None; let mut deprecated_declared_classes = None; let mut replaced_classes = None; + let mut migrated_compiled_classes = None; while let Some(key) = map.next_key::()? 
{ match key.as_str() { @@ -311,6 +355,10 @@ impl<'de> Deserialize<'de> for StateDiff { "replaced_classes" => { replaced_classes = Some(map.next_value_seed(ReplacedClassesDe)?); } + "migrated_compiled_classes" => { + migrated_compiled_classes = + Some(map.next_value_seed(MigratedCompiledClassesDe)?); + } _ => { let _ = map.next_value::()?; } @@ -330,10 +378,22 @@ impl<'de> Deserialize<'de> for StateDiff { })?, replaced_classes: replaced_classes .ok_or_else(|| serde::de::Error::missing_field("replaced_classes"))?, + migrated_compiled_classes, }) } } + /// Deserializes nonces from an array of objects with the following structure: + /// + /// ```json + /// [ + /// { + /// "contract_address": "0x123", + /// "nonce": "0x123" + /// }, + /// ... + /// ] + /// ``` struct NoncesDe; impl<'de> DeserializeSeed<'de> for NoncesDe { @@ -377,6 +437,23 @@ impl<'de> Deserialize<'de> for StateDiff { } } + /// Deserializes storage diffs from an array of objects with the following structure: + /// + /// ```json + /// [ + /// { + /// "address": "0x123", + /// "storage_entries": [ + /// { + /// "key": "0x123", + /// "value": "0x456" + /// }, + /// ... + /// ] + /// }, + /// ... + /// ] + /// ``` struct StorageDiffsDe; impl<'de> DeserializeSeed<'de> for StorageDiffsDe { @@ -430,6 +507,17 @@ impl<'de> Deserialize<'de> for StateDiff { } } + /// Deserializes deployed contracts from an array of objects with the following structure: + /// + /// ```json + /// [ + /// { + /// "address": "0x123", + /// "class_hash": "0x456" + /// }, + /// ... + /// ] + /// ``` struct DeployedContractsDe; impl<'de> DeserializeSeed<'de> for DeployedContractsDe { @@ -473,6 +561,17 @@ impl<'de> Deserialize<'de> for StateDiff { } } + /// Deserializes declared classes from an array of objects with the following structure: + /// + /// ```json + /// [ + /// { + /// "class_hash": "0x123", + /// "compiled_class_hash": "0x456" + /// }, + /// ... 
+ /// ] + /// ``` struct DeclaredClassesDe; impl<'de> DeserializeSeed<'de> for DeclaredClassesDe { @@ -516,6 +615,11 @@ impl<'de> Deserialize<'de> for StateDiff { } } + /// Deserializes deprecated declared classes from an array of class hashes: + /// + /// ```json + /// ["0x123", "0x456", ...] + /// ``` struct DepDeclaredClassesDe; impl<'de> DeserializeSeed<'de> for DepDeclaredClassesDe { @@ -553,6 +657,17 @@ impl<'de> Deserialize<'de> for StateDiff { } } + /// Deserializes `replaced_classes` from an array of objects with the following structure: + /// + /// ```json + /// [ + /// { + /// "contract_address": "0x123", + /// "class_hash": "0x123" + /// }, + /// ... + /// ] + /// ``` struct ReplacedClassesDe; impl<'de> DeserializeSeed<'de> for ReplacedClassesDe { @@ -596,12 +711,74 @@ impl<'de> Deserialize<'de> for StateDiff { } } + /// Deserializes `migrated_compiled_classes` from an array of objects with the following + /// structure: + /// + /// ```json + /// [ + /// { + /// "class_hash": "0x123", + /// "compiled_class_hash": "0x456" + /// }, + /// ... + /// ] + /// ``` + struct MigratedCompiledClassesDe; + + impl<'de> DeserializeSeed<'de> for MigratedCompiledClassesDe { + type Value = BTreeMap; + + fn deserialize>( + self, + deserializer: D, + ) -> Result { + struct MigratedCompiledClassesVisitor; + + impl<'de> Visitor<'de> for MigratedCompiledClassesVisitor { + type Value = BTreeMap; + + fn expecting( + &self, + formatter: &mut std::fmt::Formatter<'_>, + ) -> std::fmt::Result { + formatter.write_str("an array of migrated compiled classes") + } + + fn visit_seq>( + self, + mut seq: A, + ) -> Result { + #[derive(Debug, Deserialize)] + struct MigratedCompiledClass { + class_hash: ClassHash, + compiled_class_hash: CompiledClassHash, + } + + let mut migrated_compiled_classes = BTreeMap::new(); + while let Some(migrated) = seq.next_element::()? 
{ + migrated_compiled_classes + .insert(migrated.class_hash, migrated.compiled_class_hash); + } + Ok(migrated_compiled_classes) + } + } + + deserializer.deserialize_seq(MigratedCompiledClassesVisitor) + } + } + deserializer.deserialize_map(StateDiffVisitor) } } impl From for StateDiff { fn from(value: katana_primitives::state::StateUpdates) -> Self { + let migrated_compiled_classes = if value.migrated_compiled_classes.is_empty() { + None + } else { + Some(value.migrated_compiled_classes) + }; + Self { nonces: value.nonce_updates, storage_diffs: value.storage_updates, @@ -609,6 +786,7 @@ impl From for StateDiff { declared_classes: value.declared_classes, deployed_contracts: value.deployed_contracts, deprecated_declared_classes: value.deprecated_declared_classes, + migrated_compiled_classes, } } } @@ -622,6 +800,7 @@ impl From for katana_primitives::state::StateUpdates { declared_classes: value.declared_classes, deployed_contracts: value.deployed_contracts, deprecated_declared_classes: value.deprecated_declared_classes, + migrated_compiled_classes: value.migrated_compiled_classes.unwrap_or_default(), } } } diff --git a/crates/rpc/rpc-types/src/trace.rs b/crates/rpc/rpc-types/src/trace.rs index 6e1df96cb..fdf1eb9f7 100644 --- a/crates/rpc/rpc-types/src/trace.rs +++ b/crates/rpc/rpc-types/src/trace.rs @@ -39,6 +39,7 @@ pub struct TxTraceWithHash { } /// Execution trace of a Starknet transaction. 
+#[allow(clippy::large_enum_variant)] #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(tag = "type")] pub enum TxTrace { @@ -330,9 +331,9 @@ pub fn to_rpc_fee_estimate(resources: &receipt::ExecutionResources, fee: &FeeInf l2_gas_price: fee.l2_gas_price, l1_gas_price: fee.l1_gas_price, l1_data_gas_price: fee.l1_data_gas_price, - l1_gas_consumed: resources.gas.l1_gas, - l2_gas_consumed: resources.gas.l2_gas, - l1_data_gas_consumed: resources.gas.l1_data_gas, + l1_gas_consumed: resources.total_gas_consumed.l1_gas, + l2_gas_consumed: resources.total_gas_consumed.l2_gas, + l1_data_gas_consumed: resources.total_gas_consumed.l1_data_gas, } } diff --git a/crates/rpc/rpc-types/tests/block.rs b/crates/rpc/rpc-types/tests/block.rs index e2d492e13..e72c2cead 100644 --- a/crates/rpc/rpc-types/tests/block.rs +++ b/crates/rpc/rpc-types/tests/block.rs @@ -1,6 +1,6 @@ use assert_matches::assert_matches; use katana_primitives::da::L1DataAvailabilityMode; -use katana_primitives::{address, felt, ContractAddress}; +use katana_primitives::{address, felt}; use katana_rpc_types::block::{ GetBlockWithReceiptsResponse, GetBlockWithTxHashesResponse, MaybePreConfirmedBlock, }; diff --git a/crates/rpc/rpc-types/tests/fixtures/v0.10/state-updates/confirmed_state_update.json b/crates/rpc/rpc-types/tests/fixtures/v0.10/state-updates/confirmed_state_update.json new file mode 100644 index 000000000..0506e97d1 --- /dev/null +++ b/crates/rpc/rpc-types/tests/fixtures/v0.10/state-updates/confirmed_state_update.json @@ -0,0 +1,47 @@ +{ + "block_hash": "0x1935ec0e5c7758fdc11a78ed9d4cadd4225eab826aabd98fe2d04b45ca4c150", + "new_root": "0x7e72ca880e4fa1f4987257d90b2642860a4574a03b79ac830f6fb5968520977", + "old_root": "0x484d8010568613b1878e03085989536d9112d89e2979297f0fbd741a3f73138", + "state_diff": { + "declared_classes": [], + "deployed_contracts": [], + "deprecated_declared_classes": [], + "migrated_compiled_classes": [ + { + "class_hash": 
"0x4ac055f14361bb6f7bf4b9af6e96ca68825e6037e9bdf87ea0b2c641dea73ae", + "compiled_class_hash": "0x17f3b8f7225a160ec0542ea5c44ee876f2b132e7dee00ec36f2422d8155a4e4" + } + ], + "nonces": [ + { + "contract_address": "0x662776dac110a170767d83da4f1d8fae022df7aa8a78252eb9c501c68d49604", + "nonce": "0x1bb63" + } + ], + "replaced_classes": [], + "storage_diffs": [ + { + "address": "0x18469ed2d40a016a602371173c7287e25f85cb6abb6fc0866d3c444e2837603", + "storage_entries": [ + { + "key": "0x6d410d47be5497b0dafef14e24c8767731a6e50126ff8fa99f25a0d0ee02788", + "value": "0x1" + } + ] + }, + { + "address": "0x377c2d65debb3978ea81904e7d59740da1f07412e30d01c5ded1c5d6f1ddc43", + "storage_entries": [ + { + "key": "0x3ee4ba0f59886159d92a35f96ded219dd7f69c30953f9b68d333f10a27e312b", + "value": "0x18469ed2d40a016a602371173c7287e25f85cb6abb6fc0866d3c444e2837603" + }, + { + "key": "0x484b46148d37383593029fa3b4c09a5e0e3cb66bbcf5fc66529fa452ccc6e34", + "value": "0x8" + } + ] + } + ] + } +} diff --git a/crates/rpc/rpc-types/tests/receipt.rs b/crates/rpc/rpc-types/tests/receipt.rs index 5f1c4fb61..d68b3434f 100644 --- a/crates/rpc/rpc-types/tests/receipt.rs +++ b/crates/rpc/rpc-types/tests/receipt.rs @@ -3,7 +3,7 @@ use katana_primitives::alloy::FromHex; use katana_primitives::block::FinalityStatus; use katana_primitives::fee::PriceUnit; use katana_primitives::receipt::Event; -use katana_primitives::{address, felt, ContractAddress, B256}; +use katana_primitives::{address, felt, B256}; use katana_rpc_types::receipt::{ ExecutionResult, ReceiptBlockInfo, RpcTxReceipt, TxReceiptWithBlockInfo, }; diff --git a/crates/rpc/rpc-types/tests/state_update.rs b/crates/rpc/rpc-types/tests/state_update.rs index 792a938a0..5ef09718f 100644 --- a/crates/rpc/rpc-types/tests/state_update.rs +++ b/crates/rpc/rpc-types/tests/state_update.rs @@ -1,7 +1,7 @@ use std::collections::{BTreeMap, BTreeSet}; use assert_matches::assert_matches; -use katana_primitives::{address, felt, ContractAddress}; +use 
katana_primitives::{address, felt}; use katana_rpc_types::state_update::{ConfirmedStateUpdate, PreConfirmedStateUpdate, StateUpdate}; use serde_json::Value; @@ -35,7 +35,7 @@ fn preconfirmed_state_update() { let PreConfirmedStateUpdate { old_root, ref state_diff } = state_update; assert_eq!( old_root, - felt!("0x6a59de5353d4050a800fd240020d014653d950df357ffa14319ee809a65427a") + Some(felt!("0x6a59de5353d4050a800fd240020d014653d950df357ffa14319ee809a65427a")) ); assert_eq!(state_diff.deprecated_declared_classes, BTreeSet::new()); assert_eq!(state_diff.replaced_classes, map!()); @@ -63,6 +63,7 @@ fn preconfirmed_state_update() { } } ); + assert!(state_diff.migrated_compiled_classes.is_none()); let serialized = serde_json::to_value(&state_update).unwrap(); similar_asserts::assert_eq!(serialized, json); @@ -74,7 +75,7 @@ fn confirmed_state_update() { let state_update: ConfirmedStateUpdate = serde_json::from_value(json.clone()).unwrap(); let as_enum: StateUpdate = serde_json::from_value(json.clone()).unwrap(); - assert_matches!(as_enum, StateUpdate::Update(as_enum_update) => { + assert_matches!(as_enum, StateUpdate::Confirmed(as_enum_update) => { similar_asserts::assert_eq!(as_enum_update, state_update); }); @@ -135,3 +136,59 @@ fn confirmed_state_update() { let serialized = serde_json::to_value(&state_update).unwrap(); similar_asserts::assert_eq!(serialized, json); } + +#[test] +fn v0_10_0_confirmed_state_update() { + let json = fixtures::test_data::("v0.10/state-updates/confirmed_state_update.json"); + + let state_update: ConfirmedStateUpdate = serde_json::from_value(json.clone()).unwrap(); + let as_enum: StateUpdate = serde_json::from_value(json.clone()).unwrap(); + assert_matches!(as_enum, StateUpdate::Confirmed(as_enum_update) => { + similar_asserts::assert_eq!(as_enum_update, state_update); + }); + + let ConfirmedStateUpdate { block_hash, new_root, old_root, ref state_diff } = state_update; + assert_eq!( + block_hash, + 
felt!("0x1935ec0e5c7758fdc11a78ed9d4cadd4225eab826aabd98fe2d04b45ca4c150") + ); + assert_eq!( + old_root, + felt!("0x484d8010568613b1878e03085989536d9112d89e2979297f0fbd741a3f73138") + ); + assert_eq!( + new_root, + felt!("0x7e72ca880e4fa1f4987257d90b2642860a4574a03b79ac830f6fb5968520977") + ); + assert!(state_diff.deprecated_declared_classes.is_empty()); + assert!(state_diff.replaced_classes.is_empty()); + assert!(state_diff.declared_classes.is_empty()); + assert!(state_diff.deployed_contracts.is_empty()); + assert_eq!( + state_diff.nonces, + map! { + address!("0x662776dac110a170767d83da4f1d8fae022df7aa8a78252eb9c501c68d49604"), felt!("0x1bb63"), + } + ); + assert_eq!( + state_diff.storage_diffs, + map! { + address!("0x18469ed2d40a016a602371173c7287e25f85cb6abb6fc0866d3c444e2837603"), map! { + felt!("0x6d410d47be5497b0dafef14e24c8767731a6e50126ff8fa99f25a0d0ee02788"), felt!("0x1"), + }, + address!("0x377c2d65debb3978ea81904e7d59740da1f07412e30d01c5ded1c5d6f1ddc43"), map! { + felt!("0x484b46148d37383593029fa3b4c09a5e0e3cb66bbcf5fc66529fa452ccc6e34"), felt!("0x8"), + felt!("0x3ee4ba0f59886159d92a35f96ded219dd7f69c30953f9b68d333f10a27e312b"), felt!("0x18469ed2d40a016a602371173c7287e25f85cb6abb6fc0866d3c444e2837603"), + } + } + ); + assert_eq!( + state_diff.migrated_compiled_classes, + Some(map! 
{ + felt!("0x4ac055f14361bb6f7bf4b9af6e96ca68825e6037e9bdf87ea0b2c641dea73ae"), felt!("0x17f3b8f7225a160ec0542ea5c44ee876f2b132e7dee00ec36f2422d8155a4e4"), + }) + ); + + let serialized = serde_json::to_value(&state_update).unwrap(); + similar_asserts::assert_eq!(serialized, json); +} diff --git a/crates/rpc/rpc-types/tests/trace.rs b/crates/rpc/rpc-types/tests/trace.rs index 727f7c25e..c3dd00721 100644 --- a/crates/rpc/rpc-types/tests/trace.rs +++ b/crates/rpc/rpc-types/tests/trace.rs @@ -1,6 +1,6 @@ use assert_matches::assert_matches; use katana_primitives::execution::EntryPointType; -use katana_primitives::{address, felt, ContractAddress}; +use katana_primitives::{address, felt}; use katana_rpc_types::trace::{ CallType, ExecuteInvocation, FunctionInvocation, InnerCallExecutionResources, InvokeTxTrace, OrderedEvent, OrderedL2ToL1Message, RevertedInvocation, TxTrace, @@ -531,6 +531,7 @@ fn tx_trace_with_state_diff() { replaced_classes, declared_classes, deprecated_declared_classes: BTreeSet::new(), + migrated_compiled_classes: None, }; let trace = InvokeTxTrace { diff --git a/crates/rpc/rpc-types/tests/transaction.rs b/crates/rpc/rpc-types/tests/transaction.rs index cda2b9c81..064270d22 100644 --- a/crates/rpc/rpc-types/tests/transaction.rs +++ b/crates/rpc/rpc-types/tests/transaction.rs @@ -4,7 +4,7 @@ use katana_primitives::fee::{ AllResourceBoundsMapping, L1GasResourceBoundsMapping, ResourceBounds, ResourceBoundsMapping, Tip, }; -use katana_primitives::{address, felt, transaction as primitives, ContractAddress}; +use katana_primitives::{address, felt, transaction as primitives}; use katana_rpc_types::transaction::{ RpcDeclareTx, RpcDeployAccountTx, RpcInvokeTx, RpcTx, RpcTxWithHash, }; diff --git a/crates/storage/db/src/codecs/postcard.rs b/crates/storage/db/src/codecs/postcard.rs index 35b6fb76e..58807077e 100644 --- a/crates/storage/db/src/codecs/postcard.rs +++ b/crates/storage/db/src/codecs/postcard.rs @@ -1,15 +1,57 @@ -use 
katana_primitives::contract::{ContractAddress, GenericContractInfo}; +use katana_primitives::contract::GenericContractInfo; use katana_primitives::execution::TypedTransactionExecutionInfo; use katana_primitives::receipt::Receipt; use katana_primitives::Felt; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; use {postcard, zstd}; use super::{Compress, Decompress}; use crate::error::CodecError; + +/// A wrapper type for `Felt` that serializes/deserializes as a 32-byte big-endian array. +/// +/// This exists for backward compatibility - older versions of `Felt` used to always serialize as +/// a 32-byte array, but newer versions have changed this behavior. This wrapper ensures +/// consistent serialization format for database storage. However, deserialization is still backward +/// compatible. +/// +/// See for the breaking change. +/// +/// This is temporary and may change in the future. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +struct Felt32(Felt); + +impl Serialize for Felt32 { + fn serialize(&self, serializer: S) -> Result { + serializer.serialize_bytes(&self.0.to_bytes_be()) + } +} + +impl<'de> Deserialize<'de> for Felt32 { + fn deserialize>(deserializer: D) -> Result { + Ok(Felt32(Felt::deserialize(deserializer)?)) + } +} + +impl Compress for Felt { + type Compressed = Vec; + fn compress(self) -> Result { + postcard::to_stdvec(&Felt32(self)).map_err(|e| CodecError::Compress(e.to_string())) + } +} + +impl Decompress for Felt { + fn decompress>(bytes: B) -> Result { + let wrapper: Felt32 = postcard::from_bytes(bytes.as_ref()) + .map_err(|e| CodecError::Decompress(e.to_string()))?; + Ok(wrapper.0) + } +} + use crate::models::block::StoredBlockBodyIndices; use crate::models::contract::ContractInfoChangeList; use crate::models::list::BlockList; -use crate::models::stage::StageCheckpoint; +use crate::models::stage::{ExecutionCheckpoint, PruningCheckpoint}; use crate::models::trie::TrieDatabaseValue; macro_rules! 
impl_compress_and_decompress_for_table_values { @@ -52,11 +94,10 @@ impl Decompress for TypedTransactionExecutionInfo { impl_compress_and_decompress_for_table_values!( u64, Receipt, - Felt, TrieDatabaseValue, - ContractAddress, BlockList, - StageCheckpoint, + ExecutionCheckpoint, + PruningCheckpoint, GenericContractInfo, StoredBlockBodyIndices, ContractInfoChangeList diff --git a/crates/storage/db/src/mdbx/mod.rs b/crates/storage/db/src/mdbx/mod.rs index 9e8e2065a..bc64a5cec 100644 --- a/crates/storage/db/src/mdbx/mod.rs +++ b/crates/storage/db/src/mdbx/mod.rs @@ -271,9 +271,8 @@ pub mod test_utils { #[cfg(test)] mod tests { - use katana_primitives::contract::{ContractAddress, GenericContractInfo}; - use katana_primitives::{address, Felt}; - use starknet::macros::felt; + use katana_primitives::contract::GenericContractInfo; + use katana_primitives::{address, felt, Felt}; use super::*; use crate::abstraction::{DbCursor, DbCursorMut, DbDupSortCursor, DbTx, DbTxMut, Walker}; diff --git a/crates/storage/db/src/mdbx/tx.rs b/crates/storage/db/src/mdbx/tx.rs index b96df6e61..57e540127 100644 --- a/crates/storage/db/src/mdbx/tx.rs +++ b/crates/storage/db/src/mdbx/tx.rs @@ -43,7 +43,7 @@ impl std::fmt::Debug for Tx { impl Tx { /// Creates new `Tx` object with a `RO` or `RW` transaction. 
pub fn new(inner: libmdbx::Transaction, metrics: DbMetrics) -> Self { - Self { inner, db_handles: Default::default(), metrics } + Self { inner, db_handles: Arc::new(RwLock::new([None; NUM_TABLES])), metrics } } pub fn get_dbi(&self) -> Result { diff --git a/crates/storage/db/src/models/class.rs b/crates/storage/db/src/models/class.rs index 414dbfa94..c1228b8b4 100644 --- a/crates/storage/db/src/models/class.rs +++ b/crates/storage/db/src/models/class.rs @@ -1,8 +1,37 @@ -use katana_primitives::class::CompiledClass; +use katana_primitives::class::{ClassHash, CompiledClass, CompiledClassHash}; +use serde::{Deserialize, Serialize}; -use crate::codecs::{Compress, Decompress}; +use crate::codecs::{Compress, Decode, Decompress, Encode}; use crate::error::CodecError; +/// The value for the [MigratedCompiledClassHashes](crate::tables::MigratedCompiledClassHashes) +/// table. +#[derive(Debug, Default, Serialize, Deserialize, PartialEq)] +pub struct MigratedCompiledClassHash { + pub class_hash: ClassHash, + pub compiled_class_hash: CompiledClassHash, +} + +impl Compress for MigratedCompiledClassHash { + type Compressed = Vec; + + fn compress(self) -> Result { + let mut buf = Vec::new(); + buf.extend_from_slice(&self.class_hash.encode()); + buf.extend_from_slice(&self.compiled_class_hash.compress()?); + Ok(buf) + } +} + +impl Decompress for MigratedCompiledClassHash { + fn decompress>(bytes: B) -> Result { + let bytes = bytes.as_ref(); + let class_hash = ClassHash::decode(&bytes[0..32])?; + let compiled_class_hash = ClassHash::decompress(&bytes[32..])?; + Ok(Self { class_hash, compiled_class_hash }) + } +} + impl Compress for CompiledClass { type Compressed = Vec; fn compress(self) -> Result { diff --git a/crates/storage/db/src/models/stage.rs b/crates/storage/db/src/models/stage.rs index 7bd32d222..92049e47f 100644 --- a/crates/storage/db/src/models/stage.rs +++ b/crates/storage/db/src/models/stage.rs @@ -7,7 +7,15 @@ pub type StageId = String; /// Pipeline stage checkpoint. 
#[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq, Eq)] #[cfg_attr(test, derive(::arbitrary::Arbitrary))] -pub struct StageCheckpoint { +pub struct ExecutionCheckpoint { /// The block number that the stage has processed up to. pub block: BlockNumber, } + +/// Pipeline stage prune checkpoint. +#[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq, Eq)] +#[cfg_attr(test, derive(::arbitrary::Arbitrary))] +pub struct PruningCheckpoint { + /// The block number up to which the stage has been pruned (inclusive). + pub block: BlockNumber, +} diff --git a/crates/storage/db/src/models/storage.rs b/crates/storage/db/src/models/storage.rs index 00f0a93de..d12f64065 100644 --- a/crates/storage/db/src/models/storage.rs +++ b/crates/storage/db/src/models/storage.rs @@ -88,7 +88,7 @@ impl Decompress for ContractStorageEntry { #[cfg(test)] mod tests { - use starknet::macros::felt; + use katana_primitives::felt; use crate::codecs::{Compress, Decompress}; diff --git a/crates/storage/db/src/tables.rs b/crates/storage/db/src/tables.rs index 5a1e08957..0d4e9e134 100644 --- a/crates/storage/db/src/tables.rs +++ b/crates/storage/db/src/tables.rs @@ -7,9 +7,10 @@ use katana_primitives::transaction::{TxHash, TxNumber}; use crate::codecs::{Compress, Decode, Decompress, Encode}; use crate::models::block::StoredBlockBodyIndices; +use crate::models::class::MigratedCompiledClassHash; use crate::models::contract::{ContractClassChange, ContractInfoChangeList, ContractNonceChange}; use crate::models::list::BlockList; -use crate::models::stage::{StageCheckpoint, StageId}; +use crate::models::stage::{ExecutionCheckpoint, PruningCheckpoint, StageId}; use crate::models::storage::{ContractStorageEntry, ContractStorageKey, StorageEntry}; use crate::models::trie::{TrieDatabaseKey, TrieDatabaseValue, TrieHistoryEntry}; use crate::models::{VersionedContractClass, VersionedHeader, VersionedTx}; @@ -54,7 +55,7 @@ pub enum TableType { DupSort, } -pub const NUM_TABLES: usize = 32; 
+pub const NUM_TABLES: usize = 34; /// Macro to declare `libmdbx` tables. #[macro_export] @@ -172,12 +173,14 @@ define_tables_enum! {[ (ContractStorage, TableType::DupSort), (ClassDeclarationBlock, TableType::Table), (ClassDeclarations, TableType::DupSort), + (MigratedCompiledClassHashes, TableType::DupSort), (ContractInfoChangeSet, TableType::Table), (NonceChangeHistory, TableType::DupSort), (ClassChangeHistory, TableType::DupSort), (StorageChangeHistory, TableType::DupSort), (StorageChangeSet, TableType::Table), - (StageCheckpoints, TableType::Table), + (StageExecutionCheckpoints, TableType::Table), + (StagePruningCheckpoints, TableType::Table), (ClassesTrie, TableType::Table), (ContractsTrie, TableType::Table), (StoragesTrie, TableType::Table), @@ -191,7 +194,9 @@ define_tables_enum! {[ tables! { /// Pipeline stages checkpoint - StageCheckpoints: (StageId) => StageCheckpoint, + StageExecutionCheckpoints: (StageId) => ExecutionCheckpoint, + /// Pipeline stages prune checkpoint + StagePruningCheckpoints: (StageId) => PruningCheckpoint, /// Store canonical block headers Headers: (BlockNumber) => VersionedHeader, @@ -230,6 +235,8 @@ tables! { ClassDeclarationBlock: (ClassHash) => BlockNumber, /// Stores the list of class hashes according to the block number it was declared in. ClassDeclarations: (BlockNumber, ClassHash) => ClassHash, + /// Stores the list of class hashes according to the block number it was declared in. + MigratedCompiledClassHashes: (BlockNumber, ClassHash) => MigratedCompiledClassHash, /// Generic contract info change set. 
/// @@ -306,21 +313,23 @@ mod tests { assert_eq!(Tables::ALL[14].name(), ContractStorage::NAME); assert_eq!(Tables::ALL[15].name(), ClassDeclarationBlock::NAME); assert_eq!(Tables::ALL[16].name(), ClassDeclarations::NAME); - assert_eq!(Tables::ALL[17].name(), ContractInfoChangeSet::NAME); - assert_eq!(Tables::ALL[18].name(), NonceChangeHistory::NAME); - assert_eq!(Tables::ALL[19].name(), ClassChangeHistory::NAME); - assert_eq!(Tables::ALL[20].name(), StorageChangeHistory::NAME); - assert_eq!(Tables::ALL[21].name(), StorageChangeSet::NAME); - assert_eq!(Tables::ALL[22].name(), StageCheckpoints::NAME); - assert_eq!(Tables::ALL[23].name(), ClassesTrie::NAME); - assert_eq!(Tables::ALL[24].name(), ContractsTrie::NAME); - assert_eq!(Tables::ALL[25].name(), StoragesTrie::NAME); - assert_eq!(Tables::ALL[26].name(), ClassesTrieHistory::NAME); - assert_eq!(Tables::ALL[27].name(), ContractsTrieHistory::NAME); - assert_eq!(Tables::ALL[28].name(), StoragesTrieHistory::NAME); - assert_eq!(Tables::ALL[29].name(), ClassesTrieChangeSet::NAME); - assert_eq!(Tables::ALL[30].name(), ContractsTrieChangeSet::NAME); - assert_eq!(Tables::ALL[31].name(), StoragesTrieChangeSet::NAME); + assert_eq!(Tables::ALL[17].name(), MigratedCompiledClassHashes::NAME); + assert_eq!(Tables::ALL[18].name(), ContractInfoChangeSet::NAME); + assert_eq!(Tables::ALL[19].name(), NonceChangeHistory::NAME); + assert_eq!(Tables::ALL[20].name(), ClassChangeHistory::NAME); + assert_eq!(Tables::ALL[21].name(), StorageChangeHistory::NAME); + assert_eq!(Tables::ALL[22].name(), StorageChangeSet::NAME); + assert_eq!(Tables::ALL[23].name(), StageExecutionCheckpoints::NAME); + assert_eq!(Tables::ALL[24].name(), StagePruningCheckpoints::NAME); + assert_eq!(Tables::ALL[25].name(), ClassesTrie::NAME); + assert_eq!(Tables::ALL[26].name(), ContractsTrie::NAME); + assert_eq!(Tables::ALL[27].name(), StoragesTrie::NAME); + assert_eq!(Tables::ALL[28].name(), ClassesTrieHistory::NAME); + assert_eq!(Tables::ALL[29].name(), 
ContractsTrieHistory::NAME); + assert_eq!(Tables::ALL[30].name(), StoragesTrieHistory::NAME); + assert_eq!(Tables::ALL[31].name(), ClassesTrieChangeSet::NAME); + assert_eq!(Tables::ALL[32].name(), ContractsTrieChangeSet::NAME); + assert_eq!(Tables::ALL[33].name(), StoragesTrieChangeSet::NAME); assert_eq!(Tables::Headers.table_type(), TableType::Table); assert_eq!(Tables::BlockHashes.table_type(), TableType::Table); @@ -339,12 +348,14 @@ mod tests { assert_eq!(Tables::ContractStorage.table_type(), TableType::DupSort); assert_eq!(Tables::ClassDeclarationBlock.table_type(), TableType::Table); assert_eq!(Tables::ClassDeclarations.table_type(), TableType::DupSort); + assert_eq!(Tables::MigratedCompiledClassHashes.table_type(), TableType::DupSort); assert_eq!(Tables::ContractInfoChangeSet.table_type(), TableType::Table); assert_eq!(Tables::NonceChangeHistory.table_type(), TableType::DupSort); assert_eq!(Tables::ClassChangeHistory.table_type(), TableType::DupSort); assert_eq!(Tables::StorageChangeHistory.table_type(), TableType::DupSort); assert_eq!(Tables::StorageChangeSet.table_type(), TableType::Table); - assert_eq!(Tables::StageCheckpoints.table_type(), TableType::Table); + assert_eq!(Tables::StageExecutionCheckpoints.table_type(), TableType::Table); + assert_eq!(Tables::StagePruningCheckpoints.table_type(), TableType::Table); assert_eq!(Tables::ClassesTrie.table_type(), TableType::Table); assert_eq!(Tables::ContractsTrie.table_type(), TableType::Table); assert_eq!(Tables::StoragesTrie.table_type(), TableType::Table); @@ -356,17 +367,17 @@ mod tests { assert_eq!(Tables::StoragesTrieChangeSet.table_type(), TableType::Table); } - use katana_primitives::address; use katana_primitives::block::{BlockHash, BlockNumber, FinalityStatus}; use katana_primitives::class::{ClassHash, CompiledClass, CompiledClassHash}; use katana_primitives::contract::{ContractAddress, GenericContractInfo}; use katana_primitives::execution::TypedTransactionExecutionInfo; use 
katana_primitives::receipt::{InvokeTxReceipt, Receipt}; use katana_primitives::transaction::{InvokeTx, Tx, TxHash, TxNumber}; - use starknet::macros::felt; + use katana_primitives::{address, felt}; use crate::codecs::{Compress, Decode, Decompress, Encode}; use crate::models::block::StoredBlockBodyIndices; + use crate::models::class::MigratedCompiledClassHash; use crate::models::contract::{ ContractClassChange, ContractInfoChangeList, ContractNonceChange, }; @@ -436,6 +447,7 @@ mod tests { (CompiledClassHash, felt!("211")), (CompiledClass, CompiledClass::Legacy(Default::default())), (GenericContractInfo, GenericContractInfo::default()), + (MigratedCompiledClassHash, MigratedCompiledClassHash::default()), (StorageEntry, StorageEntry::default()), (ContractInfoChangeList, ContractInfoChangeList::default()), (ContractNonceChange, ContractNonceChange::default()), diff --git a/crates/storage/db/src/trie/mod.rs b/crates/storage/db/src/trie/mod.rs index 9e7f8c723..88a376e6b 100644 --- a/crates/storage/db/src/trie/mod.rs +++ b/crates/storage/db/src/trie/mod.rs @@ -10,7 +10,7 @@ use katana_trie::bonsai::{BonsaiDatabase, BonsaiPersistentDatabase, ByteVec, Dat use katana_trie::CommitId; use smallvec::ToSmallVec; -use crate::abstraction::{DbCursor, DbTxMutRef, DbTxRef}; +use crate::abstraction::{DbCursor, DbDupSortCursor, DbTx, DbTxMut}; use crate::models::trie::{TrieDatabaseKey, TrieDatabaseKeyType, TrieHistoryEntry}; use crate::models::{self}; use crate::tables::{self, Trie}; @@ -25,61 +25,59 @@ pub struct Error(#[from] crate::error::DatabaseError); impl katana_trie::bonsai::DBError for Error {} +impl Error { + /// Returns the inner database error. 
+ pub fn into_inner(self) -> crate::error::DatabaseError { + self.0 + } +} + #[derive(Debug)] -pub struct TrieDbFactory<'a, Tx: DbTxRef<'a>> { +pub struct TrieDbFactory { tx: Tx, - _phantom: &'a PhantomData<()>, } -impl<'a, Tx: DbTxRef<'a>> TrieDbFactory<'a, Tx> { +impl TrieDbFactory { pub fn new(tx: Tx) -> Self { - Self { tx, _phantom: &PhantomData } + Self { tx } } - pub fn latest(&self) -> GlobalTrie<'a, Tx> { - GlobalTrie { tx: self.tx.clone(), _phantom: &PhantomData } + pub fn latest(&self) -> GlobalTrie { + GlobalTrie { tx: self.tx.clone() } } // TODO: check that the snapshot for the block number is available - pub fn historical(&self, block: BlockNumber) -> Option> { - Some(HistoricalGlobalTrie { tx: self.tx.clone(), block, _phantom: &PhantomData }) + pub fn historical(&self, block: BlockNumber) -> Option> { + Some(HistoricalGlobalTrie { tx: self.tx.clone(), block }) } } /// Provides access to the latest tries. #[derive(Debug)] -pub struct GlobalTrie<'a, Tx: DbTxRef<'a>> { +pub struct GlobalTrie { tx: Tx, - _phantom: &'a PhantomData<()>, } -impl<'a, Tx> GlobalTrie<'a, Tx> -where - Tx: DbTxRef<'a>, -{ +impl GlobalTrie { /// Returns the contracts trie. - pub fn contracts_trie( - &self, - ) -> katana_trie::ContractsTrie> { + pub fn contracts_trie(&self) -> katana_trie::ContractsTrie> { katana_trie::ContractsTrie::new(TrieDb::new(self.tx.clone())) } - /// Returns the partial contracts trie (for forked instances that use insert_with_proof). pub fn partial_contracts_trie( &self, - ) -> katana_trie::PartialContractsTrie> { + ) -> katana_trie::PartialContractsTrie> { katana_trie::PartialContractsTrie::new_partial(TrieDb::new(self.tx.clone())) } /// Returns the classes trie. - pub fn classes_trie(&self) -> katana_trie::ClassesTrie> { + pub fn classes_trie(&self) -> katana_trie::ClassesTrie> { katana_trie::ClassesTrie::new(TrieDb::new(self.tx.clone())) } - /// Returns the partial classes trie (for forked instances that use insert_with_proof). 
pub fn partial_classes_trie( &self, - ) -> katana_trie::PartialClassesTrie> { + ) -> katana_trie::PartialClassesTrie> { katana_trie::PartialClassesTrie::new_partial(TrieDb::new(self.tx.clone())) } @@ -88,45 +86,39 @@ where pub fn storages_trie( &self, address: ContractAddress, - ) -> katana_trie::StoragesTrie> { + ) -> katana_trie::StoragesTrie> { katana_trie::StoragesTrie::new(TrieDb::new(self.tx.clone()), address) } - /// Returns the partial storages trie (for forked instances that use insert_with_proof). pub fn partial_storages_trie( &self, address: ContractAddress, - ) -> katana_trie::PartialStoragesTrie> { + ) -> katana_trie::PartialStoragesTrie> { katana_trie::PartialStoragesTrie::new_partial(TrieDb::new(self.tx.clone()), address) } } /// Historical tries, allowing access to the state tries at each block. #[derive(Debug)] -pub struct HistoricalGlobalTrie<'a, Tx: DbTxRef<'a>> { +pub struct HistoricalGlobalTrie { /// The database transaction. tx: Tx, /// The block number at which the trie was constructed. block: BlockNumber, - _phantom: &'a PhantomData<()>, } -impl<'a, Tx> HistoricalGlobalTrie<'a, Tx> -where - Tx: DbTxRef<'a>, -{ +impl HistoricalGlobalTrie { /// Returns the historical contracts trie. pub fn contracts_trie( &self, - ) -> katana_trie::ContractsTrie> { + ) -> katana_trie::ContractsTrie> { let commit = CommitId::new(self.block); katana_trie::ContractsTrie::new(SnapshotTrieDb::new(self.tx.clone(), commit)) } - /// Returns the partial historical contracts trie (for forked instances that use insert_with_proof). pub fn partial_contracts_trie( &self, - ) -> katana_trie::PartialContractsTrie> { + ) -> katana_trie::PartialContractsTrie> { let commit = CommitId::new(self.block); katana_trie::PartialContractsTrie::new_partial(SnapshotTrieDb::new(self.tx.clone(), commit)) } @@ -134,15 +126,14 @@ where /// Returns the historical classes trie. 
pub fn classes_trie( &self, - ) -> katana_trie::ClassesTrie> { + ) -> katana_trie::ClassesTrie> { let commit = CommitId::new(self.block); katana_trie::ClassesTrie::new(SnapshotTrieDb::new(self.tx.clone(), commit)) } - /// Returns the partial historical classes trie (for forked instances that use insert_with_proof). pub fn partial_classes_trie( &self, - ) -> katana_trie::PartialClassesTrie> { + ) -> katana_trie::PartialClassesTrie> { let commit = CommitId::new(self.block); katana_trie::PartialClassesTrie::new_partial(SnapshotTrieDb::new(self.tx.clone(), commit)) } @@ -152,16 +143,15 @@ where pub fn storages_trie( &self, address: ContractAddress, - ) -> katana_trie::StoragesTrie> { + ) -> katana_trie::StoragesTrie> { let commit = CommitId::new(self.block); katana_trie::StoragesTrie::new(SnapshotTrieDb::new(self.tx.clone(), commit), address) } - /// Returns the partial historical storages trie (for forked instances that use insert_with_proof). pub fn partial_storages_trie( &self, address: ContractAddress, - ) -> katana_trie::PartialStoragesTrie> { + ) -> katana_trie::PartialStoragesTrie> { let commit = CommitId::new(self.block); katana_trie::PartialStoragesTrie::new_partial( SnapshotTrieDb::new(self.tx.clone(), commit), @@ -173,39 +163,39 @@ where // --- Trie's database implementations. These are implemented based on the Bonsai Trie // functionalities and abstractions. 
-pub struct TrieDb<'a, Tb, Tx> +pub struct TrieDb where Tb: Trie, - Tx: DbTxRef<'a>, + Tx: DbTx, { tx: Tx, - _phantom: &'a PhantomData, + _phantom: PhantomData, } -impl<'a, Tb, Tx> fmt::Debug for TrieDb<'a, Tb, Tx> +impl fmt::Debug for TrieDb where Tb: Trie, - Tx: DbTxRef<'a>, + Tx: DbTx, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("TrieDbMut").field("tx", &"..").finish() } } -impl<'a, Tb, Tx> TrieDb<'a, Tb, Tx> +impl TrieDb where Tb: Trie, - Tx: DbTxRef<'a>, + Tx: DbTx, { pub(crate) fn new(tx: Tx) -> Self { - Self { tx, _phantom: &PhantomData } + Self { tx, _phantom: PhantomData } } } -impl<'a, Tb, Tx> BonsaiDatabase for TrieDb<'a, Tb, Tx> +impl BonsaiDatabase for TrieDb where Tb: Trie, - Tx: DbTxRef<'a>, + Tx: DbTx, { type Batch = (); type DatabaseError = Error; @@ -223,9 +213,22 @@ where fn get_by_prefix( &self, - _: &DatabaseKey<'_>, + prefix: &DatabaseKey<'_>, ) -> Result, Self::DatabaseError> { - todo!() + let mut results = Vec::new(); + + let mut cursor = self.tx.cursor::()?; + let walker = cursor.walk(None)?; + + for entry in walker { + let (TrieDatabaseKey { key, .. }, value) = entry?; + + if key.starts_with(prefix.as_slice()) { + results.push((key.to_smallvec(), value)); + } + } + + Ok(results) } fn insert( @@ -256,10 +259,10 @@ where } } -pub struct TrieDbMut<'tx, Tb, Tx> +pub struct TrieDbMut where Tb: Trie, - Tx: DbTxMutRef<'tx>, + Tx: DbTxMut, { tx: Tx, /// List of key-value pairs that has been added throughout the duration of the trie @@ -267,33 +270,84 @@ where /// /// This will be used to create the trie snapshot. 
write_cache: HashMap, - _phantom: &'tx PhantomData, + _phantom: PhantomData, } -impl<'tx, Tb, Tx> fmt::Debug for TrieDbMut<'tx, Tb, Tx> +impl fmt::Debug for TrieDbMut where Tb: Trie, - Tx: DbTxMutRef<'tx>, + Tx: DbTxMut, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("TrieDbMut").field("tx", &"..").finish() } } -impl<'tx, Tb, Tx> TrieDbMut<'tx, Tb, Tx> +impl TrieDbMut where Tb: Trie, - Tx: DbTxMutRef<'tx>, + Tx: DbTxMut, { pub fn new(tx: Tx) -> Self { - Self { tx, write_cache: HashMap::new(), _phantom: &PhantomData } + Self { tx, write_cache: HashMap::new(), _phantom: PhantomData } + } + + /// Removes the snapshot data for the given block number. + /// + /// This is the inverse of [`BonsaiPersistentDatabase::snapshot`] - it removes all history + /// entries for the given block and updates the corresponding changesets. + /// + /// Note: There is currently no efficient way to check if a snapshot exists for a given block + /// without querying the `Tb::History` table. As a result, calling this method on a + /// non-existent snapshot is a no-op. + pub fn remove_snapshot(&mut self, block: BlockNumber) -> Result<(), Error> { + // Get all history entries for this block using dupsort cursor + let mut cursor = self.tx.cursor_dup::()?; + + // walk_dup iterates only over entries with the same key (block number) + let Some(walker) = cursor.walk_dup(Some(block), None)? else { + // No entries for this block + return Ok(()); + }; + + let mut keys_to_update = Vec::new(); + for entry in walker { + let (_, entry) = entry?; + keys_to_update.push(entry.key); + } + + // For each key, update its changeset by removing this block number + for key in &keys_to_update { + if let Some(mut set) = self.tx.get::(key.clone())? 
{ + set.remove(block); + if set.is_empty() { + self.tx.delete::(key.clone(), None)?; + } else { + self.tx.put::(key.clone(), set)?; + } + } + } + + // Delete all history entries for this block using dupsort cursor + let mut cursor = self.tx.cursor_dup_mut::()?; + + let Some(mut walker) = cursor.walk_dup(Some(block), None)? else { + return Ok(()); + }; + + // Use delete_current to delete each entry as we iterate + while walker.next().is_some() { + walker.delete_current()?; + } + + Ok(()) } } -impl<'tx, Tb, Tx> BonsaiDatabase for TrieDbMut<'tx, Tb, Tx> +impl BonsaiDatabase for TrieDbMut where Tb: Trie, - Tx: DbTxMutRef<'tx>, + Tx: DbTxMut, { type Batch = (); type DatabaseError = Error; @@ -350,8 +404,7 @@ where &self, prefix: &DatabaseKey<'_>, ) -> Result, Self::DatabaseError> { - let _ = prefix; - todo!() + TrieDb::::new(self.tx.clone()).get_by_prefix(prefix) } fn insert( @@ -396,14 +449,14 @@ where } } -impl<'tx, Tb, Tx> BonsaiPersistentDatabase for TrieDbMut<'tx, Tb, Tx> +impl BonsaiPersistentDatabase for TrieDbMut where Tb: Trie, - Tx: DbTxMutRef<'tx> + 'tx, + Tx: DbTxMut, { type DatabaseError = Error; type Transaction<'a> - = SnapshotTrieDb<'tx, Tb, Tx> + = SnapshotTrieDb where Self: 'a; @@ -462,10 +515,10 @@ fn to_db_key(key: &DatabaseKey<'_>) -> models::trie::TrieDatabaseKey { #[cfg(test)] mod tests { + use katana_primitives::cairo::ShortString; use katana_primitives::hash::{Poseidon, StarkHash}; use katana_primitives::{felt, hash}; use katana_trie::{verify_proof, ClassesTrie, CommitId}; - use starknet::macros::short_string; use super::TrieDbMut; use crate::abstraction::Database; @@ -476,9 +529,9 @@ mod tests { #[test] fn snapshot() { let db = test_utils::create_test_db(); - let db_tx = db.tx_mut().expect("failed to get tx"); + let tx = db.tx_mut().expect("failed to get tx"); - let mut trie = ClassesTrie::new(TrieDbMut::::new(&db_tx)); + let mut trie = ClassesTrie::new(TrieDbMut::::new(tx.clone())); let root0 = { let entries = [ @@ -513,7 +566,7 @@ mod tests { 
assert_ne!(root0, root1); { - let db = SnapshotTrieDb::::new(&db_tx, CommitId::new(0)); + let db = SnapshotTrieDb::::new(tx.clone(), CommitId::new(0)); let mut snapshot0 = ClassesTrie::new(db); let snapshot_root0 = snapshot0.root(); @@ -523,15 +576,17 @@ mod tests { let verify_result0 = verify_proof::(&proofs0, snapshot_root0, vec![felt!("0x9999")]); - let value = - hash::Poseidon::hash(&short_string!("CONTRACT_CLASS_LEAF_V0"), &felt!("0xdead")); + let value = hash::Poseidon::hash( + &ShortString::from_ascii("CONTRACT_CLASS_LEAF_V0").into(), + &felt!("0xdead"), + ); assert_eq!(vec![value], verify_result0); } { let commit = CommitId::new(1); let mut snapshot1 = - ClassesTrie::new(SnapshotTrieDb::::new(&db_tx, commit)); + ClassesTrie::new(SnapshotTrieDb::::new(tx.clone(), commit)); let snapshot_root1 = snapshot1.root(); assert_eq!(snapshot_root1, root1); @@ -540,8 +595,10 @@ mod tests { let verify_result1 = verify_proof::(&proofs1, snapshot_root1, vec![felt!("0x6969")]); - let value = - hash::Poseidon::hash(&short_string!("CONTRACT_CLASS_LEAF_V0"), &felt!("0x80085")); + let value = hash::Poseidon::hash( + &ShortString::from_ascii("CONTRACT_CLASS_LEAF_V0").into(), + &felt!("0x80085"), + ); assert_eq!(vec![value], verify_result1); } @@ -551,12 +608,246 @@ mod tests { let result = verify_proof::(&proofs, root, vec![felt!("0x6969"), felt!("0x9999")]); - let value0 = - hash::Poseidon::hash(&short_string!("CONTRACT_CLASS_LEAF_V0"), &felt!("0x80085")); - let value1 = - hash::Poseidon::hash(&short_string!("CONTRACT_CLASS_LEAF_V0"), &felt!("0xdead")); + let value0 = hash::Poseidon::hash( + &ShortString::from_ascii("CONTRACT_CLASS_LEAF_V0").into(), + &felt!("0x80085"), + ); + let value1 = hash::Poseidon::hash( + &ShortString::from_ascii("CONTRACT_CLASS_LEAF_V0").into(), + &felt!("0xdead"), + ); assert_eq!(vec![value0, value1], result); } } + + #[test] + fn revert_to() { + let db = test_utils::create_test_db(); + let tx = db.tx_mut().expect("failed to get tx"); + + let mut trie 
= ClassesTrie::new(TrieDbMut::::new(tx.clone())); + + // Insert values at block 0 + trie.insert(felt!("0x1"), felt!("0x100")); + trie.insert(felt!("0x2"), felt!("0x200")); + trie.commit(0); + let root_at_block_0 = trie.root(); + + // Insert more values at block 1 + trie.insert(felt!("0x3"), felt!("0x300")); + trie.insert(felt!("0x4"), felt!("0x400")); + trie.commit(1); + let root_at_block_1 = trie.root(); + + // Roots should be different + assert_ne!(root_at_block_0, root_at_block_1); + + // Insert even more values at block 2 + trie.insert(felt!("0x5"), felt!("0x500")); + trie.commit(2); + let root_at_block_2 = trie.root(); + + // Roots should be different + assert_ne!(root_at_block_1, root_at_block_2); + assert_ne!(root_at_block_0, root_at_block_2); + + // Revert to block 1 + trie.revert_to(1, 2); + let root_after_revert = trie.root(); + + // After revert, root should match block 1 + assert_eq!(root_after_revert, root_at_block_1); + + // Revert to block 0 + trie.revert_to(0, 1); + let root_after_second_revert = trie.root(); + + // After revert, root should match block 0 + assert_eq!(root_after_second_revert, root_at_block_0); + + // Insert more values at block 1 + trie.insert(felt!("0x3"), felt!("0x300")); + trie.insert(felt!("0x4"), felt!("0x400")); + trie.commit(1); + let root_at_block_1_after_insert = trie.root(); + + // After insertion, root should match block 1 + assert_eq!(root_at_block_1_after_insert, root_at_block_1); + + // Insert even more values at block 2 + trie.insert(felt!("0x5"), felt!("0x500")); + trie.commit(2); + let root_at_block_2_after_insert = trie.root(); + + // After insertion, root should match block 2 + assert_eq!(root_at_block_2_after_insert, root_at_block_2); + } + + /// Tests the `remove_snapshot` method by creating multiple snapshots and removing them. + /// + /// Note: This test verifies that remaining snapshots still work correctly after removal, + /// but does not explicitly verify that removed snapshots no longer exist. 
This is because + /// there is currently no efficient way to check snapshot existence at the `SnapshotTrieDb` + /// level without querying the underlying `Tb::History` table directly. + #[test] + fn remove_snapshot() { + use katana_primitives::Felt; + + let db = test_utils::create_test_db(); + let tx = db.tx_mut().expect("failed to get tx"); + + let mut trie = ClassesTrie::new(TrieDbMut::::new(tx.clone())); + + //////////////////////////////////////////////////////////////////////////////////// + // Setup: Create snapshots at blocks 0-4 with various insertions and updates + //////////////////////////////////////////////////////////////////////////////////// + + // Block 0: Insert 50 new values + for i in 0u64..50 { + trie.insert(Felt::from(i), Felt::from(i * 100)); + } + trie.commit(0); + let root_at_block_0 = trie.root(); + + // Block 1: Insert 50 new values + update 10 existing keys from block 0 + for i in 50u64..100 { + trie.insert(Felt::from(i), Felt::from(i * 100)); + } + for i in 10u64..20 { + trie.insert(Felt::from(i), Felt::from(i * 200)); + } + trie.commit(1); + let root_at_block_1 = trie.root(); + assert_ne!(root_at_block_0, root_at_block_1); + + // Block 2: Insert 50 new values + update 10 existing keys from block 1 + for i in 100u64..150 { + trie.insert(Felt::from(i), Felt::from(i * 100)); + } + for i in 60u64..70 { + trie.insert(Felt::from(i), Felt::from(i * 300)); + } + trie.commit(2); + let root_at_block_2 = trie.root(); + assert_ne!(root_at_block_1, root_at_block_2); + + // Block 3: Insert 50 new values + for i in 150u64..200 { + trie.insert(Felt::from(i), Felt::from(i * 100)); + } + trie.commit(3); + let root_at_block_3 = trie.root(); + assert_ne!(root_at_block_2, root_at_block_3); + + // Block 4: Insert 50 new values + for i in 200u64..250 { + trie.insert(Felt::from(i), Felt::from(i * 100)); + } + trie.commit(4); + let root_at_block_4 = trie.root(); + assert_ne!(root_at_block_3, root_at_block_4); + + 
//////////////////////////////////////////////////////////////////////////////////// + // Verify: All snapshots (blocks 0-4) exist and have correct roots + //////////////////////////////////////////////////////////////////////////////////// + + let snapshot = + ClassesTrie::new(SnapshotTrieDb::::new(tx.clone(), 0.into())); + assert_eq!(snapshot.root(), root_at_block_0); + + let snapshot = + ClassesTrie::new(SnapshotTrieDb::::new(tx.clone(), 1.into())); + assert_eq!(snapshot.root(), root_at_block_1); + + let snapshot = + ClassesTrie::new(SnapshotTrieDb::::new(tx.clone(), 2.into())); + assert_eq!(snapshot.root(), root_at_block_2); + + let snapshot = + ClassesTrie::new(SnapshotTrieDb::::new(tx.clone(), 3.into())); + assert_eq!(snapshot.root(), root_at_block_3); + + let snapshot = + ClassesTrie::new(SnapshotTrieDb::::new(tx.clone(), 4.into())); + assert_eq!(snapshot.root(), root_at_block_4); + + //////////////////////////////////////////////////////////////////////////////////// + // Remove snapshot at block 1 + //////////////////////////////////////////////////////////////////////////////////// + + let mut trie_db = TrieDbMut::::new(tx.clone()); + trie_db.remove_snapshot(1).expect("failed to remove snapshot"); + + // snapshots at blocks 0, 2, 3, 4 should still exist + + let snapshot = + ClassesTrie::new(SnapshotTrieDb::::new(tx.clone(), 0.into())); + assert_eq!(snapshot.root(), root_at_block_0); + + let snapshot = + ClassesTrie::new(SnapshotTrieDb::::new(tx.clone(), 2.into())); + assert_eq!(snapshot.root(), root_at_block_2); + + let snapshot = + ClassesTrie::new(SnapshotTrieDb::::new(tx.clone(), 3.into())); + assert_eq!(snapshot.root(), root_at_block_3); + + let snapshot = + ClassesTrie::new(SnapshotTrieDb::::new(tx.clone(), 4.into())); + assert_eq!(snapshot.root(), root_at_block_4); + + //////////////////////////////////////////////////////////////////////////////////// + // Remove snapshots at blocks 0 and 2 + 
//////////////////////////////////////////////////////////////////////////////////// + + let mut trie_db = TrieDbMut::::new(tx.clone()); + trie_db.remove_snapshot(0).expect("failed to remove snapshot"); + trie_db.remove_snapshot(2).expect("failed to remove snapshot"); + + // snapshots at blocks 3 and 4 should still exist + + let snapshot = + ClassesTrie::new(SnapshotTrieDb::::new(tx.clone(), 3.into())); + assert_eq!(snapshot.root(), root_at_block_3); + + let snapshot = + ClassesTrie::new(SnapshotTrieDb::::new(tx.clone(), 4.into())); + assert_eq!(snapshot.root(), root_at_block_4); + + //////////////////////////////////////////////////////////////////////////////////// + // Remove snapshot at block 3 + //////////////////////////////////////////////////////////////////////////////////// + + let mut trie_db = TrieDbMut::::new(tx.clone()); + trie_db.remove_snapshot(3).expect("failed to remove snapshot"); + + // snapshot at block 4 should still exist + + let snapshot = + ClassesTrie::new(SnapshotTrieDb::::new(tx.clone(), 4.into())); + assert_eq!(snapshot.root(), root_at_block_4); + + //////////////////////////////////////////////////////////////////////////////////// + // Verify: Trie still works after pruning - insert new values at block 5 + //////////////////////////////////////////////////////////////////////////////////// + + let mut trie = ClassesTrie::new(TrieDbMut::::new(tx.clone())); + for i in 250u64..300 { + trie.insert(Felt::from(i), Felt::from(i * 100)); + } + trie.commit(5); + let root_at_block_5 = trie.root(); + assert_ne!(root_at_block_4, root_at_block_5); + + // both remaining snapshots (blocks 4 and 5) should exist + + let snapshot = + ClassesTrie::new(SnapshotTrieDb::::new(tx.clone(), 4.into())); + assert_eq!(snapshot.root(), root_at_block_4); + + let snapshot = + ClassesTrie::new(SnapshotTrieDb::::new(tx.clone(), 5.into())); + assert_eq!(snapshot.root(), root_at_block_5); + } } diff --git a/crates/storage/db/src/trie/snapshot.rs 
b/crates/storage/db/src/trie/snapshot.rs index 620cbdc02..7acf5502f 100644 --- a/crates/storage/db/src/trie/snapshot.rs +++ b/crates/storage/db/src/trie/snapshot.rs @@ -7,25 +7,25 @@ use katana_trie::bonsai::{BonsaiDatabase, ByteVec, DatabaseKey}; use katana_trie::CommitId; use super::Error; -use crate::abstraction::{DbDupSortCursor, DbTxRef}; +use crate::abstraction::{DbDupSortCursor, DbTx}; use crate::models::list::BlockList; use crate::tables::Trie; use crate::trie::to_db_key; -pub struct SnapshotTrieDb<'tx, Tb, Tx> +pub struct SnapshotTrieDb where Tb: Trie, - Tx: DbTxRef<'tx>, + Tx: DbTx, { tx: Tx, snapshot_id: CommitId, - _table: &'tx PhantomData, + _table: PhantomData, } -impl<'a, Tb, Tx> fmt::Debug for SnapshotTrieDb<'a, Tb, Tx> +impl fmt::Debug for SnapshotTrieDb where Tb: Trie, - Tx: DbTxRef<'a>, + Tx: DbTx, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("SnapshotTrieDb").field("tx", &"..").finish() @@ -50,20 +50,20 @@ fn recent_change_from_block(target: BlockNumber, block_list: &BlockList) -> Opti } } -impl<'tx, Tb, Tx> SnapshotTrieDb<'tx, Tb, Tx> +impl SnapshotTrieDb where Tb: Trie, - Tx: DbTxRef<'tx>, + Tx: DbTx, { - pub(crate) fn new(tx: Tx, id: CommitId) -> Self { - Self { tx, snapshot_id: id, _table: &PhantomData } + pub fn new(tx: Tx, id: CommitId) -> Self { + Self { tx, snapshot_id: id, _table: PhantomData } } } -impl<'tx, Tb, Tx> BonsaiDatabase for SnapshotTrieDb<'tx, Tb, Tx> +impl BonsaiDatabase for SnapshotTrieDb where Tb: Trie, - Tx: DbTxRef<'tx>, + Tx: DbTx, { type Batch = (); type DatabaseError = Error; @@ -207,7 +207,7 @@ mod tests { let tx = db.tx_mut().expect("failed to create rw tx"); for block in &blocks { - let mut trie = TrieDbMut::::new(&tx); + let mut trie = TrieDbMut::::new(tx.clone()); // Insert key/value pairs for ((r#type, key), value) in &block.keyvalues { @@ -229,7 +229,7 @@ mod tests { for block in &blocks { let snapshot_id = CommitId::from(block.number); - let snapshot_db = 
SnapshotTrieDb::::new(&tx, snapshot_id); + let snapshot_db = SnapshotTrieDb::::new(tx.clone(), snapshot_id); // Verify snapshots for ((r#type, key), value) in &block.keyvalues { diff --git a/crates/storage/fork/src/lib.rs b/crates/storage/fork/src/lib.rs index 6b617f629..dc0038f00 100644 --- a/crates/storage/fork/src/lib.rs +++ b/crates/storage/fork/src/lib.rs @@ -1187,7 +1187,7 @@ mod tests { use std::sync::Mutex; use std::time::Duration; - use starknet::macros::felt; + use katana_primitives::felt; use super::test_utils::*; use super::*; diff --git a/crates/storage/provider/provider-api/src/stage.rs b/crates/storage/provider/provider-api/src/stage.rs index ff5e2c226..0692eb0b5 100644 --- a/crates/storage/provider/provider-api/src/stage.rs +++ b/crates/storage/provider/provider-api/src/stage.rs @@ -5,8 +5,14 @@ use crate::ProviderResult; #[auto_impl::auto_impl(&, Box, Arc)] pub trait StageCheckpointProvider: Send + Sync { /// Returns the number of the last block that was successfully processed by the stage. - fn checkpoint(&self, id: &str) -> ProviderResult>; + fn execution_checkpoint(&self, id: &str) -> ProviderResult>; /// Sets the checkpoint for a stage to the given block number. - fn set_checkpoint(&self, id: &str, block_number: BlockNumber) -> ProviderResult<()>; + fn set_execution_checkpoint(&self, id: &str, block_number: BlockNumber) -> ProviderResult<()>; + + /// Returns the number of the last block that was successfully pruned by the stage. + fn prune_checkpoint(&self, id: &str) -> ProviderResult>; + + /// Sets the prune checkpoint for a stage to the given block number. 
+ fn set_prune_checkpoint(&self, id: &str, block_number: BlockNumber) -> ProviderResult<()>; } diff --git a/crates/storage/provider/provider-api/src/state.rs b/crates/storage/provider/provider-api/src/state.rs index beb699eec..3618360fd 100644 --- a/crates/storage/provider/provider-api/src/state.rs +++ b/crates/storage/provider/provider-api/src/state.rs @@ -1,9 +1,9 @@ use katana_primitives::block::BlockHashOrNumber; +use katana_primitives::cairo::ShortString; use katana_primitives::class::ClassHash; use katana_primitives::contract::{ContractAddress, Nonce, StorageKey, StorageValue}; use katana_primitives::Felt; use katana_trie::MultiProof; -use starknet::macros::short_string; use starknet_types_core::hash::StarkHash; use super::contract::ContractClassProvider; @@ -16,7 +16,7 @@ pub trait StateRootProvider: Send + Sync { fn state_root(&self) -> ProviderResult { // https://docs.starknet.io/architecture-and-concepts/network-architecture/starknet-state/#state_commitment Ok(starknet_types_core::hash::Poseidon::hash_array(&[ - short_string!("STARKNET_STATE_V0"), + ShortString::from_ascii("STARKNET_STATE_V0").into(), self.contracts_root()?, self.classes_root()?, ])) diff --git a/crates/storage/provider/provider-api/src/trie.rs b/crates/storage/provider/provider-api/src/trie.rs index 099433c03..52f8a6638 100644 --- a/crates/storage/provider/provider-api/src/trie.rs +++ b/crates/storage/provider/provider-api/src/trie.rs @@ -1,21 +1,18 @@ -use std::collections::{BTreeMap, HashMap}; - -use crate::ProviderResult; use katana_primitives::block::BlockNumber; use katana_primitives::class::{ClassHash, CompiledClassHash}; -use katana_primitives::contract::ContractAddress; use katana_primitives::hash::StarkHash; use katana_primitives::state::StateUpdates; use katana_primitives::Felt; -use katana_trie::{ContractLeaf, MultiProof}; use starknet::macros::short_string; +use crate::ProviderResult; + #[auto_impl::auto_impl(&, Box, Arc)] pub trait TrieWriter: Send + Sync { fn 
trie_insert_declared_classes( &self, block_number: BlockNumber, - updates: &BTreeMap, + classes: Vec<(ClassHash, CompiledClassHash)>, ) -> ProviderResult; fn trie_insert_contract_updates( @@ -24,44 +21,19 @@ pub trait TrieWriter: Send + Sync { state_updates: &StateUpdates, ) -> ProviderResult; - /// Insert declared classes into trie using proof for verification. - /// Default implementation falls back to regular method (ignoring proof). - fn trie_insert_declared_classes_with_proof( - &self, - block_number: BlockNumber, - updates: &BTreeMap, - _proof: MultiProof, - _original_root: Felt, - ) -> ProviderResult { - // Default implementation falls back to regular method (ignoring proof) - self.trie_insert_declared_classes(block_number, updates) - } - - /// Insert contract updates into trie using proofs for verification. - /// Default implementation falls back to regular method (ignoring proofs). - fn trie_insert_contract_updates_with_proof( - &self, - block_number: BlockNumber, - state_updates: &StateUpdates, - _proof: MultiProof, - _original_root: Felt, - _contract_leaves_data: HashMap, - _contracts_storage_proofs: Vec, - ) -> ProviderResult { - // Default implementation falls back to regular method (ignoring proofs) - self.trie_insert_contract_updates(block_number, state_updates) - } - /// Compute state root for a block with given state updates. - /// Can be overridden by providers that need special logic (e.g., ForkedProvider with partial tries). + /// Can be overridden by providers that need special logic (e.g., ForkedProvider with partial + /// tries). 
fn compute_state_root( &self, block_number: BlockNumber, state_updates: &StateUpdates, ) -> ProviderResult { // Default implementation for regular providers - let class_trie_root = - self.trie_insert_declared_classes(block_number, &state_updates.declared_classes)?; + let class_trie_root = self.trie_insert_declared_classes( + block_number, + state_updates.declared_classes.clone().into_iter().collect(), + )?; let contract_trie_root = self.trie_insert_contract_updates(block_number, state_updates)?; diff --git a/crates/storage/provider/provider/src/providers/db/mod.rs b/crates/storage/provider/provider/src/providers/db/mod.rs index cf0f65885..b3cb220c4 100644 --- a/crates/storage/provider/provider/src/providers/db/mod.rs +++ b/crates/storage/provider/provider/src/providers/db/mod.rs @@ -5,15 +5,15 @@ use std::collections::{BTreeMap, BTreeSet}; use std::fmt::Debug; use std::ops::{Deref, Range, RangeInclusive}; -use crate::{MutableProvider, ProviderResult}; use katana_db::abstraction::{DbCursor, DbCursorMut, DbDupSortCursor, DbTx, DbTxMut}; -use katana_db::error::DatabaseError; +use katana_db::error::{CodecError, DatabaseError}; use katana_db::models::block::StoredBlockBodyIndices; +use katana_db::models::class::MigratedCompiledClassHash; use katana_db::models::contract::{ ContractClassChange, ContractClassChangeType, ContractInfoChangeList, ContractNonceChange, }; use katana_db::models::list::BlockList; -use katana_db::models::stage::StageCheckpoint; +use katana_db::models::stage::{ExecutionCheckpoint, PruningCheckpoint}; use katana_db::models::storage::{ContractStorageEntry, ContractStorageKey, StorageEntry}; use katana_db::models::{VersionedHeader, VersionedTx}; use katana_db::tables::{self, DupSort, Table}; @@ -44,6 +44,9 @@ use katana_provider_api::transaction::{ }; use katana_provider_api::ProviderError; use katana_rpc_types::{TxTrace, TxTraceWithHash}; +use tracing::warn; + +use crate::{MutableProvider, ProviderResult}; /// A provider implementation that uses a 
persistent database as the backend. // TODO: remove the default generic type @@ -309,6 +312,16 @@ impl StateUpdateProvider for DbProvider { } } + let migrated_compiled_classes = dup_entries::< + Tx, + tables::MigratedCompiledClassHashes, + BTreeMap, + _, + >(&self.0, block_num, |entry| { + let (_, MigratedCompiledClassHash { class_hash, compiled_class_hash }) = entry?; + Ok(Some((class_hash, compiled_class_hash))) + })?; + let storage_updates = { let entries = dup_entries::< Tx, @@ -336,6 +349,7 @@ impl StateUpdateProvider for DbProvider { declared_classes, replaced_classes, deprecated_declared_classes, + migrated_compiled_classes, })) } else { Ok(None) @@ -524,18 +538,31 @@ impl TransactionStatusProvider for DbProvider { } } +/// NOTE: +/// +/// The `TransactionExecutionInfo` type (from the `blockifier` crate) has had breaking +/// serialization changes between versions. Entries stored with older versions may fail to +/// deserialize. +/// +/// Though this may change in the future, this behavior is currently necessary to maintain +/// backward compatibility. As a compromise, traces that cannot be deserialized +/// are treated as non-existent rather than causing errors. impl TransactionTraceProvider for DbProvider { fn transaction_execution( &self, hash: TxHash, ) -> ProviderResult> { if let Some(num) = self.0.get::(hash)? { - let execution = self - .0 - .get::(num)? 
- .ok_or(ProviderError::MissingTxExecution(num))?; - - Ok(Some(execution)) + match self.0.get::(num) { + Ok(Some(execution)) => Ok(Some(execution)), + Ok(None) => Ok(None), + // Treat decompress errors as non-existent for backward compatibility + Err(DatabaseError::Codec(CodecError::Decompress(err))) => { + warn!(tx_num = %num, %err, "Failed to deserialize transaction trace"); + Ok(None) + } + Err(e) => Err(e.into()), + } } else { Ok(None) } @@ -571,8 +598,14 @@ impl TransactionTraceProvider for DbProvider { let mut traces = Vec::with_capacity(total as usize); for i in range { - if let Some(trace) = self.0.get::(i)? { - traces.push(trace); + match self.0.get::(i) { + Ok(Some(trace)) => traces.push(trace), + Ok(None) => {} + // Skip entries that fail to decompress for backward compatibility + Err(DatabaseError::Codec(CodecError::Decompress(err))) => { + warn!(tx_num = %i, %err, "Failed to deserialize transaction trace"); + } + Err(e) => return Err(e.into()), } } @@ -698,6 +731,12 @@ impl BlockWriter for DbProvider { self.0.put::(block_number, class_hash)?; } + // insert migrated class hashes + for (class_hash, compiled_class_hash) in states.state_updates.migrated_compiled_classes { + let entry = MigratedCompiledClassHash { class_hash, compiled_class_hash }; + self.0.put::(block_number, entry)?; + } + // insert storage changes { let mut storage_cursor = self.0.cursor_dup_mut::()?; @@ -773,24 +812,28 @@ impl BlockWriter for DbProvider { } for (addr, new_class_hash) in states.state_updates.replaced_classes { - let mut info = self - .0 - .get::(addr)? - .ok_or(ProviderError::MissingContractInfo { address: addr })?; - - info.class_hash = new_class_hash; - self.0.put::(addr, info)?; + let info = if let Some(info) = self.0.get::(addr)? { + GenericContractInfo { class_hash: new_class_hash, ..info } + } else { + GenericContractInfo { class_hash: new_class_hash, ..Default::default() } + }; - let mut change_set = self - .0 - .get::(addr)? 
- .ok_or(ProviderError::MissingContractInfoChangeSet { address: addr })?; + let new_change_set = + if let Some(mut change_set) = self.0.get::(addr)? { + change_set.class_change_list.insert(block_number); + change_set + } else { + ContractInfoChangeList { + class_change_list: BlockList::from([block_number]), + ..Default::default() + } + }; - change_set.class_change_list.insert(block_number); - self.0.put::(addr, change_set)?; + self.0.put::(addr, info)?; let class_change_key = ContractClassChange::replaced(addr, new_class_hash); self.0.put::(block_number, class_change_key)?; + self.0.put::(addr, new_change_set)?; } for (addr, nonce) in states.state_updates.nonce_updates { @@ -823,15 +866,27 @@ impl BlockWriter for DbProvider { } impl StageCheckpointProvider for DbProvider { - fn checkpoint(&self, id: &str) -> ProviderResult> { - let result = self.0.get::(id.to_string())?; + fn execution_checkpoint(&self, id: &str) -> ProviderResult> { + let result = self.0.get::(id.to_string())?; + Ok(result.map(|x| x.block)) + } + + fn set_execution_checkpoint(&self, id: &str, block_number: BlockNumber) -> ProviderResult<()> { + let key = id.to_string(); + let value = ExecutionCheckpoint { block: block_number }; + self.0.put::(key, value)?; + Ok(()) + } + + fn prune_checkpoint(&self, id: &str) -> ProviderResult> { + let result = self.0.get::(id.to_string())?; Ok(result.map(|x| x.block)) } - fn set_checkpoint(&self, id: &str, block_number: BlockNumber) -> ProviderResult<()> { + fn set_prune_checkpoint(&self, id: &str, block_number: BlockNumber) -> ProviderResult<()> { let key = id.to_string(); - let value = StageCheckpoint { block: block_number }; - self.0.put::(key, value)?; + let value = PruningCheckpoint { block: block_number }; + self.0.put::(key, value)?; Ok(()) } } @@ -840,23 +895,21 @@ impl StageCheckpointProvider for DbProvider { mod tests { use std::collections::BTreeMap; - use katana_primitives::address; use katana_primitives::block::{ Block, BlockHashOrNumber, 
FinalityStatus, Header, SealedBlockWithStatus, }; use katana_primitives::class::ContractClass; - use katana_primitives::contract::ContractAddress; use katana_primitives::execution::TypedTransactionExecutionInfo; use katana_primitives::fee::FeeInfo; use katana_primitives::receipt::{InvokeTxReceipt, Receipt}; use katana_primitives::state::{StateUpdates, StateUpdatesWithClasses}; use katana_primitives::transaction::{InvokeTx, Tx, TxHash, TxWithHash}; + use katana_primitives::{address, felt}; use katana_provider_api::block::{ BlockHashProvider, BlockNumberProvider, BlockProvider, BlockStatusProvider, BlockWriter, }; use katana_provider_api::state::StateFactoryProvider; use katana_provider_api::transaction::TransactionProvider; - use starknet::macros::felt; use crate::{DbProviderFactory, ProviderFactory}; diff --git a/crates/storage/provider/provider/src/providers/db/state.rs b/crates/storage/provider/provider/src/providers/db/state.rs index 0a16531ae..929bbf47f 100644 --- a/crates/storage/provider/provider/src/providers/db/state.rs +++ b/crates/storage/provider/provider/src/providers/db/state.rs @@ -161,7 +161,7 @@ impl StateProvider for LatestStateProvider { impl StateProofProvider for LatestStateProvider { fn class_multiproof(&self, classes: Vec) -> ProviderResult { - let mut trie = TrieDbFactory::new(self.0.tx()).latest().classes_trie(); + let mut trie = TrieDbFactory::new(self.0.tx().clone()).latest().classes_trie(); let proofs = trie.multiproof(classes); Ok(proofs) } @@ -170,7 +170,7 @@ impl StateProofProvider for LatestStateProvider { &self, addresses: Vec, ) -> ProviderResult { - let mut trie = TrieDbFactory::new(self.0.tx()).latest().contracts_trie(); + let mut trie = TrieDbFactory::new(self.0.tx().clone()).latest().contracts_trie(); let proofs = trie.multiproof(addresses); Ok(proofs) } @@ -180,7 +180,7 @@ impl StateProofProvider for LatestStateProvider { address: ContractAddress, storage_keys: Vec, ) -> ProviderResult { - let mut trie = 
TrieDbFactory::new(self.0.tx()).latest().storages_trie(address); + let mut trie = TrieDbFactory::new(self.0.tx().clone()).latest().storages_trie(address); let proofs = trie.multiproof(storage_keys); Ok(proofs) } @@ -188,17 +188,17 @@ impl StateProofProvider for LatestStateProvider { impl StateRootProvider for LatestStateProvider { fn classes_root(&self) -> ProviderResult { - let trie = TrieDbFactory::new(self.0.tx()).latest().classes_trie(); + let trie = TrieDbFactory::new(self.0.tx().clone()).latest().classes_trie(); Ok(trie.root()) } fn contracts_root(&self) -> ProviderResult { - let trie = TrieDbFactory::new(self.0.tx()).latest().contracts_trie(); + let trie = TrieDbFactory::new(self.0.tx().clone()).latest().contracts_trie(); Ok(trie.root()) } fn storage_root(&self, contract: ContractAddress) -> ProviderResult> { - let trie = TrieDbFactory::new(self.0.tx()).latest().storages_trie(contract); + let trie = TrieDbFactory::new(self.0.tx().clone()).latest().storages_trie(contract); Ok(Some(trie.root())) } } @@ -335,7 +335,7 @@ impl StateProvider for HistoricalStateProvider { impl StateProofProvider for HistoricalStateProvider { fn class_multiproof(&self, classes: Vec) -> ProviderResult { - let proofs = TrieDbFactory::new(&self.tx) + let proofs = TrieDbFactory::new(self.tx().clone()) .historical(self.block_number) .expect("should exist") .classes_trie() @@ -347,7 +347,7 @@ impl StateProofProvider for HistoricalStateProvider { &self, addresses: Vec, ) -> ProviderResult { - let proofs = TrieDbFactory::new(&self.tx) + let proofs = TrieDbFactory::new(self.tx().clone()) .historical(self.block_number) .expect("should exist") .contracts_trie() @@ -360,7 +360,7 @@ impl StateProofProvider for HistoricalStateProvider { address: ContractAddress, storage_keys: Vec, ) -> ProviderResult { - let proofs = TrieDbFactory::new(&self.tx) + let proofs = TrieDbFactory::new(self.tx().clone()) .historical(self.block_number) .expect("should exist") .storages_trie(address) @@ -371,7 +371,7 @@ 
impl StateProofProvider for HistoricalStateProvider { impl StateRootProvider for HistoricalStateProvider { fn classes_root(&self) -> ProviderResult { - let root = TrieDbFactory::new(&self.tx) + let root = TrieDbFactory::new(self.tx().clone()) .historical(self.block_number) .expect("should exist") .classes_trie() @@ -380,7 +380,7 @@ impl StateRootProvider for HistoricalStateProvider { } fn contracts_root(&self) -> ProviderResult { - let root = TrieDbFactory::new(&self.tx) + let root = TrieDbFactory::new(self.tx().clone()) .historical(self.block_number) .expect("should exist") .contracts_trie() @@ -389,7 +389,7 @@ impl StateRootProvider for HistoricalStateProvider { } fn storage_root(&self, contract: ContractAddress) -> ProviderResult> { - let root = TrieDbFactory::new(&self.tx) + let root = TrieDbFactory::new(self.tx().clone()) .historical(self.block_number) .expect("should exist") .storages_trie(contract) diff --git a/crates/storage/provider/provider/src/providers/db/trie.rs b/crates/storage/provider/provider/src/providers/db/trie.rs index d1b3d033d..5bf2259a1 100644 --- a/crates/storage/provider/provider/src/providers/db/trie.rs +++ b/crates/storage/provider/provider/src/providers/db/trie.rs @@ -1,4 +1,4 @@ -use std::collections::{BTreeMap, HashMap}; +use std::collections::HashMap; use katana_db::abstraction::DbTxMut; use katana_db::tables; @@ -13,8 +13,6 @@ use katana_provider_api::ProviderError; use katana_trie::{ compute_contract_state_hash, ClassesTrie, ContractLeaf, ContractsTrie, StoragesTrie, }; -use starknet::macros::short_string; -use tracing::{debug, info, warn}; use crate::providers::db::DbProvider; use crate::ProviderResult; @@ -23,12 +21,12 @@ impl TrieWriter for DbProvider { fn trie_insert_declared_classes( &self, block_number: BlockNumber, - updates: &BTreeMap, + classes: Vec<(ClassHash, CompiledClassHash)>, ) -> ProviderResult { - let mut trie = ClassesTrie::new(TrieDbMut::::new(&self.0)); + let mut trie = 
ClassesTrie::new(TrieDbMut::::new(self.0.clone())); - for (class_hash, compiled_hash) in updates { - trie.insert(*class_hash, *compiled_hash); + for (class_hash, compiled_hash) in classes { + trie.insert(class_hash, compiled_hash); } trie.commit(block_number); @@ -41,15 +39,17 @@ impl TrieWriter for DbProvider { state_updates: &StateUpdates, ) -> ProviderResult { let mut contract_trie_db = - ContractsTrie::new(TrieDbMut::::new(&self.0)); + ContractsTrie::new(TrieDbMut::::new(self.0.clone())); let mut contract_leafs: HashMap = HashMap::new(); let leaf_hashes: Vec<_> = { // First we insert the contract storage changes for (address, storage_entries) in &state_updates.storage_updates { - let mut storage_trie_db = - StoragesTrie::new(TrieDbMut::::new(&self.0), *address); + let mut storage_trie_db = StoragesTrie::new( + TrieDbMut::::new(self.0.clone()), + *address, + ); for (key, value) in storage_entries { storage_trie_db.insert(*key, *value); @@ -78,7 +78,7 @@ impl TrieWriter for DbProvider { .into_iter() .map(|(address, mut leaf)| { let storage_trie = StoragesTrie::new( - TrieDbMut::::new(&self.0), + TrieDbMut::::new(self.0.clone()), address, ); let storage_root = storage_trie.root(); diff --git a/crates/storage/provider/provider/src/providers/fork/mod.rs b/crates/storage/provider/provider/src/providers/fork/mod.rs index c85535b76..d35b400d4 100644 --- a/crates/storage/provider/provider/src/providers/fork/mod.rs +++ b/crates/storage/provider/provider/src/providers/fork/mod.rs @@ -21,7 +21,6 @@ use katana_provider_api::block::{ }; use katana_provider_api::env::BlockEnvProvider; use katana_provider_api::stage::StageCheckpointProvider; -use katana_provider_api::state::StateFactoryProvider; use katana_provider_api::state_update::StateUpdateProvider; use katana_provider_api::transaction::{ ReceiptProvider, TransactionProvider, TransactionStatusProvider, TransactionTraceProvider, @@ -103,7 +102,7 @@ impl ForkedDb { }; let state_update = 
self.backend.get_state_update(block_id)?.unwrap(); // should exist if block exist - let StateUpdate::Update(state_update) = state_update else { unreachable!() }; + let StateUpdate::Confirmed(state_update) = state_update else { unreachable!() }; let header = Header { parent_hash: block.parent_hash, @@ -208,13 +207,12 @@ impl BlockNumberProvider for ForkedProvider { } fn latest_number(&self) -> ProviderResult { - let fork_point = self.block_id(); let local_latest = match self.local_db.latest_number() { Ok(num) => num, - Err(ProviderError::MissingLatestBlockNumber) => fork_point, + Err(ProviderError::MissingLatestBlockNumber) => self.block_id(), Err(err) => return Err(err), }; - Ok(local_latest.max(fork_point)) + Ok(local_latest.max(self.block_id())) } } @@ -222,39 +220,14 @@ impl BlockIdReader for ForkedProvider {} impl BlockHashProvider for ForkedProvider { fn latest_hash(&self) -> ProviderResult { - // Use the same logic as latest_number() - if local_db has blocks, use local hash let fork_point = self.block_id(); - let local_latest = match self.local_db.latest_number() { - Ok(num) => num, - Err(ProviderError::MissingLatestBlockNumber) => fork_point, - Err(err) => return Err(err), - }; + let latest_num = self.latest_number()?; - // If we have local blocks after fork point, use local hash - if local_latest > fork_point { + if latest_num > fork_point { return self.local_db.latest_hash(); } - // Otherwise, use fork point hash (either local_latest == fork_point or local_db is empty) - if let Ok(hash) = self.local_db.latest_hash() { - Ok(hash) - } else { - // If local_db is empty, return the hash of the fork point block - if let Some(hash) = self.fork_db.db.provider().block_hash_by_num(fork_point)? { - Ok(hash) - } else { - // Fetch the fork point block if not cached - if self.fork_db.fetch_historical_blocks(fork_point.into())? { - self.fork_db - .db - .provider() - .block_hash_by_num(fork_point)? 
- .ok_or(ProviderError::MissingLatestBlockHash) - } else { - Err(ProviderError::MissingLatestBlockHash) - } - } - } + self.block_hash_by_num(latest_num)?.ok_or(ProviderError::MissingLatestBlockHash) } fn block_hash_by_num(&self, num: BlockNumber) -> ProviderResult> { @@ -700,38 +673,24 @@ impl BlockWriter for ForkedProvider { receipts: Vec, executions: Vec, ) -> ProviderResult<()> { - // BUGFIX: Before inserting state updates, ensure all contracts referenced in nonce_updates - // have their ContractInfo in local_db. For forked contracts, the class_hash may only exist - // in fork_cache (in-memory) or on the remote fork. We need to copy it to local_db first. - use katana_db::tables; - use katana_provider_api::state::StateProvider; - - for addr in states.state_updates.nonce_updates.keys() { - // Check if ContractInfo exists in local_db - if self.local_db.tx().get::(*addr)?.is_none() { - // Contract info not in local_db, check if it exists in fork cache or remote - // Create a state provider to search in fork - let state = self.latest()?; - if let Some(class_hash) = state.class_hash_of_contract(*addr)? 
{ - // Found in fork - copy to local_db before processing state updates - let nonce = state.nonce(*addr)?.unwrap_or_default(); - let contract_info = - katana_primitives::contract::GenericContractInfo { class_hash, nonce }; - self.local_db.tx().put::(*addr, contract_info)?; - } - } - } - self.local_db.insert_block_with_states_and_receipts(block, states, receipts, executions) } } impl StageCheckpointProvider for ForkedProvider { - fn checkpoint(&self, id: &str) -> ProviderResult> { - self.local_db.checkpoint(id) + fn execution_checkpoint(&self, id: &str) -> ProviderResult> { + self.local_db.execution_checkpoint(id) + } + + fn set_execution_checkpoint(&self, id: &str, block_number: BlockNumber) -> ProviderResult<()> { + self.local_db.set_execution_checkpoint(id, block_number) + } + + fn prune_checkpoint(&self, id: &str) -> ProviderResult> { + self.local_db.prune_checkpoint(id) } - fn set_checkpoint(&self, id: &str, block_number: BlockNumber) -> ProviderResult<()> { - self.local_db.set_checkpoint(id, block_number) + fn set_prune_checkpoint(&self, id: &str, block_number: BlockNumber) -> ProviderResult<()> { + self.local_db.set_prune_checkpoint(id, block_number) } } diff --git a/crates/storage/provider/provider/src/providers/fork/state.rs b/crates/storage/provider/provider/src/providers/fork/state.rs index 11b72404b..bc25ce17b 100644 --- a/crates/storage/provider/provider/src/providers/fork/state.rs +++ b/crates/storage/provider/provider/src/providers/fork/state.rs @@ -1,9 +1,7 @@ use std::cmp::Ordering; use katana_db::abstraction::{DbTx, DbTxMut}; -use katana_db::models::contract::ContractClassChange; -use katana_db::models::contract::ContractNonceChange; -use katana_db::models::storage::{ContractStorageEntry, ContractStorageKey, StorageEntry}; +use katana_db::models::storage::StorageEntry; use katana_db::tables; use katana_db::trie::TrieDbFactory; use katana_primitives::block::BlockHashOrNumber; @@ -21,7 +19,7 @@ use katana_rpc_types::ContractStorageKeys; use 
super::db::{self}; use super::ForkedProvider; use crate::providers::fork::ForkedDb; -use crate::{MutableProvider, ProviderFactory, ProviderResult}; +use crate::{BlockNumber, MutableProvider, ProviderFactory, ProviderResult}; impl StateFactoryProvider for ForkedProvider { fn latest(&self) -> ProviderResult> { @@ -36,14 +34,12 @@ impl StateFactoryProvider for ForkedProvider { ) -> ProviderResult>> { let block_number = match block_id { BlockHashOrNumber::Num(num) => { - // Use the same logic as latest_number() - max of local_latest and fork_point - let fork_point = self.block_id(); let local_latest = match self.local_db.latest_number() { Ok(num) => num, - Err(ProviderError::MissingLatestBlockNumber) => fork_point, + Err(ProviderError::MissingLatestBlockNumber) => self.block_id(), Err(err) => return Err(err), }; - let latest_num = local_latest.max(fork_point); + let latest_num = local_latest.max(self.block_id()); match num.cmp(&latest_num) { Ordering::Less => Some(num), @@ -121,8 +117,9 @@ impl ContractClassProvider for LatestStateProvider { } impl LatestStateProvider { - /// Returns the latest block number, which is the maximum of local_db.latest_number() and fork_point. - /// This ensures that even if local_db is empty, we return the fork point as the latest block. + /// Returns the latest block number, which is the maximum of local_db.latest_number() and + /// fork_point. This ensures that even if local_db is empty, we return the fork point as the + /// latest block. fn latest_block_number(&self) -> ProviderResult { let fork_point = self.fork_provider.block_id; let local_latest = match self.local_provider.0.latest_number() { @@ -136,8 +133,30 @@ impl LatestStateProvider { impl StateProvider for LatestStateProvider { fn nonce(&self, address: ContractAddress) -> ProviderResult> { - if let res @ Some(..) = self.local_provider.nonce(address)? { - Ok(res) + if let Some(nonce) = self.local_provider.nonce(address)? 
{ + // TEMPFIX: + // + // This check is required due to the limitation on how we're storing updates for + // contracts that were deployed before the fork point. For those contracts, + // their corresponding entries for the `ContractInfo` table might not exist. + // In which case, `BlockWriter::insert_block_with_states_and_receipts` + // implementation of `ForkedProvider` would simply defaulted the fields to zero + // (depending which field is being updated). Thus, this check is to + // determine if the this contract's info is split across the local and fork dbs, where + // the value zero means the data is stored in the forked db. + // + // False positive: + // + // Nonce can be zero + if nonce == Nonce::ZERO { + if let Some(nonce) = self.fork_provider.db.provider().latest()?.nonce(address)? { + if nonce != Nonce::ZERO { + return Ok(Some(nonce)); + } + } + } + + Ok(Some(nonce)) } else if let Some(nonce) = self.fork_provider.backend.get_nonce(address, self.fork_provider.block_id)? { @@ -163,8 +182,32 @@ impl StateProvider for LatestStateProvider { &self, address: ContractAddress, ) -> ProviderResult> { - if let res @ Some(..) = self.local_provider.class_hash_of_contract(address)? { - Ok(res) + if let Some(hash) = self.local_provider.class_hash_of_contract(address)? { + // TEMPFIX: + // + // This check is required due to the limitation on how we're storing updates for + // contracts that were deployed before the fork point. For those contracts, + // their corresponding entries for the `ContractInfo` table might not exist. + // In which case, `BlockWriter::insert_block_with_states_and_receipts` + // implementation of `ForkedProvider` would simply defaulted the fields to zero + // (depending which field is being updated). Thus, this check is to + // determine if the this contract's info is split across the local and fork dbs, where + // the value zero means the data is stored in the forked db. 
+ // + // False positive: + // + // Some contracts can have class hash of zero (ie special contracts like 0x1, 0x2) hence + // why we simply return the value if it can't be found in the forked db. + // This is very hacky but it works for now. + if hash == ClassHash::ZERO { + if let Some(hash) = + self.fork_provider.db.provider().latest()?.class_hash_of_contract(address)? + { + return Ok(Some(hash)); + } + } + + Ok(Some(hash)) } else if let Some(class_hash) = self.fork_provider.backend.get_class_hash_at(address, self.fork_provider.block_id)? { @@ -220,11 +263,10 @@ impl StateProofProvider for LatestStateProvider { Ok(proofs.classes_proof.nodes.into()) } else { - // Use partial trie with proof and root from fork_point - let mut trie = - TrieDbFactory::new(self.local_provider.0.tx()).latest().partial_classes_trie(); + let mut trie = TrieDbFactory::new(self.local_provider.0.tx().clone()) + .latest() + .partial_classes_trie(); - // Fetch proof and root from fork_point let rpc_proof = self.fork_provider.backend.get_classes_proofs(classes.clone(), fork_point)?; let rpc_root = self.fork_provider.backend.get_global_roots(fork_point)?; @@ -252,11 +294,10 @@ impl StateProofProvider for LatestStateProvider { Ok(proofs.contracts_proof.nodes.into()) } else { - // Use partial trie with proof and root from fork_point - let mut trie = - TrieDbFactory::new(self.local_provider.0.tx()).latest().partial_contracts_trie(); + let mut trie = TrieDbFactory::new(self.local_provider.0.tx().clone()) + .latest() + .partial_contracts_trie(); - // Fetch proof and root from fork_point let rpc_proof = self.fork_provider.backend.get_contracts_proofs(addresses.clone(), fork_point)?; let rpc_root = self.fork_provider.backend.get_global_roots(fork_point)?; @@ -283,31 +324,26 @@ impl StateProofProvider for LatestStateProvider { let key = vec![ContractStorageKeys { address, keys: storage_keys }]; let result = self.fork_provider.backend.get_storages_proofs(key, fork_point)?; - let mut proofs = 
result.expect("proofs should exist for block"); - let proofs = proofs.contracts_storage_proofs.nodes.pop().unwrap(); + let proof = result + .and_then(|mut p| p.contracts_storage_proofs.nodes.pop()) + .map(|p| p.into()) + .unwrap_or_else(|| katana_trie::MultiProof(Default::default())); - Ok(proofs.into()) + Ok(proof) } else { - let mut trie = TrieDbFactory::new(self.local_provider.0.tx()) + let mut trie = TrieDbFactory::new(self.local_provider.0.tx().clone()) .latest() .partial_storages_trie(address); - // Fetch proof and root from fork_point let key = vec![ContractStorageKeys { address, keys: storage_keys.clone() }]; let rpc_proof = self.fork_provider.backend.get_storages_proofs(key, fork_point)?; let rpc_root = self.fork_provider.backend.get_storage_root(address, fork_point)?; - // Get proof for this contract (should be exactly one element in nodes) let proof = rpc_proof - .and_then(|p| { - // Should have exactly one element - proof for all storage_keys of this contract - if p.contracts_storage_proofs.nodes.len() == 1 { - Some(p.contracts_storage_proofs.nodes[0].clone().into()) - } else { - None - } - }) + .and_then(|mut p| p.contracts_storage_proofs.nodes.pop()) + .map(|p| p.into()) .unwrap_or_else(|| katana_trie::MultiProof(Default::default())); + let root = rpc_root.unwrap_or(Felt::ZERO); let proofs = trie.partial_multiproof(storage_keys, Some(proof), Some(root)); @@ -321,7 +357,6 @@ impl StateRootProvider for LatestStateProvider { let fork_point = self.fork_provider.block_id; let latest_block_number = self.latest_block_number()?; - //That's not necessary if latest_block_number == fork_point { let result = self.fork_provider.backend.get_global_roots(fork_point)?; return Ok(result @@ -330,8 +365,7 @@ impl StateRootProvider for LatestStateProvider { .classes_tree_root); } - // Try to get root from local trie - let trie = TrieDbFactory::new(self.local_provider.0.tx()).latest().classes_trie(); + let trie = 
TrieDbFactory::new(self.local_provider.0.tx().clone()).latest().classes_trie(); let root = trie.root(); if root == Felt::ZERO { @@ -354,11 +388,9 @@ impl StateRootProvider for LatestStateProvider { .contracts_tree_root); } - // Try to get root from local trie - let trie = TrieDbFactory::new(self.local_provider.0.tx()).latest().contracts_trie(); + let trie = TrieDbFactory::new(self.local_provider.0.tx().clone()).latest().contracts_trie(); let root = trie.root(); - // If trie is empty (no local contract changes), use the fork point root if root == Felt::ZERO { let result = self.fork_provider.backend.get_global_roots(fork_point)?; Ok(result.expect("proofs should exist for block").global_roots.contracts_tree_root) @@ -376,11 +408,11 @@ impl StateRootProvider for LatestStateProvider { let root = result.expect("proofs should exist for block"); Ok(Some(root)) } else { - let root = TrieDbFactory::new(self.local_provider.0.tx()) + let root = TrieDbFactory::new(self.local_provider.0.tx().clone()) .latest() .storages_trie(contract) .root(); - // If trie is empty (no local storage changes), use the fork point root as base + if root == Felt::ZERO { Ok(self .fork_provider @@ -400,6 +432,12 @@ struct HistoricalStateProvider { fork_provider: ForkedDb, } +impl HistoricalStateProvider { + fn target_block(&self) -> BlockNumber { + self.local_provider.block().min(self.fork_provider.block_id) + } +} + impl ContractClassProvider for HistoricalStateProvider { fn class(&self, hash: ClassHash) -> ProviderResult> { if let res @ Some(..) = self.local_provider.class(hash)? { @@ -443,13 +481,9 @@ impl StateProvider for HistoricalStateProvider { return Ok(res); } - if let res @ Some(nonce) = - self.fork_provider.backend.get_nonce(address, self.fork_provider.block_id)? - { - Ok(res) - } else { - Ok(None) - } + let block_id = self.target_block(); + + Ok(self.fork_provider.backend.get_nonce(address, block_id)?) 
} fn class_hash_of_contract( @@ -460,13 +494,9 @@ impl StateProvider for HistoricalStateProvider { return Ok(res); } - if let res @ Some(hash) = - self.fork_provider.backend.get_class_hash_at(address, self.fork_provider.block_id)? - { - Ok(res) - } else { - Ok(None) - } + let block_id = self.target_block(); + + Ok(self.fork_provider.backend.get_class_hash_at(address, block_id)?) } fn storage( @@ -478,25 +508,20 @@ impl StateProvider for HistoricalStateProvider { return Ok(res); } - if let res @ Some(value) = - self.fork_provider.backend.get_storage(address, key, self.fork_provider.block_id)? - { - Ok(res) - } else { - Ok(None) - } + let block_id = self.target_block(); + + Ok(self.fork_provider.backend.get_storage(address, key, block_id)?) } } impl StateProofProvider for HistoricalStateProvider { fn class_multiproof(&self, classes: Vec) -> ProviderResult { if self.local_provider.block() > self.fork_provider.block_id { - let mut trie = TrieDbFactory::new(self.local_provider.tx()) + let mut trie = TrieDbFactory::new(self.local_provider.tx().clone()) .historical(self.local_provider.block()) .ok_or(ProviderError::StateProofNotSupported)? .partial_classes_trie(); - // Fetch proof and root from fork_point let rpc_proof = self .fork_provider .backend @@ -515,7 +540,7 @@ impl StateProofProvider for HistoricalStateProvider { let result = self .fork_provider .backend - .get_classes_proofs(classes, self.fork_provider.block_id)?; + .get_classes_proofs(classes, self.local_provider.block())?; let proofs = result.expect("block should exist"); Ok(proofs.classes_proof.nodes.into()) @@ -527,12 +552,11 @@ impl StateProofProvider for HistoricalStateProvider { addresses: Vec, ) -> ProviderResult { if self.local_provider.block() > self.fork_provider.block_id { - let mut trie = TrieDbFactory::new(self.local_provider.tx()) + let mut trie = TrieDbFactory::new(self.local_provider.tx().clone()) .historical(self.local_provider.block()) .ok_or(ProviderError::StateProofNotSupported)? 
.partial_contracts_trie(); - // Fetch proof and root from fork_point let rpc_proof = self .fork_provider .backend @@ -551,7 +575,7 @@ impl StateProofProvider for HistoricalStateProvider { let result = self .fork_provider .backend - .get_contracts_proofs(addresses, self.fork_provider.block_id)?; + .get_contracts_proofs(addresses, self.local_provider.block())?; let proofs = result.expect("block should exist"); Ok(proofs.contracts_proof.nodes.into()) @@ -564,12 +588,11 @@ impl StateProofProvider for HistoricalStateProvider { storage_keys: Vec, ) -> ProviderResult { if self.local_provider.block() > self.fork_provider.block_id { - let mut trie = TrieDbFactory::new(self.local_provider.tx()) + let mut trie = TrieDbFactory::new(self.local_provider.tx().clone()) .historical(self.local_provider.block()) .ok_or(ProviderError::StateProofNotSupported)? .partial_storages_trie(address); - // Fetch proof and root from fork_point let key = vec![ContractStorageKeys { address, keys: storage_keys.clone() }]; let rpc_proof = self.fork_provider.backend.get_storages_proofs(key, self.fork_provider.block_id)?; @@ -582,6 +605,7 @@ impl StateProofProvider for HistoricalStateProvider { .and_then(|mut p| p.contracts_storage_proofs.nodes.pop()) .map(|p| p.into()) .unwrap_or_else(|| katana_trie::MultiProof(Default::default())); + let root = rpc_root.unwrap_or(Felt::ZERO); let proofs = trie.partial_multiproof(storage_keys, Some(proof), Some(root)); @@ -589,7 +613,7 @@ impl StateProofProvider for HistoricalStateProvider { } else { let key = vec![ContractStorageKeys { address, keys: storage_keys }]; let result = - self.fork_provider.backend.get_storages_proofs(key, self.fork_provider.block_id)?; + self.fork_provider.backend.get_storages_proofs(key, self.local_provider.block())?; let mut proofs = result.expect("block should exist"); let storage_proof = proofs.contracts_storage_proofs.nodes.pop().unwrap_or_default(); @@ -625,13 +649,12 @@ impl StateRootProvider for HistoricalStateProvider { fn 
classes_root(&self) -> ProviderResult { if self.local_provider.block() > self.fork_provider.block_id { - let root = TrieDbFactory::new(self.local_provider.tx()) + let root = TrieDbFactory::new(self.local_provider.tx().clone()) .historical(self.local_provider.block()) .ok_or(ProviderError::StateProofNotSupported)? .classes_trie() .root(); - // It trie is empty, use the fork point root, because nothing has changed locally if root == Felt::ZERO { let result = self.fork_provider.backend.get_global_roots(self.fork_provider.block_id)?; @@ -652,13 +675,12 @@ impl StateRootProvider for HistoricalStateProvider { fn contracts_root(&self) -> ProviderResult { if self.local_provider.block() > self.fork_provider.block_id { - let root = TrieDbFactory::new(self.local_provider.tx()) + let root = TrieDbFactory::new(self.local_provider.tx().clone()) .historical(self.local_provider.block()) .ok_or(ProviderError::StateProofNotSupported)? .contracts_trie() .root(); - // It trie is empty, use the fork point root, because nothing has changed locally if root == Felt::ZERO { let result = self.fork_provider.backend.get_global_roots(self.fork_provider.block_id)?; @@ -671,7 +693,7 @@ impl StateRootProvider for HistoricalStateProvider { } } else { let result = - self.fork_provider.backend.get_global_roots(self.fork_provider.block_id)?; + self.fork_provider.backend.get_global_roots(self.local_provider.block())?; let roots = result.expect("block should exist"); Ok(roots.global_roots.contracts_tree_root) } @@ -679,7 +701,7 @@ impl StateRootProvider for HistoricalStateProvider { fn storage_root(&self, contract: ContractAddress) -> ProviderResult> { if self.local_provider.block() > self.fork_provider.block_id { - let root = TrieDbFactory::new(self.local_provider.tx()) + let root = TrieDbFactory::new(self.local_provider.tx().clone()) .historical(self.local_provider.block()) .ok_or(ProviderError::StateProofNotSupported)? 
.storages_trie(contract) @@ -698,7 +720,7 @@ impl StateRootProvider for HistoricalStateProvider { let result = self .fork_provider .backend - .get_storage_root(contract, self.fork_provider.block_id)?; + .get_storage_root(contract, self.local_provider.block())?; Ok(result) } } diff --git a/crates/storage/provider/provider/src/providers/fork/trie.rs b/crates/storage/provider/provider/src/providers/fork/trie.rs index f39cc0c07..3a0fc3a9a 100644 --- a/crates/storage/provider/provider/src/providers/fork/trie.rs +++ b/crates/storage/provider/provider/src/providers/fork/trie.rs @@ -1,10 +1,11 @@ -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::collections::{HashMap, HashSet}; use katana_db::abstraction::DbTxMut; use katana_db::tables; use katana_db::trie::TrieDbMut; use katana_primitives::block::BlockNumber; use katana_primitives::class::{ClassHash, CompiledClassHash}; +use katana_primitives::hash::StarkHash; use katana_primitives::state::StateUpdates; use katana_primitives::{ContractAddress, Felt}; use katana_provider_api::state::{StateFactoryProvider, StateProvider, StateRootProvider}; @@ -26,231 +27,49 @@ impl TrieWriter for ForkedProvider { block_number: BlockNumber, state_updates: &StateUpdates, ) -> ProviderResult { - self.local_db.trie_insert_contract_updates(block_number, state_updates) - } - - fn trie_insert_declared_classes( - &self, - block_number: BlockNumber, - updates: &BTreeMap, - ) -> ProviderResult { - self.local_db.trie_insert_declared_classes(block_number, updates) - } - - fn trie_insert_declared_classes_with_proof( - &self, - block_number: BlockNumber, - updates: &BTreeMap, - proof: MultiProof, - original_root: Felt, - ) -> ProviderResult { - let mut trie = PartialClassesTrie::new_partial(TrieDbMut::::new( - &*self.local_db, - )); - - for (class_hash, compiled_hash) in updates { - trie.insert(*class_hash, *compiled_hash, proof.clone(), original_root); - } - - trie.commit(block_number); - Ok(trie.root()) - } - - fn 
trie_insert_contract_updates_with_proof( - &self, - block_number: BlockNumber, - state_updates: &StateUpdates, - proof: MultiProof, - original_root: Felt, - contract_leaves_data: HashMap, - contracts_storage_proofs: Vec, - ) -> ProviderResult { - let mut contract_trie_db = - PartialContractsTrie::new_partial(TrieDbMut::::new( - &*self.local_db, - )); - - let mut contract_leafs: HashMap = HashMap::new(); - - // Verify that storage updates and storage proofs have matching lengths - if state_updates.storage_updates.len() != contracts_storage_proofs.len() { - return Err(ProviderError::ParsingError( - "storage updates/proofs count mismatch".to_string(), - )); - } - - let latest_state = self.latest()?; - - let leaf_hashes: Vec<_> = { - // First handle storage updates with proofs - for ((address, storage_entries), storage_proof) in - state_updates.storage_updates.iter().zip(contracts_storage_proofs.iter()) - { - let mut storage_trie_db = PartialStoragesTrie::new_partial( - TrieDbMut::::new(&*self.local_db), - *address, - ); - - // Get the original root from the contract leaf's storage_root - let original_storage_root = contract_leaves_data - .get(address) - .and_then(|leaf| leaf.storage_root) - .unwrap_or(Felt::ZERO); - - for (key, value) in storage_entries { - storage_trie_db.insert( - *key, - *value, - storage_proof.clone(), - original_storage_root, - ); - } - - contract_leafs.insert(*address, Default::default()); - storage_trie_db.commit(block_number); - } - - // Handle other contract updates - for (address, nonce) in &state_updates.nonce_updates { - contract_leafs.entry(*address).or_default().nonce = Some(*nonce); - } - - for (address, class_hash) in &state_updates.deployed_contracts { - contract_leafs.entry(*address).or_default().class_hash = Some(*class_hash); - } - - for (address, class_hash) in &state_updates.replaced_classes { - contract_leafs.entry(*address).or_default().class_hash = Some(*class_hash); - } - - contract_leafs - .into_iter() - .map(|(address, mut 
leaf)| { - // Use storage root from contract_leaves_data if available, otherwise get from trie - if leaf.storage_root.is_none() { - let storage_trie = PartialStoragesTrie::new_partial( - TrieDbMut::::new(&*self.local_db), - address, - ); - let storage_root = storage_trie.root(); - // Only update storage root if we have local changes (non-zero root) - if storage_root != Felt::ZERO { - leaf.storage_root = Some(storage_root); - } else if let Some(leaf_data) = contract_leaves_data.get(&address) { - leaf.storage_root = leaf_data.storage_root; - } - } - - // Merge with contract_leaves_data to get nonce/class_hash if not in updates - if let Some(leaf_data) = contract_leaves_data.get(&address) { - if leaf.nonce.is_none() { - leaf.nonce = leaf_data.nonce; - } - if leaf.class_hash.is_none() { - leaf.class_hash = leaf_data.class_hash; - } - if leaf.storage_root.is_none() { - leaf.storage_root = leaf_data.storage_root; - } - } - - // If storage_root is still None, get it from the previous state - // This handles cases where contract has nonce/class changes but no storage updates - // and the contract wasn't in the remote proof response - if leaf.storage_root.is_none() { - if let Ok(Some(prev_storage_root)) = latest_state.storage_root(address) { - leaf.storage_root = Some(prev_storage_root); - } else { - // If no previous storage root exists, use ZERO (empty storage) - leaf.storage_root = Some(Felt::ZERO); - } - } - - let leaf_hash = - contract_state_leaf_hash(latest_state.as_ref(), &address, &leaf); - - Ok((address, leaf_hash)) - }) - .collect::, ProviderError>>()? 
- }; - - for (k, v) in leaf_hashes { - contract_trie_db.insert(k, v, proof.clone(), original_root); - } - - contract_trie_db.commit(block_number); - Ok(contract_trie_db.root()) - } - - fn compute_state_root( - &self, - block_number: BlockNumber, - state_updates: &StateUpdates, - ) -> ProviderResult { - // Collect all needed data from StateUpdates - let mut class_hashes = Vec::new(); - let mut contract_addresses = HashSet::new(); - let mut contracts_storage_keys = Vec::new(); - - // Collect class hashes - class_hashes.extend(state_updates.declared_classes.keys().copied()); - // class_hashes.extend(state_updates.deprecated_declared_classes.iter().copied()); + let mut contracts = HashSet::new(); + let mut storage_keys = Vec::new(); // Collect all unique contract addresses that need proofs for address in state_updates.deployed_contracts.keys() { - contract_addresses.insert(*address); + contracts.insert(*address); } for address in state_updates.replaced_classes.keys() { - contract_addresses.insert(*address); + contracts.insert(*address); } for address in state_updates.nonce_updates.keys() { - contract_addresses.insert(*address); + contracts.insert(*address); } for (address, storage_map) in &state_updates.storage_updates { - contract_addresses.insert(*address); + contracts.insert(*address); let keys = storage_map.keys().cloned().collect::>(); - contracts_storage_keys.push(ContractStorageKeys { address: *address, keys }); + storage_keys.push(ContractStorageKeys { address: *address, keys }); } - let mut contract_addresses: Vec<_> = contract_addresses.into_iter().collect(); - contract_addresses.sort(); - - // Fetch proofs from remote RPC (only if we have changes) - let fork_point = self.fork_db.block_id; - - // Fetch classes proof - let classes_proof_result = if !class_hashes.is_empty() { - self.fork_db.backend.get_classes_proofs(class_hashes.clone(), fork_point)? 
- } else { - None - }; + let mut contracts: Vec<_> = contracts.into_iter().collect(); + contracts.sort(); // Fetch contracts proof - let contracts_proof_result = if !contract_addresses.is_empty() { - self.fork_db.backend.get_contracts_proofs(contract_addresses.clone(), fork_point)? + let contracts_proof_result = if !contracts.is_empty() { + self.fork_db.backend.get_contracts_proofs(contracts.clone(), self.fork_db.block_id)? } else { None }; // Fetch storages proofs - let storages_proof_result = if !contracts_storage_keys.is_empty() { - self.fork_db.backend.get_storages_proofs(contracts_storage_keys.clone(), fork_point)? + let storages_proof_result = if !storage_keys.is_empty() { + self.fork_db.backend.get_storages_proofs(storage_keys.clone(), self.fork_db.block_id)? } else { None }; // Fetch global roots (always needed as fallback when no changes) - let global_roots = self + let original_root = self .fork_db .backend - .get_global_roots(fork_point)? + .get_global_roots(self.fork_db.block_id)? 
+ .map(|roots| roots.global_roots.contracts_tree_root) .expect("global roots should exist for fork point"); - let final_classes_root = global_roots.global_roots.classes_tree_root; - let final_contracts_root = global_roots.global_roots.contracts_tree_root; - - // Extract proofs (only if we have changes) - let classes_proof = - classes_proof_result.map(|response| response.classes_proof.nodes.into()); let (contracts_proof, contract_leaves_data) = if let Some(proof_response) = contracts_proof_result { @@ -261,7 +80,7 @@ impl TrieWriter for ForkedProvider { .contracts_proof .contract_leaves_data .iter() - .zip(contract_addresses.iter()) + .zip(contracts.iter()) .map(|(leaf_data, &addr)| { let leaf = ContractLeaf { storage_root: Some(leaf_data.storage_root), @@ -289,35 +108,190 @@ impl TrieWriter for ForkedProvider { }) .unwrap_or_default(); - // Use proof-based methods if we have proofs (which means we have changes) - // If no proofs, use the fork point root (matches logic in state.rs: if trie is empty, use fork root) - let class_trie_root = if let Some(proof) = classes_proof { - self.trie_insert_declared_classes_with_proof( - block_number, - &state_updates.declared_classes, - proof, - final_classes_root, - )? 
- } else { - // No class changes - use the fork point root (same as state.rs logic) - final_classes_root - }; + if let Some(proof) = contracts_proof { + let mut contract_trie_db = + PartialContractsTrie::new_partial(TrieDbMut::::new( + self.local_db.tx().clone(), + )); + + let mut contract_leafs: HashMap = HashMap::new(); + + // Verify that storage updates and storage proofs have matching lengths + if state_updates.storage_updates.len() != contracts_storage_proofs.len() { + return Err(ProviderError::ParsingError( + "storage updates/proofs count mismatch".to_string(), + )); + } + + let latest_state = self.latest()?; + + let leaf_hashes: Vec<_> = { + // First handle storage updates with proofs + for ((address, storage_entries), storage_proof) in + state_updates.storage_updates.iter().zip(contracts_storage_proofs.iter()) + { + let mut storage_trie_db = PartialStoragesTrie::new_partial( + TrieDbMut::::new(self.local_db.tx().clone()), + *address, + ); + + // Get the original root from the contract leaf's storage_root + let original_storage_root = contract_leaves_data + .get(address) + .and_then(|leaf| leaf.storage_root) + .unwrap_or(Felt::ZERO); + + for (key, value) in storage_entries { + storage_trie_db.insert( + *key, + *value, + storage_proof.clone(), + original_storage_root, + ); + } - let contract_trie_root = if let Some(proof) = contracts_proof { - self.trie_insert_contract_updates_with_proof( - block_number, - state_updates, - proof, - final_contracts_root, - contract_leaves_data, - contracts_storage_proofs, - )? 
+ contract_leafs.insert(*address, Default::default()); + storage_trie_db.commit(block_number); + } + + // Handle other contract updates + for (address, nonce) in &state_updates.nonce_updates { + contract_leafs.entry(*address).or_default().nonce = Some(*nonce); + } + + for (address, class_hash) in &state_updates.deployed_contracts { + contract_leafs.entry(*address).or_default().class_hash = Some(*class_hash); + } + + for (address, class_hash) in &state_updates.replaced_classes { + contract_leafs.entry(*address).or_default().class_hash = Some(*class_hash); + } + + contract_leafs + .into_iter() + .map(|(address, mut leaf)| { + // Use storage root from contract_leaves_data if available, otherwise get + // from trie + if leaf.storage_root.is_none() { + let storage_trie = PartialStoragesTrie::new_partial( + TrieDbMut::::new( + self.local_db.tx().clone(), + ), + address, + ); + let storage_root = storage_trie.root(); + // Only update storage root if we have local changes (non-zero root) + if storage_root != Felt::ZERO { + leaf.storage_root = Some(storage_root); + } else if let Some(leaf_data) = contract_leaves_data.get(&address) { + leaf.storage_root = leaf_data.storage_root; + } + } + + // Merge with contract_leaves_data to get nonce/class_hash if not in updates + if let Some(leaf_data) = contract_leaves_data.get(&address) { + if leaf.nonce.is_none() { + leaf.nonce = leaf_data.nonce; + } + if leaf.class_hash.is_none() { + leaf.class_hash = leaf_data.class_hash; + } + if leaf.storage_root.is_none() { + leaf.storage_root = leaf_data.storage_root; + } + } + + // If storage_root is still None, get it from the previous state + // This handles cases where contract has nonce/class changes but no storage + // updates and the contract wasn't in the remote proof + // response + if leaf.storage_root.is_none() { + if let Ok(Some(prev_storage_root)) = latest_state.storage_root(address) + { + leaf.storage_root = Some(prev_storage_root); + } else { + // If no previous storage root 
exists, use ZERO (empty storage) + leaf.storage_root = Some(Felt::ZERO); + } + } + + let leaf_hash = + contract_state_leaf_hash(latest_state.as_ref(), &address, &leaf); + + Ok((address, leaf_hash)) + }) + .collect::, ProviderError>>()? + }; + + for (k, v) in leaf_hashes { + contract_trie_db.insert(k, v, proof.clone(), original_root); + } + + contract_trie_db.commit(block_number); + Ok(contract_trie_db.root()) } else { // No contract changes - use the fork point root (same as state.rs logic) - final_contracts_root + Ok(original_root) + } + } + + fn trie_insert_declared_classes( + &self, + block_number: BlockNumber, + classes: Vec<(ClassHash, CompiledClassHash)>, + ) -> ProviderResult { + let class_hashes = classes.iter().map(|e| e.0).collect::>(); + + // Fetch proofs from remote RPC (only if we have changes) + let classes_proof_result = if !class_hashes.is_empty() { + self.fork_db.backend.get_classes_proofs(class_hashes, self.fork_db.block_id)? + } else { + None }; - use katana_primitives::hash::StarkHash; + // Extract proofs (only if we have changes) + let classes_proof: Option = + classes_proof_result.map(|response| response.classes_proof.nodes.into()); + + // Fetch global roots (always needed as fallback when no changes) + let global_roots = self + .fork_db + .backend + .get_global_roots(self.fork_db.block_id)? 
+ .expect("global roots should exist for fork point"); + + let original_root = global_roots.global_roots.classes_tree_root; + + // Use proof-based methods if we have proofs (which means we have changes) + // If no proofs, use the fork point root + if let Some(proof) = classes_proof { + let mut trie = PartialClassesTrie::new_partial( + TrieDbMut::::new(self.local_db.tx().clone()), + ); + + for (class_hash, compiled_hash) in classes { + trie.insert(class_hash, compiled_hash, proof.clone(), original_root); + } + + trie.commit(block_number); + Ok(trie.root()) + } else { + Ok(original_root) + } + } + + fn compute_state_root( + &self, + block_number: BlockNumber, + state_updates: &StateUpdates, + ) -> ProviderResult { + let mut updated_classes = Vec::new(); + updated_classes.extend(state_updates.declared_classes.clone()); + updated_classes.extend(state_updates.migrated_compiled_classes.clone()); + + let class_trie_root = self.trie_insert_declared_classes(block_number, updated_classes)?; + let contract_trie_root = self.trie_insert_contract_updates(block_number, state_updates)?; + Ok(katana_primitives::hash::Poseidon::hash_array(&[ short_string!("STARKNET_STATE_V0"), contract_trie_root, @@ -332,9 +306,8 @@ fn contract_state_leaf_hash( address: &ContractAddress, contract_leaf: &ContractLeaf, ) -> Felt { - let nonce = contract_leaf - .nonce - .unwrap_or_else(|| provider.nonce(*address).ok().flatten().unwrap_or_default()); + let nonce = + contract_leaf.nonce.unwrap_or(provider.nonce(*address).unwrap().unwrap_or_default()); let class_hash = contract_leaf.class_hash.unwrap_or_else(|| { provider.class_hash_of_contract(*address).ok().flatten().unwrap_or_default() diff --git a/crates/storage/provider/provider/src/test_utils.rs b/crates/storage/provider/provider/src/test_utils.rs index b5222e99f..96ddf7c8a 100644 --- a/crates/storage/provider/provider/src/test_utils.rs +++ b/crates/storage/provider/provider/src/test_utils.rs @@ -3,7 +3,6 @@ use std::sync::Arc; use 
katana_genesis::allocation::{DevGenesisAccount, GenesisAccountAlloc, GenesisAllocation}; use katana_genesis::Genesis; use katana_primitives::block::{Block, BlockHash, FinalityStatus}; -use katana_primitives::contract::ContractAddress; use katana_primitives::utils::class::parse_sierra_class; use katana_primitives::{address, felt, U256}; use katana_provider_api::block::BlockWriter; diff --git a/crates/storage/provider/provider/tests/block.rs b/crates/storage/provider/provider/tests/block.rs index d74b45a4e..da2968ff6 100644 --- a/crates/storage/provider/provider/tests/block.rs +++ b/crates/storage/provider/provider/tests/block.rs @@ -15,6 +15,7 @@ use katana_provider::api::transaction::{ ReceiptProvider, TransactionProvider, TransactionStatusProvider, TransactionTraceProvider, }; use katana_provider::{DbProviderFactory, MutableProvider, ProviderFactory}; +use katana_rpc_types::{TxTrace, TxTraceWithHash}; use rstest_reuse::{self, *}; mod fixtures; @@ -140,11 +141,20 @@ where assert_eq!(actual_tx, Some(tx.clone())); } - assert_eq!(actual_block_env, Some(expected_block_env)); + let expected_executions: Vec = expected_block + .body + .iter() + .zip(executions.iter()) + .map(|(tx, exec)| TxTraceWithHash { + transaction_hash: tx.hash, + trace_root: TxTrace::from(exec.clone()), + }) + .collect(); + assert_eq!(actual_block_env, Some(expected_block_env)); assert_eq!(actual_receipts.as_ref().map(|r| r.len()), Some(expected_block.body.len())); assert_eq!(actual_receipts, Some(receipts)); - assert_eq!(actual_executions, Some(executions)); + assert_eq!(actual_executions, Some(expected_executions)); assert_eq!(actual_block_tx_count, Some(expected_block.body.len() as u64)); assert_eq!(actual_state_root, Some(expected_block.header.state_root)); diff --git a/crates/storage/provider/provider/tests/class.rs b/crates/storage/provider/provider/tests/class.rs index 9f9a1df2b..ce5e45696 100644 --- a/crates/storage/provider/provider/tests/class.rs +++ 
b/crates/storage/provider/provider/tests/class.rs @@ -3,10 +3,10 @@ mod fixtures; use anyhow::Result; use katana_primitives::block::{BlockHashOrNumber, BlockNumber}; use katana_primitives::class::{ClassHash, CompiledClassHash, ContractClass}; +use katana_primitives::felt; use katana_provider::api::contract::ContractClassProviderExt; use katana_provider::api::state::{StateFactoryProvider, StateProvider}; use rstest_reuse::{self, *}; -use starknet::macros::felt; use crate::fixtures::{db_provider_with_states, DOJO_WORLD_SIERRA_CLASS}; diff --git a/crates/storage/provider/provider/tests/fixtures.rs b/crates/storage/provider/provider/tests/fixtures.rs index c926fe3a6..04cb8c743 100644 --- a/crates/storage/provider/provider/tests/fixtures.rs +++ b/crates/storage/provider/provider/tests/fixtures.rs @@ -6,7 +6,6 @@ use katana_primitives::block::{ BlockHashOrNumber, FinalityStatus, Header, SealedBlock, SealedBlockWithStatus, }; use katana_primitives::class::{ContractClass, SierraContractClass}; -use katana_primitives::contract::ContractAddress; use katana_primitives::state::{StateUpdates, StateUpdatesWithClasses}; use katana_provider::api::block::BlockWriter; use katana_provider::api::state::StateFactoryProvider; diff --git a/crates/storage/provider/provider/tests/fork.rs b/crates/storage/provider/provider/tests/fork.rs index 726c1c8db..07b1f344a 100644 --- a/crates/storage/provider/provider/tests/fork.rs +++ b/crates/storage/provider/provider/tests/fork.rs @@ -2,8 +2,9 @@ use assert_matches::assert_matches; use katana_primitives::block::{ Block, BlockHashOrNumber, FinalityStatus, Header, SealedBlockWithStatus, }; +use katana_primitives::state::StateUpdatesWithClasses; use katana_primitives::transaction::TxType; -use katana_primitives::{address, felt, ContractAddress}; +use katana_primitives::{address, felt}; use katana_provider::api::block::{ BlockHashProvider, BlockNumberProvider, BlockProvider, BlockWriter, }; @@ -392,13 +393,11 @@ async fn 
post_fork_state_proof_should_not_be_supported() { let class_hash = felt!("0x00e022115a73679d4e215da00f53d8f681f5c52b488bf18c71fea115e92181b1"); let result = state.class_multiproof(vec![class_hash]); - // assert_matches!(result, Err(ProviderError::StateProofNotSupported)); - println!("class_hash result: {:?}", result); + assert_matches!(result, Err(ProviderError::StateProofNotSupported)); let address = address!("0x0164b86b8fC5C0c84d3c53Bc95760F290420Ea2a32ed49A44fd046683a1CaAc2"); let result = state.contract_multiproof(vec![address]); - println!("contract_multiproof result: {:?}", result); - // assert_matches!(result, Err(ProviderError::StateProofNotSupported)); + assert_matches!(result, Err(ProviderError::StateProofNotSupported)); } #[tokio::test(flavor = "multi_thread")] @@ -546,3 +545,354 @@ async fn pre_fork_state_root() { assert_eq!(actual_contract2_root, expected_contract2_root); assert_eq!(actual_contract3_root, expected_contract3_root); } + +/// This test validates that: +/// 1. State changes made after the fork point are correctly stored and retrievable +/// 2. The latest state reflects the post-fork state changes +/// 3. 
Historical state access works for both pre-fork and post-fork blocks +#[tokio::test] +async fn post_fork_state() { + let fork_block_number = 2906771; + + let starknet_client = StarknetClient::new(SEPOLIA_RPC_URL.try_into().unwrap()); + let provider_factory = ForkProviderFactory::new_in_memory(fork_block_number, starknet_client); + + // First verify we can access state at the fork point + let provider = provider_factory.provider(); + let fork_state = provider.latest().unwrap(); + + // Class that exists at the fork point (declared at block 2892448) + let existing_class_hash = + felt!("0x00e022115a73679d4e215da00f53d8f681f5c52b488bf18c71fea115e92181b1"); + let result = fork_state.class(existing_class_hash).unwrap(); + assert!(result.is_some(), "Class should exist at fork point"); + + // Contract that exists at the fork point (deployed at block 2906741) + let existing_contract = + address!("0x0164b86b8fC5C0c84d3c53Bc95760F290420Ea2a32ed49A44fd046683a1CaAc2"); + let result = fork_state.class_hash_of_contract(existing_contract).unwrap(); + assert!(result.is_some(), "Contract should exist at fork point"); + + // Now add a new block after the fork point with state changes + let provider_mut = provider_factory.provider_mut(); + + let new_block_number = fork_block_number + 1; + let new_contract_address = address!("0x1234567890abcdef"); + let new_contract_class_hash = felt!("0xdeadbeef"); + let new_contract_nonce = felt!("0x1"); + let storage_key = felt!("0x1"); + let storage_value = felt!("0x42"); + + // Create state updates for the new block + let mut state_updates = StateUpdatesWithClasses::default(); + state_updates + .state_updates + .deployed_contracts + .insert(new_contract_address, new_contract_class_hash); + state_updates.state_updates.nonce_updates.insert(new_contract_address, new_contract_nonce); + state_updates + .state_updates + .storage_updates + .insert(new_contract_address, [(storage_key, storage_value)].into_iter().collect()); + + provider_mut + 
.insert_block_with_states_and_receipts( + SealedBlockWithStatus { + block: Block { + header: Header { number: new_block_number, ..Default::default() }, + body: Vec::new(), + } + .seal(), + status: FinalityStatus::AcceptedOnL2, + }, + state_updates, + Default::default(), + Default::default(), + ) + .unwrap(); + + provider_mut.commit().unwrap(); + + // Now verify the post-fork state + let provider = provider_factory.provider(); + let latest_state = provider.latest().unwrap(); + + // The new contract should exist in the latest state + let result = latest_state.class_hash_of_contract(new_contract_address).unwrap(); + assert_eq!(result, Some(new_contract_class_hash), "New contract should exist in latest state"); + + let result = latest_state.nonce(new_contract_address).unwrap(); + assert_eq!(result, Some(new_contract_nonce), "New contract nonce should be set"); + + let result = latest_state.storage(new_contract_address, storage_key).unwrap(); + assert_eq!(result, Some(storage_value), "New contract storage should be set"); + + // Pre-fork state should still be accessible + let result = latest_state.class(existing_class_hash).unwrap(); + assert!(result.is_some(), "Pre-fork class should still exist in latest state"); + + let result = latest_state.class_hash_of_contract(existing_contract).unwrap(); + assert!(result.is_some(), "Pre-fork contract should still exist in latest state"); + + // Historical state at fork point should NOT have the new contract + let fork_block_id = BlockHashOrNumber::Num(fork_block_number); + let historical_state = + provider.historical(fork_block_id).unwrap().expect("historical state must exist"); + + let result = historical_state.class_hash_of_contract(new_contract_address).unwrap(); + assert!(result.is_none(), "New contract should NOT exist at fork block"); + + // But pre-fork data should still be accessible from historical state + let result = historical_state.class(existing_class_hash).unwrap(); + assert!(result.is_some(), "Pre-fork class 
should exist in historical state"); + + let result = historical_state.class_hash_of_contract(existing_contract).unwrap(); + assert!(result.is_some(), "Pre-fork contract should exist in historical state"); + + // Historical state at the new block should have the new contract + let new_block_id = BlockHashOrNumber::Num(new_block_number); + let post_fork_historical = + provider.historical(new_block_id).unwrap().expect("post-fork historical state must exist"); + + let result = post_fork_historical.class_hash_of_contract(new_contract_address).unwrap(); + assert_eq!( + result, + Some(new_contract_class_hash), + "New contract should exist in post-fork historical state" + ); + + let result = post_fork_historical.nonce(new_contract_address).unwrap(); + assert_eq!(result, Some(new_contract_nonce), "New contract nonce should be in historical"); + + let result = post_fork_historical.storage(new_contract_address, storage_key).unwrap(); + assert_eq!(result, Some(storage_value), "New contract storage should be in historical"); +} + +/// Test updating only the nonce of a pre-fork contract (contract that has already been deployed +/// before the fork point). 
+/// +/// Verifies that: +/// - Nonce is updated in latest state +/// - Class hash remains unchanged +/// - Historical state at fork point preserves original values +#[tokio::test] +async fn post_fork_state_update_nonce_only() { + let fork_block_number = 3631794; + + let starknet_client = StarknetClient::new(SEPOLIA_RPC_URL.try_into().unwrap()); + let provider_factory = ForkProviderFactory::new_in_memory(fork_block_number, starknet_client); + + // Contract that exists at the fork point (deployed at block 2906741) + let contract = address!("0x4f4e29add19afa12c868ba1f4439099f225403ff9a71fe667eebb50e13518d3"); + let og_class_hash = felt!("0x4d9d2b2e26f94fad32e7b7a7e710286636322d5905f1cd64dc58a144294e6"); + let og_nonce = felt!("0x1d6cb2"); + + // verify original state at fork point + + let provider = provider_factory.provider(); + let fork_state = provider.latest().unwrap(); + + assert_eq!(fork_state.class_hash_of_contract(contract).unwrap(), Some(og_class_hash)); + assert_eq!(fork_state.nonce(contract).unwrap(), Some(og_nonce)); + + let new_block = fork_block_number + 1; + let new_nonce = felt!("0xdeadbeef"); + + // update only nonce + + { + let mut state_updates = StateUpdatesWithClasses::default(); + state_updates.state_updates.nonce_updates.insert(contract, new_nonce); + + let provider_mut = provider_factory.provider_mut(); + + provider_mut + .insert_block_with_states_and_receipts( + SealedBlockWithStatus { + block: Block { + header: Header { number: new_block, ..Default::default() }, + body: Vec::new(), + } + .seal(), + status: FinalityStatus::AcceptedOnL2, + }, + state_updates, + Default::default(), + Default::default(), + ) + .unwrap(); + + provider_mut.commit().unwrap(); + } + + // verify latest state: nonce updated, class hash unchanged + let provider = provider_factory.provider(); + let latest_state = provider.latest().unwrap(); + + assert_eq!(latest_state.nonce(contract).unwrap(), Some(new_nonce)); + 
assert_eq!(latest_state.class_hash_of_contract(contract).unwrap(), Some(og_class_hash)); + + // verify historical state at new block + let new_block_state = provider.historical(new_block.into()).unwrap().unwrap(); + + assert_eq!(new_block_state.nonce(contract).unwrap(), Some(new_nonce)); + assert_eq!(new_block_state.class_hash_of_contract(contract).unwrap(), Some(og_class_hash)); + + // verify historical state at fork point still has original values + let fork_state = provider.historical(fork_block_number.into()).unwrap().unwrap(); + + assert_eq!(fork_state.class_hash_of_contract(contract).unwrap(), Some(og_class_hash)); + assert_eq!(fork_state.nonce(contract).unwrap(), Some(og_nonce)); +} + +/// Test updating only the class hash of a pre-fork contract (contract that has already been +/// deployed before the fork point) via replace_class. +/// +/// Verifies that: +/// - Class hash is updated in latest state +/// - Nonce remains unchanged +/// - Historical state at fork point preserves original values +#[tokio::test] +async fn post_fork_state_update_class_hash_only() { + let fork_block_number = 3631794; + + let starknet_client = StarknetClient::new(SEPOLIA_RPC_URL.try_into().unwrap()); + let provider_factory = ForkProviderFactory::new_in_memory(fork_block_number, starknet_client); + + // Contract that exists at the fork point + let contract = address!("0x4f4e29add19afa12c868ba1f4439099f225403ff9a71fe667eebb50e13518d3"); + let og_class_hash = felt!("0x4d9d2b2e26f94fad32e7b7a7e710286636322d5905f1cd64dc58a144294e6"); + let og_nonce = felt!("0x1d6cb2"); + + // verify original state at fork point + + let provider = provider_factory.provider(); + let fork_state = provider.latest().unwrap(); + + assert_eq!(fork_state.class_hash_of_contract(contract).unwrap(), Some(og_class_hash)); + assert_eq!(fork_state.nonce(contract).unwrap(), Some(og_nonce)); + + let new_block = fork_block_number + 1; + let new_class_hash = felt!("0xdeadbeef"); + + // update only class hash via 
replace_class + + { + let mut state_updates = StateUpdatesWithClasses::default(); + state_updates.state_updates.replaced_classes.insert(contract, new_class_hash); + + let provider_mut = provider_factory.provider_mut(); + + provider_mut + .insert_block_with_states_and_receipts( + SealedBlockWithStatus { + block: Block { + header: Header { number: new_block, ..Default::default() }, + body: Vec::new(), + } + .seal(), + status: FinalityStatus::AcceptedOnL2, + }, + state_updates, + Default::default(), + Default::default(), + ) + .unwrap(); + + provider_mut.commit().unwrap(); + } + + // verify latest state: class hash updated, nonce unchanged + + let provider = provider_factory.provider(); + + let latest_state = provider.latest().unwrap(); + assert_eq!(latest_state.class_hash_of_contract(contract).unwrap(), Some(new_class_hash)); + assert_eq!(latest_state.nonce(contract).unwrap(), Some(og_nonce)); + + // verify historical state at new block + let new_block_state = provider.historical(new_block.into()).unwrap().unwrap(); + assert_eq!(new_block_state.class_hash_of_contract(contract).unwrap(), Some(new_class_hash)); + assert_eq!(new_block_state.nonce(contract).unwrap(), Some(og_nonce)); + + // verify historical state at fork point still has original values + let fork_state = provider.historical(fork_block_number.into()).unwrap().unwrap(); + assert_eq!(fork_state.class_hash_of_contract(contract).unwrap(), Some(og_class_hash)); + assert_eq!(fork_state.nonce(contract).unwrap(), Some(og_nonce)); +} + +/// Test updating both nonce and class hash of a pre-fork contract (contract that has already been +/// deployed before the fork point). 
+/// +/// Verifies that: +/// - Both nonce and class hash are updated in latest state +/// - Historical state at fork point preserves original values +#[tokio::test] +async fn post_fork_state_update_nonce_and_class_hash() { + let fork_block_number = 2906771; + + let starknet_client = StarknetClient::new(SEPOLIA_RPC_URL.try_into().unwrap()); + let provider_factory = ForkProviderFactory::new_in_memory(fork_block_number, starknet_client); + + // Contract that exists at the fork point (deployed at block 2906741) + let contract = address!("0x0164b86b8fC5C0c84d3c53Bc95760F290420Ea2a32ed49A44fd046683a1CaAc2"); + let og_class_hash = felt!("0xe824b9f2aa225812cf230d276784b99f182ec95066d84be90cd1682e4ad069"); + let og_nonce = felt!("0x0"); + + // verify original state at fork point + let provider = provider_factory.provider(); + let fork_state = provider.latest().unwrap(); + + assert_eq!(fork_state.class_hash_of_contract(contract).unwrap(), Some(og_class_hash)); + assert_eq!(fork_state.nonce(contract).unwrap(), Some(og_nonce)); + + // update both nonce and class hash + + let new_block = fork_block_number + 1; + let new_nonce = felt!("0x10"); + let new_class_hash = felt!("0xcafebabe"); + + { + let mut state_updates = StateUpdatesWithClasses::default(); + state_updates.state_updates.nonce_updates.insert(contract, new_nonce); + state_updates.state_updates.replaced_classes.insert(contract, new_class_hash); + + let provider_mut = provider_factory.provider_mut(); + provider_mut + .insert_block_with_states_and_receipts( + SealedBlockWithStatus { + block: Block { + header: Header { number: new_block, ..Default::default() }, + body: Vec::new(), + } + .seal(), + status: FinalityStatus::AcceptedOnL2, + }, + state_updates, + Default::default(), + Default::default(), + ) + .unwrap(); + + provider_mut.commit().unwrap(); + } + + // verify latest state: both updated + let provider = provider_factory.provider(); + let latest_state = provider.latest().unwrap(); + + 
assert_eq!(latest_state.nonce(contract).unwrap(), Some(new_nonce)); + assert_eq!(latest_state.class_hash_of_contract(contract).unwrap(), Some(new_class_hash)); + + // verify historical state at new block + let new_block_state = provider.historical(new_block.into()).unwrap().unwrap(); + + assert_eq!(new_block_state.nonce(contract).unwrap(), Some(new_nonce)); + assert_eq!(new_block_state.class_hash_of_contract(contract).unwrap(), Some(new_class_hash)); + + // verify historical state at fork point still has original values + let fork_state = provider.historical(fork_block_number.into()).unwrap().unwrap(); + + assert_eq!(fork_state.class_hash_of_contract(contract).unwrap(), Some(og_class_hash)); + assert_eq!(fork_state.nonce(contract).unwrap(), Some(og_nonce)); +} diff --git a/crates/sync/pipeline/src/lib.rs b/crates/sync/pipeline/src/lib.rs index 1cab7dd2b..d1ecd90f7 100644 --- a/crates/sync/pipeline/src/lib.rs +++ b/crates/sync/pipeline/src/lib.rs @@ -29,12 +29,12 @@ //! //! ```no_run //! use katana_pipeline::Pipeline; -//! use katana_provider::providers::in_memory::InMemoryProvider; +//! use katana_provider::DbProviderFactory; //! use katana_stage::Stage; //! //! # async fn example() -> Result<(), Box> { -//! // Create a provider for stage checkpoint management -//! let provider = InMemoryProvider::new(); +//! // Create a provider factory for stage checkpoint management +//! let provider = DbProviderFactory::new_in_memory(); //! //! // Create a pipeline with a chunk size of 100 blocks //! 
let (mut pipeline, handle) = Pipeline::new(provider, 100); @@ -78,10 +78,10 @@ use core::future::IntoFuture; use futures::future::BoxFuture; use katana_primitives::block::BlockNumber; -use katana_provider::{MutableProvider, ProviderFactory}; +use katana_provider::{DbProviderFactory, MutableProvider, ProviderFactory}; use katana_provider_api::stage::StageCheckpointProvider; use katana_provider_api::ProviderError; -use katana_stage::{Stage, StageExecutionInput, StageExecutionOutput}; +use katana_stage::{PruneInput, PruneOutput, Stage, StageExecutionInput, StageExecutionOutput}; use tokio::sync::watch::{self}; use tokio::task::yield_now; use tracing::{debug, error, info, info_span, Instrument}; @@ -103,6 +103,9 @@ pub enum Error { #[error("stage {id} execution failed: {error}")] StageExecution { id: &'static str, error: katana_stage::Error }, + #[error("stage {id} pruning failed: {error}")] + StagePruning { id: &'static str, error: katana_stage::Error }, + #[error(transparent)] Provider(#[from] ProviderError), @@ -206,6 +209,26 @@ impl PipelineHandle { } } +/// Configuration for pruning behavior in the pipeline. +#[derive(Debug, Clone, Default)] +pub struct PruningConfig { + /// Distance from tip. Blocks older than `tip - distance` will be pruned. + /// `None` means no pruning (archive mode). + pub distance: Option, +} + +impl PruningConfig { + /// Creates a new pruning configuration with the specified distance. + pub fn new(distance: Option) -> Self { + Self { distance } + } + + /// Returns whether pruning is enabled. + pub fn is_enabled(&self) -> bool { + self.distance.is_some() + } +} + /// Syncing pipeline. /// /// The pipeline drives the execution of stages, running each stage to completion in the order they @@ -220,18 +243,19 @@ impl PipelineHandle { /// Proper unwinding support would require each stage to implement rollback logic to revert their /// state to an earlier block. 
This is a significant feature that would need to be designed and /// implemented across all stages. -pub struct Pipeline

{ +pub struct Pipeline { chunk_size: u64, - storage_provider: P, + storage_provider: DbProviderFactory, stages: Vec>, cmd_rx: watch::Receiver>, cmd_tx: watch::Sender>, block_tx: watch::Sender>, tip: Option, metrics: PipelineMetrics, + pruning_config: PruningConfig, } -impl

Pipeline

{ +impl Pipeline { /// Creates a new empty pipeline. /// /// # Arguments @@ -242,7 +266,7 @@ impl

Pipeline

{ /// # Returns /// /// A tuple containing the pipeline instance and a handle for controlling it. - pub fn new(provider: P, chunk_size: u64) -> (Self, PipelineHandle) { + pub fn new(provider: DbProviderFactory, chunk_size: u64) -> (Self, PipelineHandle) { let (tx, rx) = watch::channel(None); let (block_tx, _block_rx) = watch::channel(None); let handle = PipelineHandle { tx: tx.clone(), block_tx: block_tx.clone() }; @@ -255,10 +279,23 @@ impl

Pipeline

{ chunk_size, tip: None, metrics: PipelineMetrics::new(), + pruning_config: PruningConfig::default(), }; (pipeline, handle) } + /// Sets the pruning configuration for the pipeline. + /// + /// This controls how and when historical state is pruned during synchronization. + pub fn set_pruning_config(&mut self, config: PruningConfig) { + self.pruning_config = config; + } + + /// Returns the current pruning configuration. + pub fn pruning_config(&self) -> &PruningConfig { + &self.pruning_config + } + /// Adds a new stage to the end of the pipeline. /// /// Stages are executed in the order they are added. @@ -287,11 +324,7 @@ impl

Pipeline

{ } } -impl

Pipeline

-where - P: ProviderFactory, -

::ProviderMut: StageCheckpointProvider, -{ +impl Pipeline { /// Runs the pipeline continuously until signaled to stop. /// /// The pipeline processes each stage in chunks up until it reaches the current tip, then waits @@ -321,7 +354,6 @@ where result = self.run_loop() => { if let Err(error) = result { error!(target: "pipeline", %error, "Pipeline finished due to error."); - break; } } } @@ -351,7 +383,7 @@ where /// /// Returns an error if any stage execution fails or if the pipeline fails to read the /// checkpoint. - pub async fn run_once(&mut self, to: BlockNumber) -> PipelineResult { + pub async fn execute(&mut self, to: BlockNumber) -> PipelineResult { if self.stages.is_empty() { return Ok(to); } @@ -370,7 +402,7 @@ where let stage_metrics = self.metrics.stage(id); // Get the checkpoint for the stage, otherwise default to block number 0 - let checkpoint = self.storage_provider.provider_mut().checkpoint(id)?; + let checkpoint = self.storage_provider.provider_mut().execution_checkpoint(id)?; let span = info_span!(target: "pipeline", "stage.execute", stage = %id, %to); let enter = span.entered(); @@ -414,7 +446,7 @@ where stage_metrics.record_blocks_processed(blocks_processed); let provider_mut = self.storage_provider.provider_mut(); - provider_mut.set_checkpoint(id, last_block_processed)?; + provider_mut.set_execution_checkpoint(id, last_block_processed)?; provider_mut.commit()?; stage_metrics.set_checkpoint(last_block_processed); @@ -422,14 +454,60 @@ where info!(target: "pipeline", checkpoint = %last_block_processed, "New checkpoint set."); } - let min_last_block_processed = last_block_processed_list.into_iter().min(); + Ok(last_block_processed_list.into_iter().min().unwrap_or(to)) + } - // Update overall pipeline sync position (minimum checkpoint across all stages) - if let Some(min_checkpoint) = min_last_block_processed { - self.metrics.set_sync_position(min_checkpoint); + /// Runs pruning on all stages. 
+ pub async fn prune(&mut self) -> PipelineResult<()> { + if self.stages.is_empty() { + return Ok(()); } - Ok(min_last_block_processed.unwrap_or(to)) + for stage in self.stages.iter_mut() { + let id = stage.id(); + + let span = info_span!(target: "pipeline", "stage.prune", stage = %id); + let enter = span.entered(); + + // Get execution checkpoint (tip for this stage) and prune checkpoint + let execution_checkpoint = + self.storage_provider.provider_mut().execution_checkpoint(id)?; + let prune_checkpoint = self.storage_provider.provider_mut().prune_checkpoint(id)?; + + let Some(tip) = execution_checkpoint else { + info!(target: "pipeline", "Skipping stage - no data to prune (no execution checkpoint)."); + continue; + }; + + let prune_input = PruneInput::new(tip, self.pruning_config.distance, prune_checkpoint); + + let Some(range) = prune_input.prune_range() else { + info!(target: "pipeline", "Skipping stage - nothing to prune (already caught up)."); + continue; + }; + + info!(target: "pipeline", distance = ?self.pruning_config.distance, from = range.start, to = range.end, "Pruning stage."); + + let span_inner = enter.exit(); + let PruneOutput { pruned_count } = stage + .prune(&prune_input) + .instrument(span_inner.clone()) + .await + .map_err(|error| Error::StagePruning { id, error })?; + + // Update prune checkpoint to the last pruned block (range.end - 1 since range is + // exclusive) + if range.end > 0 { + let provider_mut = self.storage_provider.provider_mut(); + provider_mut.set_prune_checkpoint(id, range.end - 1)?; + provider_mut.commit()?; + } + + let _enter = span_inner.enter(); + info!(target: "pipeline", %pruned_count, "Stage pruning completed."); + } + + Ok(()) } /// Run the pipeline loop. 
@@ -441,15 +519,21 @@ where if let Some(tip) = self.tip { let to = current_chunk_tip.min(tip); let iteration_start = std::time::Instant::now(); - let last_block_processed = self.run_once(to).await?; - let iteration_duration = iteration_start.elapsed().as_secs_f64(); - // Record pipeline metrics for this iteration + let last_block_processed = self.execute(to).await?; + self.metrics.set_sync_position(last_block_processed); + + let iteration_duration = iteration_start.elapsed().as_secs_f64(); self.metrics.record_iteration_duration(iteration_duration); // Notify subscribers about the newly processed block let _ = self.block_tx.send(Some(last_block_processed)); + // Run pruning if enabled + if self.pruning_config.is_enabled() { + self.prune().await?; + } + if last_block_processed >= tip { info!(target: "pipeline", %tip, "Finished syncing until tip."); self.tip = None; @@ -459,29 +543,29 @@ where } } else { info!(target: "pipeline", "Waiting to receive new tip."); - } + self.cmd_rx.changed().await.map_err(|_| Error::CommandChannelClosed)?; - if let Some(PipelineCommand::SetTip(new_tip)) = *self - .cmd_rx - .wait_for(|c| matches!(c, &Some(PipelineCommand::SetTip(_)))) - .await - .map_err(|_| Error::CommandChannelClosed)? - { - info!(target: "pipeline", tip = %new_tip, "A new tip has been set."); - self.tip = Some(new_tip); - self.metrics.set_sync_target(new_tip); + match *self.cmd_rx.borrow_and_update() { + Some(PipelineCommand::SetTip(new_tip)) => { + info!(target: "pipeline", tip = %new_tip, "A new tip has been set."); + self.tip = Some(new_tip); + self.metrics.set_sync_target(new_tip); + } + + Some(PipelineCommand::Stop) => break, + + _ => {} + } } yield_now().await; } + + Ok(()) } } -impl

IntoFuture for Pipeline

-where - P: ProviderFactory + 'static, -

::ProviderMut: StageCheckpointProvider, -{ +impl IntoFuture for Pipeline { type Output = PipelineResult<()>; type IntoFuture = PipelineFut; @@ -494,13 +578,14 @@ where } } -impl core::fmt::Debug for Pipeline

{ +impl core::fmt::Debug for Pipeline { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("Pipeline") .field("command", &self.cmd_rx) .field("provider", &self.storage_provider) .field("chunk_size", &self.chunk_size) + .field("pruning_config", &self.pruning_config) .field("stages", &self.stages.iter().map(|s| s.id()).collect::>()) - .finish() + .finish_non_exhaustive() } } diff --git a/crates/sync/pipeline/tests/pipeline.rs b/crates/sync/pipeline/tests/pipeline.rs index 165356c52..10a5399f0 100644 --- a/crates/sync/pipeline/tests/pipeline.rs +++ b/crates/sync/pipeline/tests/pipeline.rs @@ -9,7 +9,10 @@ use katana_primitives::block::BlockNumber; use katana_provider::api::stage::StageCheckpointProvider; use katana_provider::test_utils::test_provider; use katana_provider::{MutableProvider, ProviderFactory}; -use katana_stage::{Stage, StageExecutionInput, StageExecutionOutput, StageResult}; +use katana_stage::{ + PruneInput, PruneOutput, PruneResult, Stage, StageExecutionInput, StageExecutionOutput, + StageResult, +}; /// Simple mock stage that does nothing struct MockStage; @@ -22,6 +25,11 @@ impl Stage for MockStage { fn execute<'a>(&'a mut self, input: &'a StageExecutionInput) -> BoxFuture<'a, StageResult> { Box::pin(async move { Ok(StageExecutionOutput { last_block_processed: input.to() }) }) } + + fn prune<'a>(&'a mut self, input: &'a PruneInput) -> BoxFuture<'a, PruneResult> { + let _ = input; + Box::pin(async move { Ok(PruneOutput::default()) }) + } } /// Tracks execution calls with their inputs @@ -31,17 +39,30 @@ struct ExecutionRecord { to: BlockNumber, } -/// Mock stage that tracks execution +/// Tracks pruning calls with their inputs +#[derive(Debug, Clone)] +struct PruneRecord { + from: BlockNumber, + to: BlockNumber, +} + +/// Mock stage that tracks execution and pruning #[derive(Debug, Clone)] struct TrackingStage { id: &'static str, /// Used to tracks how many times the stage has been executed executions: Arc>>, + /// 
Used to track how many times the stage has been pruned + prunes: Arc>>, } impl TrackingStage { fn new(id: &'static str) -> Self { - Self { id, executions: Arc::new(Mutex::new(Vec::new())) } + Self { + id, + executions: Arc::new(Mutex::new(Vec::new())), + prunes: Arc::new(Mutex::new(Vec::new())), + } } fn executions(&self) -> Vec { @@ -51,6 +72,14 @@ impl TrackingStage { fn execution_count(&self) -> usize { self.executions.lock().unwrap().len() } + + fn prune_records(&self) -> Vec { + self.prunes.lock().unwrap().clone() + } + + fn prune_count(&self) -> usize { + self.prunes.lock().unwrap().len() + } } impl Stage for TrackingStage { @@ -68,6 +97,17 @@ impl Stage for TrackingStage { Ok(StageExecutionOutput { last_block_processed: input.to() }) }) } + + fn prune<'a>(&'a mut self, input: &'a PruneInput) -> BoxFuture<'a, PruneResult> { + Box::pin(async move { + if let Some(range) = input.prune_range() { + self.prunes.lock().unwrap().push(PruneRecord { from: range.start, to: range.end }); + Ok(PruneOutput { pruned_count: range.end - range.start }) + } else { + Ok(PruneOutput::default()) + } + }) + } } /// Mock stage that fails on execution @@ -90,6 +130,11 @@ impl Stage for FailingStage { fn execute<'a>(&'a mut self, _: &'a StageExecutionInput) -> BoxFuture<'a, StageResult> { Box::pin(async { Err(katana_stage::Error::Other(anyhow!("Stage execution failed"))) }) } + + fn prune<'a>(&'a mut self, input: &'a PruneInput) -> BoxFuture<'a, PruneResult> { + let _ = input; + Box::pin(async move { Ok(PruneOutput::default()) }) + } } /// Mock stage that always reports a fixed `last_block_processed`. 
@@ -131,14 +176,19 @@ impl Stage for FixedOutputStage { Ok(StageExecutionOutput { last_block_processed }) }) } + + fn prune<'a>(&'a mut self, input: &'a PruneInput) -> BoxFuture<'a, PruneResult> { + let _ = input; + Box::pin(async move { Ok(PruneOutput::default()) }) + } } // ============================================================================ -// run_to() - Single Stage Tests +// execute() - Single Stage Tests // ============================================================================ #[tokio::test] -async fn run_to_executes_stage_to_target() { +async fn execute_executes_stage_to_target() { let provider_factory = test_provider(); let (mut pipeline, handle) = Pipeline::new(provider_factory.clone(), 10); @@ -147,11 +197,11 @@ async fn run_to_executes_stage_to_target() { pipeline.add_stage(stage); handle.set_tip(5); - let result = pipeline.run_once(5).await.unwrap(); + let result = pipeline.execute(5).await.unwrap(); let provider = provider_factory.provider_mut(); assert_eq!(result, 5); - assert_eq!(provider.checkpoint(stage_clone.id()).unwrap(), Some(5)); + assert_eq!(provider.execution_checkpoint(stage_clone.id()).unwrap(), Some(5)); let execs = stage_clone.executions(); assert_eq!(execs.len(), 1); @@ -160,7 +210,7 @@ async fn run_to_executes_stage_to_target() { } #[tokio::test] -async fn run_to_skips_stage_when_checkpoint_equals_target() { +async fn execute_skips_stage_when_checkpoint_equals_target() { let provider_factory = test_provider(); let (mut pipeline, handle) = Pipeline::new(provider_factory.clone(), 10); @@ -169,20 +219,20 @@ async fn run_to_skips_stage_when_checkpoint_equals_target() { // Set initial checkpoint let provider = provider_factory.provider_mut(); - provider.set_checkpoint(stage.id(), 5).unwrap(); + provider.set_execution_checkpoint(stage.id(), 5).unwrap(); provider.commit().unwrap(); pipeline.add_stage(stage); handle.set_tip(5); - let result = pipeline.run_once(5).await.unwrap(); + let result = pipeline.execute(5).await.unwrap(); 
assert_eq!(result, 5); assert_eq!(stage_clone.executions().len(), 0); // Not executed } #[tokio::test] -async fn run_to_skips_stage_when_checkpoint_exceeds_target() { +async fn execute_skips_stage_when_checkpoint_exceeds_target() { let provider_factory = test_provider(); let (mut pipeline, handle) = Pipeline::new(provider_factory.clone(), 10); @@ -191,20 +241,20 @@ async fn run_to_skips_stage_when_checkpoint_exceeds_target() { // Set checkpoint beyond target let provider = provider_factory.provider_mut(); - provider.set_checkpoint("Stage1", 10).unwrap(); + provider.set_execution_checkpoint("Stage1", 10).unwrap(); provider.commit().unwrap(); pipeline.add_stage(stage); handle.set_tip(10); - let result = pipeline.run_once(5).await.unwrap(); + let result = pipeline.execute(5).await.unwrap(); assert_eq!(result, 10); // Returns the checkpoint assert_eq!(stage_clone.executions().len(), 0); // Not executed } #[tokio::test] -async fn run_to_uses_checkpoint_plus_one_as_from() { +async fn execute_uses_checkpoint_plus_one_as_from() { let provider_factory = test_provider(); let (mut pipeline, handle) = Pipeline::new(provider_factory.clone(), 10); @@ -213,12 +263,12 @@ async fn run_to_uses_checkpoint_plus_one_as_from() { // Set checkpoint to 3 let provider = provider_factory.provider_mut(); - provider.set_checkpoint(stage.id(), 3).unwrap(); + provider.set_execution_checkpoint(stage.id(), 3).unwrap(); provider.commit().unwrap(); pipeline.add_stage(stage); handle.set_tip(10); - pipeline.run_once(10).await.unwrap(); + pipeline.execute(10).await.unwrap(); let execs = stage_clone.executions(); assert_eq!(execs.len(), 1); @@ -229,11 +279,11 @@ async fn run_to_uses_checkpoint_plus_one_as_from() { } // ============================================================================ -// run_to() - Multiple Stages Tests +// execute() - Multiple Stages Tests // ============================================================================ #[tokio::test] -async fn 
run_to_executes_all_stages_in_order() { +async fn execute_executes_all_stages_in_order() { let provider_factory = test_provider(); let (mut pipeline, handle) = Pipeline::new(provider_factory.clone(), 10); @@ -252,7 +302,7 @@ async fn run_to_executes_all_stages_in_order() { ]); handle.set_tip(5); - pipeline.run_once(5).await.unwrap(); + pipeline.execute(5).await.unwrap(); // All stages should be executed once because the tip is 5 and the chunk size is 10 assert_eq!(stage1_clone.execution_count(), 1); @@ -261,13 +311,13 @@ async fn run_to_executes_all_stages_in_order() { // All checkpoints should be set let provider = provider_factory.provider_mut(); - assert_eq!(provider.checkpoint(stage1_clone.id()).unwrap(), Some(5)); - assert_eq!(provider.checkpoint(stage2_clone.id()).unwrap(), Some(5)); - assert_eq!(provider.checkpoint(stage3_clone.id()).unwrap(), Some(5)); + assert_eq!(provider.execution_checkpoint(stage1_clone.id()).unwrap(), Some(5)); + assert_eq!(provider.execution_checkpoint(stage2_clone.id()).unwrap(), Some(5)); + assert_eq!(provider.execution_checkpoint(stage3_clone.id()).unwrap(), Some(5)); } #[tokio::test] -async fn run_to_with_mixed_checkpoints() { +async fn execute_with_mixed_checkpoints() { let provider_factory = test_provider(); let (mut pipeline, handle) = Pipeline::new(provider_factory.clone(), 10); @@ -287,13 +337,13 @@ async fn run_to_with_mixed_checkpoints() { let provider = provider_factory.provider_mut(); // Stage1 already at checkpoint 10 (should skip) - provider.set_checkpoint(stage1_clone.id(), 10).unwrap(); + provider.set_execution_checkpoint(stage1_clone.id(), 10).unwrap(); // Stage2 at checkpoint 3 (should execute) - provider.set_checkpoint(stage2_clone.id(), 3).unwrap(); + provider.set_execution_checkpoint(stage2_clone.id(), 3).unwrap(); provider.commit().unwrap(); handle.set_tip(10); - pipeline.run_once(10).await.unwrap(); + pipeline.execute(10).await.unwrap(); // Stage1 should be skipped because its checkpoint (10) >= than the tip 
(10) assert_eq!(stage1_clone.execution_count(), 0); @@ -312,7 +362,7 @@ async fn run_to_with_mixed_checkpoints() { } #[tokio::test] -async fn run_to_returns_minimum_last_block_processed() { +async fn execute_returns_minimum_last_block_processed() { let provider_factory = test_provider(); let (mut pipeline, handle) = Pipeline::new(provider_factory.clone(), 10); @@ -331,7 +381,7 @@ async fn run_to_returns_minimum_last_block_processed() { ]); handle.set_tip(20); - let result = pipeline.run_once(20).await.unwrap(); + let result = pipeline.execute(20).await.unwrap(); // make sure that all the stages were executed once assert_eq!(stage1_clone.execution_count(), 1); @@ -340,13 +390,13 @@ async fn run_to_returns_minimum_last_block_processed() { let provider = provider_factory.provider_mut(); assert_eq!(result, 5); - assert_eq!(provider.checkpoint(stage1_clone.id()).unwrap(), Some(10)); - assert_eq!(provider.checkpoint(stage2_clone.id()).unwrap(), Some(5)); - assert_eq!(provider.checkpoint(stage3_clone.id()).unwrap(), Some(20)); + assert_eq!(provider.execution_checkpoint(stage1_clone.id()).unwrap(), Some(10)); + assert_eq!(provider.execution_checkpoint(stage2_clone.id()).unwrap(), Some(5)); + assert_eq!(provider.execution_checkpoint(stage3_clone.id()).unwrap(), Some(20)); } #[tokio::test] -async fn run_to_middle_stage_skip_continues() { +async fn execute_middle_stage_skip_continues() { let provider_factory = test_provider(); let (mut pipeline, handle) = Pipeline::new(provider_factory.clone(), 10); @@ -366,11 +416,11 @@ async fn run_to_middle_stage_skip_continues() { // stage in the middle of the sequence already complete let provider = provider_factory.provider_mut(); - provider.set_checkpoint(stage2_clone.id(), 10).unwrap(); + provider.set_execution_checkpoint(stage2_clone.id(), 10).unwrap(); provider.commit().unwrap(); handle.set_tip(10); - pipeline.run_once(10).await.unwrap(); + pipeline.execute(10).await.unwrap(); // Stage1 and Stage3 should execute 
assert_eq!(stage1_clone.execution_count(), 1); @@ -410,12 +460,12 @@ async fn run_processes_single_chunk_to_tip() { assert_eq!(execs[0].from, 0); assert_eq!(execs[0].to, 50); - assert_eq!(provider_factory.provider_mut().checkpoint("Stage1").unwrap(), Some(50)); + assert_eq!(provider_factory.provider_mut().execution_checkpoint("Stage1").unwrap(), Some(50)); } #[tokio::test] async fn run_processes_multiple_chunks_to_tip() { - let provider_factory = Arc::new(test_provider()); + let provider_factory = test_provider(); let (mut pipeline, handle) = Pipeline::new(provider_factory.clone(), 10); // Small chunk size let stage = TrackingStage::new("Stage1"); @@ -483,7 +533,89 @@ async fn run_processes_new_tip_after_completing_previous() { let execs = executions.lock().unwrap(); assert!(execs.len() >= 3); // 1-10, 11-20, 21-25 let provider = provider_factory.provider_mut(); - assert_eq!(provider.checkpoint("Stage1").unwrap(), Some(25)); + assert_eq!(provider.execution_checkpoint("Stage1").unwrap(), Some(25)); +} + +#[tokio::test] +async fn run_should_prune() { + let provider_factory = test_provider(); + + let (mut pipeline, handle) = Pipeline::new(provider_factory.clone(), 10); + pipeline.set_pruning_config(PruningConfig::new(Some(5))); + + let stage = TrackingStage::new("Stage1"); + let executions = stage.executions.clone(); + let prunings = stage.prunes.clone(); + + pipeline.add_stage(stage); + handle.set_tip(10); // Set initial tip + + let task_handle = tokio::spawn(async move { pipeline.run().await }); + + // Wait for first tip to process + tokio::time::sleep(Duration::from_millis(100)).await; + + // Set new tip + handle.set_tip(25); + + // Wait for second tip to process + tokio::time::sleep(Duration::from_millis(100)).await; + + handle.stop(); + let result = task_handle.await.unwrap(); + assert!(result.is_ok()); + + // Should have processed both tips + let execs = executions.lock().unwrap(); + assert!(execs.len() >= 3); // 1-10, 11-20, 21-25 + let prunes = 
prunings.lock().unwrap(); + assert!(prunes.len() >= 3); // 0-4, 5-14, 15-19 + + let provider = provider_factory.provider_mut(); + assert_eq!(provider.execution_checkpoint("Stage1").unwrap(), Some(25)); + assert_eq!(provider.prune_checkpoint("Stage1").unwrap(), Some(19)); +} + +#[tokio::test] +async fn run_should_not_prune_if_pruning_disabled() { + let provider_factory = test_provider(); + + let (mut pipeline, handle) = Pipeline::new(provider_factory.clone(), 10); + + // disable pruning by not setting the pruning config + // pipeline.set_pruning_config(PruningConfig::new(Some(5))); + + let stage = TrackingStage::new("Stage1"); + let executions = stage.executions.clone(); + let prunings = stage.prunes.clone(); + + pipeline.add_stage(stage); + handle.set_tip(10); // Set initial tip + + let task_handle = tokio::spawn(async move { pipeline.run().await }); + + // Wait for first tip to process + tokio::time::sleep(Duration::from_millis(100)).await; + + // Set new tip + handle.set_tip(25); + + // Wait for second tip to process + tokio::time::sleep(Duration::from_millis(100)).await; + + handle.stop(); + let result = task_handle.await.unwrap(); + assert!(result.is_ok()); + + // Should have processed both tips + let execs = executions.lock().unwrap(); + assert!(execs.len() >= 3); // 1-10, 11-20, 21-25 + let prunes = prunings.lock().unwrap(); + assert!(prunes.is_empty()); + + let provider = provider_factory.provider_mut(); + assert_eq!(provider.execution_checkpoint("Stage1").unwrap(), Some(25)); + assert_eq!(provider.prune_checkpoint("Stage1").unwrap(), None); } /// This test ensures that the pipeline will immediately stop its execution if the stop signal @@ -508,9 +640,14 @@ async fn run_should_be_cancelled_if_stop_requested() { Ok(StageExecutionOutput { last_block_processed: 100 }) }) } + + fn prune<'a>(&'a mut self, input: &'a PruneInput) -> BoxFuture<'a, PruneResult> { + let _ = input; + Box::pin(async move { Ok(PruneOutput::default()) }) + } } - let provider = 
Arc::new(test_provider()); + let provider = test_provider(); let (mut pipeline, handle) = Pipeline::new(provider.clone(), 100); let stage = PendingStage::default(); @@ -545,12 +682,12 @@ async fn stage_execution_error_stops_pipeline() { pipeline.add_stage(stage); handle.set_tip(10); - let result = pipeline.run_once(10).await; + let result = pipeline.execute(10).await; assert!(result.is_err()); // Checkpoint should not be set after failure let provider = provider_factory.provider_mut(); - assert_eq!(provider.checkpoint(stage_clone.id()).unwrap(), None); + assert_eq!(provider.execution_checkpoint(stage_clone.id()).unwrap(), None); } /// If a stage fails, all subsequent stages should not execute and the pipeline should stop. @@ -569,7 +706,7 @@ async fn stage_error_doesnt_affect_subsequent_runs() { pipeline.add_stage(stage2); handle.set_tip(10); - let error = pipeline.run_once(10).await.unwrap_err(); + let error = pipeline.execute(10).await.unwrap_err(); let katana_pipeline::Error::StageExecution { id, error } = error else { panic!("Unexpected error type"); @@ -593,7 +730,7 @@ async fn empty_pipeline_returns_target() { // No stages added handle.set_tip(10); - let result = pipeline.run_once(10).await.unwrap(); + let result = pipeline.execute(10).await.unwrap(); assert_eq!(result, 10); } @@ -608,13 +745,13 @@ async fn tip_equals_checkpoint_no_execution() { // set checkpoint for Stage1 stage let provider = provider_factory.provider_mut(); - provider.set_checkpoint(stage.id(), 10).unwrap(); + provider.set_execution_checkpoint(stage.id(), 10).unwrap(); provider.commit().unwrap(); pipeline.add_stage(stage); handle.set_tip(10); - pipeline.run_once(10).await.unwrap(); + pipeline.execute(10).await.unwrap(); assert_eq!(executions.lock().unwrap().len(), 0, "Stage1 should not be executed"); } @@ -632,13 +769,13 @@ async fn tip_less_than_checkpoint_skip_all() { // set checkpoint for Stage1 stage let provider = provider_factory.provider_mut(); let checkpoint = 20; - 
provider.set_checkpoint(stage.id(), checkpoint).unwrap(); + provider.set_execution_checkpoint(stage.id(), checkpoint).unwrap(); provider.commit().unwrap(); pipeline.add_stage(stage); handle.set_tip(20); - let result = pipeline.run_once(10).await.unwrap(); + let result = pipeline.execute(10).await.unwrap(); assert_eq!(result, checkpoint); assert_eq!(executions.lock().unwrap().len(), 0, "Stage1 should not be executed"); @@ -682,26 +819,388 @@ async fn stage_checkpoint() { pipeline.add_stage(MockStage); // check that the checkpoint was set - let initial_checkpoint = provider_factory.provider_mut().checkpoint("Mock").unwrap(); + let initial_checkpoint = provider_factory.provider_mut().execution_checkpoint("Mock").unwrap(); assert_eq!(initial_checkpoint, None); handle.set_tip(5); - pipeline.run_once(5).await.expect("failed to run the pipeline once"); + pipeline.execute(5).await.expect("failed to run the pipeline once"); // check that the checkpoint was set - let actual_checkpoint = provider_factory.provider_mut().checkpoint("Mock").unwrap(); + let actual_checkpoint = provider_factory.provider_mut().execution_checkpoint("Mock").unwrap(); assert_eq!(actual_checkpoint, Some(5)); handle.set_tip(10); - pipeline.run_once(10).await.expect("failed to run the pipeline once"); + pipeline.execute(10).await.expect("failed to run the pipeline once"); // check that the checkpoint was set - let actual_checkpoint = provider_factory.provider_mut().checkpoint("Mock").unwrap(); + let actual_checkpoint = provider_factory.provider_mut().execution_checkpoint("Mock").unwrap(); assert_eq!(actual_checkpoint, Some(10)); - pipeline.run_once(10).await.expect("failed to run the pipeline once"); + pipeline.execute(10).await.expect("failed to run the pipeline once"); // check that the checkpoint doesn't change - let actual_checkpoint = provider_factory.provider_mut().checkpoint("Mock").unwrap(); + let actual_checkpoint = provider_factory.provider_mut().execution_checkpoint("Mock").unwrap(); 
assert_eq!(actual_checkpoint, Some(10)); } + +// ============================================================================ +// Pruning Tests +// ============================================================================ + +use katana_pipeline::PruningConfig; + +#[tokio::test] +async fn prune_skips_when_no_execution_checkpoint() { + let provider_factory = test_provider(); + let (mut pipeline, _handle) = Pipeline::new(provider_factory.clone(), 10); + + pipeline.set_pruning_config(PruningConfig::new(Some(10))); + + let stage = TrackingStage::new("Stage1"); + let stage_clone = stage.clone(); + pipeline.add_stage(stage); + + let provider = provider_factory.provider_mut(); + + // Verify we don't have an execution checkpoint for the stage yet + let execution_checkpoint = provider.execution_checkpoint(stage_clone.id()).unwrap(); + assert_eq!(execution_checkpoint, None); + provider.commit().unwrap(); + + // No checkpoint set - stage has no data to prune + pipeline.prune().await.unwrap(); + + // Should not prune when there's no execution checkpoint + assert_eq!(stage_clone.prune_count(), 0); +} + +#[tokio::test] +async fn prune_skips_when_archive_mode() { + let provider_factory = test_provider(); + let (mut pipeline, handle) = Pipeline::new(provider_factory.clone(), 10); + + // None distance means no pruning (archive mode) + pipeline.set_pruning_config(PruningConfig::new(None)); + + let stage = TrackingStage::new("Stage1"); + let stage_clone = stage.clone(); + pipeline.add_stage(stage); + + // Set checkpoint to simulate execution having completed + let provider = provider_factory.provider_mut(); + provider.set_execution_checkpoint(stage_clone.id(), 100).unwrap(); + provider.commit().unwrap(); + + handle.set_tip(100); + pipeline.prune().await.unwrap(); + + // Archive mode should not prune anything + assert_eq!(stage_clone.prune_count(), 0); +} + +/// Tests different pruning distances and verifies the correct prune range is calculated: +/// - distance=50: keeps last 50 
blocks, prunes everything before tip - 50 +/// - distance=1: keeps only latest state, prunes everything before tip - 1 +/// - distance=100 with tip=50: skips pruning when tip < distance +#[tokio::test] +async fn prune_distance_behavior() { + // Test case: distance=50 keeps last 50 blocks + // tip=100, distance=50 -> prune 0..50 + { + let provider_factory = test_provider(); + let (mut pipeline, handle) = Pipeline::new(provider_factory.clone(), 10); + pipeline.set_pruning_config(PruningConfig::new(Some(50))); + + let stage = TrackingStage::new("Stage1"); + let stage_clone = stage.clone(); + pipeline.add_stage(stage); + + let provider = provider_factory.provider_mut(); + provider.set_execution_checkpoint(stage_clone.id(), 100).unwrap(); + provider.commit().unwrap(); + + handle.set_tip(100); + pipeline.prune().await.unwrap(); + + let records = stage_clone.prune_records(); + assert_eq!(records.len(), 1, "distance=50: expected one prune operation"); + assert_eq!(records[0].from, 0, "distance=50: prune range start mismatch"); + assert_eq!(records[0].to, 50, "distance=50: prune range end mismatch"); + } + + // Test case: distance=1 keeps only latest (minimal equivalent) + // tip=100 -> prune 0..99 + { + let provider_factory = test_provider(); + let (mut pipeline, handle) = Pipeline::new(provider_factory.clone(), 10); + pipeline.set_pruning_config(PruningConfig::new(Some(1))); + + let stage = TrackingStage::new("Stage1"); + let stage_clone = stage.clone(); + pipeline.add_stage(stage); + + let provider = provider_factory.provider_mut(); + provider.set_execution_checkpoint(stage_clone.id(), 100).unwrap(); + provider.commit().unwrap(); + + handle.set_tip(100); + pipeline.prune().await.unwrap(); + + let records = stage_clone.prune_records(); + assert_eq!(records.len(), 1, "distance=1: expected one prune operation"); + assert_eq!(records[0].from, 0, "distance=1: prune range start mismatch"); + assert_eq!(records[0].to, 99, "distance=1: prune range end mismatch"); + } + + // Test 
case: distance=100 skips when not enough blocks + // tip=50, distance=100 -> no pruning + { + let provider_factory = test_provider(); + let (mut pipeline, handle) = Pipeline::new(provider_factory.clone(), 10); + pipeline.set_pruning_config(PruningConfig::new(Some(100))); + + let stage = TrackingStage::new("Stage1"); + let stage_clone = stage.clone(); + pipeline.add_stage(stage); + + let provider = provider_factory.provider_mut(); + provider.set_execution_checkpoint(stage_clone.id(), 50).unwrap(); + provider.commit().unwrap(); + + handle.set_tip(50); + pipeline.prune().await.unwrap(); + + assert_eq!(stage_clone.prune_count(), 0, "distance=100 with tip=50: expected no pruning"); + } +} + +#[tokio::test] +async fn prune_uses_checkpoint_to_avoid_re_pruning() { + let provider_factory = test_provider(); + let (mut pipeline, handle) = Pipeline::new(provider_factory.clone(), 10); + + pipeline.set_pruning_config(PruningConfig::new(Some(50))); + + let stage = TrackingStage::new("Stage1"); + let stage_clone = stage.clone(); + pipeline.add_stage(stage); + + let provider = provider_factory.provider_mut(); + // Set execution checkpoint to 200 + provider.set_execution_checkpoint(stage_clone.id(), 200).unwrap(); + // Set prune checkpoint - already pruned up to block 100 + provider.set_prune_checkpoint(stage_clone.id(), 100).unwrap(); + provider.commit().unwrap(); + + handle.set_tip(200); + pipeline.prune().await.unwrap(); + + // Should only prune blocks 101-149 (from last_pruned+1 to tip-keep_blocks) + let records = stage_clone.prune_records(); + assert_eq!(records.len(), 1); + assert_eq!(records[0].from, 101); // last_pruned + 1 + assert_eq!(records[0].to, 150); // tip (200) - keep_blocks (50) = 150 +} + +#[tokio::test] +async fn prune_skips_when_already_caught_up() { + let provider_factory = test_provider(); + let (mut pipeline, handle) = Pipeline::new(provider_factory.clone(), 10); + + pipeline.set_pruning_config(PruningConfig::new(Some(50))); + + let stage = 
TrackingStage::new("Stage1"); + let stage_clone = stage.clone(); + pipeline.add_stage(stage); + + let provider = provider_factory.provider_mut(); + // Set execution checkpoint to 100 + provider.set_execution_checkpoint(stage_clone.id(), 100).unwrap(); + // Already pruned up to block 49 (which is tip - keep_blocks - 1) + provider.set_prune_checkpoint(stage_clone.id(), 49).unwrap(); + provider.commit().unwrap(); + + handle.set_tip(100); + pipeline.prune().await.unwrap(); + + // Should not prune - already caught up + assert_eq!(stage_clone.prune_count(), 0); +} + +#[tokio::test] +async fn prune_multiple_stages_independently() { + let provider_factory = test_provider(); + let (mut pipeline, handle) = Pipeline::new(provider_factory.clone(), 10); + + pipeline.set_pruning_config(PruningConfig::new(Some(50))); + + let stage1 = TrackingStage::new("Stage1"); + let stage2 = TrackingStage::new("Stage2"); + let stage1_clone = stage1.clone(); + let stage2_clone = stage2.clone(); + + pipeline.add_stage(stage1); + pipeline.add_stage(stage2); + + let provider = provider_factory.provider_mut(); + // Stage1 at checkpoint 100 + provider.set_execution_checkpoint(stage1_clone.id(), 100).unwrap(); + // Stage2 at checkpoint 200 + provider.set_execution_checkpoint(stage2_clone.id(), 200).unwrap(); + provider.commit().unwrap(); + + handle.set_tip(200); + pipeline.prune().await.unwrap(); + + // Stage1: prune 0-49 (tip=100, keep=50) + let records1 = stage1_clone.prune_records(); + assert_eq!(records1.len(), 1); + assert_eq!(records1[0].from, 0); + assert_eq!(records1[0].to, 50); + + // Stage2: prune 0-149 (tip=200, keep=50) + let records2 = stage2_clone.prune_records(); + assert_eq!(records2.len(), 1); + assert_eq!(records2[0].from, 0); + assert_eq!(records2[0].to, 150); + + // Verify prune checkpoints were set independently + let provider = provider_factory.provider_mut(); + assert_eq!(provider.prune_checkpoint(stage1_clone.id()).unwrap(), Some(49)); + 
assert_eq!(provider.prune_checkpoint(stage2_clone.id()).unwrap(), Some(149)); +} + +/// Tests incremental pruning across multiple runs and verifies checkpoint persistence. +/// +/// This test covers: +/// 1. Initial pruning sets the checkpoint correctly +/// 2. Subsequent pruning uses the checkpoint to avoid re-pruning +/// 3. Checkpoint is updated after each prune operation +#[tokio::test] +async fn prune_incremental_with_checkpoint_persistence() { + let provider_factory = test_provider(); + let (mut pipeline, handle) = Pipeline::new(provider_factory.clone(), 10); + + pipeline.set_pruning_config(PruningConfig::new(Some(50))); + + let stage = TrackingStage::new("Stage1"); + let stage_clone = stage.clone(); + pipeline.add_stage(stage); + + // Verify no prune checkpoint initially + let initial_prune_checkpoint = + provider_factory.provider_mut().prune_checkpoint(stage_clone.id()).unwrap(); + assert_eq!(initial_prune_checkpoint, None); + + // First run: execution at 100 + let provider = provider_factory.provider_mut(); + provider.set_execution_checkpoint(stage_clone.id(), 100).unwrap(); + provider.commit().unwrap(); + + handle.set_tip(100); + pipeline.prune().await.unwrap(); + + // Verify first prune operation + let records = stage_clone.prune_records(); + assert_eq!(records.len(), 1); + assert_eq!(records[0].from, 0); + assert_eq!(records[0].to, 50); + + // Verify prune checkpoint was set after first prune + let prune_checkpoint = + provider_factory.provider_mut().prune_checkpoint(stage_clone.id()).unwrap(); + assert_eq!(prune_checkpoint, Some(49)); // 50 - 1 = 49 + + // Second run: execution advanced to 200 + let provider = provider_factory.provider_mut(); + provider.set_execution_checkpoint(stage_clone.id(), 200).unwrap(); + provider.commit().unwrap(); + + handle.set_tip(200); + pipeline.prune().await.unwrap(); + + // Should have two prune records now + let records = stage_clone.prune_records(); + assert_eq!(records.len(), 2); + // Second prune should start from 50 
(previous prune checkpoint + 1) + assert_eq!(records[1].from, 50); + assert_eq!(records[1].to, 150); + + // Verify final prune checkpoint + let provider = provider_factory.provider_mut(); + assert_eq!(provider.prune_checkpoint(stage_clone.id()).unwrap(), Some(149)); +} + +/// Mock stage that fails during pruning +#[derive(Debug, Clone)] +struct FailingPruneStage { + id: &'static str, +} + +impl FailingPruneStage { + fn new(id: &'static str) -> Self { + Self { id } + } +} + +impl Stage for FailingPruneStage { + fn id(&self) -> &'static str { + self.id + } + + fn execute<'a>(&'a mut self, input: &'a StageExecutionInput) -> BoxFuture<'a, StageResult> { + Box::pin(async move { Ok(StageExecutionOutput { last_block_processed: input.to() }) }) + } + + fn prune<'a>(&'a mut self, _: &'a PruneInput) -> BoxFuture<'a, PruneResult> { + Box::pin(async { Err(katana_stage::Error::Other(anyhow!("Pruning failed"))) }) + } +} + +#[tokio::test] +async fn prune_error_stops_pipeline() { + let provider_factory = test_provider(); + let (mut pipeline, handle) = Pipeline::new(provider_factory.clone(), 10); + + pipeline.set_pruning_config(PruningConfig::new(Some(50))); + + let failing_stage = FailingPruneStage::new("FailingStage"); + let stage2 = TrackingStage::new("Stage2"); + let stage2_clone = stage2.clone(); + + pipeline.add_stage(failing_stage); + pipeline.add_stage(stage2); + + let provider = provider_factory.provider_mut(); + provider.set_execution_checkpoint("FailingStage", 100).unwrap(); + provider.set_execution_checkpoint(stage2_clone.id(), 100).unwrap(); + provider.commit().unwrap(); + + handle.set_tip(100); + let result = pipeline.prune().await; + + // Should return an error + assert!(result.is_err()); + + let katana_pipeline::Error::StagePruning { id, error } = result.unwrap_err() else { + panic!("Unexpected error type"); + }; + + assert_eq!(id, "FailingStage"); + assert!(error.to_string().contains("Pruning failed")); + + // Stage2 should not have been pruned since Stage1 failed 
+ assert_eq!(stage2_clone.prune_count(), 0); +} + +#[tokio::test] +async fn prune_empty_pipeline_succeeds() { + let provider_factory = test_provider(); + let (mut pipeline, _handle) = Pipeline::new(provider_factory, 10); + + pipeline.set_pruning_config(PruningConfig::new(Some(50))); + + // No stages added + let result = pipeline.prune().await; + assert!(result.is_ok()); +} diff --git a/crates/sync/stage/Cargo.toml b/crates/sync/stage/Cargo.toml index 1e952ae28..23828aada 100644 --- a/crates/sync/stage/Cargo.toml +++ b/crates/sync/stage/Cargo.toml @@ -7,6 +7,7 @@ version.workspace = true [dependencies] katana-core.workspace = true +katana-db.workspace = true katana-executor.workspace = true katana-gateway-client.workspace = true katana-gateway-types.workspace = true @@ -30,5 +31,6 @@ tracing.workspace = true [dev-dependencies] katana-provider = { workspace = true, features = [ "test-utils" ] } +katana-trie.workspace = true rstest.workspace = true url.workspace = true diff --git a/crates/sync/stage/src/blocks/mod.rs b/crates/sync/stage/src/blocks/mod.rs index f2554b994..bb3fec074 100644 --- a/crates/sync/stage/src/blocks/mod.rs +++ b/crates/sync/stage/src/blocks/mod.rs @@ -6,18 +6,22 @@ use katana_primitives::block::{ }; use katana_primitives::fee::{FeeInfo, PriceUnit}; use katana_primitives::receipt::{ - DeclareTxReceipt, DeployAccountTxReceipt, InvokeTxReceipt, L1HandlerTxReceipt, Receipt, + DeclareTxReceipt, DeployAccountTxReceipt, DeployTxReceipt, InvokeTxReceipt, L1HandlerTxReceipt, + Receipt, }; use katana_primitives::state::{StateUpdates, StateUpdatesWithClasses}; use katana_primitives::transaction::{Tx, TxWithHash}; use katana_primitives::Felt; use katana_provider::api::block::{BlockHashProvider, BlockWriter}; -use katana_provider::{MutableProvider, ProviderError, ProviderFactory}; +use katana_provider::{DbProviderFactory, MutableProvider, ProviderError, ProviderFactory}; use num_traits::ToPrimitive; use starknet::core::types::ResourcePrice; use 
tracing::{error, info_span, Instrument}; -use crate::{Stage, StageExecutionInput, StageExecutionOutput, StageResult}; +use crate::{ + PruneInput, PruneOutput, PruneResult, Stage, StageExecutionInput, StageExecutionOutput, + StageResult, +}; mod downloader; @@ -25,14 +29,14 @@ pub use downloader::{BatchBlockDownloader, BlockDownloader}; /// A stage for syncing blocks. #[derive(Debug)] -pub struct Blocks { - provider: PF, +pub struct Blocks { + provider: DbProviderFactory, downloader: B, } -impl Blocks { +impl Blocks { /// Create a new [`Blocks`] stage. - pub fn new(provider: PF, downloader: B) -> Self { + pub fn new(provider: DbProviderFactory, downloader: B) -> Self { Self { provider, downloader } } @@ -40,11 +44,7 @@ impl Blocks { /// /// This method checks the chain invariant: block N's parent hash must be block N-1's hash. /// For the first block in the list (if not block 0), it fetches the parent hash from storage. - fn validate_chain_invariant(&self, blocks: &[StateUpdateWithBlock]) -> Result<(), Error> - where - PF: ProviderFactory, - ::Provider: BlockHashProvider, - { + fn validate_chain_invariant(&self, blocks: &[StateUpdateWithBlock]) -> Result<(), Error> { if blocks.is_empty() { return Ok(()); } @@ -92,11 +92,8 @@ impl Blocks { } } -impl Stage for Blocks +impl Stage for Blocks where - PF: ProviderFactory, - ::Provider: BlockHashProvider, - ::ProviderMut: BlockWriter, D: BlockDownloader, { fn id(&self) -> &'static str { @@ -141,6 +138,12 @@ where Ok(StageExecutionOutput { last_block_processed: input.to() }) }) } + + // TODO: implement block pruning + fn prune<'a>(&'a mut self, input: &'a PruneInput) -> BoxFuture<'a, PruneResult> { + let _ = input; + Box::pin(async move { Ok(PruneOutput::default()) }) + } } #[derive(Debug, thiserror::Error)] @@ -196,6 +199,7 @@ fn extract_block_data( let revert_error = receipt.body.revert_error; let messages_sent = receipt.body.l2_to_l1_messages; let overall_fee = receipt.body.actual_fee.to_u128().expect("valid u128"); + 
let execution_resources = receipt.body.execution_resources.unwrap_or_default(); let unit = if tx.transaction.version() >= Felt::THREE { PriceUnit::Fri @@ -205,20 +209,20 @@ fn extract_block_data( let fee = FeeInfo { unit, overall_fee, ..Default::default() }; - match tx.transaction { + match &tx.transaction { Tx::Invoke(_) => Receipt::Invoke(InvokeTxReceipt { fee, events, revert_error, messages_sent, - execution_resources: Default::default(), + execution_resources: execution_resources.into(), }), Tx::Declare(_) => Receipt::Declare(DeclareTxReceipt { fee, events, revert_error, messages_sent, - execution_resources: Default::default(), + execution_resources: execution_resources.into(), }), Tx::L1Handler(_) => Receipt::L1Handler(L1HandlerTxReceipt { fee, @@ -226,17 +230,24 @@ fn extract_block_data( messages_sent, revert_error, message_hash: Default::default(), - execution_resources: Default::default(), + execution_resources: execution_resources.into(), + }), + Tx::DeployAccount(tx) => Receipt::DeployAccount(DeployAccountTxReceipt { + fee, + events, + revert_error, + messages_sent, + contract_address: tx.contract_address(), + execution_resources: execution_resources.into(), }), - Tx::DeployAccount(_) => Receipt::DeployAccount(DeployAccountTxReceipt { + Tx::Deploy(tx) => Receipt::Deploy(DeployTxReceipt { fee, events, revert_error, messages_sent, - contract_address: Default::default(), - execution_resources: Default::default(), + contract_address: tx.contract_address.into(), + execution_resources: execution_resources.into(), }), - Tx::Deploy(_) => unreachable!("Deploy transactions are not supported"), } }) .collect::>(); diff --git a/crates/sync/stage/src/classes.rs b/crates/sync/stage/src/classes.rs index a5f488ce7..b63b61014 100644 --- a/crates/sync/stage/src/classes.rs +++ b/crates/sync/stage/src/classes.rs @@ -10,35 +10,38 @@ use katana_primitives::class::{ClassHash, ContractClass}; use katana_provider::api::contract::ContractClassWriter; use 
katana_provider::api::state_update::StateUpdateProvider; use katana_provider::api::ProviderError; -use katana_provider::{MutableProvider, ProviderFactory}; +use katana_provider::{DbProviderFactory, MutableProvider, ProviderFactory}; use katana_rpc_types::class::ConversionError; use rayon::prelude::*; use tracing::{debug, error, info_span, Instrument}; -use super::{Stage, StageExecutionInput, StageExecutionOutput, StageResult}; +use super::{ + PruneInput, PruneOutput, PruneResult, Stage, StageExecutionInput, StageExecutionOutput, + StageResult, +}; use crate::downloader::{BatchDownloader, Downloader, DownloaderResult}; /// A stage for downloading and storing contract classes. -pub struct Classes

{ - provider: P, +pub struct Classes { + provider: DbProviderFactory, downloader: BatchDownloader, /// Thread pool for parallel class hash verification verification_pool: rayon::ThreadPool, } -impl

std::fmt::Debug for Classes

{ +impl std::fmt::Debug for Classes { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("Classes") - .field("provider", &std::any::type_name::

()) + .field("provider", &self.provider) .field("downloader", &self.downloader) .field("verification_pool", &"") .finish() } } -impl

Classes

{ +impl Classes { /// Create a new Classes stage using the Feeder Gateway downloader. - pub fn new(provider: P, gateway: SequencerGateway, batch_size: usize) -> Self { + pub fn new(provider: DbProviderFactory, gateway: SequencerGateway, batch_size: usize) -> Self { let downloader = ClassDownloader { gateway }; let downloader = BatchDownloader::new(downloader, batch_size); @@ -55,11 +58,7 @@ impl

Classes

{ &self, from_block: BlockNumber, to_block: BlockNumber, - ) -> Result, Error> - where - P: ProviderFactory, -

::Provider: StateUpdateProvider, - { + ) -> Result, Error> { let mut classes_keys: Vec = Vec::new(); for block in from_block..=to_block { @@ -116,12 +115,7 @@ impl

Classes

{ } } -impl

Stage for Classes

-where - P: ProviderFactory, -

::Provider: StateUpdateProvider, -

::ProviderMut: ContractClassWriter, -{ +impl Stage for Classes { fn id(&self) -> &'static str { "Classes" } @@ -161,6 +155,15 @@ where Ok(StageExecutionOutput { last_block_processed: input.to() }) }) } + + fn prune<'a>(&'a mut self, _: &'a PruneInput) -> BoxFuture<'a, PruneResult> { + Box::pin(async move { + // Classes are immutable once declared and don't need pruning. + // A class declared at block N can still be used to deploy contracts at block N+1000. + // Therefore, we cannot safely prune classes based on block age alone. + Ok(PruneOutput::default()) + }) + } } #[derive(Debug, thiserror::Error)] diff --git a/crates/sync/stage/src/lib.rs b/crates/sync/stage/src/lib.rs index 3fbb05c58..b5a911c0f 100644 --- a/crates/sync/stage/src/lib.rs +++ b/crates/sync/stage/src/lib.rs @@ -18,6 +18,9 @@ pub use trie::StateTrie; /// The result type of a stage execution. See [Stage::execute]. pub type StageResult = Result; +/// The result type of a stage pruning. See [Stage::prune]. +pub type PruneResult = Result; + /// Input parameters for stage execution. /// /// # Invariant @@ -62,6 +65,88 @@ pub struct StageExecutionOutput { pub last_block_processed: BlockNumber, } +/// Input parameters for stage pruning. +#[derive(Debug, Clone)] +pub struct PruneInput { + /// The current tip of the chain (highest synced block). + tip: BlockNumber, + /// Distance from tip. Blocks older than `tip - distance` will be pruned. + /// `None` means no pruning. + distance: Option, + /// The last block number that was successfully pruned (if any). + last_pruned: Option, +} + +impl PruneInput { + /// Creates a new [`PruneInput`] with the given tip, distance, and last pruned block. + /// + /// # Arguments + /// + /// * `tip` - The current tip of the chain (highest synced block) + /// * `distance` - Distance from tip. Blocks older than `tip - distance` will be pruned. `None` + /// means no pruning. 
+ /// * `last_pruned` - The last block number that was successfully pruned (if any) + pub fn new(tip: BlockNumber, distance: Option, last_pruned: Option) -> Self { + Self { tip, distance, last_pruned } + } + + /// Returns the current chain tip. + #[inline] + pub fn tip(&self) -> BlockNumber { + self.tip + } + + /// Returns the distance from tip for pruning. + #[inline] + pub fn distance(&self) -> Option { + self.distance + } + + /// Returns the last block that was successfully pruned. + #[inline] + pub fn last_pruned(&self) -> Option { + self.last_pruned + } + + /// Returns the range of blocks to prune, if any. + /// + /// The range is `[start, end)` where: + /// - `start` is `last_pruned + 1` (or 0 if no previous pruning) + /// - `end` is the calculated prune target based on tip and distance + /// + /// Returns `None` if no pruning should occur (e.g., distance is `None` or already caught up). + pub fn prune_range(&self) -> Option> { + let prune_target = self.calculate_prune_target()?; + let start = self.last_pruned.map(|b| b + 1).unwrap_or(0); + + if start < prune_target { + Some(start..prune_target) + } else { + None + } + } + + /// Calculates the block number before which all state should be pruned. + /// + /// Returns `None` if no pruning should occur (e.g., distance is `None` or tip < distance). + /// Returns `Some(block_number)` indicating that all state before this block can be pruned. + fn calculate_prune_target(&self) -> Option { + let distance = self.distance?; + if self.tip >= distance { + Some(self.tip - distance) + } else { + None + } + } +} + +/// Output from a stage pruning operation. +#[derive(Debug, Default)] +pub struct PruneOutput { + /// The number of items (blocks, state entries, etc.) that were pruned. 
+ pub pruned_count: u64, +} + #[derive(Debug, thiserror::Error)] pub enum Error { #[error(transparent)] @@ -96,9 +181,10 @@ pub enum Error { /// /// # Implementation Note /// -/// The [`execute`](Stage::execute) method returns a [`BoxFuture`] instead of `impl Future` to -/// maintain dyn-compatibility. This allows the pipeline to store different stage implementations -/// in a `Vec>`, enabling dynamic composition of sync stages at runtime. +/// The [`execute`](Stage::execute) and [`prune`](Stage::prune) methods return a [`BoxFuture`] +/// instead of `impl Future` to maintain dyn-compatibility. This allows the pipeline to store +/// different stage implementations in a `Vec>`, enabling dynamic composition of +/// sync stages at runtime. /// /// While this introduces a small heap allocation for the future, it's negligible compared to /// the actual async work performed by stages (network I/O, database operations, etc.). @@ -126,11 +212,36 @@ pub trait Stage: Send + Sync { /// Implementors are expected to perform any necessary processings on all blocks in the range /// `[input.from, input.to]`. fn execute<'a>(&'a mut self, input: &'a StageExecutionInput) -> BoxFuture<'a, StageResult>; + + /// Prunes historical data for this stage according to the pruning configuration. + /// + /// This method is called by the pipeline to remove old historical state that is no longer + /// needed according to the pruning mode. The pruning operation is non-blocking and runs + /// asynchronously. + /// + /// # Arguments + /// + /// * `input` - The pruning input containing the current chain tip and pruning mode + /// + /// # Returns + /// + /// A future that resolves to a [`PruneResult`] containing [`PruneOutput`] with the + /// number of items that were pruned. + /// + /// # Implementation Notes + /// + /// - Stages that don't store historical state (e.g., Classes) can provide a no-op + /// implementation that returns `Ok(PruneOutput::default())`. 
+ /// - Stages that store state (e.g., Blocks, StateTrie) should implement pruning logic + /// appropriate to their data model. + /// - The pruning operation must be non-blocking, just like [`execute`](Stage::execute). + /// - Implementors should use [`PruneInput::prune_before`] to determine which blocks to prune. + fn prune<'a>(&'a mut self, input: &'a PruneInput) -> BoxFuture<'a, PruneResult>; } #[cfg(test)] mod tests { - use crate::StageExecutionInput; + use crate::{PruneInput, StageExecutionInput}; #[tokio::test] #[should_panic(expected = "Invalid block range")] @@ -138,4 +249,54 @@ mod tests { // When from > to, the range is invalid and should panic at construction time let _ = StageExecutionInput::new(100, 99); } + + #[test] + fn prune_range_no_pruning() { + // distance = None means no pruning (archive mode) + let input = PruneInput::new(1000, None, None); + assert_eq!(input.prune_range(), None); + } + + #[test] + fn prune_range_with_distance() { + // Keep last 100 blocks (distance=100), tip at 1000, no previous pruning + let input = PruneInput::new(1000, Some(100), None); + assert_eq!(input.prune_range(), Some(0..900)); + + // Keep last 100 blocks, tip at 1000, previously pruned up to 800 + let input = PruneInput::new(1000, Some(100), Some(800)); + assert_eq!(input.prune_range(), Some(801..900)); + + // Keep last 100 blocks, tip at 50 (not enough blocks yet) + let input = PruneInput::new(50, Some(100), None); + assert_eq!(input.prune_range(), None); + + // Keep last 100 blocks, tip at exactly 100 (prune target is 0, start is 0, so empty range) + let input = PruneInput::new(100, Some(100), None); + assert_eq!(input.prune_range(), None); // 0..0 is empty, returns None + + // Already caught up + let input = PruneInput::new(1000, Some(100), Some(899)); + assert_eq!(input.prune_range(), None); + } + + #[test] + fn prune_range_minimal_distance() { + // distance=1 means keep only the latest block (minimal mode equivalent) + // First prune: from 0 to tip-1 + let 
input = PruneInput::new(1000, Some(1), None); + assert_eq!(input.prune_range(), Some(0..999)); + + // Subsequent prune with checkpoint + let input = PruneInput::new(1005, Some(1), Some(998)); + assert_eq!(input.prune_range(), Some(999..1004)); + + // Already caught up + let input = PruneInput::new(1000, Some(1), Some(998)); + assert_eq!(input.prune_range(), None); + + // Edge case: tip at block 0 + let input = PruneInput::new(0, Some(1), None); + assert_eq!(input.prune_range(), None); // tip < distance, no pruning + } } diff --git a/crates/sync/stage/src/trie.rs b/crates/sync/stage/src/trie.rs index b2b566cbf..e9c3cdbc7 100644 --- a/crates/sync/stage/src/trie.rs +++ b/crates/sync/stage/src/trie.rs @@ -1,15 +1,22 @@ use futures::future::BoxFuture; +use katana_db::abstraction::{Database, DbTx}; +use katana_db::tables; +use katana_db::trie::TrieDbMut; use katana_primitives::block::BlockNumber; +use katana_primitives::cairo::ShortString; use katana_primitives::Felt; use katana_provider::api::block::HeaderProvider; use katana_provider::api::state_update::StateUpdateProvider; use katana_provider::api::trie::TrieWriter; -use katana_provider::{MutableProvider, ProviderFactory}; -use starknet::macros::short_string; +use katana_provider::{DbProviderFactory, MutableProvider, ProviderFactory}; +use katana_tasks::TaskSpawner; use starknet_types_core::hash::{Poseidon, StarkHash}; use tracing::{debug, debug_span, error}; -use crate::{Stage, StageExecutionInput, StageExecutionOutput, StageResult}; +use crate::{ + PruneInput, PruneOutput, PruneResult, Stage, StageExecutionInput, StageExecutionOutput, + StageResult, +}; /// A stage for computing and validating state tries. /// @@ -20,22 +27,19 @@ use crate::{Stage, StageExecutionInput, StageExecutionOutput, StageResult}; /// into the contract and class tries via the [`TrieWriter`] trait, which computes the new state /// root. #[derive(Debug)] -pub struct StateTrie

{ - storage_provider: P, +pub struct StateTrie { + storage_provider: DbProviderFactory, + task_spawner: TaskSpawner, } -impl

StateTrie

{ +impl StateTrie { /// Create a new [`StateTrie`] stage. - pub fn new(storage_provider: P) -> Self { - Self { storage_provider } + pub fn new(storage_provider: DbProviderFactory, task_spawner: TaskSpawner) -> Self { + Self { storage_provider, task_spawner } } } -impl

Stage for StateTrie

-where - P: ProviderFactory, -

::ProviderMut: StateUpdateProvider + HeaderProvider + TrieWriter, -{ +impl Stage for StateTrie { fn id(&self) -> &'static str { "StateTrie" } @@ -58,27 +62,47 @@ where .state_update(block_number.into())? .ok_or(Error::MissingStateUpdate(block_number))?; - let computed_contract_trie_root = - provider_mut.trie_insert_contract_updates(block_number, &state_update)?; - - debug!( - contract_trie_root = format!("{computed_contract_trie_root:#x}"), - "Computed contract trie root." - ); - - let computed_class_trie_root = provider_mut - .trie_insert_declared_classes(block_number, &state_update.declared_classes)?; - - debug!( - classes_tri_root = format!("{computed_class_trie_root:#x}"), - "Computed classes trie root." - ); + let provider_mut_clone = provider_mut.clone(); + let (computed_contract_trie_root, computed_class_trie_root) = self + .task_spawner + .cpu_bound() + .spawn(move || { + let computed_contract_trie_root = provider_mut_clone + .trie_insert_contract_updates(block_number, &state_update)?; + + debug!( + contract_trie_root = format!("{computed_contract_trie_root:#x}"), + "Computed contract trie root." + ); + + let class_updates: Vec<_> = state_update + .declared_classes + .clone() + .into_iter() + .chain(state_update.migrated_compiled_classes.clone().into_iter()) + .collect(); + + let computed_class_trie_root = provider_mut_clone + .trie_insert_declared_classes(block_number, class_updates)?; + + debug!( + classes_tri_root = format!("{computed_class_trie_root:#x}"), + "Computed classes trie root." 
+ ); + + Result::<(Felt, Felt), crate::Error>::Ok(( + computed_contract_trie_root, + computed_class_trie_root, + )) + }) + .await + .map_err(Error::StateComputationTaskJoinError)??; let computed_state_root = if computed_class_trie_root == Felt::ZERO { computed_contract_trie_root } else { Poseidon::hash_array(&[ - short_string!("STARKNET_STATE_V0"), + ShortString::from_ascii("STARKNET_STATE_V0").into(), computed_contract_trie_root, computed_class_trie_root, ]) @@ -111,6 +135,59 @@ where Ok(StageExecutionOutput { last_block_processed: input.to() }) }) } + + fn prune<'a>(&'a mut self, input: &'a PruneInput) -> BoxFuture<'a, PruneResult> { + Box::pin(async move { + let Some(range) = input.prune_range() else { + // Archive mode, no pruning needed, or already caught up + return Ok(PruneOutput::default()); + }; + + let tx = self.storage_provider.db().tx_mut().map_err(Error::Database)?; + + let pruned_count = self + .task_spawner + .spawn_blocking(move || { + let mut pruned_count = 0u64; + + // Remove trie snapshots for blocks in the prune range + for block_number in range { + // Remove snapshot from classes trie + let mut classes_trie_db = + TrieDbMut::::new(tx.clone()); + classes_trie_db + .remove_snapshot(block_number) + .map_err(|e| Error::Database(e.into_inner()))?; + + // Remove snapshot from contracts trie + let mut contracts_trie_db = + TrieDbMut::::new(tx.clone()); + contracts_trie_db + .remove_snapshot(block_number) + .map_err(|e| Error::Database(e.into_inner()))?; + + // Remove snapshot from storages trie + let mut storages_trie_db = + TrieDbMut::::new(tx.clone()); + storages_trie_db + .remove_snapshot(block_number) + .map_err(|e| Error::Database(e.into_inner()))?; + + pruned_count += 1; + } + + tx.commit().map_err(Error::Database)?; + + Result::::Ok(pruned_count) + }) + .await + .map_err(Error::StateComputationTaskJoinError)??; + + debug!(target: "stage", %pruned_count, "Pruned trie snapshots"); + + Ok(PruneOutput { pruned_count }) + }) + } } #[derive(Debug, 
thiserror::Error)] @@ -121,9 +198,15 @@ pub enum Error { #[error("Missing state update for block {0}")] MissingStateUpdate(BlockNumber), + #[error("State computation task join error: {0}")] + StateComputationTaskJoinError(katana_tasks::JoinError), + #[error( "State root mismatch at block {block_number}: expected (from header) {expected:#x}, \ computed {computed:#x}" )] StateRootMismatch { block_number: BlockNumber, expected: Felt, computed: Felt }, + + #[error(transparent)] + Database(#[from] katana_db::error::DatabaseError), } diff --git a/crates/sync/stage/tests/block.rs b/crates/sync/stage/tests/block.rs index 5b99e6019..47cf65ff6 100644 --- a/crates/sync/stage/tests/block.rs +++ b/crates/sync/stage/tests/block.rs @@ -8,16 +8,12 @@ use katana_gateway_types::{ StateUpdateWithBlock, }; use katana_primitives::block::{ - BlockHash, BlockNumber, FinalityStatus, Header, SealedBlock, SealedBlockWithStatus, + BlockNumber, FinalityStatus, Header, SealedBlock, SealedBlockWithStatus, }; use katana_primitives::da::L1DataAvailabilityMode; -use katana_primitives::execution::TypedTransactionExecutionInfo; -use katana_primitives::receipt::Receipt; -use katana_primitives::state::StateUpdatesWithClasses; use katana_primitives::{felt, ContractAddress, Felt}; use katana_provider::api::block::{BlockHashProvider, BlockNumberProvider, BlockWriter}; -use katana_provider::test_utils::test_provider; -use katana_provider::{ProviderError, ProviderFactory, ProviderResult}; +use katana_provider::{DbProviderFactory, MutableProvider, ProviderFactory}; use katana_stage::blocks::{BatchBlockDownloader, BlockDownloader, Blocks}; use katana_stage::{Stage, StageExecutionInput}; use rstest::rstest; @@ -119,146 +115,24 @@ impl BlockDownloader for MockBlockDownloader { } } -/// Mock provider implementation for testing. -/// -/// Tracks all insert operations and can be configured to return errors. 
-#[derive(Clone, Debug)] -struct MockInnerProvider { - /// Stored blocks with their receipts and state updates. - blocks: Arc)>>>, - /// Whether to return an error on insert. - should_fail: Arc>, - /// Error message to return when should_fail is true. - error_message: Arc>, -} - -impl MockInnerProvider { - fn new( - blocks: Arc)>>>, - should_fail: Arc>, - error_message: Arc>, - ) -> Self { - Self { blocks, should_fail, error_message } - } -} - -impl BlockWriter for MockInnerProvider { - fn insert_block_with_states_and_receipts( - &self, - block: SealedBlockWithStatus, - states: StateUpdatesWithClasses, - receipts: Vec, - _executions: Vec, - ) -> ProviderResult<()> { - if *self.should_fail.lock().unwrap() { - return Err(katana_provider::ProviderError::Other( - self.error_message.lock().unwrap().clone(), - )); - } - - self.blocks.lock().unwrap().push((block, states, receipts)); - Ok(()) - } -} - -impl BlockHashProvider for MockInnerProvider { - fn latest_hash(&self) -> ProviderResult { - self.blocks - .lock() - .unwrap() - .last() - .map(|(block, _, _)| block.block.hash) - .ok_or(ProviderError::MissingLatestBlockHash) - } - - fn block_hash_by_num(&self, num: BlockNumber) -> ProviderResult> { - Ok(self - .blocks - .lock() - .unwrap() - .iter() - .find(|(block, _, _)| block.block.header.number == num) - .map(|(block, _, _)| block.block.hash)) - } -} - -impl katana_provider::MutableProvider for MockInnerProvider { - fn commit(self) -> ProviderResult<()> { - Ok(()) - } -} - -/// Mock ProviderFactory implementation for testing. -/// -/// Tracks all insert operations and can be configured to return errors. -#[derive(Clone)] -struct MockProvider { - /// Stored blocks with their receipts and state updates. - blocks: Arc)>>>, - /// Whether to return an error on insert. - should_fail: Arc>, - /// Error message to return when should_fail is true. - error_message: Arc>, +/// Creates a new in-memory provider with an initial block stored. 
+fn create_provider_with_block(block: SealedBlockWithStatus) -> DbProviderFactory { + let provider_factory = DbProviderFactory::new_in_memory(); + let provider_mut = provider_factory.provider_mut(); + provider_mut + .insert_block_with_states_and_receipts(block, Default::default(), Vec::new(), Vec::new()) + .expect("failed to insert initial block"); + provider_mut.commit().expect("failed to commit"); + provider_factory } -impl std::fmt::Debug for MockProvider { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("MockProvider").finish_non_exhaustive() - } -} - -impl MockProvider { - fn new() -> Self { - Self { - blocks: Arc::new(Mutex::new(Vec::new())), - should_fail: Arc::new(Mutex::new(false)), - error_message: Arc::new(Mutex::new(String::new())), - } - } - - /// Add a block directly to the provider's storage. - fn with_block(self, block: SealedBlockWithStatus) -> Self { - self.blocks.lock().unwrap().push((block, Default::default(), Default::default())); - self - } - - /// Configure the mock to fail on insert operations. - fn with_insert_error(self, error: String) -> Self { - *self.should_fail.lock().unwrap() = true; - *self.error_message.lock().unwrap() = error; - self - } - - /// Get the number of blocks stored. - fn stored_block_count(&self) -> usize { - self.blocks.lock().unwrap().len() - } - - /// Get all stored block numbers. 
- fn stored_block_numbers(&self) -> Vec { - self.blocks.lock().unwrap().iter().map(|(block, _, _)| block.block.header.number).collect() - } -} - -impl katana_provider::ProviderFactory for MockProvider { - type Provider = MockInnerProvider; - type ProviderMut = MockInnerProvider; - - fn provider(&self) -> Self::Provider { - MockInnerProvider::new( - Arc::clone(&self.blocks), - Arc::clone(&self.should_fail), - Arc::clone(&self.error_message), - ) - } - - fn provider_mut(&self) -> Self::ProviderMut { - MockInnerProvider::new( - Arc::clone(&self.blocks), - Arc::clone(&self.should_fail), - Arc::clone(&self.error_message), - ) - } +/// Gets all stored block numbers from the provider by checking which blocks actually exist. +fn get_stored_block_numbers( + provider: &DbProviderFactory, + expected_range: std::ops::RangeInclusive, +) -> Vec { + let p = provider.provider(); + expected_range.filter(|&num| p.block_hash_by_num(num).ok().flatten().is_some()).collect() } /// Helper function to create a minimal test `SealedBlockWithStatus`. @@ -342,7 +216,7 @@ async fn download_and_store_blocks( #[case] to_block: BlockNumber, #[case] expected_blocks: Vec, ) { - let provider = MockProvider::new().with_block(create_stored_block(from_block - 1)); + let provider = create_provider_with_block(create_stored_block(from_block - 1)); let mut downloader = MockBlockDownloader::new(); for block_num in from_block..=to_block { @@ -357,9 +231,10 @@ async fn download_and_store_blocks( // Verify download_blocks was called with the correct block numbers in the correct sequence assert_eq!(downloader.requested_blocks(), expected_blocks); - // Verify insert_block_with_states_and_receipts was called with the correct block numbers in the - // correct sequence. Ignore the first stored block. 
- assert_eq!(provider.stored_block_numbers()[1..], expected_blocks); + // Verify blocks were stored correctly - should have initial block + downloaded blocks + let stored = get_stored_block_numbers(&provider, (from_block - 1)..=to_block); + assert_eq!(stored.len(), expected_blocks.len() + 1); // +1 for initial block + assert_eq!(&stored[1..], expected_blocks.as_slice()); } #[tokio::test] @@ -368,7 +243,8 @@ async fn download_failure_returns_error() { let error_msg = "Network error".to_string(); let downloader = MockBlockDownloader::new().with_error(block_number, error_msg.clone()); - let provider = MockProvider::new(); + // Create provider with initial block at block_number - 1 + let provider = create_provider_with_block(create_stored_block(block_number - 1)); let mut stage = Blocks::new(provider.clone(), downloader.clone()); let input = StageExecutionInput::new(block_number, block_number); @@ -387,40 +263,9 @@ async fn download_failure_returns_error() { // Verify download was attempted assert_eq!(downloader.requested_blocks(), vec![100]); - // Verify no blocks were stored - assert_eq!(provider.stored_block_count(), 0); -} - -#[tokio::test] -async fn storage_failure_returns_error() { - let block_number = 100; - let test_block = create_downloaded_block(block_number); - let error_msg = "Storage full".to_string(); - - let downloader = MockBlockDownloader::new().with_block(block_number, test_block); - let provider = MockProvider::new() - .with_insert_error(error_msg.clone()) - .with_block(create_stored_block(block_number - 1)); - - let mut stage = Blocks::new(provider.clone(), downloader.clone()); - let input = StageExecutionInput::new(block_number, block_number); - - let result = stage.execute(&input).await; - - // Verify it's a Blocks error - if let Err(err) = result { - match err { - katana_stage::Error::Provider(e) => { - assert!(e.to_string().contains(&error_msg)) - } - _ => panic!("Expected Error::Provider variant, got: {err:#?}"), - } - } - - // Verify download 
was attempted - assert_eq!(downloader.requested_blocks(), vec![100]); - // Verify no blocks were stored (except block `block_number - 1`) - assert_eq!(provider.stored_block_count(), 1); + // Verify only initial block was stored (no new blocks) + let stored = get_stored_block_numbers(&provider, (block_number - 1)..=block_number); + assert_eq!(stored.len(), 1); // Only the initial block } #[tokio::test] @@ -435,7 +280,7 @@ async fn partial_download_failure_stops_execution() { } downloader = downloader.with_error(103, "Block not found".to_string()); - let provider = MockProvider::new(); + let provider = create_provider_with_block(create_stored_block(from_block - 1)); let mut stage = Blocks::new(provider.clone(), downloader.clone()); let input = StageExecutionInput::new(from_block, to_block); @@ -455,7 +300,9 @@ async fn fetch_blocks_from_gateway() { let from_block = 308919; let to_block = from_block + 2; - let provider = test_provider(); + // Create provider with initial block before the test range + // The parent hash must match what the network returns for block from_block + let provider = create_provider_with_block(create_stored_block(from_block - 1)); let feeder_gateway = SequencerGateway::sepolia(); let downloader = BatchBlockDownloader::new_gateway(feeder_gateway, 10); @@ -474,7 +321,7 @@ async fn fetch_blocks_from_gateway() { async fn downloaded_blocks_do_not_form_valid_chain_with_stored_blocks() { use katana_stage::blocks; - let provider = MockProvider::new().with_block(create_stored_block(99)); + let provider = create_provider_with_block(create_stored_block(99)); let downloader = MockBlockDownloader::new() .with_block(100, create_downloaded_block_with_parent(100, felt!("0x1337"))); @@ -501,14 +348,15 @@ async fn downloaded_blocks_do_not_form_valid_chain_with_stored_blocks() { } // Verify no blocks were stored due to validation failure (except for block 99) - assert_eq!(provider.stored_block_count(), 1); + let stored = get_stored_block_numbers(&provider, 
99..=100); + assert_eq!(stored.len(), 1); } #[tokio::test] async fn downloaded_blocks_do_not_form_valid_chain() { use katana_stage::blocks; - let provider = MockProvider::new().with_block(create_stored_block(99)); + let provider = create_provider_with_block(create_stored_block(99)); let downloader = MockBlockDownloader::new() .with_block(100, create_downloaded_block(100)) .with_block(101, create_downloaded_block(101)) @@ -539,5 +387,6 @@ async fn downloaded_blocks_do_not_form_valid_chain() { } // Verify no blocks were stored due to validation failure (except for block 99) - assert_eq!(provider.stored_block_count(), 1); + let stored = get_stored_block_numbers(&provider, 99..=102); + assert_eq!(stored.len(), 1); } diff --git a/crates/sync/stage/tests/trie.rs b/crates/sync/stage/tests/trie.rs index 49d235041..2a28a3cfe 100644 --- a/crates/sync/stage/tests/trie.rs +++ b/crates/sync/stage/tests/trie.rs @@ -1,282 +1,464 @@ -use std::collections::{BTreeMap, HashMap}; -use std::sync::{Arc, Mutex}; - -use katana_primitives::block::{BlockHashOrNumber, BlockNumber, Header}; -use katana_primitives::class::{ClassHash, CompiledClassHash}; -use katana_primitives::da::L1DataAvailabilityMode; -use katana_primitives::state::StateUpdates; -use katana_primitives::Felt; -use katana_provider::api::block::HeaderProvider; -use katana_provider::api::state_update::StateUpdateProvider; -use katana_provider::api::trie::TrieWriter; -use katana_provider::{ProviderFactory, ProviderResult}; +// Tests for StateTrie stage +// +// Note: The detailed mock-based tests that were here previously tested the state root +// verification logic using mock providers. Since we moved to concrete types (DbProviderFactory), +// these tests would need to be rewritten as integration tests with real data. +// +// The stage itself is tested implicitly through the pipeline integration tests. 
+ +use katana_db::abstraction::{Database, DbDupSortCursor, DbTx}; +use katana_db::tables; +use katana_db::trie::{SnapshotTrieDb, TrieDbMut}; +use katana_primitives::block::BlockNumber; +use katana_primitives::{ContractAddress, Felt}; +use katana_provider::DbProviderFactory; use katana_stage::trie::StateTrie; -use katana_stage::{Stage, StageExecutionInput}; -use rstest::rstest; -use starknet::macros::short_string; -use starknet_types_core::hash::{Poseidon, StarkHash}; +use katana_stage::{PruneInput, Stage}; +use katana_tasks::TaskManager; +use katana_trie::{ClassesTrie, ContractsTrie, StoragesTrie}; -/// Mock ProviderFactory implementation for testing StateTrie stage. -/// -/// Provides configurable responses for headers, state updates, and trie operations. -#[derive(Clone)] -struct MockProvider { - inner: MockInnerProvider, -} - -impl std::fmt::Debug for MockProvider { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("MockProvider").finish_non_exhaustive() - } +/// Test that the StateTrie stage can be constructed with DbProviderFactory. +#[tokio::test] +async fn can_construct_state_trie_stage() { + let provider = DbProviderFactory::new_in_memory(); + let task_manager = TaskManager::current(); + let _stage = StateTrie::new(provider, task_manager.task_spawner()); } -impl MockProvider { - fn new() -> Self { - Self { - inner: MockInnerProvider { - headers: Arc::new(Mutex::new(HashMap::new())), - state_updates: Arc::new(Mutex::new(HashMap::new())), - trie_insert_calls: Arc::new(Mutex::new(Vec::new())), - should_fail: Arc::new(Mutex::new(false)), - }, +// ============================================================================ +// StateTrie::prune Tests +// ============================================================================ + +/// Helper to create trie snapshots for testing. +/// Creates snapshots for ClassesTrie, ContractsTrie, and StoragesTrie at given block numbers. 
+fn create_trie_snapshots(provider: &DbProviderFactory, blocks: &[BlockNumber]) { + let tx = provider.db().tx_mut().expect("failed to create tx"); + + // Create ClassesTrie snapshots + { + let mut trie = ClassesTrie::new(TrieDbMut::::new(tx.clone())); + + for &block in blocks { + // Insert unique values for each block + for i in 0u64..10 { + let key = Felt::from(block * 1000 + i); + let value = Felt::from(block * 10000 + i); + trie.insert(key, value); + } + trie.commit(block); } } - /// Configure a header for a specific block. - fn with_header(self, block_number: BlockNumber, header: Header) -> Self { - self.inner.headers.lock().unwrap().insert(block_number, header); - self - } - - /// Configure a state update for a specific block. - fn with_state_update(self, block_number: BlockNumber, state_update: StateUpdates) -> Self { - self.inner.state_updates.lock().unwrap().insert(block_number, state_update); - self - } + // Create ContractsTrie snapshots + { + let mut trie = ContractsTrie::new(TrieDbMut::::new(tx.clone())); - /// Get all block numbers that had trie inserts called. 
- fn trie_insert_blocks(&self) -> Vec { - self.inner.trie_insert_calls.lock().unwrap().clone() + for &block in blocks { + for i in 0u64..10 { + let address = ContractAddress::from(Felt::from(block * 1000 + i)); + let state_hash = Felt::from(block * 10000 + i); + trie.insert(address, state_hash); + } + trie.commit(block); + } } -} - -impl ProviderFactory for MockProvider { - type Provider = MockInnerProvider; - type ProviderMut = MockInnerProvider; - fn provider(&self) -> Self::Provider { - self.inner.clone() + // Create StoragesTrie snapshots + { + let address = ContractAddress::from(Felt::from(0x1234u64)); + let mut trie = + StoragesTrie::new(TrieDbMut::::new(tx.clone()), address); + + for &block in blocks { + for i in 0u64..10 { + let key = Felt::from(block * 1000 + i); + let value = Felt::from(block * 10000 + i); + trie.insert(key, value); + } + trie.commit(block); + } } - fn provider_mut(&self) -> Self::ProviderMut { - self.inner.clone() - } + tx.commit().expect("failed to commit tx"); } -/// Mock inner provider implementation for testing StateTrie stage. +/// Helper to check if a snapshot exists by querying the `Tb::History` table. /// -/// Provides configurable responses for headers, state updates, and trie operations. -#[derive(Clone, Debug)] -struct MockInnerProvider { - /// Map of block number to header. - headers: Arc>>, - /// Map of block number to state update. - state_updates: Arc>>, - /// Track trie insert calls for verification. - trie_insert_calls: Arc>>, - /// Whether to return an error on trie operations. - should_fail: Arc>, +/// Note: There is currently no efficient way to check snapshot existence at the `SnapshotTrieDb` +/// level. We query the underlying `Tb::History` table directly to verify if entries exist for +/// a given block number. This is consistent with the approach documented in +/// [`TrieDbMut::remove_snapshot`](katana_db::trie::TrieDbMut::remove_snapshot). 
+fn snapshot_exists(provider: &DbProviderFactory, block: BlockNumber) -> bool { + let tx = provider.db().tx().expect("failed to create tx"); + let mut cursor = tx.cursor_dup::().expect("failed to create cursor"); + cursor.walk_dup(Some(block), None).expect("failed to walk_dup").is_some() } -impl HeaderProvider for MockInnerProvider { - fn header(&self, id: BlockHashOrNumber) -> ProviderResult> { - let block_number = match id { - BlockHashOrNumber::Num(num) => num, - BlockHashOrNumber::Hash(_) => { - return Err(katana_provider::ProviderError::Other( - "Hash lookup not supported in mock".to_string(), +#[tokio::test] +async fn prune_does_not_affect_remaining_snapshot_roots() { + let provider = DbProviderFactory::new_in_memory(); + let task_manager = TaskManager::current(); + let mut stage = StateTrie::new(provider.clone(), task_manager.task_spawner()); + let storage_address = ContractAddress::from(Felt::from(0x1234u64)); + + // Create snapshots for blocks 0-9 + create_trie_snapshots(&provider, &(0..=9).collect::>()); + + // Get state roots for all tries at blocks that will remain after pruning (blocks 5-9) + // + // [(block_number, classes_root, contracts_root, storages_root), ...] 
+ let roots_before: Vec<_> = { + let tx = provider.db().tx().expect("failed to create tx"); + + (5..=9) + .map(|block| { + let classes_root = ClassesTrie::new(SnapshotTrieDb::::new( + tx.clone(), + block.into(), )) - } - }; - - Ok(self.headers.lock().unwrap().get(&block_number).cloned()) + .root(); + + let contracts_root = ContractsTrie::new( + SnapshotTrieDb::::new(tx.clone(), block.into()), + ) + .root(); + + let storages_root = StoragesTrie::new( + SnapshotTrieDb::::new(tx.clone(), block.into()), + storage_address, + ) + .root(); + + (block, classes_root, contracts_root, storages_root) + }) + .collect() + }; + + // Prune blocks 0-4 (Full mode with keep=5, tip=9) + let input = PruneInput::new(9, Some(5), None); + let result = stage.prune(&input).await; + + assert!(result.is_ok()); + assert_eq!(result.unwrap().pruned_count, 4); // blocks 0, 1, 2, 3 + + // Verify state roots for remaining snapshots (blocks 5-9) are unchanged + let tx = provider.db().tx().expect("failed to create tx"); + for (block, classes_root_before, contracts_root_before, storages_root_before) in roots_before { + let classes_root_after = ClassesTrie::new(SnapshotTrieDb::::new( + tx.clone(), + block.into(), + )) + .root(); + + let contracts_root_after = ContractsTrie::new( + SnapshotTrieDb::::new(tx.clone(), block.into()), + ) + .root(); + + let storages_root_after = StoragesTrie::new( + SnapshotTrieDb::::new(tx.clone(), block.into()), + storage_address, + ) + .root(); + + assert_eq!( + classes_root_before, classes_root_after, + "ClassesTrie root at block {block} should be unchanged after pruning" + ); + assert_eq!( + contracts_root_before, contracts_root_after, + "ContractsTrie root at block {block} should be unchanged after pruning" + ); + assert_eq!( + storages_root_before, storages_root_after, + "StoragesTrie root at block {block} should be unchanged after pruning" + ); } } -impl StateUpdateProvider for MockInnerProvider { - fn state_update(&self, block_id: BlockHashOrNumber) -> 
ProviderResult> { - let block_number = match block_id { - BlockHashOrNumber::Num(num) => num, - BlockHashOrNumber::Hash(_) => { - return Err(katana_provider::ProviderError::Other( - "Hash lookup not supported in mock".to_string(), - )) - } - }; - - Ok(self.state_updates.lock().unwrap().get(&block_number).cloned()) +#[tokio::test] +async fn prune_removes_snapshots_in_range() { + let provider = DbProviderFactory::new_in_memory(); + let task_manager = TaskManager::current(); + let mut stage = StateTrie::new(provider.clone(), task_manager.task_spawner()); + + // Create snapshots for blocks 0-9 + create_trie_snapshots(&provider, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); + + // Verify all snapshots exist + for block in 0..=9 { + assert!( + snapshot_exists::(&provider, block), + "ClassesTrie snapshot for block {block} should exist before pruning" + ); + assert!( + snapshot_exists::(&provider, block), + "ContractsTrie snapshot for block {block} should exist before pruning" + ); + assert!( + snapshot_exists::(&provider, block), + "StoragesTrie snapshot for block {block} should exist before pruning" + ); } - fn declared_classes( - &self, - _block_id: BlockHashOrNumber, - ) -> ProviderResult>> { - Ok(None) + // Prune blocks 0-4 (Full mode with keep=5, tip=9) + let input = PruneInput::new(9, Some(5), None); + let result = stage.prune(&input).await; + + assert!(result.is_ok()); + let output = result.unwrap(); + assert_eq!(output.pruned_count, 4); // blocks 0, 1, 2, 3 + + // Verify blocks 0-3 snapshots are removed + for block in 0..=3 { + assert!( + !snapshot_exists::(&provider, block), + "ClassesTrie snapshot for block {block} should be removed after pruning" + ); + assert!( + !snapshot_exists::(&provider, block), + "ContractsTrie snapshot for block {block} should be removed after pruning" + ); + assert!( + !snapshot_exists::(&provider, block), + "StoragesTrie snapshot for block {block} should be removed after pruning" + ); } - fn deployed_contracts( - &self, - _block_id: 
BlockHashOrNumber, - ) -> ProviderResult>> { - Ok(None) + // Verify blocks 4-9 snapshots still exist + for block in 4..=9 { + assert!( + snapshot_exists::(&provider, block), + "ClassesTrie snapshot for block {block} should still exist after pruning" + ); + assert!( + snapshot_exists::(&provider, block), + "ContractsTrie snapshot for block {block} should still exist after pruning" + ); + assert!( + snapshot_exists::(&provider, block), + "StoragesTrie snapshot for block {block} should still exist after pruning" + ); } } -impl TrieWriter for MockInnerProvider { - fn trie_insert_declared_classes( - &self, - block_number: BlockNumber, - _updates: &BTreeMap, - ) -> ProviderResult { - if *self.should_fail.lock().unwrap() { - return Err(katana_provider::ProviderError::Other("Trie insert failed".to_string())); - } - - self.trie_insert_calls.lock().unwrap().push(block_number); - // Return a mock class trie root - Ok(Felt::from(0x1234u64)) +#[tokio::test] +async fn prune_skips_when_archive_mode() { + let provider = DbProviderFactory::new_in_memory(); + let task_manager = TaskManager::current(); + let mut stage = StateTrie::new(provider.clone(), task_manager.task_spawner()); + + // Create snapshots for blocks 0-4 + create_trie_snapshots(&provider, &[0, 1, 2, 3, 4]); + + // Archive mode should not prune anything + let input = PruneInput::new(4, None, None); + let result = stage.prune(&input).await; + + assert!(result.is_ok()); + let output = result.unwrap(); + assert_eq!(output.pruned_count, 0); + + // All snapshots should still exist + for block in 0..=4 { + assert!( + snapshot_exists::(&provider, block), + "ClassesTrie snapshot for block {block} should still exist" + ); } +} - fn trie_insert_contract_updates( - &self, - block_number: BlockNumber, - _state_updates: &StateUpdates, - ) -> ProviderResult { - if *self.should_fail.lock().unwrap() { - return Err(katana_provider::ProviderError::Other("Trie insert failed".to_string())); - } - - 
self.trie_insert_calls.lock().unwrap().push(block_number); - // Return a mock contract trie root - Ok(Felt::from(0x5678u64)) +#[tokio::test] +async fn prune_skips_when_already_caught_up() { + let provider = DbProviderFactory::new_in_memory(); + let task_manager = TaskManager::current(); + let mut stage = StateTrie::new(provider.clone(), task_manager.task_spawner()); + + // Create snapshots for blocks 5-9 (simulating already-pruned state) + create_trie_snapshots(&provider, &[5, 6, 7, 8, 9]); + + // Full mode with keep=5, tip=9, last_pruned=4 + // Prune target is 9-5=4, start is 4+1=5, so range 5..4 is empty + let input = PruneInput::new(9, Some(5), Some(4)); + let result = stage.prune(&input).await; + + assert!(result.is_ok()); + let output = result.unwrap(); + assert_eq!(output.pruned_count, 0); + + // All remaining snapshots should still exist + for block in 5..=9 { + assert!( + snapshot_exists::(&provider, block), + "ClassesTrie snapshot for block {block} should still exist" + ); } } -impl katana_provider::MutableProvider for MockInnerProvider { - fn commit(self) -> ProviderResult<()> { - Ok(()) +#[tokio::test] +async fn prune_uses_checkpoint_for_incremental_pruning() { + let provider = DbProviderFactory::new_in_memory(); + let task_manager = TaskManager::current(); + let mut stage = StateTrie::new(provider.clone(), task_manager.task_spawner()); + + // Create snapshots for blocks 0-14 + create_trie_snapshots(&provider, &(0..=14).collect::>()); + + // First prune: tip=9, keep=5, no previous prune + // Should prune blocks 0-3 (range 0..4) + let input = PruneInput::new(9, Some(5), None); + let result = stage.prune(&input).await; + assert!(result.is_ok()); + assert_eq!(result.unwrap().pruned_count, 4); + + // Verify blocks 0-3 are pruned for all trie types + for block in 0..=3 { + assert!( + !snapshot_exists::(&provider, block), + "ClassesTrie snapshot for block {block} should be pruned" + ); + assert!( + !snapshot_exists::(&provider, block), + "ContractsTrie 
snapshot for block {block} should be pruned" + ); + assert!( + !snapshot_exists::(&provider, block), + "StoragesTrie snapshot for block {block} should be pruned" + ); } -} -/// Helper function to compute the expected state root from mock trie roots. -fn compute_mock_state_root() -> Felt { - let class_trie_root = Felt::from(0x1234u64); - let contract_trie_root = Felt::from(0x5678u64); - Poseidon::hash_array(&[short_string!("STARKNET_STATE_V0"), contract_trie_root, class_trie_root]) -} + // Second prune: tip=14, keep=5, last_pruned=3 + // Should prune blocks 4-8 (range 4..9) + let input = PruneInput::new(14, Some(5), Some(3)); + let result = stage.prune(&input).await; + assert!(result.is_ok()); + assert_eq!(result.unwrap().pruned_count, 5); + + // Verify blocks 4-8 are pruned for all trie types + for block in 4..=8 { + assert!( + !snapshot_exists::(&provider, block), + "ClassesTrie snapshot for block {block} should be pruned" + ); + assert!( + !snapshot_exists::(&provider, block), + "ContractsTrie snapshot for block {block} should be pruned" + ); + assert!( + !snapshot_exists::(&provider, block), + "StoragesTrie snapshot for block {block} should be pruned" + ); + } -/// Helper function to create a test header with a given state root. 
-fn create_test_header(block_number: BlockNumber, state_root: Felt) -> Header { - Header { - number: block_number, - state_root, - parent_hash: Felt::ZERO, - timestamp: block_number as u64, - sequencer_address: Default::default(), - l1_gas_prices: Default::default(), - l2_gas_prices: Default::default(), - l1_data_gas_prices: Default::default(), - l1_da_mode: L1DataAvailabilityMode::Calldata, - starknet_version: Default::default(), - transaction_count: 0, - events_count: 0, - state_diff_length: 0, - transactions_commitment: Felt::ZERO, - events_commitment: Felt::ZERO, - receipts_commitment: Felt::ZERO, - state_diff_commitment: Felt::ZERO, + // Verify blocks 9-14 still exist for all trie types + for block in 9..=14 { + assert!( + snapshot_exists::(&provider, block), + "ClassesTrie snapshot for block {block} should still exist" + ); + assert!( + snapshot_exists::(&provider, block), + "ContractsTrie snapshot for block {block} should still exist" + ); + assert!( + snapshot_exists::(&provider, block), + "StoragesTrie snapshot for block {block} should still exist" + ); } } -/// Helper function to create a minimal test state update. 
-fn create_test_state_update() -> StateUpdates { - StateUpdates { - nonce_updates: Default::default(), - storage_updates: Default::default(), - deployed_contracts: Default::default(), - replaced_classes: Default::default(), - declared_classes: Default::default(), - deprecated_declared_classes: Default::default(), +#[tokio::test] +async fn prune_minimal_mode_keeps_only_latest() { + let provider = DbProviderFactory::new_in_memory(); + let task_manager = TaskManager::current(); + let mut stage = StateTrie::new(provider.clone(), task_manager.task_spawner()); + + // Create snapshots for blocks 0-9 + create_trie_snapshots(&provider, &(0..=9).collect::>()); + + // Minimal mode: prune everything except tip-1 + // tip=9 -> prune 0..8 + let input = PruneInput::new(9, Some(1), None); + let result = stage.prune(&input).await; + + assert!(result.is_ok()); + assert_eq!(result.unwrap().pruned_count, 8); + + // Verify blocks 0-7 are pruned + for block in 0..=7 { + assert!( + !snapshot_exists::(&provider, block), + "ClassesTrie snapshot for block {block} should be pruned" + ); + assert!( + !snapshot_exists::(&provider, block), + "ContractsTrie snapshot for block {block} should be pruned" + ); + assert!( + !snapshot_exists::(&provider, block), + "StoragesTrie snapshot for block {block} should be pruned" + ); + } + + // Verify blocks 8-9 still exist + for block in 8..=9 { + assert!( + snapshot_exists::(&provider, block), + "ClassesTrie snapshot for block {block} should still exist" + ); + assert!( + snapshot_exists::(&provider, block), + "ContractsTrie snapshot for block {block} should still exist" + ); + assert!( + snapshot_exists::(&provider, block), + "StoragesTrie snapshot for block {block} should still exist" + ); } } -#[rstest] -#[case(100, 100, vec![100])] -#[case(100, 102, vec![100, 101, 102])] -#[case(100, 105, vec![100, 101, 102, 103, 104, 105])] #[tokio::test] -async fn verify_state_roots_success( - #[case] from_block: BlockNumber, - #[case] to_block: BlockNumber, - 
#[case] expected_blocks: Vec, -) { - let mut provider = MockProvider::new(); - - // Configure blocks with correct state roots - let correct_state_root = compute_mock_state_root(); - for num in from_block..=to_block { - let header = create_test_header(num, correct_state_root); - let state_update = create_test_state_update(); - provider = provider.with_header(num, header).with_state_update(num, state_update); - } +async fn prune_handles_empty_range_gracefully() { + let provider = DbProviderFactory::new_in_memory(); + let task_manager = TaskManager::current(); + let mut stage = StateTrie::new(provider.clone(), task_manager.task_spawner()); - let input = StageExecutionInput::new(from_block, to_block); - let result = StateTrie::new(provider.clone()).execute(&input).await; - assert!(result.is_ok(), "Stage execution should succeed"); + // Create snapshots for blocks 0-4 + create_trie_snapshots(&provider, &[0, 1, 2, 3, 4]); - // Verify that trie inserts were called for each block (twice per block: classes + contracts) - let trie_calls = provider.trie_insert_blocks(); - assert_eq!(trie_calls.len(), expected_blocks.len() * 2); + // Distance=10, tip=4 - nothing to prune (tip < distance) + let input = PruneInput::new(4, Some(10), None); + let result = stage.prune(&input).await; + + assert!(result.is_ok()); + assert_eq!(result.unwrap().pruned_count, 0); + + // All snapshots should still exist + for block in 0..=4 { + assert!(snapshot_exists::(&provider, block)); + } } #[tokio::test] -async fn state_root_mismatch_returns_error() { - let block_number = 100; - let correct_state_root = compute_mock_state_root(); - let wrong_state_root = Felt::from(0x9999u64); - - let header = create_test_header(block_number, wrong_state_root); - let state_update = create_test_state_update(); - let provider = MockProvider::new() - .with_header(block_number, header) - .with_state_update(block_number, state_update); - - let mut stage = StateTrie::new(provider); - let input = 
StageExecutionInput::new(block_number, block_number); - - let result = stage.execute(&input).await; - - // Verify it's a StateTrie error with state root mismatch - assert!(result.is_err()); - if let Err(err) = result { - match err { - katana_stage::Error::StateTrie(e) => { - let error_msg = e.to_string(); - assert!( - error_msg.contains("State root mismatch"), - "Expected state root mismatch error, got: {}", - error_msg - ); - assert!( - error_msg.contains(&format!("{:#x}", wrong_state_root)), - "Error should contain expected state root" - ); - assert!( - error_msg.contains(&format!("{:#x}", correct_state_root)), - "Error should contain computed state root" - ); - } - _ => panic!("Expected Error::StateTrie variant, got: {err:#?}"), - } +async fn prune_handles_nonexistent_snapshots_gracefully() { + let provider = DbProviderFactory::new_in_memory(); + let task_manager = TaskManager::current(); + let mut stage = StateTrie::new(provider.clone(), task_manager.task_spawner()); + + // Create snapshots only for blocks 5-9 (blocks 0-4 don't exist) + create_trie_snapshots(&provider, &[5, 6, 7, 8, 9]); + + // Try to prune blocks 0-4 which don't have snapshots + let input = PruneInput::new(9, Some(5), None); + let result = stage.prune(&input).await; + + // Should succeed even though blocks 0-3 don't have snapshots + assert!(result.is_ok()); + // Still counts as "pruned" even if no data existed + assert_eq!(result.unwrap().pruned_count, 4); + + // Existing snapshots should still be intact + for block in 5..=9 { + assert!(snapshot_exists::(&provider, block)); } } diff --git a/crates/tee/Cargo.toml b/crates/tee/Cargo.toml new file mode 100644 index 000000000..364d7fc63 --- /dev/null +++ b/crates/tee/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "katana-tee" +description = "TEE (Trusted Execution Environment) support for Katana" +edition.workspace = true +version.workspace = true + +[dependencies] +thiserror.workspace = true +clap = { workspace = true, default-features = false, 
features = [ "derive" ] } +serde.workspace = true +tracing = { workspace = true, optional = true } +bincode = { workspace = true, optional = true } +sev-snp = { workspace = true, optional = true } + +[dev-dependencies] +rstest.workspace = true +tempfile.workspace = true + +[features] +default = [] +snp = ["dep:sev-snp", "dep:bincode", "dep:tracing"] diff --git a/crates/tee/README.md b/crates/tee/README.md new file mode 100644 index 000000000..da57a2b97 --- /dev/null +++ b/crates/tee/README.md @@ -0,0 +1,83 @@ +# katana-tee + +TEE (Trusted Execution Environment) attestation support for Katana. + +## Overview + +This crate provides abstractions for generating hardware-backed attestation quotes that cryptographically bind Katana's blockchain state to a TEE measurement. When running Katana inside a TEE, clients can request attestation quotes that prove the sequencer is executing within a secure, isolated environment. + +## What is TEE? + +A Trusted Execution Environment (TEE) is a secure area within a processor that guarantees code and data loaded inside are protected with respect to confidentiality and integrity. TEEs provide: + +- **Isolation**: Code executing inside a TEE is isolated from the rest of the system, including the operating system and hypervisor +- **Attestation**: Hardware-backed proof that specific code is running inside a genuine TEE +- **Integrity**: Guarantee that the code and data have not been tampered with + +## Supported Platforms + +### AMD SEV-SNP + +AMD Secure Encrypted Virtualization - Secure Nested Paging (SEV-SNP) is a hardware security feature available on AMD EPYC processors (3rd Gen or later). See the [References](#references) section for detailed documentation. + +**Requirements:** +- AMD EPYC processor with SEV-SNP support +- Linux kernel with SEV-SNP guest support +- Access to `/dev/sev-guest` device + +To enable SEV-SNP support, compile with the `snp` feature: + +```toml +[dependencies] +katana-tee = { features = [ "snp" ], .. 
} +``` + +## Verifying Attestation Quotes + +SEV-SNP attestation quotes can be verified using: + +1. **AMD's Key Distribution Service (KDS)** - Fetches the certificate chain for verification +2. **sev-snp-measure** - Tool for measuring and verifying SEV-SNP guests +3. **Custom verification** - Parse the attestation report and verify the signature chain + +The quote contains: +- Measurement of the guest VM +- User-provided report data (the Poseidon hash commitment) +- Hardware-signed attestation from AMD's security processor + +## Feature Flags + +| Feature | Description | +|---------|-------------| +| `snp` | Enables AMD SEV-SNP support via the `sev-snp` crate | + +## Security Considerations + +- TEE attestation only proves code is running in a TEE; it does not verify the correctness of the code itself +- The attestation binds to a specific state; verifiers should check the block number/hash is recent +- Quote generation requires hardware access; quote generation will return errors on unsupported platforms +- The 64-byte report data is a Poseidon hash of `H_poseidon(state_root, block_hash)`, padded with zeros + +## References + +### AMD SEV-SNP + +- [AMD Secure Encrypted Virtualization (SEV)](https://www.amd.com/en/developer/sev.html) - Official AMD SEV developer resources +- [AMD SEV-SNP White Paper](https://docs.amd.com/v/u/en-US/SEV-SNP-strengthening-vm-isolation-with-integrity-protection-and-more.pdf) - Technical overview of SEV-SNP architecture and security guarantees +- [AMD SEV Developer Guide](https://www.amd.com/content/dam/amd/en/documents/epyc-technical-docs/programmer-references/55766_SEV-KM_API_Specification.pdf) - API specification for SEV key management +- [Linux Kernel SEV Guest API](https://docs.kernel.org/virt/coco/sev-guest.html) - Documentation for `/dev/sev-guest` interface + +### Automata Network SDK + +- [Automata SEV-SNP SDK](https://github.com/automata-network/amd-sev-snp-attestation-sdk) - The Rust SDK used by this crate for SEV-SNP 
attestation +- [Automata Network Documentation](https://docs.ata.network/) - Automata's TEE and attestation documentation + +### Running SEV-SNP VMs + +- [Azure Confidential VMs](https://learn.microsoft.com/en-us/azure/confidential-computing/confidential-vm-overview) - AMD SEV-SNP powered confidential VMs on Azure +- [Google Cloud Confidential VMs](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview) - Confidential computing on Google Cloud + +### General TEE Resources + +- [Confidential Computing Consortium](https://confidentialcomputing.io/) - Industry consortium for TEE standards and resources +- [TEE Fundamentals (ARM)](https://developer.arm.com/documentation/102418/latest/) - General TEE concepts (ARM-focused but broadly applicable) diff --git a/crates/tee/src/error.rs b/crates/tee/src/error.rs new file mode 100644 index 000000000..ef54b54e4 --- /dev/null +++ b/crates/tee/src/error.rs @@ -0,0 +1,21 @@ +use thiserror::Error; + +/// Errors that can occur during TEE operations. +#[derive(Debug, Error)] +pub enum TeeError { + /// I/O error when interacting with TEE interfaces. + #[error("TEE I/O error: {0}")] + Io(#[from] std::io::Error), + + /// Quote generation failed. + #[error("Quote generation failed: {0}")] + GenerationFailed(String), + + /// TEE functionality is not supported on this platform. + #[error("TEE not supported: {0}")] + NotSupported(String), + + /// Invalid report data size (must be exactly 64 bytes). + #[error("Invalid report data size: expected 64 bytes, got {0}")] + InvalidReportDataSize(usize), +} diff --git a/crates/tee/src/lib.rs b/crates/tee/src/lib.rs new file mode 100644 index 000000000..d8967728b --- /dev/null +++ b/crates/tee/src/lib.rs @@ -0,0 +1,68 @@ +//! TEE (Trusted Execution Environment) support for Katana. +//! +//! This crate provides abstractions for generating hardware-backed attestation +//! quotes that can cryptographically bind application state to a TEE measurement. +//! +//! 
# Supported TEE Platforms +//! +//! - **SEV-SNP (AMD Secure Encrypted Virtualization)**: Via Automata Network SDK (requires `snp` +//! feature) +//! +//! # Example +//! +//! ```rust,ignore +//! use katana_tee::{SevSnpProvider, TeeProvider}; +//! +//! let provider = SevSnpProvider::new()?; +//! let user_data = [0u8; 64]; // Your 64-byte commitment +//! let quote = provider.generate_quote(&user_data)?; +//! ``` + +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + +mod error; +mod provider; + +#[cfg(test)] +mod mock; + +#[cfg(feature = "snp")] +mod snp; + +pub use error::TeeError; +#[cfg(test)] +pub use mock::MockProvider; +pub use provider::TeeProvider; +#[cfg(feature = "snp")] +pub use snp::SevSnpProvider; + +/// TEE provider type enumeration. +/// +/// Currently only SEV-SNP is supported for production use. +#[derive( + Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize, clap::ValueEnum, +)] +pub enum TeeProviderType { + /// AMD SEV-SNP provider. + #[value(name = "sev-snp", alias = "snp")] + SevSnp, +} + +impl std::fmt::Display for TeeProviderType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::SevSnp => write!(f, "sev-snp"), + } + } +} + +impl std::str::FromStr for TeeProviderType { + type Err = String; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "sev-snp" | "snp" => Ok(Self::SevSnp), + other => Err(format!("Unknown TEE provider: '{other}'. Available providers: sev-snp")), + } + } +} diff --git a/crates/tee/src/mock.rs b/crates/tee/src/mock.rs new file mode 100644 index 000000000..b27e4a6b9 --- /dev/null +++ b/crates/tee/src/mock.rs @@ -0,0 +1,78 @@ +use crate::error::TeeError; +use crate::provider::TeeProvider; + +/// Mock TEE provider for testing on non-TEE hardware. +/// +/// This provider generates deterministic mock quotes that include +/// the user data for verification in tests. 
+#[derive(Debug, Default, Clone)] +pub struct MockProvider { + /// Optional custom prefix for mock quotes. + prefix: Vec, +} + +impl MockProvider { + /// Create a new mock provider. + pub fn new() -> Self { + Self::default() + } + + /// Create a mock provider with a custom prefix. + pub fn with_prefix(prefix: Vec) -> Self { + Self { prefix } + } +} + +impl TeeProvider for MockProvider { + fn generate_quote(&self, user_data: &[u8; 64]) -> Result, TeeError> { + // Mock quote format: + // [4 bytes: magic] [prefix] [64 bytes: user_data] [4 bytes: checksum] + let magic = b"MOCK"; + let mut quote = Vec::with_capacity(4 + self.prefix.len() + 64 + 4); + + quote.extend_from_slice(magic); + quote.extend_from_slice(&self.prefix); + quote.extend_from_slice(user_data); + + // Simple checksum for verification + let checksum: u32 = quote.iter().map(|&b| b as u32).sum(); + quote.extend_from_slice(&checksum.to_le_bytes()); + + Ok(quote) + } + + fn provider_type(&self) -> &'static str { + "Mock" + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_mock_quote_generation() { + let provider = MockProvider::new(); + let user_data = [0u8; 64]; + + let quote = provider.generate_quote(&user_data).unwrap(); + + // Verify magic header + assert_eq!("e[0..4], b"MOCK"); + // Verify user data is included + assert_eq!("e[4..68], &user_data); + } + + #[test] + fn test_mock_with_prefix() { + let prefix = b"TEST".to_vec(); + let provider = MockProvider::with_prefix(prefix.clone()); + let user_data = [1u8; 64]; + + let quote = provider.generate_quote(&user_data).unwrap(); + + assert_eq!("e[0..4], b"MOCK"); + assert_eq!("e[4..8], &prefix[..]); + assert_eq!("e[8..72], &user_data); + } +} diff --git a/crates/tee/src/provider.rs b/crates/tee/src/provider.rs new file mode 100644 index 000000000..1c1f74dc7 --- /dev/null +++ b/crates/tee/src/provider.rs @@ -0,0 +1,24 @@ +use std::fmt::Debug; + +use crate::error::TeeError; + +/// Trait for TEE providers that can generate attestation 
quotes. +/// +/// Implementations of this trait interact with TEE hardware to generate +/// cryptographic attestation quotes that bind user-provided data to the +/// hardware state. +pub trait TeeProvider: Send + Sync + Debug { + /// Generate an attestation quote with the given user data. + /// + /// # Arguments + /// * `user_data` - A 64-byte slice of user-provided data to include in the quote. This is + /// typically a hash commitment to application state. + /// + /// # Returns + /// * `Ok(Vec)` - The raw attestation quote bytes. + /// * `Err(TeeError)` - If quote generation fails. + fn generate_quote(&self, user_data: &[u8; 64]) -> Result, TeeError>; + + /// Returns the name/type of this TEE provider for logging purposes. + fn provider_type(&self) -> &'static str; +} diff --git a/crates/tee/src/snp.rs b/crates/tee/src/snp.rs new file mode 100644 index 000000000..a1096ea6e --- /dev/null +++ b/crates/tee/src/snp.rs @@ -0,0 +1,92 @@ +use tracing::{debug, info}; + +use crate::error::TeeError; +use crate::provider::TeeProvider; + +/// AMD SEV-SNP provider using the Automata Network SEV-SNP SDK. +/// +/// This provider uses the /dev/sev-guest device to generate SEV-SNP +/// attestation reports via the Automata Network SDK. +pub struct SevSnpProvider { + /// SEV-SNP SDK instance + sev_snp: sev_snp::SevSnp, +} + +impl SevSnpProvider { + /// Create a new SEV-SNP provider. + /// + /// # Errors + /// + /// Returns `TeeError::NotSupported` if the SEV-SNP SDK initialization fails. 
+ pub fn new() -> Result { + let sev_snp = sev_snp::SevSnp::new() + .map_err(|e| TeeError::NotSupported(format!("Failed to initialize SEV-SNP: {e}")))?; + + info!(target: "tee::snp", "SEV-SNP provider initialized"); + + Ok(Self { sev_snp }) + } +} + +impl std::fmt::Debug for SevSnpProvider { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("SevSnpProvider").finish_non_exhaustive() + } +} + +impl TeeProvider for SevSnpProvider { + fn generate_quote(&self, user_data: &[u8; 64]) -> Result, TeeError> { + debug!(target: "tee::snp", "Generating SEV-SNP attestation report"); + + // Configure report options with user data and VMPL + let options = + sev_snp::device::ReportOptions { report_data: Some(*user_data), vmpl: Some(1) }; + + // Generate the attestation report using the SEV-SNP SDK + let (report, _) = self + .sev_snp + .get_attestation_report_with_options(&options) + .map_err(|e| TeeError::GenerationFailed(format!("SEV-SNP attestation failed: {e}")))?; + + // Serialize the report structure to bytes + let report_bytes = bincode::serialize(&report).map_err(|e| { + TeeError::GenerationFailed(format!("Failed to serialize SEV-SNP report: {e}")) + })?; + + info!( + target: "tee::snp", + report_size = report_bytes.len(), + "Successfully generated SEV-SNP attestation report" + ); + + Ok(report_bytes) + } + + fn provider_type(&self) -> &'static str { + "SEV-SNP" + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn generate_quote_fails_on_unsupported_machine() { + // SevSnp::new() succeeds because it only initializes the KDS client, + // not the actual SEV-SNP device. + let provider = SevSnpProvider::new().expect("SevSnpProvider::new should succeed"); + + let user_data = [0u8; 64]; + + // generate_quote should fail on non-SEV-SNP machines because it attempts + // to access /dev/sev-guest via Device::new() internally. 
+ let result = provider.generate_quote(&user_data); + + if let Err(TeeError::GenerationFailed(msg)) = result { + assert!(msg.contains("SEV-SNP")); + } else { + panic!("Expected TeeError::GenerationFailed, got {:?}", result); + } + } +} diff --git a/crates/tracing/Cargo.toml b/crates/tracing/Cargo.toml index d28bd662f..49e6f86a4 100644 --- a/crates/tracing/Cargo.toml +++ b/crates/tracing/Cargo.toml @@ -9,7 +9,9 @@ version.workspace = true clap.workspace = true serde.workspace = true thiserror.workspace = true +dirs = "6.0.0" tracing.workspace = true +tracing-appender.workspace = true tracing-log.workspace = true tracing-subscriber = { workspace = true, features = [ "chrono", "env-filter", "json", "time", "tracing-log" ] } diff --git a/crates/tracing/src/fmt.rs b/crates/tracing/src/fmt.rs index 5341ebaff..a7a0d68c7 100644 --- a/crates/tracing/src/fmt.rs +++ b/crates/tracing/src/fmt.rs @@ -4,6 +4,42 @@ use serde::{Deserialize, Serialize}; use tracing_subscriber::fmt::format::Writer; use tracing_subscriber::fmt::time::{self}; +/// Controls when ANSI escape codes are emitted in log output. +#[derive(Debug, Copy, Clone, PartialEq, Deserialize, Serialize, Default, Eq)] +pub enum LogColor { + /// Colors on. + #[default] + Always, + /// Auto-detect. + Auto, + /// Colors off. + Never, +} + +impl Display for LogColor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Always => write!(f, "always"), + Self::Auto => write!(f, "auto"), + Self::Never => write!(f, "never"), + } + } +} + +impl clap::ValueEnum for LogColor { + fn value_variants<'a>() -> &'a [Self] { + &[Self::Always, Self::Auto, Self::Never] + } + + fn to_possible_value(&self) -> Option { + match self { + Self::Always => Some(clap::builder::PossibleValue::new("always")), + Self::Auto => Some(clap::builder::PossibleValue::new("auto")), + Self::Never => Some(clap::builder::PossibleValue::new("never")), + } + } +} + /// Format for logging output. 
#[derive(Debug, Copy, Clone, PartialEq, Deserialize, Serialize, Default, Eq)] pub enum LogFormat { diff --git a/crates/tracing/src/lib.rs b/crates/tracing/src/lib.rs index a8505a880..bbe241fa5 100644 --- a/crates/tracing/src/lib.rs +++ b/crates/tracing/src/lib.rs @@ -1,7 +1,12 @@ +use std::path::PathBuf; +use std::sync::OnceLock; + use opentelemetry_gcloud_trace::errors::GcloudTraceError; use tracing::subscriber::SetGlobalDefaultError; +use tracing_appender::rolling::{RollingFileAppender, Rotation}; use tracing_log::log::SetLoggerError; use tracing_subscriber::layer::SubscriberExt; +use tracing_subscriber::registry::LookupSpan; use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::{filter, EnvFilter, Layer}; @@ -9,10 +14,40 @@ mod fmt; pub mod gcloud; pub mod otlp; -pub use fmt::LogFormat; +pub use fmt::{LogColor, LogFormat}; use crate::fmt::LocalTime; +#[derive(Debug, Clone)] +pub struct LoggingConfig { + /// Output format for the stdout sink. + pub stdout_format: LogFormat, + /// ANSI color policy for the stdout sink (independent of file output). + pub stdout_color: LogColor, + + /// Enables the file sink when `true`; when `false`, only stdout logging is used. + pub file_enabled: bool, + /// Output format for the file sink. + pub file_format: LogFormat, + /// Directory where rotated log files are written when the file sink is enabled. + pub file_directory: PathBuf, + /// Maximum number of rotated log files to keep (0 means unlimited). 
+ pub file_max_files: usize, +} + +impl Default for LoggingConfig { + fn default() -> Self { + Self { + stdout_format: LogFormat::Full, + stdout_color: LogColor::Always, + file_enabled: false, + file_format: LogFormat::Full, + file_directory: default_log_file_directory(), + file_max_files: 0, + } + } +} + #[derive(Debug, Clone)] pub enum TracerConfig { Otlp(otlp::OtlpConfig), @@ -30,6 +65,12 @@ pub enum Error { #[error("failed to set global dispatcher: {0}")] SetGlobalDefault(#[from] SetGlobalDefaultError), + #[error("log file io error: {0}")] + LogFileIo(#[from] std::io::Error), + + #[error("failed to initialize rolling file appender: {0}")] + RollingFileInit(#[from] tracing_appender::rolling::InitError), + #[error("google cloud trace error: {0}")] GcloudTrace(#[from] GcloudTraceError), @@ -43,7 +84,16 @@ pub enum Error { OtelSdk(#[from] opentelemetry_sdk::error::OTelSdkError), } -pub async fn init(format: LogFormat, telemetry_config: Option) -> Result<(), Error> { +/// Keep `tracing_appender::non_blocking` workers alive for the lifetime of the process. +/// +/// `tracing_appender::non_blocking()` returns a `WorkerGuard` that must be held; if it is dropped +/// the background worker stops and buffered log lines may never be written. 
+static LOG_FILE_GUARD: OnceLock = OnceLock::new(); + +pub async fn init( + logging: LoggingConfig, + telemetry_config: Option, +) -> Result<(), Error> { const DEFAULT_LOG_FILTER: &str = "katana_db::mdbx=trace,cairo_native::compiler=off,\ pipeline=debug,stage=debug,tasks=debug,executor=trace,\ forking::backend=trace,blockifier=off,jsonrpsee_server=off,\ @@ -67,28 +117,81 @@ pub async fn init(format: LogFormat, telemetry_config: Option) -> } }; - let fmt = match format { - LogFormat::Full => { - tracing_subscriber::fmt::layer().with_timer(LocalTime::new()).boxed() - } - LogFormat::Json => { - tracing_subscriber::fmt::layer().json().with_timer(LocalTime::new()).boxed() - } - }; + let stdout_layer = stdout_layer(&logging); + let file_layer = file_layer(&logging)?; - tracing_subscriber::registry().with(filter).with(telemetry).with(fmt).init(); + tracing_subscriber::registry() + .with(filter) + .with(telemetry) + .with(stdout_layer) + .with(file_layer) + .init(); } else { - let fmt = match format { - LogFormat::Full => { - tracing_subscriber::fmt::layer().with_timer(LocalTime::new()).boxed() - } - LogFormat::Json => { - tracing_subscriber::fmt::layer().json().with_timer(LocalTime::new()).boxed() - } - }; - - tracing_subscriber::registry().with(filter).with(fmt).init(); + let stdout_layer = stdout_layer(&logging); + let file_layer = file_layer(&logging)?; + tracing_subscriber::registry().with(filter).with(stdout_layer).with(file_layer).init(); } Ok(()) } + +/// Returns the default directory where the log files will be stored at. 
+pub fn default_log_file_directory() -> PathBuf { + dirs::cache_dir().unwrap_or_else(std::env::temp_dir).join("katana").join("logs") +} + +fn stdout_layer(cfg: &LoggingConfig) -> Box + Send + Sync> +where + S: tracing::Subscriber + for<'a> LookupSpan<'a>, +{ + let ansi = match cfg.stdout_color { + LogColor::Always => true, + LogColor::Never => false, + LogColor::Auto => std::io::IsTerminal::is_terminal(&std::io::stdout()), + }; + + match cfg.stdout_format { + LogFormat::Full => { + tracing_subscriber::fmt::layer().with_timer(LocalTime::new()).with_ansi(ansi).boxed() + } + + LogFormat::Json => tracing_subscriber::fmt::layer() + .json() + .with_timer(LocalTime::new()) + .with_ansi(false) + .boxed(), + } +} + +fn file_layer(cfg: &LoggingConfig) -> Result + Send + Sync>>, Error> +where + S: tracing::Subscriber + for<'a> LookupSpan<'a>, +{ + if !cfg.file_enabled { + return Ok(None); + }; + + let appender = RollingFileAppender::builder() + .rotation(Rotation::DAILY) + .filename_suffix("katana.log") + .max_log_files(cfg.file_max_files) + .build(&cfg.file_directory)?; + + let (non_blocking, guard) = tracing_appender::non_blocking(appender); + let _ = LOG_FILE_GUARD.set(guard); + + Ok(Some(match cfg.file_format { + LogFormat::Full => tracing_subscriber::fmt::layer() + .with_timer(LocalTime::new()) + .with_writer(non_blocking) + .with_ansi(false) + .boxed(), + + LogFormat::Json => tracing_subscriber::fmt::layer() + .json() + .with_timer(LocalTime::new()) + .with_writer(non_blocking) + .with_ansi(false) + .boxed(), + })) +} diff --git a/crates/trie/src/classes.rs b/crates/trie/src/classes.rs index f396f4ec8..a88e9b1d1 100644 --- a/crates/trie/src/classes.rs +++ b/crates/trie/src/classes.rs @@ -1,12 +1,10 @@ -use bonsai_trie::{ - trie::trees::{FullMerkleTrees, PartialMerkleTrees}, - BonsaiDatabase, BonsaiPersistentDatabase, MultiProof, -}; +use bonsai_trie::trie::trees::{FullMerkleTrees, PartialMerkleTrees}; +use bonsai_trie::{BonsaiDatabase, BonsaiPersistentDatabase, 
MultiProof}; use katana_primitives::block::BlockNumber; +use katana_primitives::cairo::ShortString; use katana_primitives::class::{ClassHash, CompiledClassHash}; use katana_primitives::hash::{Poseidon, StarkHash}; use katana_primitives::Felt; -use starknet::macros::short_string; use crate::id::CommitId; @@ -51,6 +49,10 @@ impl ClassesTrie { pub fn multiproof(&mut self, class_hashes: Vec) -> MultiProof { self.trie.multiproof(CLASSES_IDENTIFIER, class_hashes) } + + pub fn revert_to(&mut self, block: BlockNumber, latest_block: BlockNumber) { + self.trie.revert_to(block, latest_block); + } } impl ClassesTrie @@ -103,7 +105,7 @@ where original_root: Felt, ) { let value = compute_classes_trie_value(compiled_hash); - self.trie.insert(CLASSES_IDENTIFIER, hash, value, proof, original_root) + self.trie.insert_with_proof(CLASSES_IDENTIFIER, hash, value, proof, original_root) } pub fn commit(&mut self, block: BlockNumber) { @@ -119,6 +121,6 @@ impl std::fmt::Debug for ClassesTrie pub fn compute_classes_trie_value(compiled_class_hash: CompiledClassHash) -> Felt { // https://docs.starknet.io/architecture-and-concepts/network-architecture/starknet-state/#classes_trie - const CONTRACT_CLASS_LEAF_V0: Felt = short_string!("CONTRACT_CLASS_LEAF_V0"); - Poseidon::hash(&CONTRACT_CLASS_LEAF_V0, &compiled_class_hash) + const CONTRACT_CLASS_LEAF_V0: ShortString = ShortString::from_ascii("CONTRACT_CLASS_LEAF_V0"); + Poseidon::hash(&CONTRACT_CLASS_LEAF_V0.into(), &compiled_class_hash) } diff --git a/crates/trie/src/contracts.rs b/crates/trie/src/contracts.rs index be2fb58e2..43e5d055a 100644 --- a/crates/trie/src/contracts.rs +++ b/crates/trie/src/contracts.rs @@ -1,7 +1,5 @@ -use bonsai_trie::{ - trie::trees::{FullMerkleTrees, PartialMerkleTrees}, - BonsaiDatabase, BonsaiPersistentDatabase, MultiProof, -}; +use bonsai_trie::trie::trees::{FullMerkleTrees, PartialMerkleTrees}; +use bonsai_trie::{BonsaiDatabase, BonsaiPersistentDatabase, MultiProof}; use katana_primitives::block::BlockNumber; 
use katana_primitives::hash::Pedersen; use katana_primitives::{ContractAddress, Felt}; @@ -33,6 +31,10 @@ impl ContractsTrie { let keys = addresses.into_iter().map(Felt::from).collect::>(); self.trie.multiproof(CONTRACTS_IDENTIFIER, keys) } + + pub fn revert_to(&mut self, block: BlockNumber, latest_block: BlockNumber) { + self.trie.revert_to(block, latest_block); + } } impl ContractsTrie @@ -84,7 +86,13 @@ where proof: MultiProof, original_root: Felt, ) { - self.trie.insert(CONTRACTS_IDENTIFIER, *address, state_hash, proof, original_root) + self.trie.insert_with_proof( + CONTRACTS_IDENTIFIER, + *address, + state_hash, + proof, + original_root, + ) } pub fn commit(&mut self, block: BlockNumber) { diff --git a/crates/trie/src/lib.rs b/crates/trie/src/lib.rs index 5ba79f4a1..5d1f3b251 100644 --- a/crates/trie/src/lib.rs +++ b/crates/trie/src/lib.rs @@ -1,11 +1,10 @@ use bitvec::view::AsBits; pub use bonsai::{BitVec, MultiProof, Path, ProofNode}; pub use bonsai_trie::databases::HashMapDb; +pub use bonsai_trie::trie::trees::{FullMerkleTrees, PartialMerkleTrees}; use bonsai_trie::BonsaiStorage; -pub use bonsai_trie::{ - trie::trees::{FullMerkleTrees, PartialMerkleTrees}, - BonsaiDatabase, BonsaiPersistentDatabase, BonsaiStorageConfig, -}; +pub use bonsai_trie::{BonsaiDatabase, BonsaiPersistentDatabase, BonsaiStorageConfig}; +use katana_primitives::block::BlockNumber; use katana_primitives::class::ClassHash; use katana_primitives::Felt; use starknet_types_core::hash::{Pedersen, StarkHash}; @@ -45,14 +44,23 @@ where { fn bonsai_config() -> BonsaiStorageConfig { BonsaiStorageConfig { - // we have our own implementation of storing trie changes - max_saved_trie_logs: Some(0), + // This field controls what's the oldest block we can revert to. + // + // The value 5 is chosen arbitrarily as a placeholder. This value should be + // configurable. + max_saved_trie_logs: Some(5), + // in the bonsai-trie crate, this field seems to be only used in rocksdb impl. 
// I don't understand why they would add a config that's implementation specific ???? // // this config should be used by our implementation of the // BonsaiPersistentDatabase::snapshot() + // + // note: currently, this value is not being used for anything. our trie will store + // all created snapshots. max_saved_snapshots: Some(64usize), + + // creates a snapshot for every block snapshot_interval: 1, } } @@ -91,6 +99,10 @@ where let keys = keys.into_iter().map(|key| key.to_bytes_be().as_bits()[5..].to_owned()); self.storage.get_multi_proof(id, keys).expect("failed to get multiproof") } + + pub fn revert_to(&mut self, block: BlockNumber, latest_block: BlockNumber) { + self.storage.revert_to(block.into(), latest_block.into()).expect("failed to revert trie"); + } } impl BonsaiTrie> @@ -141,7 +153,7 @@ where DB: BonsaiDatabase + BonsaiPersistentDatabase, Hash: StarkHash + Send + Sync, { - pub fn insert( + pub fn insert_with_proof( &mut self, id: &[u8], key: Felt, @@ -259,4 +271,54 @@ mod tests { assert_eq!(result, expected); } + + #[test] + fn test_revert_to() { + use bonsai_trie::databases; + + // the identifier for the trie + const IDENTIFIER: &[u8] = b"test_trie"; + + // Create a BonsaiStorage with in-memory database and trie logs enabled + let bonsai_db = databases::HashMapDb::::default(); + let mut trie = BonsaiTrie::<_, hash::Pedersen>::new(bonsai_db); + + // Insert values at block 0 + trie.insert(IDENTIFIER, Felt::from(1), Felt::from(100)); + trie.insert(IDENTIFIER, Felt::from(2), Felt::from(200)); + trie.commit(0.into()); + let root_at_block_0 = trie.root(IDENTIFIER); + + // Insert more values at block 1 + trie.insert(IDENTIFIER, Felt::from(3), Felt::from(300)); + trie.insert(IDENTIFIER, Felt::from(4), Felt::from(400)); + trie.commit(1.into()); + let root_at_block_1 = trie.root(IDENTIFIER); + + // Roots should be different + assert_ne!(root_at_block_0, root_at_block_1); + + // Insert even more values at block 2 + trie.insert(IDENTIFIER, Felt::from(5),
Felt::from(500)); + trie.commit(2.into()); + let root_at_block_2 = trie.root(IDENTIFIER); + + // Roots should be different + assert_ne!(root_at_block_1, root_at_block_2); + assert_ne!(root_at_block_0, root_at_block_2); + + // Revert to block 1 + trie.revert_to(1, 2); + let root_after_revert = trie.root(IDENTIFIER); + + // After revert, root should match block 1 + assert_eq!(root_after_revert, root_at_block_1); + + // Revert to block 0 + trie.revert_to(0, 1); + let root_after_second_revert = trie.root(IDENTIFIER); + + // After revert, root should match block 0 + assert_eq!(root_after_second_revert, root_at_block_0); + } } diff --git a/crates/trie/src/storages.rs b/crates/trie/src/storages.rs index 5f7a72d6d..466952b9d 100644 --- a/crates/trie/src/storages.rs +++ b/crates/trie/src/storages.rs @@ -1,7 +1,5 @@ -use bonsai_trie::{ - trie::trees::{FullMerkleTrees, PartialMerkleTrees}, - BonsaiDatabase, BonsaiPersistentDatabase, MultiProof, -}; +use bonsai_trie::trie::trees::{FullMerkleTrees, PartialMerkleTrees}; +use bonsai_trie::{BonsaiDatabase, BonsaiPersistentDatabase, MultiProof}; use katana_primitives::block::BlockNumber; use katana_primitives::contract::{StorageKey, StorageValue}; use katana_primitives::hash::Pedersen; @@ -17,6 +15,7 @@ pub struct StoragesTrie = StoragesTrie>; +// Full tree implementation impl StoragesTrie { pub fn new(db: DB, address: ContractAddress) -> Self { Self { address, trie: crate::BonsaiTrie::new(db) } @@ -29,6 +28,10 @@ impl StoragesTrie { pub fn multiproof(&mut self, storage_keys: Vec) -> MultiProof { self.trie.multiproof(&self.address.to_bytes_be(), storage_keys) } + + pub fn revert_to(&mut self, block: BlockNumber, latest_block: BlockNumber) { + self.trie.revert_to(block, latest_block); + } } impl StoragesTrie @@ -44,15 +47,7 @@ where } } -impl std::fmt::Debug for StoragesTrie { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("StoragesTrie") - .field("address", &self.address) - .field("trie", 
&"") - .finish() - } -} - +// Partial tree implementation impl PartialStoragesTrie { pub fn new_partial(db: DB, address: ContractAddress) -> Self { Self { address, trie: crate::PartialBonsaiTrie::new_partial(db) } @@ -87,7 +82,7 @@ where proof: MultiProof, original_root: Felt, ) { - self.trie.insert( + self.trie.insert_with_proof( &self.address.to_bytes_be(), storage_key, storage_value, @@ -100,3 +95,12 @@ where self.trie.commit(block.into()) } } + +impl std::fmt::Debug for StoragesTrie { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("StoragesTrie") + .field("address", &self.address) + .field("trie", &"") + .finish() + } +} diff --git a/crates/utils/src/node.rs b/crates/utils/src/node.rs index 25e290280..f93f48705 100644 --- a/crates/utils/src/node.rs +++ b/crates/utils/src/node.rs @@ -10,8 +10,8 @@ use katana_node::config::rpc::{RpcConfig, RpcModulesList, DEFAULT_RPC_ADDR}; use katana_node::config::sequencing::SequencingConfig; use katana_node::config::Config; use katana_node::{LaunchedNode, Node}; +use katana_primitives::address; use katana_primitives::chain::ChainId; -use katana_primitives::{address, ContractAddress}; use katana_provider::{DbProviderFactory, ForkProviderFactory, ProviderFactory}; use katana_rpc_server::HttpClient; use starknet::accounts::{ExecutionEncoding, SingleOwnerAccount}; diff --git a/crates/utils/src/tx_waiter.rs b/crates/utils/src/tx_waiter.rs index 4ef9bfbe6..079149b5b 100644 --- a/crates/utils/src/tx_waiter.rs +++ b/crates/utils/src/tx_waiter.rs @@ -308,12 +308,12 @@ mod tests { use assert_matches::assert_matches; use katana_primitives::block::FinalityStatus::{self, AcceptedOnL1, AcceptedOnL2}; use katana_primitives::fee::PriceUnit; + use katana_primitives::felt; use katana_rpc_types::receipt::ExecutionResult::{Reverted, Succeeded}; use katana_rpc_types::receipt::{ ExecutionResult, ReceiptBlockInfo, RpcInvokeTxReceipt, RpcTxReceipt, TxReceiptWithBlockInfo, }; use 
katana_rpc_types::{ExecutionResources, FeePayment}; - use starknet::macros::felt; use super::{Duration, TxWaiter}; use crate::{TestNode, TxWaitingError}; diff --git a/rust-toolchain.toml b/rust-toolchain.toml index cf6d0f556..b67e7d534 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "1.86.0" +channel = "1.89.0" diff --git a/scripts/clippy.sh b/scripts/clippy.sh index dec023999..1420fb732 100755 --- a/scripts/clippy.sh +++ b/scripts/clippy.sh @@ -8,7 +8,7 @@ set -x set -o pipefail run_clippy() { - cargo +nightly-2025-02-20 clippy --all-targets "$@" -- -D warnings -D future-incompatible -D nonstandard-style -D rust-2018-idioms -D unused -D missing-debug-implementations + cargo +nightly-2025-06-20 clippy --all-targets "$@" -- -D warnings -D future-incompatible -D nonstandard-style -D rust-2018-idioms -D unused -D missing-debug-implementations } . ./scripts/cairo-native.env.sh && run_clippy -p katana --all-features diff --git a/scripts/rust_fmt.sh b/scripts/rust_fmt.sh index 9d9559dc8..b14d4d4c2 100755 --- a/scripts/rust_fmt.sh +++ b/scripts/rust_fmt.sh @@ -8,4 +8,4 @@ if [ "$1" == "--fix" ]; then shift fi -cargo +nightly-2025-02-20 fmt $option --all -- "$@" +cargo +nightly-2025-06-20 fmt $option --all -- "$@" diff --git a/tests/db-compat/src/main.rs b/tests/db-compat/src/main.rs index c38a7bf76..9a55b11b3 100644 --- a/tests/db-compat/src/main.rs +++ b/tests/db-compat/src/main.rs @@ -2,7 +2,7 @@ use anyhow::Result; use katana_db::version::CURRENT_DB_VERSION; use katana_node_bindings::Katana; use katana_primitives::block::{BlockIdOrTag, ConfirmedBlockIdOrTag}; -use katana_primitives::{address, felt, ContractAddress}; +use katana_primitives::{address, felt}; use katana_rpc_client::starknet::Client as StarknetClient; #[tokio::main] diff --git a/tests/snos/src/main.rs b/tests/snos/src/main.rs index 583249907..b7e9e4d29 100644 --- a/tests/snos/src/main.rs +++ b/tests/snos/src/main.rs @@ -9,7 +9,7 @@ use 
katana_node::config::db::DbConfig; use katana_node::config::Config; use katana_node::{LaunchedNode, Node}; use katana_primitives::block::BlockNumber; -use katana_primitives::{address, ContractAddress, Felt}; +use katana_primitives::{address, Felt}; use katana_provider::api::block::BlockNumberProvider; use katana_provider::{DbProviderFactory, ProviderFactory};