diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index f089c2a19..6fea068e0 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -86,8 +86,6 @@ jobs:
           if-no-files-found: error
           path: |
             ./tests/snos/snos/build
-            ./tests/fixtures/db/spawn_and_move
-            ./tests/fixtures/db/simple
             ./crates/contracts/build
 
   build-katana-binary:
@@ -159,8 +157,100 @@ jobs:
       - name: Run Clippy
         run: ./scripts/clippy.sh
 
+  generate-db-fixtures:
+    needs: [generate-test-artifacts]
+    runs-on: ubuntu-latest-32-cores
+    if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.event.pull_request.draft == false)
+    container:
+      image: ghcr.io/dojoengine/katana-dev:latest
+    env:
+      MLIR_SYS_190_PREFIX: /usr/lib/llvm-19/
+      LLVM_SYS_191_PREFIX: /usr/lib/llvm-19/
+      TABLEGEN_190_PREFIX: /usr/lib/llvm-19/
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          submodules: recursive
+      # Workaround for https://github.com/actions/runner-images/issues/6775
+      - run: git config --global --add safe.directory "*"
+
+      - name: Cache DB fixtures
+        id: cache-db-fixtures
+        uses: actions/cache@v4
+        with:
+          path: |
+            tests/fixtures/db/spawn_and_move
+            tests/fixtures/db/simple
+          key: db-fixtures-${{ hashFiles('crates/storage/db/src/version.rs') }}-dojo-v1.7.0
+
+      - name: Download contract artifacts
+        if: steps.cache-db-fixtures.outputs.cache-hit != 'true'
+        uses: actions/download-artifact@v5
+        with:
+          name: test-artifacts
+
+      - uses: Swatinem/rust-cache@v2
+        if: steps.cache-db-fixtures.outputs.cache-hit != 'true'
+        with:
+          key: ci-${{ github.job }}
+          shared-key: katana-ci-cache
+
+      - name: Checkout Dojo repository
+        if: steps.cache-db-fixtures.outputs.cache-hit != 'true'
+        uses: actions/checkout@v3
+        with:
+          repository: dojoengine/dojo
+          ref: v1.7.0
+          path: dojo
+
+      - name: Read scarb version from Dojo
+        if: steps.cache-db-fixtures.outputs.cache-hit != 'true'
+        id: dojo-scarb
+        run: |
+          SCARB_VERSION=$(grep '^scarb ' dojo/.tool-versions | awk '{print $2}')
+          echo "version=$SCARB_VERSION" >> $GITHUB_OUTPUT
+
+      - uses: software-mansion/setup-scarb@v1
+        if: steps.cache-db-fixtures.outputs.cache-hit != 'true'
+        with:
+          scarb-version: ${{ steps.dojo-scarb.outputs.version }}
+
+      - name: Install sozo
+        if: steps.cache-db-fixtures.outputs.cache-hit != 'true'
+        run: cargo install --path dojo/bin/sozo --locked --force
+
+      - name: Build generate_migration_db
+        if: steps.cache-db-fixtures.outputs.cache-hit != 'true'
+        run: cargo build --bin generate_migration_db --features node -p katana-utils
+
+      - name: Generate spawn-and-move fixture
+        if: steps.cache-db-fixtures.outputs.cache-hit != 'true'
+        run: ./target/debug/generate_migration_db --example spawn-and-move --output /tmp/spawn_and_move.tar.gz
+
+      - name: Generate simple fixture
+        if: steps.cache-db-fixtures.outputs.cache-hit != 'true'
+        run: ./target/debug/generate_migration_db --example simple --output /tmp/simple.tar.gz
+
+      - name: Extract fixtures
+        if: steps.cache-db-fixtures.outputs.cache-hit != 'true'
+        run: |
+          mkdir -p tests/fixtures/db
+          cd tests/fixtures/db && tar -xzf /tmp/spawn_and_move.tar.gz
+          cd tests/fixtures/db && tar -xzf /tmp/simple.tar.gz
+
+      - name: Upload DB fixtures
+        uses: actions/upload-artifact@v4
+        with:
+          name: db-fixtures
+          overwrite: true
+          retention-days: 14
+          if-no-files-found: error
+          path: |
+            ./tests/fixtures/db/spawn_and_move
+            ./tests/fixtures/db/simple
+
   test:
-    needs: [fmt, clippy, generate-test-artifacts, build-katana-binary]
+    needs: [fmt, clippy, generate-test-artifacts, build-katana-binary, generate-db-fixtures]
     runs-on: ubuntu-latest-32-cores
     timeout-minutes: 30
     if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.event.pull_request.draft == false)
@@ -188,6 +278,11 @@ jobs:
         with:
          name: test-artifacts
 
+      - name: Download DB fixtures
+        uses: actions/download-artifact@v5
+        with:
+          name: db-fixtures
+
       - name: Download Katana binary
         uses: actions/download-artifact@v5
         with:
diff --git a/.gitignore b/.gitignore
index 8ee46c026..5c879813c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -31,9 +31,10 @@ tests/snos/snos
 snos-env
 tests/fixtures/katana_db
 
-# Ignore all files under tests/fixtures/db except .tar.gz files
+# Ignore generated DB fixture directories; keep only committed tarballs
 tests/fixtures/db/*
-!tests/fixtures/db/*.tar.gz
+!tests/fixtures/db/1_6_0.tar.gz
+!tests/fixtures/db/snos.tar.gz
 
 crates/contracts/build/
 !crates/contracts/build/legacy/
diff --git a/Cargo.lock b/Cargo.lock
index 33261726c..21daeb778 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4227,6 +4227,16 @@ version = "2.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "28dd6caf6059519a65843af8fe2a3ae298b14b80179855aeb4adc2c1934ee619"
 
+[[package]]
+name = "fs2"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213"
+dependencies = [
+ "libc",
+ "winapi",
+]
+
 [[package]]
 name = "fs_extra"
 version = "1.3.0"
@@ -6081,12 +6091,14 @@ dependencies = [
  "katana-rpc-api",
  "katana-rpc-server",
  "katana-rpc-types",
+ "katana-utils",
  "num-bigint",
  "prost 0.12.6",
  "serde_json",
  "starknet",
  "thiserror 1.0.69",
  "tokio",
+ "tokio-stream",
  "tonic 0.11.0",
  "tonic-build",
  "tonic-reflection",
@@ -6621,6 +6633,7 @@ dependencies = [
  "assert_matches",
  "async-trait",
  "clap",
+ "fs2",
  "futures",
  "katana-chain-spec",
  "katana-core",
diff --git a/Makefile b/Makefile
index 884755bdd..97ea5d7cf 100644
--- a/Makefile
+++ b/Makefile
@@ -17,10 +17,7 @@ SNOS_DB_DIR := $(DB_FIXTURES_DIR)/snos
 COMPATIBILITY_DB_TAR ?= $(DB_FIXTURES_DIR)/1_6_0.tar.gz
 COMPATIBILITY_DB_DIR ?= $(DB_FIXTURES_DIR)/1_6_0
 
-SPAWN_AND_MOVE_DB_TAR ?= $(DB_FIXTURES_DIR)/spawn_and_move.tar.gz
 SPAWN_AND_MOVE_DB_DIR := $(DB_FIXTURES_DIR)/spawn_and_move
-
-SIMPLE_DB_TAR ?= $(DB_FIXTURES_DIR)/simple.tar.gz
 SIMPLE_DB_DIR := $(DB_FIXTURES_DIR)/simple
 
 CONTRACTS_CRATE := crates/contracts
@@ -32,7 +29,7 @@ SCARB_VERSION := 2.8.4
 .DEFAULT_GOAL := usage
 .SILENT: clean
 
-.PHONY: usage help check-llvm native-deps native-deps-macos native-deps-linux native-deps-windows build-explorer contracts clean deps install-scarb test-artifacts snos-artifacts db-compat-artifacts install-pyenv
+.PHONY: usage help check-llvm native-deps native-deps-macos native-deps-linux native-deps-windows build-explorer contracts clean deps install-scarb test-artifacts snos-artifacts db-compat-artifacts generate-db-fixtures install-pyenv
 
 usage help:
 	@echo "Usage:"
@@ -43,6 +40,7 @@ usage help:
 	@echo "   test-artifacts:        Prepare tests artifacts (including test database)."
 	@echo "   snos-artifacts:        Prepare SNOS tests artifacts."
 	@echo "   db-compat-artifacts:   Prepare database compatibility test artifacts."
+	@echo "   generate-db-fixtures:  Generate spawn-and-move and simple DB fixtures (requires scarb + sozo)."
 	@echo "   native-deps-macos:     Install cairo-native dependencies for macOS."
 	@echo "   native-deps-linux:     Install cairo-native dependencies for Linux."
 	@echo "   native-deps-windows:   Install cairo-native dependencies for Windows."
@@ -68,7 +66,7 @@ snos-artifacts: $(SNOS_OUTPUT)
 db-compat-artifacts: $(COMPATIBILITY_DB_DIR)
 	@echo "Database compatibility test artifacts prepared successfully."
 
-test-artifacts: $(SNOS_DB_DIR) $(SNOS_OUTPUT) $(COMPATIBILITY_DB_DIR) $(SPAWN_AND_MOVE_DB_DIR) $(SIMPLE_DB_DIR) contracts
+test-artifacts: $(SNOS_DB_DIR) $(SNOS_OUTPUT) $(COMPATIBILITY_DB_DIR) contracts
 	@echo "All test artifacts prepared successfully."
 
 build-explorer:
@@ -116,17 +114,19 @@ $(COMPATIBILITY_DB_DIR): $(COMPATIBILITY_DB_TAR)
 		mv katana_db $(notdir $(COMPATIBILITY_DB_DIR)) || { echo "Failed to extract backward compatibility test database\!"; exit 1; }
 	@echo "Backward compatibility database extracted successfully."
 
-$(SPAWN_AND_MOVE_DB_DIR): $(SPAWN_AND_MOVE_DB_TAR)
-	@echo "Extracting spawn-and-move test database..."
-	@cd $(DB_FIXTURES_DIR) && \
-		tar -xzf $(notdir $(SPAWN_AND_MOVE_DB_TAR)) || { echo "Failed to extract spawn-and-move test database\!"; exit 1; }
-	@echo "Spawn-and-move test database extracted successfully."
-
-$(SIMPLE_DB_DIR): $(SIMPLE_DB_TAR)
-	@echo "Extracting simple test database..."
-	@cd $(DB_FIXTURES_DIR) && \
-		tar -xzf $(notdir $(SIMPLE_DB_TAR)) || { echo "Failed to extract simple test database\!"; exit 1; }
-	@echo "Simple test database extracted successfully."
+generate-db-fixtures:
+	@echo "Building generate_migration_db binary..."
+	cargo build --bin generate_migration_db --features node -p katana-utils
+	@echo "Generating spawn-and-move database fixture..."
+	./target/debug/generate_migration_db --example spawn-and-move --output /tmp/spawn_and_move.tar.gz
+	@echo "Generating simple database fixture..."
+	./target/debug/generate_migration_db --example simple --output /tmp/simple.tar.gz
+	@echo "Extracting spawn-and-move fixture..."
+	@mkdir -p $(DB_FIXTURES_DIR)
+	@cd $(DB_FIXTURES_DIR) && tar -xzf /tmp/spawn_and_move.tar.gz
+	@echo "Extracting simple fixture..."
+	@cd $(DB_FIXTURES_DIR) && tar -xzf /tmp/simple.tar.gz
+	@echo "DB fixtures generated successfully."
 
 check-llvm:
 ifndef MLIR_SYS_190_PREFIX
diff --git a/crates/grpc/Cargo.toml b/crates/grpc/Cargo.toml
index c8e8e9c67..530918558 100644
--- a/crates/grpc/Cargo.toml
+++ b/crates/grpc/Cargo.toml
@@ -22,6 +22,7 @@ http = "0.2"
 
 # Async runtime
 tokio.workspace = true
+tokio-stream = "0.1"
 
 # Logging
 tracing.workspace = true
@@ -44,6 +45,7 @@ tower-service.workspace = true
 [dev-dependencies]
 tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
 hex = "0.4"
+katana-utils = { workspace = true, features = ["node"] }
 
 [build-dependencies]
 tonic-build.workspace = true
diff --git a/crates/grpc/src/server.rs b/crates/grpc/src/server.rs
index 0c61a90e6..3753bfe55 100644
--- a/crates/grpc/src/server.rs
+++ b/crates/grpc/src/server.rs
@@ -4,7 +4,9 @@ use std::net::SocketAddr;
 use std::sync::Arc;
 use std::time::Duration;
 
+use tokio::net::TcpListener;
 use tokio::sync::watch;
+use tokio_stream::wrappers::TcpListenerStream;
 use tonic::transport::server::Routes;
 use tonic::transport::Server;
 use tracing::{error, info};
@@ -28,6 +30,10 @@ pub enum Error {
     /// Server has already been stopped.
     #[error("gRPC server has already been stopped")]
     AlreadyStopped,
+
+    /// IO error during socket binding.
+    #[error("Failed to bind socket: {0}")]
+    Bind(#[from] std::io::Error),
 }
 
 /// Handle to a running gRPC server.
@@ -104,6 +110,9 @@ impl GrpcServer {
     ///
     /// This method spawns the server on a new Tokio task and returns a handle
     /// that can be used to manage the server.
+    ///
+    /// If the port in `addr` is 0, the OS will assign an available port.
+    /// The actual bound address can be retrieved from the returned handle.
     pub async fn start(&self, addr: SocketAddr) -> Result<GrpcServerHandle, Error> {
         // Build reflection service for tooling support (grpcurl, Postman, etc.)
         let reflection_service = tonic_reflection::server::Builder::configure()
@@ -114,11 +123,16 @@ impl GrpcServer {
         // Create shutdown channel
         let (shutdown_tx, mut shutdown_rx) = watch::channel(());
 
+        // Bind to the socket first to get the actual address (important when port is 0)
+        let listener = TcpListener::bind(addr).await?;
+        let actual_addr = listener.local_addr()?;
+        let incoming = TcpListenerStream::new(listener);
+
         let mut builder = Server::builder().timeout(self.timeout);
         let server = builder.add_routes(self.routes.clone()).add_service(reflection_service);
 
-        // Start the server with graceful shutdown
-        let server_future = server.serve_with_shutdown(addr, async move {
+        // Start the server with graceful shutdown using the pre-bound listener
+        let server_future = server.serve_with_incoming_shutdown(incoming, async move {
             let _ = shutdown_rx.changed().await;
         });
 
@@ -128,9 +142,9 @@ impl GrpcServer {
             }
         });
 
-        info!(target: "grpc", %addr, "gRPC server started.");
+        info!(target: "grpc", addr = %actual_addr, "gRPC server started.");
 
-        Ok(GrpcServerHandle { addr, shutdown_tx: Arc::new(shutdown_tx) })
+        Ok(GrpcServerHandle { addr: actual_addr, shutdown_tx: Arc::new(shutdown_tx) })
     }
 }
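The bind-then-serve pattern above is what makes port 0 usable: the OS-assigned port is only knowable after binding. A minimal standalone sketch of the same idea (assuming only tokio and tokio-stream; `bind_ephemeral` is a hypothetical helper, not part of this patch):

    use tokio::net::TcpListener;
    use tokio_stream::wrappers::TcpListenerStream;

    /// Binds `addr` (port 0 lets the OS pick a free port) and returns the
    /// actual address plus a stream suitable for serve_with_incoming_shutdown.
    async fn bind_ephemeral(
        addr: std::net::SocketAddr,
    ) -> std::io::Result<(std::net::SocketAddr, TcpListenerStream)> {
        let listener = TcpListener::bind(addr).await?;
        let actual = listener.local_addr()?; // the port the OS actually assigned
        Ok((actual, TcpListenerStream::new(listener)))
    }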
diff --git a/crates/grpc/tests/starknet.rs b/crates/grpc/tests/starknet.rs
new file mode 100644
index 000000000..4b78f1c90
--- /dev/null
+++ b/crates/grpc/tests/starknet.rs
@@ -0,0 +1,536 @@
+use katana_grpc::proto::{
+    BlockHashAndNumberRequest, BlockNumberRequest, BlockTag, ChainIdRequest, GetBlockRequest,
+    GetClassAtRequest, GetClassHashAtRequest, GetEventsRequest, GetNonceRequest,
+    GetStorageAtRequest, GetTransactionByHashRequest, GetTransactionReceiptRequest,
+    SpecVersionRequest, SyncingRequest,
+};
+use katana_grpc::GrpcClient;
+use katana_primitives::Felt;
+use katana_utils::node::TestNode;
+use starknet::core::types::{BlockId, BlockTag as StarknetBlockTag, EventFilter};
+use starknet::providers::jsonrpc::HttpTransport;
+use starknet::providers::{JsonRpcClient, Provider, Url};
+use tonic::Request;
+
+async fn setup() -> (TestNode, GrpcClient, JsonRpcClient<HttpTransport>) {
+    let node = TestNode::new_with_spawn_and_move_db().await;
+
+    let grpc_addr = *node.grpc_addr().expect("grpc not enabled");
+    let grpc = GrpcClient::connect(format!("http://{grpc_addr}"))
+        .await
+        .expect("failed to connect to gRPC server");
+
+    let rpc_addr = *node.rpc_addr();
+    let url = Url::parse(&format!("http://{rpc_addr}")).expect("failed to parse url");
+    let rpc = JsonRpcClient::new(HttpTransport::new(url));
+
+    (node, grpc, rpc)
+}
+
+fn genesis_address(node: &TestNode) -> Felt {
+    let (address, _) =
+        node.backend().chain_spec.genesis().accounts().next().expect("must have genesis account");
+    (*address).into()
+}
+
+fn felt_to_proto(felt: Felt) -> katana_grpc::proto::Felt {
+    katana_grpc::proto::Felt { value: felt.to_bytes_be().to_vec() }
+}
+
+fn proto_to_felt(proto: &katana_grpc::proto::Felt) -> Felt {
+    Felt::from_bytes_be_slice(&proto.value)
+}
+
+fn grpc_block_id_number(n: u64) -> Option<katana_grpc::proto::BlockId> {
+    Some(katana_grpc::proto::BlockId {
+        identifier: Some(katana_grpc::proto::block_id::Identifier::Number(n)),
+    })
+}
+
+fn grpc_block_id_latest() -> Option<katana_grpc::proto::BlockId> {
+    Some(katana_grpc::proto::BlockId {
+        identifier: Some(katana_grpc::proto::block_id::Identifier::Tag(BlockTag::Latest as i32)),
+    })
+}
+
+#[tokio::test]
+async fn test_chain_id() {
+    let (node, mut grpc, rpc) = setup().await;
+
+    let chain_id = node.backend().chain_spec.id().id();
+
+    let rpc_chain_id = rpc.chain_id().await.expect("rpc chain_id failed");
+
+    let grpc_chain_id = grpc
+        .chain_id(Request::new(ChainIdRequest {}))
+        .await
+        .expect("grpc chain_id failed")
+        .into_inner()
+        .chain_id;
+
+    assert_eq!(rpc_chain_id, chain_id);
+    assert_eq!(grpc_chain_id, format!("{:#x}", chain_id));
+}
+
+#[tokio::test]
+async fn test_block_number() {
+    let (_node, mut grpc, rpc) = setup().await;
+
+    let rpc_block_number = rpc.block_number().await.expect("rpc block_number failed");
+
+    let grpc_block_number = grpc
+        .block_number(Request::new(BlockNumberRequest {}))
+        .await
+        .expect("grpc block_number failed")
+        .into_inner()
+        .block_number;
+
+    assert_eq!(grpc_block_number, rpc_block_number);
+    assert!(rpc_block_number > 0, "Expected block number > 0 after migration");
+}
+
+#[tokio::test]
+async fn test_block_hash_and_number() {
+    let (_node, mut grpc, rpc) = setup().await;
+
+    let rpc_result = rpc.block_hash_and_number().await.expect("rpc block_hash_and_number failed");
+
+    let grpc_result = grpc
+        .block_hash_and_number(Request::new(BlockHashAndNumberRequest {}))
+        .await
+        .expect("grpc block_hash_and_number failed")
+        .into_inner();
+
+    assert_eq!(grpc_result.block_number, rpc_result.block_number);
+    let grpc_hash = proto_to_felt(&grpc_result.block_hash.expect("grpc missing block_hash"));
+    assert_eq!(grpc_hash, rpc_result.block_hash);
+}
+
+#[tokio::test]
+async fn test_get_block_with_txs() {
+    let (_node, mut grpc, rpc) = setup().await;
+
+    let rpc_block = rpc.get_block_with_txs(BlockId::Number(0)).await.expect("rpc failed");
+
+    let grpc_result = grpc
+        .get_block_with_txs(Request::new(GetBlockRequest { block_id: grpc_block_id_number(0) }))
+        .await
+        .expect("grpc failed")
+        .into_inner();
+
+    let rpc_block = match rpc_block {
+        starknet::core::types::MaybePreConfirmedBlockWithTxs::Block(b) => b,
+        _ => panic!("Expected confirmed block from rpc"),
+    };
+
+    let grpc_block = match grpc_result.result {
+        Some(katana_grpc::proto::get_block_with_txs_response::Result::Block(b)) => b,
+        _ => panic!("Expected confirmed block from grpc"),
+    };
+
+    let grpc_header = grpc_block.header.expect("grpc missing block header");
+    assert_eq!(grpc_header.block_number, rpc_block.block_number);
+    assert_eq!(
+        proto_to_felt(&grpc_header.block_hash.expect("grpc missing block_hash")),
+        rpc_block.block_hash
+    );
+    assert_eq!(grpc_block.transactions.len(), rpc_block.transactions.len());
+}
+
+#[tokio::test]
+async fn test_get_block_with_tx_hashes() {
+    let (_node, mut grpc, rpc) = setup().await;
+
+    let rpc_block = rpc.get_block_with_tx_hashes(BlockId::Number(0)).await.expect("rpc failed");
+
+    let grpc_result = grpc
+        .get_block_with_tx_hashes(Request::new(GetBlockRequest {
+            block_id: grpc_block_id_number(0),
+        }))
+        .await
+        .expect("grpc failed")
+        .into_inner();
+
+    let rpc_block = match rpc_block {
+        starknet::core::types::MaybePreConfirmedBlockWithTxHashes::Block(b) => b,
+        _ => panic!("Expected confirmed block from rpc"),
+    };
+
+    let grpc_block = match grpc_result.result {
+        Some(katana_grpc::proto::get_block_with_tx_hashes_response::Result::Block(b)) => b,
+        _ => panic!("Expected confirmed block from grpc"),
+    };
+
+    let grpc_header = grpc_block.header.expect("grpc missing block header");
+    assert_eq!(grpc_header.block_number, rpc_block.block_number);
+    assert_eq!(
+        proto_to_felt(&grpc_header.block_hash.expect("grpc missing block_hash")),
+        rpc_block.block_hash
+    );
+    assert_eq!(grpc_block.transactions.len(), rpc_block.transactions.len());
+
+    for (grpc_tx_hash, rpc_tx_hash) in
+        grpc_block.transactions.iter().zip(rpc_block.transactions.iter())
+    {
+        assert_eq!(proto_to_felt(grpc_tx_hash), *rpc_tx_hash);
+    }
+}
+
+#[tokio::test]
+async fn test_get_block_with_txs_latest() {
+    let (_node, mut grpc, rpc) = setup().await;
+
+    let rpc_block =
+        rpc.get_block_with_txs(BlockId::Tag(StarknetBlockTag::Latest)).await.expect("rpc failed");
+
+    let grpc_result = grpc
+        .get_block_with_txs(Request::new(GetBlockRequest { block_id: grpc_block_id_latest() }))
+        .await
+        .expect("grpc failed")
+        .into_inner();
+
+    let rpc_block = match rpc_block {
+        starknet::core::types::MaybePreConfirmedBlockWithTxs::Block(b) => b,
+        _ => panic!("Expected confirmed block from rpc"),
+    };
+
+    let grpc_block = match grpc_result.result {
+        Some(katana_grpc::proto::get_block_with_txs_response::Result::Block(b)) => b,
+        _ => panic!("Expected confirmed block from grpc"),
+    };
+
+    let grpc_header = grpc_block.header.expect("grpc missing block header");
+    assert_eq!(grpc_header.block_number, rpc_block.block_number);
+    assert_eq!(grpc_block.transactions.len(), rpc_block.transactions.len());
+}
+
+#[tokio::test]
+async fn test_get_class_at() {
+    let (node, mut grpc, rpc) = setup().await;
+    let address = genesis_address(&node);
+
+    let rpc_class = rpc
+        .get_class_at(BlockId::Tag(StarknetBlockTag::Latest), address)
+        .await
+        .expect("rpc get_class_at failed");
+
+    let grpc_result = grpc
+        .get_class_at(Request::new(GetClassAtRequest {
+            block_id: grpc_block_id_latest(),
+            contract_address: Some(felt_to_proto(address)),
+        }))
+        .await
+        .expect("grpc get_class_at failed")
+        .into_inner();
+
+    // Verify both return a Sierra class (not Legacy)
+    assert!(
+        matches!(rpc_class, starknet::core::types::ContractClass::Sierra(_)),
+        "Expected Sierra class from rpc"
+    );
+    assert!(
+        matches!(
+            grpc_result.result,
+            Some(katana_grpc::proto::get_class_at_response::Result::ContractClass(_))
+        ),
+        "Expected Sierra class from grpc"
+    );
+}
+
+#[tokio::test]
+async fn test_get_class_hash_at() {
+    let (node, mut grpc, rpc) = setup().await;
+    let address = genesis_address(&node);
+
+    let rpc_class_hash = rpc
+        .get_class_hash_at(BlockId::Tag(StarknetBlockTag::Latest), address)
+        .await
+        .expect("rpc get_class_hash_at failed");
+
+    let grpc_result = grpc
+        .get_class_hash_at(Request::new(GetClassHashAtRequest {
+            block_id: grpc_block_id_latest(),
+            contract_address: Some(felt_to_proto(address)),
+        }))
+        .await
+        .expect("grpc get_class_hash_at failed")
+        .into_inner();
+
+    let grpc_class_hash = proto_to_felt(&grpc_result.class_hash.expect("grpc missing class_hash"));
+    assert_eq!(grpc_class_hash, rpc_class_hash);
+}
+
+#[tokio::test]
+async fn test_get_storage_at() {
+    let (node, mut grpc, rpc) = setup().await;
+    let address = genesis_address(&node);
+
+    let rpc_value = rpc
+        .get_storage_at(address, Felt::ZERO, BlockId::Tag(StarknetBlockTag::Latest))
+        .await
+        .expect("rpc get_storage_at failed");
+
+    let grpc_result = grpc
+        .get_storage_at(Request::new(GetStorageAtRequest {
+            block_id: grpc_block_id_latest(),
+            contract_address: Some(felt_to_proto(address)),
+            key: Some(felt_to_proto(Felt::ZERO)),
+        }))
+        .await
+        .expect("grpc get_storage_at failed")
+        .into_inner();
+
+    let grpc_value = proto_to_felt(&grpc_result.value.expect("grpc missing value"));
+    assert_eq!(grpc_value, rpc_value);
+}
+
+#[tokio::test]
+async fn test_get_nonce() {
+    let (node, mut grpc, rpc) = setup().await;
+    let address = genesis_address(&node);
+
+    let rpc_nonce = rpc
+        .get_nonce(BlockId::Tag(StarknetBlockTag::Latest), address)
+        .await
+        .expect("rpc get_nonce failed");
+
+    let grpc_result = grpc
+        .get_nonce(Request::new(GetNonceRequest {
+            block_id: grpc_block_id_latest(),
+            contract_address: Some(felt_to_proto(address)),
+        }))
+        .await
+        .expect("grpc get_nonce failed")
+        .into_inner();
+
+    let grpc_nonce = proto_to_felt(&grpc_result.nonce.expect("grpc missing nonce"));
+    assert_eq!(grpc_nonce, rpc_nonce);
+    assert!(rpc_nonce > Felt::ZERO, "Nonce should be > 0 after migration");
+}
+
+#[tokio::test]
+async fn test_spec_version() {
+    let (_node, mut grpc, rpc) = setup().await;
+
+    let rpc_version = rpc.spec_version().await.expect("rpc spec_version failed");
+
+    let grpc_version = grpc
+        .spec_version(Request::new(SpecVersionRequest {}))
+        .await
+        .expect("grpc spec_version failed")
+        .into_inner()
+        .version;
+
+    assert_eq!(grpc_version, rpc_version);
+}
+
+#[tokio::test]
+async fn test_syncing() {
+    let (_node, mut grpc, rpc) = setup().await;
+
+    let rpc_syncing = rpc.syncing().await.expect("rpc syncing failed");
+
+    let grpc_syncing = grpc
+        .syncing(Request::new(SyncingRequest {}))
+        .await
+        .expect("grpc syncing failed")
+        .into_inner();
+
+    assert!(
+        matches!(rpc_syncing, starknet::core::types::SyncStatusType::NotSyncing),
+        "Expected rpc to report not syncing"
+    );
+    assert!(
+        matches!(
+            grpc_syncing.result,
+            Some(katana_grpc::proto::syncing_response::Result::NotSyncing(true))
+        ),
+        "Expected grpc to report not syncing"
+    );
+}
+
+#[tokio::test]
+async fn test_get_block_transaction_count() {
+    let (_node, mut grpc, rpc) = setup().await;
+
+    let rpc_count = rpc
+        .get_block_transaction_count(BlockId::Number(0))
+        .await
+        .expect("rpc get_block_transaction_count failed");
+
+    let grpc_count = grpc
+        .get_block_transaction_count(Request::new(GetBlockRequest {
+            block_id: grpc_block_id_number(0),
+        }))
+        .await
+        .expect("grpc get_block_transaction_count failed")
+        .into_inner()
+        .count;
+
+    assert_eq!(grpc_count, rpc_count);
+}
+
+#[tokio::test]
+async fn test_get_state_update() {
+    let (_node, mut grpc, rpc) = setup().await;
+
+    let rpc_state =
+        rpc.get_state_update(BlockId::Number(0)).await.expect("rpc get_state_update failed");
+
+    let grpc_result = grpc
+        .get_state_update(Request::new(GetBlockRequest { block_id: grpc_block_id_number(0) }))
+        .await
+        .expect("grpc get_state_update failed")
+        .into_inner();
+
+    let rpc_state = match rpc_state {
+        starknet::core::types::MaybePreConfirmedStateUpdate::Update(s) => s,
+        _ => panic!("Expected confirmed state update from rpc"),
+    };
+
+    let grpc_state = match grpc_result.result {
+        Some(katana_grpc::proto::get_state_update_response::Result::StateUpdate(s)) => s,
+        _ => panic!("Expected confirmed state update from grpc"),
+    };
+
+    assert_eq!(
+        proto_to_felt(&grpc_state.block_hash.expect("grpc missing block_hash")),
+        rpc_state.block_hash
+    );
+    assert_eq!(
+        proto_to_felt(&grpc_state.new_root.expect("grpc missing new_root")),
+        rpc_state.new_root
+    );
+    assert_eq!(
+        proto_to_felt(&grpc_state.old_root.expect("grpc missing old_root")),
+        rpc_state.old_root
+    );
+}
+
+#[tokio::test]
+async fn test_get_events() {
+    let (_node, mut grpc, rpc) = setup().await;
+
+    let rpc_events = rpc
+        .get_events(
+            EventFilter {
+                from_block: Some(BlockId::Number(0)),
+                to_block: Some(BlockId::Tag(StarknetBlockTag::Latest)),
+                address: None,
+                keys: None,
+            },
+            None,
+            100,
+        )
+        .await
+        .expect("rpc get_events failed");
+
+    let grpc_events = grpc
+        .get_events(Request::new(GetEventsRequest {
+            filter: Some(katana_grpc::proto::EventFilter {
+                from_block: grpc_block_id_number(0),
+                to_block: grpc_block_id_latest(),
+                address: None,
+                keys: vec![],
+            }),
+            chunk_size: 100,
+            continuation_token: String::new(),
+        }))
+        .await
+        .expect("grpc get_events failed")
+        .into_inner();
+
+    assert_eq!(grpc_events.events.len(), rpc_events.events.len());
+    assert!(!rpc_events.events.is_empty(), "Expected events after migration");
+
+    let rpc_event = &rpc_events.events[0];
+    let grpc_event = &grpc_events.events[0];
+
+    assert_eq!(
+        proto_to_felt(grpc_event.from_address.as_ref().expect("grpc missing from_address")),
+        rpc_event.from_address
+    );
+    assert_eq!(
+        proto_to_felt(grpc_event.transaction_hash.as_ref().expect("grpc missing tx_hash")),
+        rpc_event.transaction_hash
+    );
+    assert_eq!(grpc_event.keys.len(), rpc_event.keys.len());
+    assert_eq!(grpc_event.data.len(), rpc_event.data.len());
+}
+
+#[tokio::test]
+async fn test_get_transaction_by_hash() {
+    let (_node, mut grpc, rpc) = setup().await;
+
+    // Get a transaction hash from block 1
+    let rpc_block =
+        rpc.get_block_with_tx_hashes(BlockId::Number(1)).await.expect("rpc get_block failed");
+
+    let tx_hash = match rpc_block {
+        starknet::core::types::MaybePreConfirmedBlockWithTxHashes::Block(b) => {
+            *b.transactions.first().expect("no transactions in block 1")
+        }
+        _ => panic!("Expected confirmed block"),
+    };
+
+    // Fetch via JSON-RPC
+    let rpc_tx =
+        rpc.get_transaction_by_hash(tx_hash).await.expect("rpc get_transaction_by_hash failed");
+
+    // Fetch via gRPC
+    let grpc_result = grpc
+        .get_transaction_by_hash(Request::new(GetTransactionByHashRequest {
+            transaction_hash: Some(felt_to_proto(tx_hash)),
+        }))
+        .await
+        .expect("grpc get_transaction_by_hash failed")
+        .into_inner();
+
+    // Verify RPC returned the correct hash
+    assert_eq!(*rpc_tx.transaction_hash(), tx_hash);
+
+    // Verify gRPC returned a valid transaction
+    let grpc_tx = grpc_result.transaction.expect("grpc missing transaction");
+    assert!(grpc_tx.transaction.is_some(), "grpc transaction should have a variant");
+}
+
+#[tokio::test]
+async fn test_get_transaction_receipt() {
+    let (_node, mut grpc, rpc) = setup().await;
+
+    // Get a transaction hash from block 1
+    let rpc_block =
+        rpc.get_block_with_tx_hashes(BlockId::Number(1)).await.expect("rpc get_block failed");
+
+    let tx_hash = match rpc_block {
+        starknet::core::types::MaybePreConfirmedBlockWithTxHashes::Block(b) => {
+            *b.transactions.first().expect("no transactions in block 1")
+        }
+        _ => panic!("Expected confirmed block"),
+    };
+
+    // Fetch receipt via JSON-RPC
+    let rpc_receipt =
+        rpc.get_transaction_receipt(tx_hash).await.expect("rpc get_transaction_receipt failed");
+
+    // Fetch receipt via gRPC
+    let grpc_result = grpc
+        .get_transaction_receipt(Request::new(GetTransactionReceiptRequest {
+            transaction_hash: Some(felt_to_proto(tx_hash)),
+        }))
+        .await
+        .expect("grpc get_transaction_receipt failed")
+        .into_inner();
+
+    let grpc_receipt = grpc_result.receipt.expect("grpc missing receipt");
+
+    // Compare transaction hash
+    assert_eq!(*rpc_receipt.receipt.transaction_hash(), tx_hash);
+    assert_eq!(
+        proto_to_felt(&grpc_receipt.transaction_hash.expect("grpc missing tx_hash")),
+        tx_hash
+    );
+
+    // Compare block number
+    assert_eq!(grpc_receipt.block_number, rpc_receipt.block.block_number());
+}
diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs
index 4f588d87f..2b0900486 100644
--- a/crates/storage/db/src/lib.rs
+++ b/crates/storage/db/src/lib.rs
@@ -100,6 +100,42 @@ impl Db {
         let dir = tempfile::Builder::new().disable_cleanup(true).tempdir()?;
         let path = dir.path();
 
+        let version = if is_database_empty(path) {
+            fs::create_dir_all(path).with_context(|| {
+                format!("Creating database directory at path {}", path.display())
+            })?;
+
+            create_db_version_file(path, CURRENT_DB_VERSION).with_context(|| {
+                format!("Inserting database version file at path {}", path.display())
+            })?
+        } else {
+            match get_db_version(path) {
+                Ok(version) if version != CURRENT_DB_VERSION => {
+                    if !is_block_compatible_version(&version) {
+                        return Err(anyhow!(DatabaseVersionError::MismatchVersion {
+                            expected: CURRENT_DB_VERSION,
+                            found: version
+                        }));
+                    }
+                    debug!(target: "db", "Using database version {version} with block compatibility mode");
+                    version
+                }
+
+                Ok(version) => version,
+
+                Err(DatabaseVersionError::FileNotFound) => {
+                    create_db_version_file(path, CURRENT_DB_VERSION).with_context(|| {
+                        format!(
+                            "No database version file found. Inserting version file at path {}",
+                            path.display()
+                        )
+                    })?
+                }
+
+                Err(err) => return Err(anyhow!(err)),
+            }
+        };
+
         let env = mdbx::DbEnvBuilder::new()
             .max_size(GIGABYTE * 10) // 10gb
             .growth_step((GIGABYTE / 2) as isize) // 512mb
@@ -108,7 +144,7 @@
 
         env.create_default_tables()?;
 
-        Ok(Self { env, version: CURRENT_DB_VERSION })
+        Ok(Self { env, version })
     }
 
     /// Opens an existing database at the given `path` with [`SyncMode::UtterlyNoSync`] for
@@ -129,6 +165,7 @@
             .max_size(GIGABYTE * 10)
             .growth_step((GIGABYTE / 2) as isize)
             .sync(SyncMode::UtterlyNoSync)
+            .existing_page_size()
             .build(path)?;
 
         env.create_default_tables()?;
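The open path above pairs with the builder change below: fixture databases may be produced on a host whose OS page size differs from the CI runner's. A minimal usage sketch, using only the `DbEnvBuilder` calls shown in these hunks (error handling and surrounding function elided):

    // Opening a pre-built fixture DB whose pages may be 16KB (e.g., created on
    // macOS/Apple Silicon) on a 4KB-page Linux host:
    let env = mdbx::DbEnvBuilder::new()
        .sync(SyncMode::UtterlyNoSync)
        .existing_page_size() // adopt the page size recorded in the DB file
        .build(path)?;
    env.create_default_tables()?;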
diff --git a/crates/storage/db/src/mdbx/mod.rs b/crates/storage/db/src/mdbx/mod.rs
index bc64a5cec..d50e30723 100644
--- a/crates/storage/db/src/mdbx/mod.rs
+++ b/crates/storage/db/src/mdbx/mod.rs
@@ -33,6 +33,7 @@ pub struct DbEnvBuilder {
     max_readers: u64,
     max_size: usize,
     growth_step: isize,
+    page_size: Option<PageSize>,
 }
 
 impl DbEnvBuilder {
@@ -43,6 +44,7 @@
             max_readers: DEFAULT_MAX_READERS,
             max_size: DEFAULT_MAX_SIZE,
             growth_step: DEFAULT_GROWTH_STEP,
+            page_size: Some(PageSize::Set(utils::default_page_size())),
         }
     }
@@ -72,6 +74,15 @@
         self
     }
 
+    /// Uses the page size from an existing database instead of forcing the OS default.
+    ///
+    /// This is required when opening databases created on a platform with a different
+    /// page size (e.g., macOS Apple Silicon uses 16KB pages vs Linux x86_64 4KB pages).
+    pub fn existing_page_size(mut self) -> Self {
+        self.page_size = None;
+        self
+    }
+
     /// Builds the database environment at the specified path.
     pub fn build(self, path: impl AsRef<Path>) -> Result<DbEnv> {
         let mut builder = libmdbx::Environment::builder();
@@ -85,7 +96,7 @@
                 growth_step: Some(self.growth_step),
                 // The database never shrinks
                 shrink_threshold: None,
-                page_size: Some(PageSize::Set(utils::default_page_size())),
+                page_size: self.page_size,
             })
             .set_flags(EnvironmentFlags {
                 mode: self.mode,
diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml
index b74cbe90a..9d0a7106f 100644
--- a/crates/utils/Cargo.toml
+++ b/crates/utils/Cargo.toml
@@ -13,6 +13,7 @@ katana-rpc-types.workspace = true
 katana-utils-macro = { path = "macro" }
 
 anyhow.workspace = true
+fs2 = "0.4"
 arbitrary.workspace = true
 assert_matches.workspace = true
 async-trait.workspace = true
@@ -35,6 +36,7 @@ tempfile = { workspace = true, optional = true }
 
 [features]
 node = [
+    "katana-node/grpc",
    "clap",
    "katana-chain-spec",
    "katana-core",
diff --git a/crates/utils/src/bin/generate_migration_db.rs b/crates/utils/src/bin/generate_migration_db.rs
index f1e7a91da..3d557f15e 100644
--- a/crates/utils/src/bin/generate_migration_db.rs
+++ b/crates/utils/src/bin/generate_migration_db.rs
@@ -26,6 +26,20 @@ struct Args {
     output: PathBuf,
 }
 
+fn copy_dir_all(src: &Path, dst: &Path) -> std::io::Result<()> {
+    std::fs::create_dir_all(dst)?;
+    for entry in std::fs::read_dir(src)? {
+        let entry = entry?;
+        let dst_path = dst.join(entry.file_name());
+        if entry.file_type()?.is_dir() {
+            copy_dir_all(&entry.path(), &dst_path)?;
+        } else {
+            std::fs::copy(entry.path(), dst_path)?;
+        }
+    }
+    Ok(())
+}
+
 fn create_tar_gz(db_dir: &Path, output: &Path) -> std::io::Result<()> {
     // Derive the directory name from the output file stem (e.g., "spawn_and_move" from
     // "spawn_and_move.tar.gz")
@@ -46,13 +60,8 @@ fn create_tar_gz(db_dir: &Path, output: &Path) -> std::io::Result<()> {
     let inner_dir = staging_dir.path().join(stem);
     std::fs::create_dir_all(&inner_dir)?;
 
-    // Copy db files into the staging directory
-    for entry in std::fs::read_dir(db_dir)? {
-        let entry = entry?;
-        if entry.file_type()?.is_file() {
-            std::fs::copy(entry.path(), inner_dir.join(entry.file_name()))?;
-        }
-    }
+    // Recursively copy all db files into the staging directory
+    copy_dir_all(db_dir, &inner_dir)?;
 
     let status = Command::new("tar")
         .args(["-czf"])
diff --git a/crates/utils/src/node.rs b/crates/utils/src/node.rs
index 51edcdb99..7e9e92546 100644
--- a/crates/utils/src/node.rs
+++ b/crates/utils/src/node.rs
@@ -1,12 +1,20 @@
+use std::fs::File;
 use std::net::SocketAddr;
 use std::path::{Path, PathBuf};
 use std::process::Command;
 use std::sync::Arc;
+use std::time::Duration;
+
+use fs2::FileExt;
+
+/// Fixed path for the dojo repository clone to avoid recompiling for each test.
+const DOJO_CACHE_DIR: &str = "/tmp/katana-test-dojo";
 
 use katana_chain_spec::{dev, ChainSpec};
 use katana_core::backend::Backend;
 use katana_executor::implementation::blockifier::BlockifierFactory;
 use katana_node::config::dev::DevConfig;
+use katana_node::config::grpc::{GrpcConfig, DEFAULT_GRPC_ADDR};
 use katana_node::config::rpc::{RpcConfig, RpcModulesList, DEFAULT_RPC_ADDR};
 use katana_node::config::sequencing::SequencingConfig;
 use katana_node::config::Config;
@@ -28,8 +36,8 @@ use starknet::signers::{LocalWallet, SigningKey};
 
 /// Errors that can occur when migrating contracts to a test node.
 #[derive(Debug, thiserror::Error)]
 pub enum MigrateError {
-    #[error("Failed to create temp directory: {0}")]
-    TempDir(#[from] std::io::Error),
+    #[error("IO error: {0}")]
+    Io(#[from] std::io::Error),
     #[error("Git clone failed: {0}")]
     GitClone(String),
     #[error("Scarb build failed: {0}")]
@@ -192,6 +200,11 @@
         katana_rpc_client::starknet::Client::new_with_client(client)
     }
 
+    /// Returns the address of the node's gRPC server (if enabled).
+    pub fn grpc_addr(&self) -> Option<&SocketAddr> {
+        self.node.grpc().map(|h| h.addr())
+    }
+
     /// Migrates the `spawn-and-move` example contracts from the dojo repository.
     ///
     /// This method requires `git`, `asdf`, and `sozo` to be available in PATH.
@@ -215,6 +228,10 @@
     /// Clones the dojo repository, builds contracts with `scarb`, and deploys
     /// them with `sozo migrate`.
     ///
+    /// The dojo repository is cached at a fixed path to avoid recompiling for
+    /// each test run. The clone and build steps are skipped if the build
+    /// artifacts already exist.
+    ///
     /// This method requires `git`, `asdf`, and `sozo` to be available in PATH.
     /// The scarb version is managed by asdf using the `.tool-versions` file
     /// in the dojo repository.
@@ -232,18 +249,35 @@
         let address_hex = address.to_string();
         let private_key_hex = format!("{private_key:#x}");
 
-        let example_path = format!("dojo/examples/{example}");
+        let example = example.to_string();
 
         tokio::task::spawn_blocking(move || {
-            let temp_dir = tempfile::tempdir()?;
+            let cache_dir = PathBuf::from(DOJO_CACHE_DIR);
+            let project_dir = cache_dir.join(format!("dojo/examples/{example}"));
 
-            // Clone dojo repository at v1.7.0
-            run_git_clone(temp_dir.path())?;
+            // Create cache directory if it doesn't exist
+            std::fs::create_dir_all(&cache_dir)?;
 
-            let project_dir = temp_dir.path().join(&example_path);
+            // Use file-based lock for cross-process synchronization
+            let lock_file = File::create(cache_dir.join(".build.lock"))?;
+            lock_file.lock_exclusive()?;
 
-            // Build contracts using asdf to ensure correct scarb version
-            run_scarb_build(&project_dir)?;
+            // Double-check after acquiring lock - another process may have
+            // completed the build while we were waiting
+            if !project_dir.join("target").exists() {
+                // Clone dojo repository at v1.7.0 (remove existing if present)
+                let dojo_dir = cache_dir.join("dojo");
+                if dojo_dir.exists() {
+                    std::fs::remove_dir_all(&dojo_dir)?;
+                }
+                run_git_clone(&cache_dir)?;
+
+                // Build contracts using asdf to ensure correct scarb version
+                run_scarb_build(&project_dir)?;
+            }
+
+            // Release lock - this happens automatically when lock_file is dropped
+            drop(lock_file);
 
             // Deploy contracts to the katana node
             run_sozo_migrate(&project_dir, &rpc_url, &address_hex, &private_key_hex)?;
@@ -269,8 +303,8 @@ fn run_git_clone(temp_dir: &Path) -> Result<(), MigrateError> {
 }
 
 fn run_scarb_build(project_dir: &Path) -> Result<(), MigrateError> {
-    let output = Command::new("asdf")
-        .args(["exec", "scarb", "build"])
+    let output = Command::new("scarb")
+        .arg("build")
         .current_dir(project_dir)
         .output()
         .map_err(|e| MigrateError::ScarbBuild(e.to_string()))?;
@@ -295,7 +329,7 @@ fn run_sozo_migrate(
     address: &str,
     private_key: &str,
 ) -> Result<(), MigrateError> {
-    let output = Command::new("sozo")
+    let status = Command::new("sozo")
         .args([
             "migrate",
             "--rpc-url",
@@ -306,30 +340,28 @@ fn run_sozo_migrate(
             private_key,
         ])
         .current_dir(project_dir)
-        .output()
+        .stdout(std::process::Stdio::inherit())
+        .stderr(std::process::Stdio::inherit())
+        .status()
         .map_err(|e| MigrateError::SozoMigrate(e.to_string()))?;
 
-    if !output.status.success() {
-        let stdout = String::from_utf8_lossy(&output.stdout);
-        let stderr = String::from_utf8_lossy(&output.stderr);
-        let combined = format!("{stdout}\n{stderr}");
-
-        let lines: Vec<&str> = combined.lines().collect();
-        let last_50: String =
-            lines.iter().rev().take(50).rev().cloned().collect::<Vec<&str>>().join("\n");
-
-        eprintln!("sozo migrate failed. Last 50 lines of output:\n{last_50}");
-
-        return Err(MigrateError::SozoMigrate(last_50));
+    if !status.success() {
+        return Err(MigrateError::SozoMigrate(format!(
+            "sozo migrate exited with status: {status}"
+        )));
     }
 
     Ok(())
 }
 
 /// Copies all files from `src` to `dst` (flat copy, no subdirectories).
+///
+/// The MDBX lock file (`mdbx.lck`) is intentionally skipped because it contains
+/// platform-specific data (pthread mutexes, process IDs) that is not portable across
+/// systems. MDBX creates a fresh lock file when the database is opened.
 fn copy_db_dir(src: &Path, dst: &Path) -> std::io::Result<()> {
     for entry in std::fs::read_dir(src)? {
         let entry = entry?;
-        if entry.file_type()?.is_file() {
+        if entry.file_type()?.is_file() && entry.file_name() != "mdbx.lck" {
             std::fs::copy(entry.path(), dst.join(entry.file_name()))?;
         }
     }
@@ -355,5 +387,11 @@ pub fn test_config() -> Config {
         ..Default::default()
     };
 
-    Config { sequencing, rpc, dev, chain: ChainSpec::Dev(chain).into(), ..Default::default() }
+    let grpc = Some(GrpcConfig {
+        addr: DEFAULT_GRPC_ADDR,
+        port: 0, // Use port 0 for auto-assignment
+        timeout: Some(Duration::from_secs(30)),
+    });
+
+    Config { sequencing, rpc, dev, chain: ChainSpec::Dev(chain).into(), grpc, ..Default::default() }
 }
diff --git a/tests/fixtures/db/simple.tar.gz b/tests/fixtures/db/simple.tar.gz
deleted file mode 100644
index e7913c5f2..000000000
Binary files a/tests/fixtures/db/simple.tar.gz and /dev/null differ
diff --git a/tests/fixtures/db/spawn_and_move.tar.gz b/tests/fixtures/db/spawn_and_move.tar.gz
deleted file mode 100644
index 2ebd22d88..000000000
Binary files a/tests/fixtures/db/spawn_and_move.tar.gz and /dev/null differ
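For reference, the cross-process locking pattern introduced in `crates/utils/src/node.rs`, reduced to a standalone sketch (assuming fs2 0.4; `guarded_build` is a hypothetical name, not part of this patch):

    use std::fs::File;
    use std::path::Path;

    use fs2::FileExt;

    fn guarded_build(cache_dir: &Path) -> std::io::Result<()> {
        // An advisory file lock serializes concurrent test processes.
        let lock_file = File::create(cache_dir.join(".build.lock"))?;
        lock_file.lock_exclusive()?; // blocks until the lock is free
        // ... clone + scarb build would run here at most once per cache ...
        drop(lock_file); // dropping the handle releases the lock
        Ok(())
    }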