From 4e8ba9b8f80cf1fa4baabdcb0a4a9e474ec9bc50 Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Wed, 29 Oct 2025 17:43:01 -0400 Subject: [PATCH 01/26] wip --- Cargo.lock | 4 + crates/node/Cargo.toml | 2 + crates/node/src/lib.rs | 5 +- crates/node/src/optimistic/config.rs | 28 + crates/node/src/optimistic/executor.rs | 230 ++++++ crates/node/src/optimistic/mod.rs | 263 +++++++ crates/node/src/optimistic/pool.rs | 77 ++ crates/oracle/gas/src/fixed.rs | 2 +- crates/oracle/gas/src/lib.rs | 2 +- crates/oracle/gas/src/sampled/mod.rs | 8 +- crates/pool/pool-api/Cargo.toml | 2 + crates/pool/pool-api/src/tx.rs | 77 ++ crates/rpc/rpc-server/src/starknet/mod.rs | 64 +- crates/rpc/rpc-server/src/starknet/pending.rs | 123 +++- crates/rpc/rpc-server/src/starknet/trace.rs | 4 +- crates/storage/provider/provider/Cargo.toml | 8 +- .../provider/src/providers/db/cached.rs | 686 ++++++++++++++++++ .../provider/provider/src/providers/db/mod.rs | 1 + .../provider/src/providers/fork/mod.rs | 13 +- .../provider/src/providers/fork/state.rs | 16 +- .../provider/provider/src/providers/mod.rs | 1 - 21 files changed, 1568 insertions(+), 48 deletions(-) create mode 100644 crates/node/src/optimistic/config.rs create mode 100644 crates/node/src/optimistic/executor.rs create mode 100644 crates/node/src/optimistic/mod.rs create mode 100644 crates/node/src/optimistic/pool.rs create mode 100644 crates/storage/provider/provider/src/providers/db/cached.rs diff --git a/Cargo.lock b/Cargo.lock index 4ba3a3587..5374fe15d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6309,11 +6309,13 @@ dependencies = [ "katana-metrics", "katana-pipeline", "katana-pool", + "katana-pool-api", "katana-primitives", "katana-provider", "katana-rpc-api", "katana-rpc-client", "katana-rpc-server", + "katana-rpc-types", "katana-stage", "katana-starknet", "katana-tasks", @@ -6391,7 +6393,9 @@ dependencies = [ "futures", "futures-util", "katana-primitives", + "katana-rpc-types", "parking_lot", + "starknet-types-core", "thiserror 
1.0.69", "tokio", ] diff --git a/crates/node/Cargo.toml b/crates/node/Cargo.toml index 9394eb7a0..e0a90daa5 100644 --- a/crates/node/Cargo.toml +++ b/crates/node/Cargo.toml @@ -17,11 +17,13 @@ katana-messaging.workspace = true katana-metrics.workspace = true katana-pipeline.workspace = true katana-pool.workspace = true +katana-pool-api.workspace = true katana-primitives.workspace = true katana-provider.workspace = true katana-rpc-server = { workspace = true } katana-rpc-api.workspace = true katana-rpc-client.workspace = true +katana-rpc-types.workspace = true katana-stage.workspace = true katana-tasks.workspace = true katana-tracing.workspace = true diff --git a/crates/node/src/lib.rs b/crates/node/src/lib.rs index 8161f552f..12c202835 100644 --- a/crates/node/src/lib.rs +++ b/crates/node/src/lib.rs @@ -1,6 +1,7 @@ // #![cfg_attr(not(test), warn(unused_crate_dependencies))] pub mod full; +pub mod optimistic; pub mod config; pub mod exit; @@ -191,7 +192,7 @@ impl Node { let block_context_generator = BlockContextGenerator::default().into(); let backend = Arc::new(Backend { gas_oracle, - blockchain, + blockchain: blockchain.clone(), executor_factory, block_context_generator, chain_spec: config.chain.clone(), @@ -268,6 +269,7 @@ impl Node { task_spawner.clone(), starknet_api_cfg, block_producer.clone(), + blockchain, ) } else { StarknetApi::new( @@ -276,6 +278,7 @@ impl Node { task_spawner.clone(), starknet_api_cfg, block_producer.clone(), + blockchain, ) }; diff --git a/crates/node/src/optimistic/config.rs b/crates/node/src/optimistic/config.rs new file mode 100644 index 000000000..2f0925040 --- /dev/null +++ b/crates/node/src/optimistic/config.rs @@ -0,0 +1,28 @@ +use std::sync::Arc; + +use katana_chain_spec::ChainSpec; + +#[cfg(feature = "cartridge")] +use crate::config::paymaster; +use crate::config::{db::DbConfig, fork::ForkingConfig, metrics::MetricsConfig, rpc::RpcConfig}; + +/// Node configurations. 
+/// +/// List of all possible options that can be used to configure a node. +#[derive(Debug, Clone, Default, PartialEq, Eq)] +pub struct Config { + /// The chain specification. + pub chain: Arc, + + /// Database options. + pub db: DbConfig, + + /// Forking options. + pub forking: ForkingConfig, + + /// Rpc options. + pub rpc: RpcConfig, + + /// Metrics options. + pub metrics: Option, +} diff --git a/crates/node/src/optimistic/executor.rs b/crates/node/src/optimistic/executor.rs new file mode 100644 index 000000000..059000d66 --- /dev/null +++ b/crates/node/src/optimistic/executor.rs @@ -0,0 +1,230 @@ +use std::future::Future; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; + +use futures::stream::StreamExt; +use katana_core::backend::Backend; +use katana_executor::implementation::blockifier::BlockifierFactory; +use katana_executor::{ExecutionResult, ExecutorFactory}; +use katana_pool::{PendingTransactions, PoolOrd, PoolTransaction, TransactionPool, TxPool}; +use katana_primitives::transaction::ExecutableTxWithHash; +use katana_provider::api::state::StateFactoryProvider; +use katana_tasks::{JoinHandle, TaskSpawner}; +use tracing::{debug, error, info, trace, warn}; + +const LOG_TARGET: &str = "optimistic_executor"; + +/// The `OptimisticExecutor` is an actor-based component that listens to incoming transactions +/// from the pool and executes them optimistically as they arrive. +/// +/// This component subscribes to the pool's pending transaction stream and processes each +/// transaction as soon as it's available, without waiting for block production. +#[allow(missing_debug_implementations)] +pub struct OptimisticExecutor { + /// The transaction pool to subscribe to + pool: TxPool, + /// The backend containing the executor factory and blockchain state + backend: Arc>, + /// Task spawner for running the executor actor + task_spawner: TaskSpawner, +} + +impl OptimisticExecutor { + /// Creates a new `OptimisticExecutor` instance. 
+ /// + /// # Arguments + /// + /// * `pool` - The transaction pool to monitor for new transactions + /// * `backend` - The backend containing the executor factory and blockchain state + /// * `task_spawner` - The task spawner used to run the executor actor + pub fn new( + pool: TxPool, + backend: Arc>, + task_spawner: TaskSpawner, + ) -> Self { + Self { pool, backend, task_spawner } + } + + /// Spawns the optimistic executor actor task. + /// + /// This method creates a subscription to the pool's pending transactions and spawns + /// an async task that continuously processes incoming transactions. + /// + /// # Returns + /// + /// A `JoinHandle` to the spawned executor task. + pub fn spawn(self) -> JoinHandle<()> { + info!(target: LOG_TARGET, "Starting optimistic executor"); + + let pending_txs = self.pool.pending_transactions(); + let actor = OptimisticExecutorActor::new(pending_txs, self.backend); + + self.task_spawner.build_task().name("Optimistic Executor").spawn(actor) + } +} + +/// The internal actor that processes transactions from the pending transactions stream. +#[allow(missing_debug_implementations)] +struct OptimisticExecutorActor +where + O: PoolOrd, +{ + /// Stream of pending transactions from the pool + pending_txs: PendingTransactions, + /// The backend for executing transactions + backend: Arc>, +} + +impl OptimisticExecutorActor +where + O: PoolOrd, +{ + /// Creates a new executor actor with the given pending transactions stream. + fn new( + pending_txs: PendingTransactions, + backend: Arc>, + ) -> Self { + Self { pending_txs, backend } + } + + /// Execute a single transaction optimistically against the latest state. 
+ fn execute_transaction(&self, tx: ExecutableTxWithHash) -> Result { + let provider = self.backend.blockchain.provider(); + + // Get the latest state to execute against + let latest_state = + provider.latest().map_err(|e| format!("Failed to get latest state: {e}"))?; + + // Create an executor with the latest state + let mut executor = self.backend.executor_factory.with_state(latest_state); + + // Execute the transaction + let result = executor.execute_transactions(vec![tx.clone()]); + + match result { + Ok((executed_count, limit_error)) => { + if executed_count == 0 { + return Err("Transaction was not executed".to_string()); + } + + // Get the execution result from the executor + let transactions = executor.transactions(); + if let Some((_, exec_result)) = transactions.last() { + if let Some(err) = limit_error { + warn!( + target: LOG_TARGET, + tx_hash = format!("{:#x}", tx.hash), + error = %err, + "Transaction execution hit limits" + ); + } + Ok(exec_result.clone()) + } else { + Err("No execution result found".to_string()) + } + } + Err(e) => Err(format!("Execution failed: {e}")), + } + } +} + +impl Future for OptimisticExecutorActor +where + O: PoolOrd, +{ + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.get_mut(); + + // Drain all available transactions from the stream until it's exhausted (Poll::Pending) + // or the stream ends (Poll::Ready(None)). + // + // This ensures we process all pending transactions in a batch before yielding control + // back to the executor, which is more efficient than processing one transaction at a + // time. 
+ loop { + match this.pending_txs.poll_next_unpin(cx) { + Poll::Ready(Some(pending_tx)) => { + let tx = pending_tx.tx.as_ref().clone(); + + let tx_hash = tx.hash; + let tx_sender = tx.sender(); + let tx_nonce = tx.nonce(); + + trace!( + target: LOG_TARGET, + tx_hash = format!("{:#x}", tx_hash), + sender = %tx_sender, + nonce = %tx_nonce, + "Received transaction from pool" + ); + + debug!( + target: LOG_TARGET, + tx_hash = format!("{:#x}", tx_hash), + "Executing transaction optimistically" + ); + + // Execute the transaction optimistically + match this.execute_transaction(tx) { + Ok(ExecutionResult::Success { receipt, .. }) => { + if let Some(reason) = receipt.revert_reason() { + warn!( + target: LOG_TARGET, + tx_hash = format!("{:#x}", tx_hash), + reason = %reason, + "Transaction reverted" + ); + } else { + debug!( + target: LOG_TARGET, + tx_hash = format!("{:#x}", tx_hash), + l1_gas = receipt.resources_used().gas.l1_gas, + cairo_steps = receipt.resources_used().computation_resources.n_steps, + "Transaction executed successfully" + ); + } + } + Ok(ExecutionResult::Failed { error }) => { + error!( + target: LOG_TARGET, + tx_hash = format!("{:#x}", tx_hash), + error = %error, + "Transaction execution failed" + ); + } + Err(e) => { + error!( + target: LOG_TARGET, + tx_hash = format!("{:#x}", tx_hash), + error = %e, + "Error executing transaction" + ); + } + } + + // Continue the loop to process the next transaction + continue; + } + + Poll::Ready(None) => { + // Stream has ended (pool was dropped) + info!(target: LOG_TARGET, "Transaction stream ended"); + return Poll::Ready(()); + } + + Poll::Pending => { + // Stream is exhausted - no more transactions available right now. + // Yield control back to the executor until we're polled again. + return Poll::Pending; + } + } + } + } +} + +// Tests are intentionally omitted as they would require a full backend setup with +// blockchain state. 
Integration tests should be written separately to properly test +// the optimistic executor with a real backend instance. diff --git a/crates/node/src/optimistic/mod.rs b/crates/node/src/optimistic/mod.rs new file mode 100644 index 000000000..941697ed6 --- /dev/null +++ b/crates/node/src/optimistic/mod.rs @@ -0,0 +1,263 @@ +use std::future::IntoFuture; +use std::sync::Arc; + +use anyhow::{Context, Result}; +use config::rpc::RpcModuleKind; +use config::Config; +use http::header::CONTENT_TYPE; +use http::Method; +use jsonrpsee::http_client::HttpClientBuilder; +use jsonrpsee::RpcModule; +use katana_chain_spec::{ChainSpec, SettlementLayer}; +use katana_core::backend::storage::Blockchain; +use katana_core::backend::Backend; +use katana_core::env::BlockContextGenerator; +use katana_core::service::block_producer::BlockProducer; +use katana_db::Db; +use katana_executor::implementation::blockifier::cache::ClassCache; +use katana_executor::implementation::blockifier::BlockifierFactory; +use katana_executor::ExecutionFlags; +use katana_gas_price_oracle::{FixedPriceOracle, GasPriceOracle}; +use katana_gateway_server::{GatewayServer, GatewayServerHandle}; +use katana_metrics::exporters::prometheus::PrometheusRecorder; +use katana_metrics::sys::DiskReporter; +use katana_metrics::{Report, Server as MetricsServer}; +use katana_pool::ordering::FiFo; +use katana_primitives::env::{CfgEnv, FeeTokenAddressses}; +#[cfg(feature = "cartridge")] +use katana_rpc::cartridge::CartridgeApi; +use katana_rpc::cors::Cors; +use katana_rpc::dev::DevApi; +use katana_rpc::starknet::forking::ForkedClient; +#[cfg(feature = "cartridge")] +use katana_rpc::starknet::PaymasterConfig; +use katana_rpc::starknet::{StarknetApi, StarknetApiConfig}; +use katana_rpc::{RpcServer, RpcServerHandle}; +#[cfg(feature = "cartridge")] +use katana_rpc_api::cartridge::CartridgeApiServer; +use katana_rpc_api::dev::DevApiServer; +use katana_rpc_api::starknet::{StarknetApiServer, StarknetTraceApiServer, 
StarknetWriteApiServer}; +#[cfg(feature = "explorer")] +use katana_rpc_api::starknet_ext::StarknetApiExtServer; +use katana_stage::Sequencing; +use katana_tasks::TaskManager; +use tracing::info; + +mod config; +mod executor; +mod pool; + +use crate::exit::NodeStoppedFuture; +use crate::optimistic::pool::{PoolValidator, TxPool}; +use config::Config; + +#[derive(Debug)] +pub struct Node { + config: Arc, + pool: TxPool, + db: katana_db::Db, + rpc_server: RpcServer, + task_manager: TaskManager, + backend: Arc>, +} + +impl Node { + pub async fn build(config: Config) -> Result { + let mut config = config; + + if config.metrics.is_some() { + // Metrics recorder must be initialized before calling any of the metrics macros, in + // order for it to be registered. + let _ = PrometheusRecorder::install("katana")?; + } + + // -- build task manager + + let task_manager = TaskManager::current(); + let task_spawner = task_manager.task_spawner(); + + // --- build executor factory + + let fee_token_addresses = match config.chain.as_ref() { + ChainSpec::Dev(cs) => { + FeeTokenAddressses { eth: cs.fee_contracts.eth, strk: cs.fee_contracts.strk } + } + ChainSpec::Rollup(cs) => { + FeeTokenAddressses { eth: cs.fee_contract.strk, strk: cs.fee_contract.strk } + } + }; + + let cfg_env = CfgEnv { + fee_token_addresses, + chain_id: config.chain.id(), + invoke_tx_max_n_steps: config.execution.invocation_max_steps, + validate_max_n_steps: config.execution.validation_max_steps, + max_recursion_depth: config.execution.max_recursion_depth, + }; + + let executor_factory = { + #[allow(unused_mut)] + let mut class_cache = ClassCache::builder(); + + #[cfg(feature = "native")] + { + info!(enabled = config.execution.compile_native, "Cairo native compilation"); + class_cache = class_cache.compile_native(config.execution.compile_native); + } + + let global_class_cache = class_cache.build_global()?; + + let factory = BlockifierFactory::new( + cfg_env, + ExecutionFlags::new(), + 
config.sequencing.block_limits(), + global_class_cache, + ); + + Arc::new(factory) + }; + + // --- build backend + + let chain_spec = Arc::get_mut(&mut config.chain).expect("get mut Arc"); + let ChainSpec::Dev(chain_spec) = chain_spec else { + return Err(anyhow::anyhow!("Forking is only supported in dev mode for now")); + }; + + let db = katana_db::Db::in_memory()?; + let (blockchain, block_num) = Blockchain::new_from_forked( + db.clone(), + config.forking.url.clone(), + config.forking.block, + chain_spec, + ) + .await?; + + // TODO: it'd bee nice if the client can be shared on both the rpc and forked backend + // side + let rpc_client = HttpClientBuilder::new().build(config.forking.url.as_ref())?; + let forked_client = ForkedClient::new(rpc_client, block_num); + + let gpo = GasPriceOracle::sampled_starknet(config.forking.url.clone()); + + let block_context_generator = BlockContextGenerator::default().into(); + let backend = Arc::new(Backend { + gas_oracle: gpo.clone(), + blockchain: blockchain.clone(), + executor_factory, + block_context_generator, + chain_spec: config.chain.clone(), + }); + + // --- build transaction pool + + let http_client = HttpClientBuilder::new().build(config.forking.url.as_str())?; + let starknet_client = katana_rpc_client::starknet::Client::new(http_client); + + let pool_validator = PoolValidator::new(starknet_client.clone()); + let pool = TxPool::new(pool_validator, FiFo::new()); + + // --- build rpc server + + let mut rpc_modules = RpcModule::new(()); + + let cors = Cors::new() + .allow_origins(config.rpc.cors_origins.clone()) + // Allow `POST` when accessing the resource + .allow_methods([Method::POST, Method::GET]) + .allow_headers([CONTENT_TYPE, "argent-client".parse().unwrap(), "argent-version".parse().unwrap()]); + + // --- build starknet api + + let starknet_api_cfg = StarknetApiConfig { + max_event_page_size: config.rpc.max_event_page_size, + max_proof_keys: config.rpc.max_proof_keys, + max_call_gas: config.rpc.max_call_gas, + 
max_concurrent_estimate_fee_requests: config.rpc.max_concurrent_estimate_fee_requests, + #[cfg(feature = "cartridge")] + paymaster: None, + }; + + let starknet_api = StarknetApi::new_forked( + backend.clone(), + pool.clone(), + forked_client, + task_spawner.clone(), + starknet_api_cfg, + starknet_client.clone(), + blockchain, + ); + + if config.rpc.apis.contains(&RpcModuleKind::Starknet) { + rpc_modules.merge(StarknetApiServer::into_rpc(starknet_api.clone()))?; + rpc_modules.merge(StarknetWriteApiServer::into_rpc(starknet_api.clone()))?; + rpc_modules.merge(StarknetTraceApiServer::into_rpc(starknet_api.clone()))?; + } + + #[allow(unused_mut)] + let mut rpc_server = + RpcServer::new().metrics(true).health_check(true).cors(cors).module(rpc_modules)?; + + if let Some(timeout) = config.rpc.timeout { + rpc_server = rpc_server.timeout(timeout); + }; + + if let Some(max_connections) = config.rpc.max_connections { + rpc_server = rpc_server.max_connections(max_connections); + } + + if let Some(max_request_body_size) = config.rpc.max_request_body_size { + rpc_server = rpc_server.max_request_body_size(max_request_body_size); + } + + if let Some(max_response_body_size) = config.rpc.max_response_body_size { + rpc_server = rpc_server.max_response_body_size(max_response_body_size); + } + + Ok(Node { db, pool, backend, rpc_server, config: config.into(), task_manager }) + } + + pub async fn launch(self) -> Result { + let chain = self.backend.chain_spec.id(); + info!(%chain, "Starting node."); + + // TODO: maybe move this to the build stage + if let Some(ref cfg) = self.config.metrics { + let db_metrics = Box::new(self.db.clone()) as Box; + let disk_metrics = Box::new(DiskReporter::new(self.db.path())?) 
as Box; + let reports: Vec> = vec![db_metrics, disk_metrics]; + + let exporter = PrometheusRecorder::current().expect("qed; should exist at this point"); + let server = MetricsServer::new(exporter).with_process_metrics().with_reports(reports); + + let addr = cfg.socket_addr(); + self.task_manager.task_spawner().build_task().spawn(server.start(addr)); + info!(%addr, "Metrics server started."); + } + + // --- start the rpc server + + let rpc_handle = self.rpc_server.start(self.config.rpc.socket_addr()).await?; + + // --- start the gas oracle worker task + + if let Some(worker) = self.backend.gas_oracle.run_worker() { + self.task_manager + .task_spawner() + .build_task() + .graceful_shutdown() + .name("gas oracle") + .spawn(worker); + } + + info!(target: "node", "Gas price oracle worker started."); + + Ok(LaunchedNode { node: self, rpc: rpc_handle }) + } +} + +#[derive(Debug)] +pub struct LaunchedNode { + node: Node, + rpc: RpcServerHandle, +} diff --git a/crates/node/src/optimistic/pool.rs b/crates/node/src/optimistic/pool.rs new file mode 100644 index 000000000..4733172cd --- /dev/null +++ b/crates/node/src/optimistic/pool.rs @@ -0,0 +1,77 @@ +use std::sync::Arc; + +use katana_pool::ordering::FiFo; +use katana_pool::pool::Pool; +use katana_pool_api::validation::{ + Error as ValidationError, InvalidTransactionError, ValidationOutcome, Validator, +}; +use katana_primitives::utils::get_contract_address; +use katana_rpc_client::starknet::Client; +use katana_rpc_types::BroadcastedTx; + +pub type TxPool = Pool>; + +/// A validator that forwards transactions to a remote Starknet RPC endpoint. 
+#[derive(Debug, Clone)] +pub struct PoolValidator { + client: Arc, +} + +impl PoolValidator { + pub fn new(client: Client) -> Self { + Self { client: Arc::new(client) } + } + + pub fn new_shared(client: Arc) -> Self { + Self { client } + } +} + +impl Validator for PoolValidator { + type Transaction = BroadcastedTx; + + async fn validate( + &self, + tx: Self::Transaction, + ) -> Result, ValidationError> { + // Forward the transaction to the remote node + let result = match &tx { + BroadcastedTx::Invoke(invoke_tx) => { + self.client.add_invoke_transaction(invoke_tx.clone()).await.map(|_| ()) + } + BroadcastedTx::Declare(declare_tx) => { + self.client.add_declare_transaction(declare_tx.clone()).await.map(|_| ()) + } + BroadcastedTx::DeployAccount(deploy_account_tx) => self + .client + .add_deploy_account_transaction(deploy_account_tx.clone()) + .await + .map(|_| ()), + }; + + match result { + Ok(_) => Ok(ValidationOutcome::Valid(tx)), + Err(err) => { + // For client-based validation, any error from the remote node + // indicates the transaction is invalid + let error = InvalidTransactionError::ValidationFailure { + address: match &tx { + BroadcastedTx::Invoke(tx) => tx.sender_address, + BroadcastedTx::Declare(tx) => tx.sender_address, + BroadcastedTx::DeployAccount(tx) => get_contract_address( + tx.contract_address_salt, + tx.class_hash, + &tx.constructor_calldata, + katana_primitives::Felt::ZERO, + ) + .into(), + }, + class_hash: Default::default(), + error: err.to_string(), + }; + + Ok(ValidationOutcome::Invalid { tx, error }) + } + } + } +} diff --git a/crates/oracle/gas/src/fixed.rs b/crates/oracle/gas/src/fixed.rs index 8d5bf7b31..d3f126c01 100644 --- a/crates/oracle/gas/src/fixed.rs +++ b/crates/oracle/gas/src/fixed.rs @@ -3,7 +3,7 @@ use std::num::NonZeroU128; use katana_primitives::block::{GasPrice, GasPrices}; -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct FixedPriceOracle { l2_gas_prices: GasPrices, l1_gas_prices: GasPrices, diff --git 
a/crates/oracle/gas/src/lib.rs b/crates/oracle/gas/src/lib.rs index 8555d9bf0..1c034736f 100644 --- a/crates/oracle/gas/src/lib.rs +++ b/crates/oracle/gas/src/lib.rs @@ -17,7 +17,7 @@ pub use sampled::{SampledPriceOracle, Sampler}; use crate::sampled::starknet::{StarknetGatewaySampler, StarknetJsonRpcSampler}; -#[derive(Debug)] +#[derive(Debug, Clone)] pub enum GasPriceOracle { Fixed(fixed::FixedPriceOracle), Sampled(sampled::SampledPriceOracle>), diff --git a/crates/oracle/gas/src/sampled/mod.rs b/crates/oracle/gas/src/sampled/mod.rs index 4b0f0a1e6..6a0095cbf 100644 --- a/crates/oracle/gas/src/sampled/mod.rs +++ b/crates/oracle/gas/src/sampled/mod.rs @@ -23,11 +23,17 @@ pub trait Sampler: Debug + Send + Sync { fn sample(&self) -> BoxFuture<'_, anyhow::Result>; } -#[derive(Debug, Clone)] +#[derive(Debug)] pub struct SampledPriceOracle { inner: Arc>, } +impl Clone for SampledPriceOracle { + fn clone(&self) -> Self { + Self { inner: Arc::clone(&self.inner) } + } +} + #[derive(Debug)] struct SampledPriceOracleInner { samples: Mutex, diff --git a/crates/pool/pool-api/Cargo.toml b/crates/pool/pool-api/Cargo.toml index a82ab82fc..7d80a9815 100644 --- a/crates/pool/pool-api/Cargo.toml +++ b/crates/pool/pool-api/Cargo.toml @@ -6,9 +6,11 @@ version.workspace = true [dependencies] katana-primitives.workspace = true +katana-rpc-types.workspace = true futures.workspace = true parking_lot.workspace = true +starknet-types-core.workspace = true thiserror.workspace = true tokio = { workspace = true, features = [ "sync" ] } diff --git a/crates/pool/pool-api/src/tx.rs b/crates/pool/pool-api/src/tx.rs index b0ece6bae..0d60a2d82 100644 --- a/crates/pool/pool-api/src/tx.rs +++ b/crates/pool/pool-api/src/tx.rs @@ -6,6 +6,9 @@ use katana_primitives::contract::{ContractAddress, Nonce}; use katana_primitives::transaction::{ DeclareTx, DeployAccountTx, ExecutableTx, ExecutableTxWithHash, InvokeTx, TxHash, }; +use katana_primitives::utils::get_contract_address; +use katana_primitives::Felt; 
+use katana_rpc_types::broadcasted::BroadcastedTx; use crate::ordering::PoolOrd; use crate::PoolTransaction; @@ -183,3 +186,77 @@ impl PoolTransaction for ExecutableTxWithHash { } } } + +impl PoolTransaction for BroadcastedTx { + fn hash(&self) -> TxHash { + // BroadcastedTx doesn't have a precomputed hash, so we compute a deterministic + // hash from the transaction content for pool identification purposes. + use starknet_types_core::hash::{Poseidon, StarkHash}; + + match self { + BroadcastedTx::Invoke(tx) => { + // Hash based on sender, nonce, and calldata + let mut data = vec![tx.sender_address.into(), tx.nonce]; + data.extend_from_slice(&tx.calldata); + Poseidon::hash_array(&data) + } + BroadcastedTx::Declare(tx) => { + // Hash based on sender, nonce, and compiled class hash + let data = [tx.sender_address.into(), tx.nonce, tx.compiled_class_hash.into()]; + Poseidon::hash_array(&data) + } + BroadcastedTx::DeployAccount(tx) => { + // Hash based on computed contract address, nonce, and class hash + let contract_address = get_contract_address( + tx.contract_address_salt, + tx.class_hash, + &tx.constructor_calldata, + Felt::ZERO, + ); + let data = [contract_address, tx.nonce, tx.class_hash.into()]; + Poseidon::hash_array(&data) + } + } + } + + fn nonce(&self) -> Nonce { + match self { + BroadcastedTx::Invoke(tx) => tx.nonce, + BroadcastedTx::Declare(tx) => tx.nonce, + BroadcastedTx::DeployAccount(tx) => tx.nonce, + } + } + + fn sender(&self) -> ContractAddress { + match self { + BroadcastedTx::Invoke(tx) => tx.sender_address, + BroadcastedTx::Declare(tx) => tx.sender_address, + BroadcastedTx::DeployAccount(tx) => { + // Compute the contract address for deploy account transactions + get_contract_address( + tx.contract_address_salt, + tx.class_hash, + &tx.constructor_calldata, + Felt::ZERO, + ) + .into() + } + } + } + + fn max_fee(&self) -> u128 { + // BroadcastedTx only supports V3 transactions which use resource bounds instead of max_fee. 
+ // For V3 transactions, we can derive an equivalent max fee from resource bounds, + // but for simplicity in the pool, we return 0. + // The actual fee validation happens on the remote node. + 0 + } + + fn tip(&self) -> u64 { + match self { + BroadcastedTx::Invoke(tx) => tx.tip.into(), + BroadcastedTx::Declare(tx) => tx.tip.into(), + BroadcastedTx::DeployAccount(tx) => tx.tip.into(), + } + } +} diff --git a/crates/rpc/rpc-server/src/starknet/mod.rs b/crates/rpc/rpc-server/src/starknet/mod.rs index a18776d0b..c0dcda65a 100644 --- a/crates/rpc/rpc-server/src/starknet/mod.rs +++ b/crates/rpc/rpc-server/src/starknet/mod.rs @@ -4,6 +4,7 @@ use std::fmt::Debug; use std::future::Future; use std::sync::Arc; +use katana_core::backend::storage::Database; use katana_core::backend::Backend; use katana_executor::ExecutorFactory; use katana_pool::TransactionPool; @@ -12,7 +13,7 @@ use katana_primitives::class::{ClassHash, CompiledClass}; use katana_primitives::contract::{ContractAddress, Nonce, StorageKey, StorageValue}; use katana_primitives::env::BlockEnv; use katana_primitives::event::MaybeForkedContinuationToken; -use katana_primitives::transaction::{ExecutableTxWithHash, TxHash, TxNumber}; +use katana_primitives::transaction::{ExecutableTxWithHash, TxHash, TxNumber, TxWithHash}; use katana_primitives::Felt; use katana_provider::api::block::{BlockHashProvider, BlockIdReader, BlockNumberProvider}; use katana_provider::api::contract::ContractClassProvider; @@ -22,6 +23,7 @@ use katana_provider::api::transaction::{ ReceiptProvider, TransactionProvider, TransactionStatusProvider, TransactionsProviderExt, }; use katana_provider::api::ProviderError; +use katana_provider::BlockchainProvider; use katana_rpc_api::error::starknet::{ CompilationErrorData, PageSizeTooBigData, ProofLimitExceededData, StarknetApiError, }; @@ -92,6 +94,7 @@ where { pool: Pool, backend: Arc>, + storage_provider: BlockchainProvider>, forked_client: Option, task_spawner: TaskSpawner, estimate_fee_permit: 
Permits, @@ -138,8 +141,17 @@ where task_spawner: TaskSpawner, config: StarknetApiConfig, pending_block_provider: PP, + storage_provider: BlockchainProvider>, ) -> Self { - Self::new_inner(backend, pool, None, task_spawner, config, pending_block_provider) + Self::new_inner( + backend, + pool, + storage_provider, + None, + task_spawner, + config, + pending_block_provider, + ) } pub fn new_forked( @@ -149,10 +161,12 @@ where task_spawner: TaskSpawner, config: StarknetApiConfig, pending_block_provider: PP, + storage_provider: BlockchainProvider>, ) -> Self { Self::new_inner( backend, pool, + storage_provider, Some(forked_client), task_spawner, config, @@ -163,6 +177,7 @@ where fn new_inner( backend: Arc>, pool: Pool, + storage_provider: BlockchainProvider>, forked_client: Option, task_spawner: TaskSpawner, config: StarknetApiConfig, @@ -176,6 +191,7 @@ where let inner = StarknetApiInner { pool, backend, + storage_provider, task_spawner, forked_client, estimate_fee_permit, @@ -256,7 +272,7 @@ where } pub fn state(&self, block_id: &BlockIdOrTag) -> StarknetApiResult> { - let provider = self.inner.backend.blockchain.provider(); + let provider = &self.inner.storage_provider; let state = match block_id { BlockIdOrTag::PreConfirmed => { @@ -279,7 +295,7 @@ where } fn block_env_at(&self, block_id: &BlockIdOrTag) -> StarknetApiResult { - let provider = self.inner.backend.blockchain.provider(); + let provider = &self.inner.storage_provider; let env = match block_id { BlockIdOrTag::PreConfirmed => { @@ -325,7 +341,7 @@ where } fn block_hash_and_number(&self) -> StarknetApiResult { - let provider = self.inner.backend.blockchain.provider(); + let provider = &self.inner.storage_provider; let hash = provider.latest_hash()?; let number = provider.latest_number()?; Ok(BlockHashAndNumberResponse::new(hash, number)) @@ -422,7 +438,7 @@ where pub async fn block_tx_count(&self, block_id: BlockIdOrTag) -> StarknetApiResult { let count = self .on_io_blocking_task(move |this| { - let 
provider = this.inner.backend.blockchain.provider(); + let provider = &this.inner.storage_provider; let block_id: BlockHashOrNumber = match block_id { BlockIdOrTag::L1Accepted => return Ok(None), @@ -458,7 +474,7 @@ where async fn latest_block_number(&self) -> StarknetApiResult { self.on_io_blocking_task(move |this| { - let block_number = this.inner.backend.blockchain.provider().latest_number()?; + let block_number = this.inner.storage_provider.latest_number()?; Ok(BlockNumberResponse { block_number }) }) .await? @@ -498,7 +514,7 @@ where let tx = if BlockIdOrTag::PreConfirmed == block_id { this.inner.pending_block_provider.get_pending_transaction_by_index(index)? } else { - let provider = &this.inner.backend.blockchain.provider(); + let provider = &this.inner.storage_provider; let block_num = provider .convert_block_id(block_id)? @@ -533,9 +549,7 @@ where } else { let tx = this .inner - .backend - .blockchain - .provider() + .storage_provider .transaction_by_hash(hash)? .map(RpcTxWithHash::from); @@ -549,7 +563,9 @@ where } else if let Some(client) = &self.inner.forked_client { Ok(client.get_transaction_by_hash(hash).await?) } else { - Err(StarknetApiError::TxnHashNotFound) + let pool_tx = self.inner.pool.get(hash).ok_or(StarknetApiError::TxnHashNotFound)?; + let tx = TxWithHash::from(pool_tx.as_ref().clone()); + Ok(RpcTxWithHash::from(tx)) } } @@ -561,7 +577,7 @@ where { StarknetApiResult::Ok(pending_receipt) } else { - let provider = this.inner.backend.blockchain.provider(); + let provider = &this.inner.storage_provider; StarknetApiResult::Ok(ReceiptBuilder::new(hash, provider).build()?) 
} }) @@ -579,7 +595,7 @@ where async fn transaction_status(&self, hash: TxHash) -> StarknetApiResult { let status = self .on_io_blocking_task(move |this| { - let provider = this.inner.backend.blockchain.provider(); + let provider = &this.inner.storage_provider; let status = provider.transaction_status(hash)?; if let Some(status) = status { @@ -634,7 +650,7 @@ where ) -> StarknetApiResult { let block = self .on_io_blocking_task(move |this| { - let provider = this.inner.backend.blockchain.provider(); + let provider = &this.inner.storage_provider; if BlockIdOrTag::PreConfirmed == block_id { if let Some(block) = @@ -671,7 +687,7 @@ where ) -> StarknetApiResult { let block = self .on_io_blocking_task(move |this| { - let provider = this.inner.backend.blockchain.provider(); + let provider = &this.inner.storage_provider; if BlockIdOrTag::PreConfirmed == block_id { if let Some(block) = @@ -708,7 +724,7 @@ where ) -> StarknetApiResult { let block = self .on_io_blocking_task(move |this| { - let provider = this.inner.backend.blockchain.provider(); + let provider = &this.inner.storage_provider; if BlockIdOrTag::PreConfirmed == block_id { if let Some(block) = @@ -742,7 +758,7 @@ where pub async fn state_update(&self, block_id: BlockIdOrTag) -> StarknetApiResult { let state_update = self .on_io_blocking_task(move |this| { - let provider = this.inner.backend.blockchain.provider(); + let provider = &this.inner.storage_provider; let block_id = match block_id { BlockIdOrTag::Number(num) => BlockHashOrNumber::Num(num), @@ -839,7 +855,7 @@ where continuation_token: Option, chunk_size: u64, ) -> StarknetApiResult { - let provider = self.inner.backend.blockchain.provider(); + let provider = &self.inner.storage_provider; let from = self.resolve_event_block_id_if_forked(from_block)?; let to = self.resolve_event_block_id_if_forked(to_block)?; @@ -1056,7 +1072,7 @@ where &self, id: BlockIdOrTag, ) -> StarknetApiResult { - let provider = self.inner.backend.blockchain.provider(); + let 
provider = &self.inner.storage_provider; let id = match id { BlockIdOrTag::L1Accepted => EventBlockId::Pending, @@ -1096,7 +1112,7 @@ where contracts_storage_keys: Option>, ) -> StarknetApiResult { self.on_io_blocking_task(move |this| { - let provider = this.inner.backend.blockchain.provider(); + let provider = &this.inner.storage_provider; let Some(block_num) = provider.convert_block_id(block_id)? else { return Err(StarknetApiError::BlockNotFound); @@ -1192,7 +1208,7 @@ where { async fn blocks(&self, request: GetBlocksRequest) -> StarknetApiResult { self.on_io_blocking_task(move |this| { - let provider = this.inner.backend.blockchain.provider(); + let provider = &this.inner.storage_provider; // Parse continuation token to get starting point let start_from = if let Some(token_str) = request.result_page_request.continuation_token @@ -1268,7 +1284,7 @@ where request: GetTransactionsRequest, ) -> StarknetApiResult { self.on_io_blocking_task(move |this| { - let provider = this.inner.backend.blockchain.provider(); + let provider = &this.inner.storage_provider; // Resolve the starting point for this query. let start_from = if let Some(token_str) = request.result_page_request.continuation_token @@ -1338,7 +1354,7 @@ where async fn total_transactions(&self) -> StarknetApiResult { self.on_io_blocking_task(move |this| { - let provider = this.inner.backend.blockchain.provider(); + let provider = &this.inner.storage_provider; let total = provider.total_transactions()? 
as TxNumber; Ok(total) }) diff --git a/crates/rpc/rpc-server/src/starknet/pending.rs b/crates/rpc/rpc-server/src/starknet/pending.rs index 505c3add3..271e49a9b 100644 --- a/crates/rpc/rpc-server/src/starknet/pending.rs +++ b/crates/rpc/rpc-server/src/starknet/pending.rs @@ -2,7 +2,7 @@ use std::fmt::Debug; use katana_core::service::block_producer::{BlockProducer, BlockProducerMode}; use katana_executor::ExecutorFactory; -use katana_primitives::block::PartialHeader; +use katana_primitives::block::{BlockIdOrTag, PartialHeader}; use katana_primitives::da::L1DataAvailabilityMode; use katana_primitives::execution::TypedTransactionExecutionInfo; use katana_primitives::transaction::{TxHash, TxNumber}; @@ -272,3 +272,124 @@ impl PendingBlockProvider for BlockProducer { } } } + +impl PendingBlockProvider for katana_rpc_client::starknet::Client { + fn get_pending_state_update(&self) -> StarknetApiResult> { + let result = futures::executor::block_on(async { + self.get_state_update(BlockIdOrTag::PreConfirmed).await + }); + + match result { + Ok(state_update) => match state_update { + katana_rpc_types::state_update::StateUpdate::PreConfirmed(update) => { + Ok(Some(update)) + } + _ => Ok(None), + }, + Err(_) => Ok(None), + } + } + + fn get_pending_block_with_txs(&self) -> StarknetApiResult> { + let result = futures::executor::block_on(async { + self.get_block_with_txs(BlockIdOrTag::PreConfirmed).await + }); + + match result { + Ok(block) => match block { + katana_rpc_types::block::MaybePreConfirmedBlock::PreConfirmed(block) => { + Ok(Some(block)) + } + _ => Ok(None), + }, + Err(_) => Ok(None), + } + } + + fn get_pending_block_with_receipts( + &self, + ) -> StarknetApiResult> { + let result = futures::executor::block_on(async { + self.get_block_with_receipts(BlockIdOrTag::PreConfirmed).await + }); + + match result { + Ok(block) => match block { + katana_rpc_types::block::GetBlockWithReceiptsResponse::PreConfirmed(block) => { + Ok(Some(block)) + } + _ => Ok(None), + }, + Err(_) => 
Ok(None), + } + } + + fn get_pending_block_with_tx_hashes( + &self, + ) -> StarknetApiResult> { + let result = futures::executor::block_on(async { + self.get_block_with_tx_hashes(BlockIdOrTag::PreConfirmed).await + }); + + match result { + Ok(block) => match block { + katana_rpc_types::block::GetBlockWithTxHashesResponse::PreConfirmed(block) => { + Ok(Some(block)) + } + _ => Ok(None), + }, + Err(_) => Ok(None), + } + } + + fn get_pending_transaction(&self, hash: TxHash) -> StarknetApiResult> { + let result = + futures::executor::block_on(async { self.get_transaction_by_hash(hash).await }); + + match result { + Ok(tx) => Ok(Some(tx)), + Err(_) => Ok(None), + } + } + + fn get_pending_receipt( + &self, + hash: TxHash, + ) -> StarknetApiResult> { + let result = + futures::executor::block_on(async { self.get_transaction_receipt(hash).await }); + + match result { + Ok(receipt) => Ok(Some(receipt)), + Err(_) => Ok(None), + } + } + + fn get_pending_trace(&self, hash: TxHash) -> StarknetApiResult> { + let result = futures::executor::block_on(async { self.trace_transaction(hash).await }); + + match result { + Ok(trace) => Ok(Some(trace)), + Err(_) => Ok(None), + } + } + + fn get_pending_transaction_by_index( + &self, + index: TxNumber, + ) -> StarknetApiResult> { + let result = futures::executor::block_on(async { + self.get_transaction_by_block_id_and_index(BlockIdOrTag::PreConfirmed, index).await + }); + + match result { + Ok(tx) => Ok(Some(tx)), + Err(_) => Ok(None), + } + } + + fn pending_state(&self) -> StarknetApiResult>> { + // Client-based pending block provider doesn't provide state access + Ok(None) + } +} diff --git a/crates/rpc/rpc-server/src/starknet/trace.rs b/crates/rpc/rpc-server/src/starknet/trace.rs index 50b313a71..95609ea5f 100644 --- a/crates/rpc/rpc-server/src/starknet/trace.rs +++ b/crates/rpc/rpc-server/src/starknet/trace.rs @@ -97,7 +97,7 @@ where ) -> Result, StarknetApiError> { use StarknetApiError::BlockNotFound; - let provider = 
self.inner.backend.blockchain.provider(); + let provider = &self.inner.storage_provider; let block_id: BlockHashOrNumber = match block_id { ConfirmedBlockIdOrTag::L1Accepted => { @@ -131,7 +131,7 @@ where Ok(pending_trace) } else { // If not found in pending block, fallback to the provider - let provider = self.inner.backend.blockchain.provider(); + let provider = &self.inner.storage_provider; let trace = provider.transaction_execution(tx_hash)?.ok_or(TxnHashNotFound)?; Ok(TxTrace::from(trace)) } diff --git a/crates/storage/provider/provider/Cargo.toml b/crates/storage/provider/provider/Cargo.toml index 55a064eb4..6ae98016d 100644 --- a/crates/storage/provider/provider/Cargo.toml +++ b/crates/storage/provider/provider/Cargo.toml @@ -26,15 +26,15 @@ thiserror.workspace = true tracing.workspace = true # fork provider deps -futures = { workspace = true, optional = true } -tokio = { workspace = true, optional = true } +futures.workspace = true +tokio.workspace = true alloy-primitives = { workspace = true, optional = true } serde_json.workspace = true [features] -fork = [ "dep:futures", "dep:tokio" ] -in-memory = [ ] +fork = [] +in-memory = [] test-utils = [ "dep:alloy-primitives", "dep:katana-chain-spec" ] [dev-dependencies] diff --git a/crates/storage/provider/provider/src/providers/db/cached.rs b/crates/storage/provider/provider/src/providers/db/cached.rs new file mode 100644 index 000000000..f43b56c8b --- /dev/null +++ b/crates/storage/provider/provider/src/providers/db/cached.rs @@ -0,0 +1,686 @@ +use std::collections::{BTreeMap, HashMap}; +use std::ops::{Range, RangeInclusive}; +use std::sync::{Arc, RwLock}; + +use katana_db::abstraction::Database; +use katana_db::models::block::StoredBlockBodyIndices; +use katana_primitives::block::{ + Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithTxHashes, FinalityStatus, Header, + SealedBlockWithStatus, +}; +use katana_primitives::class::{ClassHash, CompiledClassHash, ContractClass}; +use 
katana_primitives::contract::{ContractAddress, Nonce, StorageKey, StorageValue}; +use katana_primitives::env::BlockEnv; +use katana_primitives::execution::TypedTransactionExecutionInfo; +use katana_primitives::receipt::Receipt; +use katana_primitives::state::{StateUpdates, StateUpdatesWithClasses}; +use katana_primitives::transaction::{TxHash, TxNumber, TxWithHash}; +use katana_provider_api::block::{ + BlockHashProvider, BlockNumberProvider, BlockProvider, BlockStatusProvider, BlockWriter, + HeaderProvider, +}; +use katana_provider_api::contract::{ContractClassProvider, ContractClassWriter}; +use katana_provider_api::env::BlockEnvProvider; +use katana_provider_api::stage::StageCheckpointProvider; +use katana_provider_api::state::{StateFactoryProvider, StateProvider, StateWriter}; +use katana_provider_api::state_update::StateUpdateProvider; +use katana_provider_api::transaction::{ + ReceiptProvider, TransactionProvider, TransactionStatusProvider, TransactionTraceProvider, + TransactionsProviderExt, +}; + +use crate::providers::fork::state::HistoricalStateProvider as ForkHistoricalStateProvider; +use crate::providers::fork::ForkedProvider; +use crate::ProviderResult; + +/// Inner cache data protected by a single lock for consistent snapshots. +#[derive(Debug, Default)] +struct StateCacheInner { + /// Cache for contract nonces: ContractAddress -> Nonce + nonces: HashMap, + /// Cache for storage values: (ContractAddress, StorageKey) -> StorageValue + storage: HashMap<(ContractAddress, StorageKey), StorageValue>, + /// Cache for contract class hashes: ContractAddress -> ClassHash + class_hashes: HashMap, + /// Cache for contract classes: ClassHash -> ContractClass + classes: HashMap, + /// Cache for compiled class hashes: ClassHash -> CompiledClassHash + compiled_class_hashes: HashMap, +} + +/// A cache for storing state data in memory. +/// +/// Uses a single read-write lock to ensure consistent snapshots across all cached data. 
+/// This prevents reading inconsistent state that could occur with multiple independent locks. +#[derive(Debug, Clone)] +pub struct StateCache { + inner: Arc>, +} + +impl Default for StateCache { + fn default() -> Self { + Self::new() + } +} + +impl StateCache { + fn new() -> Self { + Self { inner: Arc::new(RwLock::new(StateCacheInner::default())) } + } + + fn get_nonce(&self, address: ContractAddress) -> Option { + self.inner.read().ok()?.nonces.get(&address).copied() + } + + fn set_nonce(&self, address: ContractAddress, nonce: Nonce) { + if let Ok(mut cache) = self.inner.write() { + cache.nonces.insert(address, nonce); + } + } + + fn get_storage(&self, address: ContractAddress, key: StorageKey) -> Option { + self.inner.read().ok()?.storage.get(&(address, key)).copied() + } + + fn set_storage(&self, address: ContractAddress, key: StorageKey, value: StorageValue) { + if let Ok(mut cache) = self.inner.write() { + cache.storage.insert((address, key), value); + } + } + + fn get_class_hash(&self, address: ContractAddress) -> Option { + self.inner.read().ok()?.class_hashes.get(&address).copied() + } + + fn set_class_hash(&self, address: ContractAddress, class_hash: ClassHash) { + if let Ok(mut cache) = self.inner.write() { + cache.class_hashes.insert(address, class_hash); + } + } + + fn get_class(&self, hash: ClassHash) -> Option { + self.inner.read().ok()?.classes.get(&hash).cloned() + } + + fn set_class(&self, hash: ClassHash, class: ContractClass) { + if let Ok(mut cache) = self.inner.write() { + cache.classes.insert(hash, class); + } + } + + fn get_compiled_class_hash(&self, hash: ClassHash) -> Option { + self.inner.read().ok()?.compiled_class_hashes.get(&hash).copied() + } + + fn set_compiled_class_hash(&self, hash: ClassHash, compiled_hash: CompiledClassHash) { + if let Ok(mut cache) = self.inner.write() { + cache.compiled_class_hashes.insert(hash, compiled_hash); + } + } + + /// Clears all cached data. 
+ pub fn clear(&self) { + if let Ok(mut cache) = self.inner.write() { + cache.nonces.clear(); + cache.storage.clear(); + cache.class_hashes.clear(); + cache.classes.clear(); + cache.compiled_class_hashes.clear(); + } + } +} + +/// A cached version of provider that wraps the underlying provider with an in-memory cache +/// for state data. +/// +/// The cache is used to store frequently accessed state information such as nonces, storage values, +/// class hashes, and contract classes. When querying state through the [`StateProvider`] interface, +/// the cache is checked first before falling back to the underlying database. +#[derive(Debug, Clone)] +pub struct CachedDbProvider { + /// The underlying provider + inner: ForkedProvider, + /// The in-memory cache for state data + cache: StateCache, +} + +impl CachedDbProvider { + /// Creates a new [`CachedDbProvider`] wrapping the given [`ForkedProvider`]. + pub fn new(provider: ForkedProvider) -> Self { + Self { inner: provider, cache: StateCache::new() } + } + + /// Returns a reference to the underlying [`ForkedProvider`]. + pub fn inner(&self) -> &ForkedProvider { + &self.inner + } +} + +impl CachedDbProvider { + /// Returns a reference to the cache. + pub fn cache(&self) -> &StateCache { + &self.cache + } + + /// Clears all cached data. + pub fn clear_cache(&self) { + self.cache.clear(); + } +} + +impl StateFactoryProvider for CachedDbProvider { + fn latest(&self) -> ProviderResult> { + Ok(Box::new(CachedStateProvider { state: self.inner.latest()?, cache: self.cache.clone() })) + } + + fn historical( + &self, + block_id: BlockHashOrNumber, + ) -> ProviderResult>> { + if let Some(state) = self.inner.historical(block_id)? 
{ + Ok(Some(Box::new(CachedStateProvider { state, cache: self.cache.clone() }))) + } else { + Ok(None) + } + } +} + +impl BlockNumberProvider for CachedDbProvider { + fn block_number_by_hash(&self, hash: BlockHash) -> ProviderResult> { + self.inner.block_number_by_hash(hash) + } + + fn latest_number(&self) -> ProviderResult { + self.inner.latest_number() + } +} + +impl BlockHashProvider for CachedDbProvider { + fn latest_hash(&self) -> ProviderResult { + self.inner.latest_hash() + } + + fn block_hash_by_num(&self, num: BlockNumber) -> ProviderResult> { + self.inner.block_hash_by_num(num) + } +} + +impl HeaderProvider for CachedDbProvider { + fn header(&self, id: BlockHashOrNumber) -> ProviderResult> { + self.inner.header(id) + } +} + +impl BlockProvider for CachedDbProvider { + fn block_body_indices( + &self, + id: BlockHashOrNumber, + ) -> ProviderResult> { + self.inner.block_body_indices(id) + } + + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + self.inner.block(id) + } + + fn block_with_tx_hashes( + &self, + id: BlockHashOrNumber, + ) -> ProviderResult> { + self.inner.block_with_tx_hashes(id) + } + + fn blocks_in_range(&self, range: RangeInclusive) -> ProviderResult> { + self.inner.blocks_in_range(range) + } +} + +impl BlockStatusProvider for CachedDbProvider { + fn block_status(&self, id: BlockHashOrNumber) -> ProviderResult> { + self.inner.block_status(id) + } +} + +impl StateUpdateProvider for CachedDbProvider { + fn state_update(&self, block_id: BlockHashOrNumber) -> ProviderResult> { + self.inner.state_update(block_id) + } + + fn declared_classes( + &self, + block_id: BlockHashOrNumber, + ) -> ProviderResult>> { + self.inner.declared_classes(block_id) + } + + fn deployed_contracts( + &self, + block_id: BlockHashOrNumber, + ) -> ProviderResult>> { + self.inner.deployed_contracts(block_id) + } +} + +impl TransactionProvider for CachedDbProvider { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + 
self.inner.transaction_by_hash(hash) + } + + fn transactions_by_block( + &self, + block_id: BlockHashOrNumber, + ) -> ProviderResult>> { + self.inner.transactions_by_block(block_id) + } + + fn transaction_in_range(&self, range: Range) -> ProviderResult> { + self.inner.transaction_in_range(range) + } + + fn transaction_block_num_and_hash( + &self, + hash: TxHash, + ) -> ProviderResult> { + self.inner.transaction_block_num_and_hash(hash) + } + + fn transaction_by_block_and_idx( + &self, + block_id: BlockHashOrNumber, + idx: u64, + ) -> ProviderResult> { + self.inner.transaction_by_block_and_idx(block_id, idx) + } + + fn transaction_count_by_block( + &self, + block_id: BlockHashOrNumber, + ) -> ProviderResult> { + self.inner.transaction_count_by_block(block_id) + } +} + +impl TransactionsProviderExt for CachedDbProvider { + fn transaction_hashes_in_range(&self, range: Range) -> ProviderResult> { + self.inner.transaction_hashes_in_range(range) + } + + fn total_transactions(&self) -> ProviderResult { + self.inner.total_transactions() + } +} + +impl TransactionStatusProvider for CachedDbProvider { + fn transaction_status(&self, hash: TxHash) -> ProviderResult> { + self.inner.transaction_status(hash) + } +} + +impl TransactionTraceProvider for CachedDbProvider { + fn transaction_execution( + &self, + hash: TxHash, + ) -> ProviderResult> { + self.inner.transaction_execution(hash) + } + + fn transaction_executions_by_block( + &self, + block_id: BlockHashOrNumber, + ) -> ProviderResult>> { + self.inner.transaction_executions_by_block(block_id) + } + + fn transaction_executions_in_range( + &self, + range: Range, + ) -> ProviderResult> { + self.inner.transaction_executions_in_range(range) + } +} + +impl ReceiptProvider for CachedDbProvider { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + self.inner.receipt_by_hash(hash) + } + + fn receipts_by_block( + &self, + block_id: BlockHashOrNumber, + ) -> ProviderResult>> { + self.inner.receipts_by_block(block_id) + } 
+} + +impl BlockEnvProvider for CachedDbProvider { + fn block_env_at(&self, block_id: BlockHashOrNumber) -> ProviderResult> { + self.inner.block_env_at(block_id) + } +} + +impl BlockWriter for CachedDbProvider { + fn insert_block_with_states_and_receipts( + &self, + block: SealedBlockWithStatus, + states: StateUpdatesWithClasses, + receipts: Vec, + executions: Vec, + ) -> ProviderResult<()> { + self.inner.insert_block_with_states_and_receipts(block, states, receipts, executions) + } +} + +impl StageCheckpointProvider for CachedDbProvider { + fn checkpoint(&self, id: &str) -> ProviderResult> { + self.inner.checkpoint(id) + } + + fn set_checkpoint(&self, id: &str, block_number: BlockNumber) -> ProviderResult<()> { + self.inner.set_checkpoint(id, block_number) + } +} + +impl StateWriter for CachedDbProvider { + fn set_nonce(&self, address: ContractAddress, nonce: Nonce) -> ProviderResult<()> { + self.inner.set_nonce(address, nonce) + } + + fn set_storage( + &self, + address: ContractAddress, + storage_key: StorageKey, + storage_value: StorageValue, + ) -> ProviderResult<()> { + self.inner.set_storage(address, storage_key, storage_value) + } + + fn set_class_hash_of_contract( + &self, + address: ContractAddress, + class_hash: ClassHash, + ) -> ProviderResult<()> { + self.inner.set_class_hash_of_contract(address, class_hash) + } +} + +impl ContractClassWriter for CachedDbProvider { + fn set_class(&self, hash: ClassHash, class: ContractClass) -> ProviderResult<()> { + self.inner.set_class(hash, class) + } + + fn set_compiled_class_hash_of_class_hash( + &self, + hash: ClassHash, + compiled_hash: CompiledClassHash, + ) -> ProviderResult<()> { + self.inner.set_compiled_class_hash_of_class_hash(hash, compiled_hash) + } +} + +/// A cached version of fork [`LatestStateProvider`] that checks the cache before querying the +/// database. 
+#[derive(Debug)] +struct CachedStateProvider { + state: S, + cache: StateCache, +} + +impl ContractClassProvider for CachedStateProvider { + fn class(&self, hash: ClassHash) -> ProviderResult> { + // Check cache first + if let Some(class) = self.cache.get_class(hash) { + return Ok(Some(class)); + } + + // Query database and cache the result + let class = self.state.class(hash)?; + if let Some(ref c) = class { + self.cache.set_class(hash, c.clone()); + } + Ok(class) + } + + fn compiled_class_hash_of_class_hash( + &self, + hash: ClassHash, + ) -> ProviderResult> { + // Check cache first + if let Some(compiled_hash) = self.cache.get_compiled_class_hash(hash) { + return Ok(Some(compiled_hash)); + } + + // Query database and cache the result + let compiled_hash = self.state.compiled_class_hash_of_class_hash(hash)?; + if let Some(ch) = compiled_hash { + self.cache.set_compiled_class_hash(hash, ch); + } + Ok(compiled_hash) + } +} + +impl StateProvider for CachedStateProvider { + fn nonce(&self, address: ContractAddress) -> ProviderResult> { + // Check cache first + if let Some(nonce) = self.cache.get_nonce(address) { + return Ok(Some(nonce)); + } + + // Query database and cache the result + let nonce = self.state.nonce(address)?; + if let Some(n) = nonce { + self.cache.set_nonce(address, n); + } + Ok(nonce) + } + + fn storage( + &self, + address: ContractAddress, + storage_key: StorageKey, + ) -> ProviderResult> { + // Check cache first + if let Some(value) = self.cache.get_storage(address, storage_key) { + return Ok(Some(value)); + } + + // Query database and cache the result + let value = self.state.storage(address, storage_key)?; + if let Some(v) = value { + self.cache.set_storage(address, storage_key, v); + } + Ok(value) + } + + fn class_hash_of_contract( + &self, + address: ContractAddress, + ) -> ProviderResult> { + // Check cache first + if let Some(class_hash) = self.cache.get_class_hash(address) { + return Ok(Some(class_hash)); + } + + // Query database and 
cache the result + let class_hash = self.state.class_hash_of_contract(address)?; + if let Some(ch) = class_hash { + self.cache.set_class_hash(address, ch); + } + Ok(class_hash) + } +} + +impl katana_provider_api::state::StateProofProvider for CachedStateProvider { + fn class_multiproof(&self, classes: Vec) -> ProviderResult { + self.state.class_multiproof(classes) + } + + fn contract_multiproof( + &self, + addresses: Vec, + ) -> ProviderResult { + self.state.contract_multiproof(addresses) + } + + fn storage_multiproof( + &self, + address: ContractAddress, + storage_keys: Vec, + ) -> ProviderResult { + self.state.storage_multiproof(address, storage_keys) + } +} + +impl katana_provider_api::state::StateRootProvider for CachedStateProvider { + fn classes_root(&self) -> ProviderResult { + self.state.classes_root() + } + + fn contracts_root(&self) -> ProviderResult { + self.state.contracts_root() + } + + fn storage_root( + &self, + contract: ContractAddress, + ) -> ProviderResult> { + self.state.storage_root(contract) + } +} + +/// A cached version of fork [`HistoricalStateProvider`] that checks the cache before querying the +/// database. 
+#[derive(Debug)] +struct CachedHistoricalStateProvider { + inner: ForkHistoricalStateProvider, + cache: StateCache, +} + +impl ContractClassProvider for CachedHistoricalStateProvider { + fn class(&self, hash: ClassHash) -> ProviderResult> { + // Check cache first + if let Some(class) = self.cache.get_class(hash) { + return Ok(Some(class)); + } + + // Query database and cache the result + let class = self.inner.class(hash)?; + if let Some(ref c) = class { + self.cache.set_class(hash, c.clone()); + } + Ok(class) + } + + fn compiled_class_hash_of_class_hash( + &self, + hash: ClassHash, + ) -> ProviderResult> { + // Check cache first + if let Some(compiled_hash) = self.cache.get_compiled_class_hash(hash) { + return Ok(Some(compiled_hash)); + } + + // Query database and cache the result + let compiled_hash = self.inner.compiled_class_hash_of_class_hash(hash)?; + if let Some(ch) = compiled_hash { + self.cache.set_compiled_class_hash(hash, ch); + } + Ok(compiled_hash) + } +} + +impl StateProvider for CachedHistoricalStateProvider { + fn nonce(&self, address: ContractAddress) -> ProviderResult> { + // Check cache first + if let Some(nonce) = self.cache.get_nonce(address) { + return Ok(Some(nonce)); + } + + // Query database and cache the result + let nonce = self.inner.nonce(address)?; + if let Some(n) = nonce { + self.cache.set_nonce(address, n); + } + Ok(nonce) + } + + fn storage( + &self, + address: ContractAddress, + storage_key: StorageKey, + ) -> ProviderResult> { + // Check cache first + if let Some(value) = self.cache.get_storage(address, storage_key) { + return Ok(Some(value)); + } + + // Query database and cache the result + let value = self.inner.storage(address, storage_key)?; + if let Some(v) = value { + self.cache.set_storage(address, storage_key, v); + } + Ok(value) + } + + fn class_hash_of_contract( + &self, + address: ContractAddress, + ) -> ProviderResult> { + // Check cache first + if let Some(class_hash) = self.cache.get_class_hash(address) { + return 
Ok(Some(class_hash)); + } + + // Query database and cache the result + let class_hash = self.inner.class_hash_of_contract(address)?; + if let Some(ch) = class_hash { + self.cache.set_class_hash(address, ch); + } + Ok(class_hash) + } +} + +impl katana_provider_api::state::StateProofProvider + for CachedHistoricalStateProvider +{ + fn class_multiproof(&self, classes: Vec) -> ProviderResult { + self.inner.class_multiproof(classes) + } + + fn contract_multiproof( + &self, + addresses: Vec, + ) -> ProviderResult { + self.inner.contract_multiproof(addresses) + } + + fn storage_multiproof( + &self, + address: ContractAddress, + storage_keys: Vec, + ) -> ProviderResult { + self.inner.storage_multiproof(address, storage_keys) + } +} + +impl katana_provider_api::state::StateRootProvider + for CachedHistoricalStateProvider +{ + fn classes_root(&self) -> ProviderResult { + self.inner.classes_root() + } + + fn contracts_root(&self) -> ProviderResult { + self.inner.contracts_root() + } + + fn storage_root( + &self, + contract: ContractAddress, + ) -> ProviderResult> { + self.inner.storage_root(contract) + } + + fn state_root(&self) -> ProviderResult { + self.inner.state_root() + } +} diff --git a/crates/storage/provider/provider/src/providers/db/mod.rs b/crates/storage/provider/provider/src/providers/db/mod.rs index ff822dcd5..3f224a38e 100644 --- a/crates/storage/provider/provider/src/providers/db/mod.rs +++ b/crates/storage/provider/provider/src/providers/db/mod.rs @@ -1,3 +1,4 @@ +pub mod cached; pub mod state; pub mod trie; diff --git a/crates/storage/provider/provider/src/providers/fork/mod.rs b/crates/storage/provider/provider/src/providers/fork/mod.rs index 8a88ffe78..a0699addf 100644 --- a/crates/storage/provider/provider/src/providers/fork/mod.rs +++ b/crates/storage/provider/provider/src/providers/fork/mod.rs @@ -32,13 +32,13 @@ use katana_rpc_client::starknet::Client as StarknetClient; use super::db::{self, DbProvider}; use crate::ProviderResult; -mod state; -mod 
trie; +pub mod state; +pub mod trie; -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct ForkedProvider { backend: BackendClient, - provider: Arc>, + pub(crate) provider: Arc>, } impl ForkedProvider { @@ -56,6 +56,11 @@ impl ForkedProvider { pub fn backend(&self) -> &BackendClient { &self.backend } + + /// Returns a reference to the underlying [`DbProvider`]. + pub fn db_provider(&self) -> &DbProvider { + &self.provider + } } impl ForkedProvider { diff --git a/crates/storage/provider/provider/src/providers/fork/state.rs b/crates/storage/provider/provider/src/providers/fork/state.rs index a8db588ea..83e8ce6e8 100644 --- a/crates/storage/provider/provider/src/providers/fork/state.rs +++ b/crates/storage/provider/provider/src/providers/fork/state.rs @@ -62,10 +62,10 @@ where } #[derive(Debug)] -struct LatestStateProvider { - db: Arc>, - backend: BackendClient, - provider: db::state::LatestStateProvider, +pub struct LatestStateProvider { + pub(crate) db: Arc>, + pub(crate) backend: BackendClient, + pub(crate) provider: db::state::LatestStateProvider, } impl ContractClassProvider for LatestStateProvider @@ -202,10 +202,10 @@ where } #[derive(Debug)] -struct HistoricalStateProvider { - db: Arc>, - backend: BackendClient, - provider: db::state::HistoricalStateProvider, +pub struct HistoricalStateProvider { + pub(crate) db: Arc>, + pub(crate) backend: BackendClient, + pub(crate) provider: db::state::HistoricalStateProvider, } impl HistoricalStateProvider { diff --git a/crates/storage/provider/provider/src/providers/mod.rs b/crates/storage/provider/provider/src/providers/mod.rs index 28145734b..c1c1df7b5 100644 --- a/crates/storage/provider/provider/src/providers/mod.rs +++ b/crates/storage/provider/provider/src/providers/mod.rs @@ -1,5 +1,4 @@ pub mod db; -#[cfg(feature = "fork")] pub mod fork; use katana_primitives::class::{ClassHash, CompiledClassHash, ContractClass}; From 25438fc760825cc54929c0edd0382241dd2c6f99 Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Wed, 29 
Oct 2025 19:04:50 -0400 Subject: [PATCH 02/26] wip --- crates/pool/pool-api/src/ordering.rs | 2 +- crates/pool/pool-api/src/validation.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/pool/pool-api/src/ordering.rs b/crates/pool/pool-api/src/ordering.rs index e43aa013e..3a399d9fb 100644 --- a/crates/pool/pool-api/src/ordering.rs +++ b/crates/pool/pool-api/src/ordering.rs @@ -2,7 +2,7 @@ use crate::PoolTransaction; // evaluates the priority of a transaction which would be used to determine how txs are ordered in // the pool. -pub trait PoolOrd { +pub trait PoolOrd: Send + Sync { type Transaction: PoolTransaction; /// The priority value type whose [Ord] implementation is used to order the transaction in the /// pool. diff --git a/crates/pool/pool-api/src/validation.rs b/crates/pool/pool-api/src/validation.rs index b9a0ccbef..b9f8db762 100644 --- a/crates/pool/pool-api/src/validation.rs +++ b/crates/pool/pool-api/src/validation.rs @@ -165,7 +165,7 @@ impl Error { pub type ValidationResult = Result, Error>; /// A trait for validating transactions before they are added to the transaction pool. -pub trait Validator { +pub trait Validator: Send + Sync { type Transaction: PoolTransaction; /// Validate a transaction. 
From 10455e28131308533ab9f27c64d9c65fca977259 Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Fri, 31 Oct 2025 16:24:22 -0400 Subject: [PATCH 03/26] wip --- crates/cli/src/options.rs | 12 +- crates/cli/src/utils.rs | 23 +- crates/core/src/backend/storage.rs | 13 +- crates/node/src/config/fork.rs | 6 +- crates/node/src/optimistic/config.rs | 5 +- crates/node/src/optimistic/executor.rs | 86 +++--- crates/node/src/optimistic/mod.rs | 9 +- crates/node/src/optimistic/pool.rs | 20 +- crates/primitives/src/block.rs | 12 + crates/rpc/rpc-server/src/starknet/list.rs | 13 +- crates/rpc/rpc-server/src/starknet/mod.rs | 58 ++-- crates/rpc/rpc-server/src/starknet/read.rs | 2 +- crates/rpc/rpc-server/src/starknet/trace.rs | 14 +- crates/rpc/rpc-server/src/starknet/write.rs | 16 +- crates/storage/fork/src/lib.rs | 13 +- .../provider/src/providers/db/cached.rs | 290 ++++++------------ .../provider/src/providers/fork/mod.rs | 10 +- 17 files changed, 249 insertions(+), 353 deletions(-) diff --git a/crates/cli/src/options.rs b/crates/cli/src/options.rs index cd730af9e..8e1985788 100644 --- a/crates/cli/src/options.rs +++ b/crates/cli/src/options.rs @@ -26,7 +26,7 @@ use katana_node::config::rpc::{RpcModulesList, DEFAULT_RPC_MAX_PROOF_KEYS}; use katana_node::config::rpc::{ DEFAULT_RPC_ADDR, DEFAULT_RPC_MAX_CALL_GAS, DEFAULT_RPC_MAX_EVENT_PAGE_SIZE, DEFAULT_RPC_PORT, }; -use katana_primitives::block::{BlockHashOrNumber, GasPrice}; +use katana_primitives::block::{BlockHashOrNumber, BlockIdOrTag, GasPrice}; use katana_primitives::chain::ChainId; #[cfg(feature = "server")] use katana_rpc_server::cors::HeaderValue; @@ -37,7 +37,7 @@ use url::Url; #[cfg(feature = "server")] use crate::utils::{deserialize_cors_origins, serialize_cors_origins}; -use crate::utils::{parse_block_hash_or_number, parse_genesis}; +use crate::utils::{parse_block_hash_or_number, parse_block_id_or_tag, parse_genesis}; const DEFAULT_DEV_SEED: &str = "0"; const DEFAULT_DEV_ACCOUNTS: u16 = 10; @@ -423,11 +423,11 @@ pub 
struct ForkingOptions { #[arg(long = "fork.provider", value_name = "URL", conflicts_with = "genesis")] pub fork_provider: Option, - /// Fork the network at a specific block id, can either be a hash (0x-prefixed) or a block - /// number. + /// Fork the network at a specific block id, can either be a hash (0x-prefixed), a block + /// number, or a tag (latest, l1accepted, preconfirmed). #[arg(long = "fork.block", value_name = "BLOCK", requires = "fork_provider")] - #[arg(value_parser = parse_block_hash_or_number)] - pub fork_block: Option, + #[arg(value_parser = parse_block_id_or_tag)] + pub fork_block: Option, } #[derive(Debug, Args, Clone, Serialize, Deserialize, Default, PartialEq)] diff --git a/crates/cli/src/utils.rs b/crates/cli/src/utils.rs index 6c1f08414..e1b656a3e 100644 --- a/crates/cli/src/utils.rs +++ b/crates/cli/src/utils.rs @@ -10,7 +10,7 @@ use katana_genesis::constant::{ }; use katana_genesis::json::GenesisJson; use katana_genesis::Genesis; -use katana_primitives::block::{BlockHash, BlockHashOrNumber, BlockNumber}; +use katana_primitives::block::{BlockHash, BlockHashOrNumber, BlockIdOrTag, BlockNumber}; use katana_primitives::chain::ChainId; use katana_primitives::class::ClassHash; use katana_primitives::contract::ContractAddress; @@ -51,6 +51,27 @@ pub fn parse_block_hash_or_number(value: &str) -> Result { } } +/// Parse a block id or tag from a string. 
Accepts: +/// - Block hashes (0x-prefixed) +/// - Block numbers (numeric) +/// - Block tags: "latest", "l1accepted", "preconfirmed" (case-insensitive) +pub fn parse_block_id_or_tag(value: &str) -> Result { + if value.starts_with("0x") { + Ok(BlockIdOrTag::Hash(BlockHash::from_hex(value)?)) + } else { + match value.to_lowercase().as_str() { + "latest" => Ok(BlockIdOrTag::Latest), + "l1accepted" => Ok(BlockIdOrTag::L1Accepted), + "preconfirmed" => Ok(BlockIdOrTag::PreConfirmed), + _ => { + let num = + value.parse::().context("could not parse block number or tag")?; + Ok(BlockIdOrTag::Number(num)) + } + } + } +} + pub fn print_intro(args: &SequencerNodeArgs, chain: &ChainSpec) { let mut accounts = chain.genesis().accounts().peekable(); let account_class_hash = accounts.peek().map(|e| e.1.class_hash()); diff --git a/crates/core/src/backend/storage.rs b/crates/core/src/backend/storage.rs index 63cb36c0e..1ba93096c 100644 --- a/crates/core/src/backend/storage.rs +++ b/crates/core/src/backend/storage.rs @@ -1,6 +1,6 @@ use anyhow::{bail, Context, Result}; use katana_primitives::block::{ - BlockHashOrNumber, BlockIdOrTag, BlockNumber, FinalityStatus, GasPrices, Header, SealedBlock, + BlockIdOrTag, BlockNumber, FinalityStatus, GasPrices, Header, SealedBlock, SealedBlockWithStatus, }; use katana_provider::api::block::{BlockProvider, BlockWriter}; @@ -88,7 +88,7 @@ impl Blockchain { pub async fn new_from_forked( db: katana_db::Db, fork_url: Url, - fork_block: Option, + fork_block: Option, chain: &mut katana_chain_spec::dev::ChainSpec, ) -> Result<(Self, BlockNumber)> { let provider = StarknetClient::new(HttpClientBuilder::new().build(fork_url)?); @@ -102,17 +102,12 @@ impl Blockchain { // If the fork block number is not specified, we use the latest accepted block on the forked // network. 
- let block_id = if let Some(id) = fork_block { - id - } else { - let res = provider.block_number().await?; - BlockHashOrNumber::Num(res.block_number) - }; + let block_id = if let Some(id) = fork_block { id } else { BlockIdOrTag::Latest }; info!(chain = %parsed_id, block = %block_id, "Forking chain."); let block = provider - .get_block_with_tx_hashes(BlockIdOrTag::from(block_id)) + .get_block_with_tx_hashes(block_id) .await .context("failed to fetch forked block")?; diff --git a/crates/node/src/config/fork.rs b/crates/node/src/config/fork.rs index 02a93c6f7..1be1e7d64 100644 --- a/crates/node/src/config/fork.rs +++ b/crates/node/src/config/fork.rs @@ -1,4 +1,4 @@ -use katana_primitives::block::BlockHashOrNumber; +use katana_primitives::block::BlockIdOrTag; use url::Url; /// Node forking configurations. @@ -6,6 +6,6 @@ use url::Url; pub struct ForkingConfig { /// The JSON-RPC URL of the network to fork from. pub url: Url, - /// The block number to fork from. If `None`, the latest block will be used. - pub block: Option, + /// The block id or tag to fork from. If `None`, the latest block will be used. + pub block: Option, } diff --git a/crates/node/src/optimistic/config.rs b/crates/node/src/optimistic/config.rs index 2f0925040..f16997e1d 100644 --- a/crates/node/src/optimistic/config.rs +++ b/crates/node/src/optimistic/config.rs @@ -2,9 +2,12 @@ use std::sync::Arc; use katana_chain_spec::ChainSpec; +use crate::config::db::DbConfig; +use crate::config::fork::ForkingConfig; +use crate::config::metrics::MetricsConfig; #[cfg(feature = "cartridge")] use crate::config::paymaster; -use crate::config::{db::DbConfig, fork::ForkingConfig, metrics::MetricsConfig, rpc::RpcConfig}; +use crate::config::rpc::RpcConfig; /// Node configurations. 
/// diff --git a/crates/node/src/optimistic/executor.rs b/crates/node/src/optimistic/executor.rs index 059000d66..a53189b26 100644 --- a/crates/node/src/optimistic/executor.rs +++ b/crates/node/src/optimistic/executor.rs @@ -4,29 +4,28 @@ use std::sync::Arc; use std::task::{Context, Poll}; use futures::stream::StreamExt; +use katana_core::backend::storage::Blockchain; use katana_core::backend::Backend; use katana_executor::implementation::blockifier::BlockifierFactory; use katana_executor::{ExecutionResult, ExecutorFactory}; -use katana_pool::{PendingTransactions, PoolOrd, PoolTransaction, TransactionPool, TxPool}; +use katana_pool::ordering::FiFo; +use katana_pool::{PendingTransactions, PoolOrd, PoolTransaction, TransactionPool}; use katana_primitives::transaction::ExecutableTxWithHash; use katana_provider::api::state::StateFactoryProvider; +use katana_provider::providers::db::cached::CachedDbProvider; +use katana_rpc_types::{BroadcastedTx, BroadcastedTxWithChainId}; use katana_tasks::{JoinHandle, TaskSpawner}; use tracing::{debug, error, info, trace, warn}; +use crate::optimistic::pool::TxPool; + const LOG_TARGET: &str = "optimistic_executor"; -/// The `OptimisticExecutor` is an actor-based component that listens to incoming transactions -/// from the pool and executes them optimistically as they arrive. -/// -/// This component subscribes to the pool's pending transaction stream and processes each -/// transaction as soon as it's available, without waiting for block production. 
-#[allow(missing_debug_implementations)] +#[derive(Debug)] pub struct OptimisticExecutor { - /// The transaction pool to subscribe to pool: TxPool, - /// The backend containing the executor factory and blockchain state - backend: Arc>, - /// Task spawner for running the executor actor + optimistic_state: CachedDbProvider, + executor_factory: Arc, task_spawner: TaskSpawner, } @@ -40,10 +39,11 @@ impl OptimisticExecutor { /// * `task_spawner` - The task spawner used to run the executor actor pub fn new( pool: TxPool, - backend: Arc>, + optimistic_state: CachedDbProvider, + executor_factory: Arc, task_spawner: TaskSpawner, ) -> Self { - Self { pool, backend, task_spawner } + Self { pool, optimistic_state, executor_factory, task_spawner } } /// Spawns the optimistic executor actor task. @@ -55,49 +55,37 @@ impl OptimisticExecutor { /// /// A `JoinHandle` to the spawned executor task. pub fn spawn(self) -> JoinHandle<()> { - info!(target: LOG_TARGET, "Starting optimistic executor"); - - let pending_txs = self.pool.pending_transactions(); - let actor = OptimisticExecutorActor::new(pending_txs, self.backend); - + let actor = + OptimisticExecutorActor::new(self.pool, self.optimistic_state, self.executor_factory); self.task_spawner.build_task().name("Optimistic Executor").spawn(actor) } } -/// The internal actor that processes transactions from the pending transactions stream. -#[allow(missing_debug_implementations)] -struct OptimisticExecutorActor -where - O: PoolOrd, -{ +#[derive(Debug)] +struct OptimisticExecutorActor { + pool: TxPool, + optimistic_state: CachedDbProvider, /// Stream of pending transactions from the pool - pending_txs: PendingTransactions, - /// The backend for executing transactions - backend: Arc>, + pending_txs: PendingTransactions>, + storage: Blockchain, + executor_factory: Arc, } -impl OptimisticExecutorActor -where - O: PoolOrd, -{ +impl OptimisticExecutorActor { /// Creates a new executor actor with the given pending transactions stream. 
fn new( - pending_txs: PendingTransactions, - backend: Arc>, + pool: TxPool, + optimistic_state: CachedDbProvider, + executor_factory: Arc, ) -> Self { - Self { pending_txs, backend } + let pending_txs = pool.pending_transactions(); + Self { pool, optimistic_state, pending_txs, storage, executor_factory } } /// Execute a single transaction optimistically against the latest state. - fn execute_transaction(&self, tx: ExecutableTxWithHash) -> Result { - let provider = self.backend.blockchain.provider(); - - // Get the latest state to execute against - let latest_state = - provider.latest().map_err(|e| format!("Failed to get latest state: {e}"))?; - - // Create an executor with the latest state - let mut executor = self.backend.executor_factory.with_state(latest_state); + fn execute_transaction(&self, tx: BroadcastedTxWithChainId) -> Result { + let latest_state = self.optimistic_state.latest().unwrap(); + let mut executor = self.executor_factory.with_state(latest_state); // Execute the transaction let result = executor.execute_transactions(vec![tx.clone()]); @@ -123,16 +111,20 @@ where } else { Err("No execution result found".to_string()) } + + let output = executor.take_execution_output().unwrap(); + self.optimistic_state.merge_state_updates(&output.states); + + // remove from pool + self.pool.remove_transactions(); } + Err(e) => Err(format!("Execution failed: {e}")), } } } -impl Future for OptimisticExecutorActor -where - O: PoolOrd, -{ +impl Future for OptimisticExecutorActor { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { diff --git a/crates/node/src/optimistic/mod.rs b/crates/node/src/optimistic/mod.rs index 941697ed6..a9299778d 100644 --- a/crates/node/src/optimistic/mod.rs +++ b/crates/node/src/optimistic/mod.rs @@ -24,17 +24,11 @@ use katana_metrics::sys::DiskReporter; use katana_metrics::{Report, Server as MetricsServer}; use katana_pool::ordering::FiFo; use katana_primitives::env::{CfgEnv, FeeTokenAddressses}; -#[cfg(feature 
= "cartridge")] -use katana_rpc::cartridge::CartridgeApi; use katana_rpc::cors::Cors; use katana_rpc::dev::DevApi; use katana_rpc::starknet::forking::ForkedClient; -#[cfg(feature = "cartridge")] -use katana_rpc::starknet::PaymasterConfig; use katana_rpc::starknet::{StarknetApi, StarknetApiConfig}; use katana_rpc::{RpcServer, RpcServerHandle}; -#[cfg(feature = "cartridge")] -use katana_rpc_api::cartridge::CartridgeApiServer; use katana_rpc_api::dev::DevApiServer; use katana_rpc_api::starknet::{StarknetApiServer, StarknetTraceApiServer, StarknetWriteApiServer}; #[cfg(feature = "explorer")] @@ -47,9 +41,10 @@ mod config; mod executor; mod pool; +use config::Config; + use crate::exit::NodeStoppedFuture; use crate::optimistic::pool::{PoolValidator, TxPool}; -use config::Config; #[derive(Debug)] pub struct Node { diff --git a/crates/node/src/optimistic/pool.rs b/crates/node/src/optimistic/pool.rs index 4733172cd..ad017ebfc 100644 --- a/crates/node/src/optimistic/pool.rs +++ b/crates/node/src/optimistic/pool.rs @@ -7,9 +7,9 @@ use katana_pool_api::validation::{ }; use katana_primitives::utils::get_contract_address; use katana_rpc_client::starknet::Client; -use katana_rpc_types::BroadcastedTx; +use katana_rpc_types::{BroadcastedTx, BroadcastedTxWithChainId}; -pub type TxPool = Pool>; +pub type TxPool = Pool>; /// A validator that forwards transactions to a remote Starknet RPC endpoint. 
#[derive(Debug, Clone)] @@ -28,14 +28,14 @@ impl PoolValidator { } impl Validator for PoolValidator { - type Transaction = BroadcastedTx; + type Transaction = BroadcastedTxWithChainId; async fn validate( &self, tx: Self::Transaction, ) -> Result, ValidationError> { // Forward the transaction to the remote node - let result = match &tx { + let result = match &tx.tx { BroadcastedTx::Invoke(invoke_tx) => { self.client.add_invoke_transaction(invoke_tx.clone()).await.map(|_| ()) } @@ -52,19 +52,11 @@ impl Validator for PoolValidator { match result { Ok(_) => Ok(ValidationOutcome::Valid(tx)), Err(err) => { - // For client-based validation, any error from the remote node - // indicates the transaction is invalid let error = InvalidTransactionError::ValidationFailure { - address: match &tx { + address: match &tx.tx { BroadcastedTx::Invoke(tx) => tx.sender_address, BroadcastedTx::Declare(tx) => tx.sender_address, - BroadcastedTx::DeployAccount(tx) => get_contract_address( - tx.contract_address_salt, - tx.class_hash, - &tx.constructor_calldata, - katana_primitives::Felt::ZERO, - ) - .into(), + BroadcastedTx::DeployAccount(tx) => tx.contract_address(), }, class_hash: Default::default(), error: err.to_string(), diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 15866a6ea..64e4f929b 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -38,6 +38,18 @@ impl From for BlockIdOrTag { } } +impl std::fmt::Display for BlockIdOrTag { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + BlockIdOrTag::Number(num) => write!(f, "{num}"), + BlockIdOrTag::Hash(hash) => write!(f, "{hash:#x}"), + BlockIdOrTag::L1Accepted => write!(f, "L1Accepted"), + BlockIdOrTag::Latest => write!(f, "Latest"), + BlockIdOrTag::PreConfirmed => write!(f, "PreConfirmed"), + } + } +} + /// Block identifier that refers to a confirmed block. 
#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ConfirmedBlockIdOrTag { diff --git a/crates/rpc/rpc-server/src/starknet/list.rs b/crates/rpc/rpc-server/src/starknet/list.rs index 138bf64d3..3d12c416a 100644 --- a/crates/rpc/rpc-server/src/starknet/list.rs +++ b/crates/rpc/rpc-server/src/starknet/list.rs @@ -5,16 +5,21 @@ use katana_executor::ExecutorFactory; use katana_pool::TransactionPool; use katana_primitives::transaction::TxNumber; use katana_rpc_api::starknet_ext::StarknetApiExtServer; -use katana_rpc_types::list::{ - GetBlocksRequest, GetBlocksResponse, GetTransactionsRequest, GetTransactionsResponse, +use katana_rpc_types::{ + list::{GetBlocksRequest, GetBlocksResponse, GetTransactionsRequest, GetTransactionsResponse}, + RpcTxWithHash, }; use super::StarknetApi; use crate::starknet::pending::PendingBlockProvider; #[async_trait] -impl - StarknetApiExtServer for StarknetApi +impl StarknetApiExtServer for StarknetApi +where + EF: ExecutorFactory, + Pool: TransactionPool + 'static, + ::Transaction: Into, + PP: PendingBlockProvider, { async fn get_blocks(&self, request: GetBlocksRequest) -> RpcResult { Ok(self.blocks(request).await?) 
diff --git a/crates/rpc/rpc-server/src/starknet/mod.rs b/crates/rpc/rpc-server/src/starknet/mod.rs index c0dcda65a..53a596722 100644 --- a/crates/rpc/rpc-server/src/starknet/mod.rs +++ b/crates/rpc/rpc-server/src/starknet/mod.rs @@ -102,33 +102,6 @@ where pending_block_provider: PP, } -impl StarknetApi -where - EF: ExecutorFactory, - Pool: TransactionPool, - PP: PendingBlockProvider, -{ - pub fn pool(&self) -> &Pool { - &self.inner.pool - } - - pub fn backend(&self) -> &Arc> { - &self.inner.backend - } - - pub fn forked_client(&self) -> Option<&ForkedClient> { - self.inner.forked_client.as_ref() - } - - pub fn estimate_fee_permit(&self) -> &Permits { - &self.inner.estimate_fee_permit - } - - pub fn config(&self) -> &StarknetApiConfig { - &self.inner.config - } -} - impl StarknetApi where EF: ExecutorFactory, @@ -256,6 +229,34 @@ where } } + pub fn pool(&self) -> &Pool { + &self.inner.pool + } + + pub fn backend(&self) -> &Arc> { + &self.inner.backend + } + + pub fn forked_client(&self) -> Option<&ForkedClient> { + self.inner.forked_client.as_ref() + } + + pub fn estimate_fee_permit(&self) -> &Permits { + &self.inner.estimate_fee_permit + } + + pub fn config(&self) -> &StarknetApiConfig { + &self.inner.config + } +} + +impl StarknetApi +where + EF: ExecutorFactory, + Pool: TransactionPool + 'static, + ::Transaction: Into, + PP: PendingBlockProvider, +{ fn estimate_fee_with( &self, transactions: Vec, @@ -564,8 +565,7 @@ where Ok(client.get_transaction_by_hash(hash).await?) 
} else { let pool_tx = self.inner.pool.get(hash).ok_or(StarknetApiError::TxnHashNotFound)?; - let tx = TxWithHash::from(pool_tx.as_ref().clone()); - Ok(RpcTxWithHash::from(tx)) + Ok(Into::into(pool_tx.as_ref().clone())) } } diff --git a/crates/rpc/rpc-server/src/starknet/read.rs b/crates/rpc/rpc-server/src/starknet/read.rs index 7e4eaa36e..0d0f9638e 100644 --- a/crates/rpc/rpc-server/src/starknet/read.rs +++ b/crates/rpc/rpc-server/src/starknet/read.rs @@ -44,7 +44,7 @@ impl StarknetApiServer for StarknetApi + Send + Sync + 'static, - PoolTx: From, + ::Transaction: Into, Pending: PendingBlockProvider, { async fn chain_id(&self) -> RpcResult { diff --git a/crates/rpc/rpc-server/src/starknet/trace.rs b/crates/rpc/rpc-server/src/starknet/trace.rs index 95609ea5f..af6789679 100644 --- a/crates/rpc/rpc-server/src/starknet/trace.rs +++ b/crates/rpc/rpc-server/src/starknet/trace.rs @@ -13,16 +13,16 @@ use katana_rpc_types::trace::{ to_rpc_fee_estimate, SimulatedTransactions, SimulatedTransactionsResponse, TraceBlockTransactionsResponse, TxTrace, TxTraceWithHash, }; -use katana_rpc_types::{BroadcastedTxWithChainId, SimulationFlag}; +use katana_rpc_types::{BroadcastedTxWithChainId, RpcTxWithHash, SimulationFlag}; use super::StarknetApi; use crate::starknet::pending::PendingBlockProvider; -impl StarknetApi +impl StarknetApi where EF: ExecutorFactory, - Pool: TransactionPool + Send + Sync + 'static, - PoolTx: From, + Pool: TransactionPool + Send + Sync + 'static, + ::Transaction: Into, Pending: PendingBlockProvider, { fn simulate_txs( @@ -139,11 +139,11 @@ where } #[async_trait] -impl StarknetTraceApiServer for StarknetApi +impl StarknetTraceApiServer for StarknetApi where EF: ExecutorFactory, - Pool: TransactionPool + Send + Sync + 'static, - PoolTx: From, + Pool: TransactionPool + Send + Sync + 'static, + ::Transaction: Into, Pending: PendingBlockProvider, { async fn trace_transaction(&self, transaction_hash: TxHash) -> RpcResult { diff --git 
a/crates/rpc/rpc-server/src/starknet/write.rs b/crates/rpc/rpc-server/src/starknet/write.rs index a5b16f052..aaebfd0ed 100644 --- a/crates/rpc/rpc-server/src/starknet/write.rs +++ b/crates/rpc/rpc-server/src/starknet/write.rs @@ -8,16 +8,17 @@ use katana_rpc_types::broadcasted::{ AddInvokeTransactionResponse, BroadcastedDeclareTx, BroadcastedDeployAccountTx, BroadcastedInvokeTx, }; -use katana_rpc_types::{BroadcastedTx, BroadcastedTxWithChainId}; +use katana_rpc_types::{BroadcastedTx, BroadcastedTxWithChainId, RpcTxWithHash}; use super::StarknetApi; use crate::starknet::pending::PendingBlockProvider; -impl StarknetApi +impl StarknetApi where EF: ExecutorFactory, - Pool: TransactionPool + Send + Sync + 'static, - PoolTx: From, + Pool: TransactionPool + Send + Sync + 'static, + ::Transaction: From, + ::Transaction: Into, Pending: PendingBlockProvider, { async fn add_invoke_transaction_impl( @@ -80,11 +81,12 @@ where } #[async_trait] -impl StarknetWriteApiServer for StarknetApi +impl StarknetWriteApiServer for StarknetApi where EF: ExecutorFactory, - Pool: TransactionPool + Send + Sync + 'static, - PoolTx: From, + Pool: TransactionPool + Send + Sync + 'static, + ::Transaction: From, + RpcTxWithHash: From<::Transaction>, Pending: PendingBlockProvider, { async fn add_invoke_transaction( diff --git a/crates/storage/fork/src/lib.rs b/crates/storage/fork/src/lib.rs index 97a53c45e..c3d221d14 100644 --- a/crates/storage/fork/src/lib.rs +++ b/crates/storage/fork/src/lib.rs @@ -14,7 +14,7 @@ use futures::channel::mpsc::{channel as async_channel, Receiver, SendError, Send use futures::future::BoxFuture; use futures::stream::Stream; use futures::{Future, FutureExt}; -use katana_primitives::block::{BlockHashOrNumber, BlockIdOrTag}; +use katana_primitives::block::BlockIdOrTag; use katana_primitives::class::{ ClassHash, CompiledClassHash, ComputeClassHashError, ContractClass, ContractClassCompilationError, @@ -140,7 +140,7 @@ pub struct Backend { /// A channel for receiving 
requests from the [BackendHandle]s. incoming: Receiver, /// Pinned block id for all requests. - block_id: BlockHashOrNumber, + block_id: BlockIdOrTag, } ///////////////////////////////////////////////////////////////// @@ -155,7 +155,7 @@ impl Backend { #[allow(clippy::new_ret_no_self)] pub fn new( provider: StarknetClient, - block_id: BlockHashOrNumber, + block_id: BlockIdOrTag, ) -> Result { let (handle, backend) = Self::new_inner(provider, block_id); @@ -175,10 +175,7 @@ impl Backend { Ok(handle) } - fn new_inner( - provider: StarknetClient, - block_id: BlockHashOrNumber, - ) -> (BackendClient, Backend) { + fn new_inner(provider: StarknetClient, block_id: BlockIdOrTag) -> (BackendClient, Backend) { // Create async channel to receive requests from the handle. let (tx, rx) = async_channel(100); let backend = Backend { @@ -196,7 +193,7 @@ impl Backend { /// This method is responsible for transforming the incoming request /// sent from a [BackendHandle] into a RPC request to the remote network. 
fn handle_requests(&mut self, request: BackendRequest) { - let block_id = BlockIdOrTag::from(self.block_id); + let block_id = self.block_id; let provider = self.provider.clone(); // Check if there are similar requests in the queue before sending the request diff --git a/crates/storage/provider/provider/src/providers/db/cached.rs b/crates/storage/provider/provider/src/providers/db/cached.rs index f43b56c8b..420502ef7 100644 --- a/crates/storage/provider/provider/src/providers/db/cached.rs +++ b/crates/storage/provider/provider/src/providers/db/cached.rs @@ -1,12 +1,12 @@ use std::collections::{BTreeMap, HashMap}; use std::ops::{Range, RangeInclusive}; -use std::sync::{Arc, RwLock}; +use std::sync::Arc; use katana_db::abstraction::Database; use katana_db::models::block::StoredBlockBodyIndices; use katana_primitives::block::{ - Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithTxHashes, FinalityStatus, Header, - SealedBlockWithStatus, + Block, BlockHash, BlockHashOrNumber, BlockIdOrTag, BlockNumber, BlockWithTxHashes, + FinalityStatus, Header, SealedBlockWithStatus, }; use katana_primitives::class::{ClassHash, CompiledClassHash, ContractClass}; use katana_primitives::contract::{ContractAddress, Nonce, StorageKey, StorageValue}; @@ -22,12 +22,15 @@ use katana_provider_api::block::{ use katana_provider_api::contract::{ContractClassProvider, ContractClassWriter}; use katana_provider_api::env::BlockEnvProvider; use katana_provider_api::stage::StageCheckpointProvider; -use katana_provider_api::state::{StateFactoryProvider, StateProvider, StateWriter}; +use katana_provider_api::state::{ + StateFactoryProvider, StateProofProvider, StateProvider, StateRootProvider, StateWriter, +}; use katana_provider_api::state_update::StateUpdateProvider; use katana_provider_api::transaction::{ ReceiptProvider, TransactionProvider, TransactionStatusProvider, TransactionTraceProvider, TransactionsProviderExt, }; +use parking_lot::RwLock; use 
crate::providers::fork::state::HistoricalStateProvider as ForkHistoricalStateProvider; use crate::providers::fork::ForkedProvider; @@ -69,64 +72,33 @@ impl StateCache { } fn get_nonce(&self, address: ContractAddress) -> Option { - self.inner.read().ok()?.nonces.get(&address).copied() - } - - fn set_nonce(&self, address: ContractAddress, nonce: Nonce) { - if let Ok(mut cache) = self.inner.write() { - cache.nonces.insert(address, nonce); - } + self.inner.read().nonces.get(&address).copied() } fn get_storage(&self, address: ContractAddress, key: StorageKey) -> Option { - self.inner.read().ok()?.storage.get(&(address, key)).copied() - } - - fn set_storage(&self, address: ContractAddress, key: StorageKey, value: StorageValue) { - if let Ok(mut cache) = self.inner.write() { - cache.storage.insert((address, key), value); - } + self.inner.read().storage.get(&(address, key)).copied() } fn get_class_hash(&self, address: ContractAddress) -> Option { - self.inner.read().ok()?.class_hashes.get(&address).copied() - } - - fn set_class_hash(&self, address: ContractAddress, class_hash: ClassHash) { - if let Ok(mut cache) = self.inner.write() { - cache.class_hashes.insert(address, class_hash); - } + self.inner.read().class_hashes.get(&address).copied() } fn get_class(&self, hash: ClassHash) -> Option { - self.inner.read().ok()?.classes.get(&hash).cloned() - } - - fn set_class(&self, hash: ClassHash, class: ContractClass) { - if let Ok(mut cache) = self.inner.write() { - cache.classes.insert(hash, class); - } + self.inner.read().classes.get(&hash).cloned() } fn get_compiled_class_hash(&self, hash: ClassHash) -> Option { - self.inner.read().ok()?.compiled_class_hashes.get(&hash).copied() - } - - fn set_compiled_class_hash(&self, hash: ClassHash, compiled_hash: CompiledClassHash) { - if let Ok(mut cache) = self.inner.write() { - cache.compiled_class_hashes.insert(hash, compiled_hash); - } + self.inner.read().compiled_class_hashes.get(&hash).copied() } /// Clears all cached data. 
pub fn clear(&self) { - if let Ok(mut cache) = self.inner.write() { - cache.nonces.clear(); - cache.storage.clear(); - cache.class_hashes.clear(); - cache.classes.clear(); - cache.compiled_class_hashes.clear(); - } + let mut cache = self.inner.write(); + cache.nonces.clear(); + cache.storage.clear(); + cache.class_hashes.clear(); + cache.classes.clear(); + cache.compiled_class_hashes.clear(); } } @@ -146,8 +118,13 @@ pub struct CachedDbProvider { impl CachedDbProvider { /// Creates a new [`CachedDbProvider`] wrapping the given [`ForkedProvider`]. - pub fn new(provider: ForkedProvider) -> Self { - Self { inner: provider, cache: StateCache::new() } + pub fn new( + db: Db, + block_id: BlockIdOrTag, + starknet_client: katana_rpc_client::starknet::Client, + ) -> Self { + let inner = ForkedProvider::new(db, block_id, starknet_client); + Self { inner, cache: StateCache::new() } } /// Returns a reference to the underlying [`ForkedProvider`]. @@ -166,6 +143,38 @@ impl CachedDbProvider { pub fn clear_cache(&self) { self.cache.clear(); } + + /// Merges state updates into the cache. 
+ pub fn merge_state_updates(&self, updates: &StateUpdatesWithClasses) { + let mut cache = self.cache.inner.write(); + let state = &updates.state_updates; + + for (address, nonce) in &state.nonce_updates { + cache.nonces.insert(*address, *nonce); + } + + for (address, storage) in &state.storage_updates { + for (key, value) in storage { + cache.storage.insert((*address, *key), *value); + } + } + + for (address, class_hash) in &state.deployed_contracts { + cache.class_hashes.insert(*address, *class_hash); + } + + for (address, class_hash) in &state.replaced_classes { + cache.class_hashes.insert(*address, *class_hash); + } + + for (class_hash, compiled_hash) in &state.declared_classes { + cache.compiled_class_hashes.insert(*class_hash, *compiled_hash); + } + + for (class_hash, class) in &updates.classes { + cache.classes.insert(*class_hash, class.clone()); + } + } } impl StateFactoryProvider for CachedDbProvider { @@ -427,15 +436,13 @@ struct CachedStateProvider { impl ContractClassProvider for CachedStateProvider { fn class(&self, hash: ClassHash) -> ProviderResult> { - // Check cache first if let Some(class) = self.cache.get_class(hash) { return Ok(Some(class)); } - // Query database and cache the result let class = self.state.class(hash)?; - if let Some(ref c) = class { - self.cache.set_class(hash, c.clone()); + if let Some(ref c) = &class { + self.cache.inner.write().classes.insert(hash, c.clone()); } Ok(class) } @@ -444,15 +451,13 @@ impl ContractClassProvider for CachedStateProvider { &self, hash: ClassHash, ) -> ProviderResult> { - // Check cache first if let Some(compiled_hash) = self.cache.get_compiled_class_hash(hash) { return Ok(Some(compiled_hash)); } - // Query database and cache the result let compiled_hash = self.state.compiled_class_hash_of_class_hash(hash)?; if let Some(ch) = compiled_hash { - self.cache.set_compiled_class_hash(hash, ch); + self.cache.inner.write().compiled_class_hashes.insert(hash, ch); } Ok(compiled_hash) } @@ -460,17 +465,11 @@ 
impl ContractClassProvider for CachedStateProvider { impl StateProvider for CachedStateProvider { fn nonce(&self, address: ContractAddress) -> ProviderResult> { - // Check cache first if let Some(nonce) = self.cache.get_nonce(address) { - return Ok(Some(nonce)); - } - - // Query database and cache the result - let nonce = self.state.nonce(address)?; - if let Some(n) = nonce { - self.cache.set_nonce(address, n); + Ok(Some(nonce)) + } else { + Ok(self.state.nonce(address)?) } - Ok(nonce) } fn storage( @@ -478,75 +477,29 @@ impl StateProvider for CachedStateProvider { address: ContractAddress, storage_key: StorageKey, ) -> ProviderResult> { - // Check cache first if let Some(value) = self.cache.get_storage(address, storage_key) { - return Ok(Some(value)); - } - - // Query database and cache the result - let value = self.state.storage(address, storage_key)?; - if let Some(v) = value { - self.cache.set_storage(address, storage_key, v); + Ok(Some(value)) + } else { + Ok(self.state.storage(address, storage_key)?) 
} - Ok(value) } fn class_hash_of_contract( &self, address: ContractAddress, ) -> ProviderResult> { - // Check cache first if let Some(class_hash) = self.cache.get_class_hash(address) { return Ok(Some(class_hash)); } - // Query database and cache the result let class_hash = self.state.class_hash_of_contract(address)?; if let Some(ch) = class_hash { - self.cache.set_class_hash(address, ch); + self.cache.inner.write().class_hashes.insert(address, ch); } Ok(class_hash) } } -impl katana_provider_api::state::StateProofProvider for CachedStateProvider { - fn class_multiproof(&self, classes: Vec) -> ProviderResult { - self.state.class_multiproof(classes) - } - - fn contract_multiproof( - &self, - addresses: Vec, - ) -> ProviderResult { - self.state.contract_multiproof(addresses) - } - - fn storage_multiproof( - &self, - address: ContractAddress, - storage_keys: Vec, - ) -> ProviderResult { - self.state.storage_multiproof(address, storage_keys) - } -} - -impl katana_provider_api::state::StateRootProvider for CachedStateProvider { - fn classes_root(&self) -> ProviderResult { - self.state.classes_root() - } - - fn contracts_root(&self) -> ProviderResult { - self.state.contracts_root() - } - - fn storage_root( - &self, - contract: ContractAddress, - ) -> ProviderResult> { - self.state.storage_root(contract) - } -} - /// A cached version of fork [`HistoricalStateProvider`] that checks the cache before querying the /// database. #[derive(Debug)] @@ -557,50 +510,32 @@ struct CachedHistoricalStateProvider { impl ContractClassProvider for CachedHistoricalStateProvider { fn class(&self, hash: ClassHash) -> ProviderResult> { - // Check cache first if let Some(class) = self.cache.get_class(hash) { - return Ok(Some(class)); - } - - // Query database and cache the result - let class = self.inner.class(hash)?; - if let Some(ref c) = class { - self.cache.set_class(hash, c.clone()); + Ok(Some(class)) + } else { + Ok(self.inner.class(hash)?) 
} - Ok(class) } fn compiled_class_hash_of_class_hash( &self, hash: ClassHash, ) -> ProviderResult> { - // Check cache first if let Some(compiled_hash) = self.cache.get_compiled_class_hash(hash) { - return Ok(Some(compiled_hash)); - } - - // Query database and cache the result - let compiled_hash = self.inner.compiled_class_hash_of_class_hash(hash)?; - if let Some(ch) = compiled_hash { - self.cache.set_compiled_class_hash(hash, ch); + Ok(Some(compiled_hash)) + } else { + Ok(self.inner.compiled_class_hash_of_class_hash(hash)?) } - Ok(compiled_hash) } } impl StateProvider for CachedHistoricalStateProvider { fn nonce(&self, address: ContractAddress) -> ProviderResult> { - // Check cache first if let Some(nonce) = self.cache.get_nonce(address) { - return Ok(Some(nonce)); - } - - // Query database and cache the result - let nonce = self.inner.nonce(address)?; - if let Some(n) = nonce { - self.cache.set_nonce(address, n); + Ok(Some(nonce)) + } else { + Ok(self.inner.nonce(address)?) } - Ok(nonce) } fn storage( @@ -608,79 +543,26 @@ impl StateProvider for CachedHistoricalStateProvider { address: ContractAddress, storage_key: StorageKey, ) -> ProviderResult> { - // Check cache first if let Some(value) = self.cache.get_storage(address, storage_key) { - return Ok(Some(value)); - } - - // Query database and cache the result - let value = self.inner.storage(address, storage_key)?; - if let Some(v) = value { - self.cache.set_storage(address, storage_key, v); + Ok(Some(value)) + } else { + Ok(self.inner.storage(address, storage_key)?) 
} - Ok(value) } fn class_hash_of_contract( &self, address: ContractAddress, ) -> ProviderResult> { - // Check cache first if let Some(class_hash) = self.cache.get_class_hash(address) { - return Ok(Some(class_hash)); - } - - // Query database and cache the result - let class_hash = self.inner.class_hash_of_contract(address)?; - if let Some(ch) = class_hash { - self.cache.set_class_hash(address, ch); + Ok(Some(class_hash)) + } else { + Ok(self.inner.class_hash_of_contract(address)?) } - Ok(class_hash) - } -} - -impl katana_provider_api::state::StateProofProvider - for CachedHistoricalStateProvider -{ - fn class_multiproof(&self, classes: Vec) -> ProviderResult { - self.inner.class_multiproof(classes) - } - - fn contract_multiproof( - &self, - addresses: Vec, - ) -> ProviderResult { - self.inner.contract_multiproof(addresses) - } - - fn storage_multiproof( - &self, - address: ContractAddress, - storage_keys: Vec, - ) -> ProviderResult { - self.inner.storage_multiproof(address, storage_keys) } } -impl katana_provider_api::state::StateRootProvider - for CachedHistoricalStateProvider -{ - fn classes_root(&self) -> ProviderResult { - self.inner.classes_root() - } - - fn contracts_root(&self) -> ProviderResult { - self.inner.contracts_root() - } - - fn storage_root( - &self, - contract: ContractAddress, - ) -> ProviderResult> { - self.inner.storage_root(contract) - } - - fn state_root(&self) -> ProviderResult { - self.inner.state_root() - } -} +impl StateProofProvider for CachedStateProvider {} +impl StateRootProvider for CachedStateProvider {} +impl StateProofProvider for CachedHistoricalStateProvider {} +impl StateRootProvider for CachedHistoricalStateProvider {} diff --git a/crates/storage/provider/provider/src/providers/fork/mod.rs b/crates/storage/provider/provider/src/providers/fork/mod.rs index a0699addf..6787aceb3 100644 --- a/crates/storage/provider/provider/src/providers/fork/mod.rs +++ b/crates/storage/provider/provider/src/providers/fork/mod.rs @@ -6,8 +6,8 @@ 
use katana_db::abstraction::Database; use katana_db::models::block::StoredBlockBodyIndices; use katana_fork::{Backend, BackendClient}; use katana_primitives::block::{ - Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithTxHashes, FinalityStatus, Header, - SealedBlockWithStatus, + Block, BlockHash, BlockHashOrNumber, BlockIdOrTag, BlockNumber, BlockWithTxHashes, + FinalityStatus, Header, SealedBlockWithStatus, }; use katana_primitives::class::{ClassHash, CompiledClassHash}; use katana_primitives::contract::ContractAddress; @@ -45,9 +45,9 @@ impl ForkedProvider { /// ## Arguments /// /// - `db`: The database to use for the provider. - /// - `block_id`: The block number or hash to use as the fork point. + /// - `block_id`: The block id or tag to use as the fork point. /// - `provider`: The Starknet JSON-RPC client to use for the provider. - pub fn new(db: Db, block_id: BlockHashOrNumber, provider: StarknetClient) -> Self { + pub fn new(db: Db, block_id: BlockIdOrTag, provider: StarknetClient) -> Self { let backend = Backend::new(provider, block_id).expect("failed to create backend"); let provider = Arc::new(DbProvider::new(db)); Self { provider, backend } @@ -65,7 +65,7 @@ impl ForkedProvider { impl ForkedProvider { /// Creates a new [`ForkedProvider`] using an ephemeral database. 
- pub fn new_ephemeral(block_id: BlockHashOrNumber, provider: StarknetClient) -> Self { + pub fn new_ephemeral(block_id: BlockIdOrTag, provider: StarknetClient) -> Self { let backend = Backend::new(provider, block_id).expect("failed to create backend"); let provider = Arc::new(DbProvider::new_in_memory()); Self { provider, backend } From b32ad765b31b3b39fd47c8443ab2c02517469bf2 Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Fri, 31 Oct 2025 18:06:46 -0400 Subject: [PATCH 04/26] wip --- crates/pool/pool-api/src/tx.rs | 3 ++- crates/primitives/src/transaction.rs | 1 + crates/rpc/rpc-types/src/transaction.rs | 6 ++++++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/crates/pool/pool-api/src/tx.rs b/crates/pool/pool-api/src/tx.rs index 0d60a2d82..787bbb4ff 100644 --- a/crates/pool/pool-api/src/tx.rs +++ b/crates/pool/pool-api/src/tx.rs @@ -9,6 +9,7 @@ use katana_primitives::transaction::{ use katana_primitives::utils::get_contract_address; use katana_primitives::Felt; use katana_rpc_types::broadcasted::BroadcastedTx; +use katana_rpc_types::BroadcastedTxWithChainId; use crate::ordering::PoolOrd; use crate::PoolTransaction; @@ -187,7 +188,7 @@ impl PoolTransaction for ExecutableTxWithHash { } } -impl PoolTransaction for BroadcastedTx { +impl PoolTransaction for BroadcastedTxWithChainId { fn hash(&self) -> TxHash { // BroadcastedTx doesn't have a precomputed hash, so we compute a deterministic // hash from the transaction content for pool identification purposes. diff --git a/crates/primitives/src/transaction.rs b/crates/primitives/src/transaction.rs index 0c1748359..69c05c704 100644 --- a/crates/primitives/src/transaction.rs +++ b/crates/primitives/src/transaction.rs @@ -109,6 +109,7 @@ impl Tx { } /// Represents a transaction that has all the necessary data to be executed. 
+/// #[derive(Debug, Clone, From, PartialEq, Eq)] pub enum ExecutableTx { Invoke(InvokeTx), diff --git a/crates/rpc/rpc-types/src/transaction.rs b/crates/rpc/rpc-types/src/transaction.rs index dfb9d5e1b..9b1afd2cc 100644 --- a/crates/rpc/rpc-types/src/transaction.rs +++ b/crates/rpc/rpc-types/src/transaction.rs @@ -589,3 +589,9 @@ impl From for primitives::Tx { } } } + +impl From for RpcTxWithHash { + fn from(tx: primitives::ExecutableTxWithHash) -> Self { + todo!() + } +} From 832117cb2e02367c633594b92d33526df2b0e533 Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Fri, 31 Oct 2025 18:44:48 -0400 Subject: [PATCH 05/26] wip --- crates/node/src/optimistic/executor.rs | 95 +++++++------------------- crates/pool/pool-api/src/tx.rs | 46 ++----------- 2 files changed, 31 insertions(+), 110 deletions(-) diff --git a/crates/node/src/optimistic/executor.rs b/crates/node/src/optimistic/executor.rs index a53189b26..d08ea9fc3 100644 --- a/crates/node/src/optimistic/executor.rs +++ b/crates/node/src/optimistic/executor.rs @@ -5,17 +5,15 @@ use std::task::{Context, Poll}; use futures::stream::StreamExt; use katana_core::backend::storage::Blockchain; -use katana_core::backend::Backend; use katana_executor::implementation::blockifier::BlockifierFactory; -use katana_executor::{ExecutionResult, ExecutorFactory}; +use katana_executor::ExecutorFactory; use katana_pool::ordering::FiFo; -use katana_pool::{PendingTransactions, PoolOrd, PoolTransaction, TransactionPool}; -use katana_primitives::transaction::ExecutableTxWithHash; +use katana_pool::{PendingTransactions, PoolTransaction, TransactionPool}; use katana_provider::api::state::StateFactoryProvider; use katana_provider::providers::db::cached::CachedDbProvider; -use katana_rpc_types::{BroadcastedTx, BroadcastedTxWithChainId}; +use katana_rpc_types::BroadcastedTxWithChainId; use katana_tasks::{JoinHandle, TaskSpawner}; -use tracing::{debug, error, info, trace, warn}; +use tracing::{debug, error, info, trace}; use 
crate::optimistic::pool::TxPool; @@ -26,6 +24,7 @@ pub struct OptimisticExecutor { pool: TxPool, optimistic_state: CachedDbProvider, executor_factory: Arc, + storage: Blockchain, task_spawner: TaskSpawner, } @@ -39,11 +38,12 @@ impl OptimisticExecutor { /// * `task_spawner` - The task spawner used to run the executor actor pub fn new( pool: TxPool, + storage: Blockchain, optimistic_state: CachedDbProvider, executor_factory: Arc, task_spawner: TaskSpawner, ) -> Self { - Self { pool, optimistic_state, executor_factory, task_spawner } + Self { pool, optimistic_state, executor_factory, task_spawner, storage } } /// Spawns the optimistic executor actor task. @@ -55,9 +55,14 @@ impl OptimisticExecutor { /// /// A `JoinHandle` to the spawned executor task. pub fn spawn(self) -> JoinHandle<()> { - let actor = - OptimisticExecutorActor::new(self.pool, self.optimistic_state, self.executor_factory); - self.task_spawner.build_task().name("Optimistic Executor").spawn(actor) + self.task_spawner.build_task().name("Optimistic Executor").spawn( + OptimisticExecutorActor::new( + self.pool, + self.storage, + self.optimistic_state, + self.executor_factory, + ), + ) } } @@ -66,7 +71,7 @@ struct OptimisticExecutorActor { pool: TxPool, optimistic_state: CachedDbProvider, /// Stream of pending transactions from the pool - pending_txs: PendingTransactions>, + pending_txs: PendingTransactions>, storage: Blockchain, executor_factory: Arc, } @@ -75,6 +80,7 @@ impl OptimisticExecutorActor { /// Creates a new executor actor with the given pending transactions stream. fn new( pool: TxPool, + storage: Blockchain, optimistic_state: CachedDbProvider, executor_factory: Arc, ) -> Self { @@ -83,44 +89,20 @@ impl OptimisticExecutorActor { } /// Execute a single transaction optimistically against the latest state. 
- fn execute_transaction(&self, tx: BroadcastedTxWithChainId) -> Result { + fn execute_transaction(&self, tx: BroadcastedTxWithChainId) -> anyhow::Result<()> { let latest_state = self.optimistic_state.latest().unwrap(); let mut executor = self.executor_factory.with_state(latest_state); // Execute the transaction - let result = executor.execute_transactions(vec![tx.clone()]); + let tx_hash = tx.hash(); - match result { - Ok((executed_count, limit_error)) => { - if executed_count == 0 { - return Err("Transaction was not executed".to_string()); - } + let _ = executor.execute_transactions(vec![tx.into()]).unwrap(); - // Get the execution result from the executor - let transactions = executor.transactions(); - if let Some((_, exec_result)) = transactions.last() { - if let Some(err) = limit_error { - warn!( - target: LOG_TARGET, - tx_hash = format!("{:#x}", tx.hash), - error = %err, - "Transaction execution hit limits" - ); - } - Ok(exec_result.clone()) - } else { - Err("No execution result found".to_string()) - } - - let output = executor.take_execution_output().unwrap(); - self.optimistic_state.merge_state_updates(&output.states); - - // remove from pool - self.pool.remove_transactions(); - } + let output = executor.take_execution_output().unwrap(); + self.optimistic_state.merge_state_updates(&output.states); + self.pool.remove_transactions(&[tx_hash]); - Err(e) => Err(format!("Execution failed: {e}")), - } + Ok(()) } } @@ -141,7 +123,7 @@ impl Future for OptimisticExecutorActor { Poll::Ready(Some(pending_tx)) => { let tx = pending_tx.tx.as_ref().clone(); - let tx_hash = tx.hash; + let tx_hash = tx.hash(); let tx_sender = tx.sender(); let tx_nonce = tx.nonce(); @@ -161,32 +143,7 @@ impl Future for OptimisticExecutorActor { // Execute the transaction optimistically match this.execute_transaction(tx) { - Ok(ExecutionResult::Success { receipt, .. 
}) => { - if let Some(reason) = receipt.revert_reason() { - warn!( - target: LOG_TARGET, - tx_hash = format!("{:#x}", tx_hash), - reason = %reason, - "Transaction reverted" - ); - } else { - debug!( - target: LOG_TARGET, - tx_hash = format!("{:#x}", tx_hash), - l1_gas = receipt.resources_used().gas.l1_gas, - cairo_steps = receipt.resources_used().computation_resources.n_steps, - "Transaction executed successfully" - ); - } - } - Ok(ExecutionResult::Failed { error }) => { - error!( - target: LOG_TARGET, - tx_hash = format!("{:#x}", tx_hash), - error = %error, - "Transaction execution failed" - ); - } + Ok(()) => {} Err(e) => { error!( target: LOG_TARGET, diff --git a/crates/pool/pool-api/src/tx.rs b/crates/pool/pool-api/src/tx.rs index 787bbb4ff..974b81f0c 100644 --- a/crates/pool/pool-api/src/tx.rs +++ b/crates/pool/pool-api/src/tx.rs @@ -190,38 +190,11 @@ impl PoolTransaction for ExecutableTxWithHash { impl PoolTransaction for BroadcastedTxWithChainId { fn hash(&self) -> TxHash { - // BroadcastedTx doesn't have a precomputed hash, so we compute a deterministic - // hash from the transaction content for pool identification purposes. 
- use starknet_types_core::hash::{Poseidon, StarkHash}; - - match self { - BroadcastedTx::Invoke(tx) => { - // Hash based on sender, nonce, and calldata - let mut data = vec![tx.sender_address.into(), tx.nonce]; - data.extend_from_slice(&tx.calldata); - Poseidon::hash_array(&data) - } - BroadcastedTx::Declare(tx) => { - // Hash based on sender, nonce, and compiled class hash - let data = [tx.sender_address.into(), tx.nonce, tx.compiled_class_hash.into()]; - Poseidon::hash_array(&data) - } - BroadcastedTx::DeployAccount(tx) => { - // Hash based on computed contract address, nonce, and class hash - let contract_address = get_contract_address( - tx.contract_address_salt, - tx.class_hash, - &tx.constructor_calldata, - Felt::ZERO, - ); - let data = [contract_address, tx.nonce, tx.class_hash.into()]; - Poseidon::hash_array(&data) - } - } + self.calculate_hash() } fn nonce(&self) -> Nonce { - match self { + match &self.tx { BroadcastedTx::Invoke(tx) => tx.nonce, BroadcastedTx::Declare(tx) => tx.nonce, BroadcastedTx::DeployAccount(tx) => tx.nonce, @@ -229,19 +202,10 @@ impl PoolTransaction for BroadcastedTxWithChainId { } fn sender(&self) -> ContractAddress { - match self { + match &self.tx { BroadcastedTx::Invoke(tx) => tx.sender_address, BroadcastedTx::Declare(tx) => tx.sender_address, - BroadcastedTx::DeployAccount(tx) => { - // Compute the contract address for deploy account transactions - get_contract_address( - tx.contract_address_salt, - tx.class_hash, - &tx.constructor_calldata, - Felt::ZERO, - ) - .into() - } + BroadcastedTx::DeployAccount(tx) => tx.contract_address(), } } @@ -254,7 +218,7 @@ impl PoolTransaction for BroadcastedTxWithChainId { } fn tip(&self) -> u64 { - match self { + match &self.tx { BroadcastedTx::Invoke(tx) => tx.tip.into(), BroadcastedTx::Declare(tx) => tx.tip.into(), BroadcastedTx::DeployAccount(tx) => tx.tip.into(), From 6d933c7110a8a6395fac1bc188c9fe93328f9caf Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Fri, 31 Oct 2025 19:01:50 -0400 
Subject: [PATCH 06/26] wip --- crates/node/src/optimistic/config.rs | 2 +- crates/rpc/rpc-server/src/starknet/mod.rs | 47 +++++++++++---------- crates/rpc/rpc-server/src/starknet/trace.rs | 4 +- 3 files changed, 27 insertions(+), 26 deletions(-) diff --git a/crates/node/src/optimistic/config.rs b/crates/node/src/optimistic/config.rs index f16997e1d..ed4f26752 100644 --- a/crates/node/src/optimistic/config.rs +++ b/crates/node/src/optimistic/config.rs @@ -12,7 +12,7 @@ use crate::config::rpc::RpcConfig; /// Node configurations. /// /// List of all possible options that can be used to configure a node. -#[derive(Debug, Clone, Default, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct Config { /// The chain specification. pub chain: Arc, diff --git a/crates/rpc/rpc-server/src/starknet/mod.rs b/crates/rpc/rpc-server/src/starknet/mod.rs index 53a596722..c4ea874f0 100644 --- a/crates/rpc/rpc-server/src/starknet/mod.rs +++ b/crates/rpc/rpc-server/src/starknet/mod.rs @@ -4,7 +4,7 @@ use std::fmt::Debug; use std::future::Future; use std::sync::Arc; -use katana_core::backend::storage::Database; +use katana_core::backend::storage::{Blockchain, Database}; use katana_core::backend::Backend; use katana_executor::ExecutorFactory; use katana_pool::TransactionPool; @@ -94,7 +94,7 @@ where { pool: Pool, backend: Arc>, - storage_provider: BlockchainProvider>, + storage_provider: Blockchain, forked_client: Option, task_spawner: TaskSpawner, estimate_fee_permit: Permits, @@ -114,7 +114,7 @@ where task_spawner: TaskSpawner, config: StarknetApiConfig, pending_block_provider: PP, - storage_provider: BlockchainProvider>, + storage_provider: Blockchain, ) -> Self { Self::new_inner( backend, @@ -134,7 +134,7 @@ where task_spawner: TaskSpawner, config: StarknetApiConfig, pending_block_provider: PP, - storage_provider: BlockchainProvider>, + storage_provider: Blockchain, ) -> Self { Self::new_inner( backend, @@ -150,7 +150,7 @@ where fn new_inner( backend: Arc>, pool: Pool, - 
storage_provider: BlockchainProvider>, + storage_provider: Blockchain, forked_client: Option, task_spawner: TaskSpawner, config: StarknetApiConfig, @@ -273,7 +273,7 @@ where } pub fn state(&self, block_id: &BlockIdOrTag) -> StarknetApiResult> { - let provider = &self.inner.storage_provider; + let provider = &self.inner.storage_provider.provider(); let state = match block_id { BlockIdOrTag::PreConfirmed => { @@ -296,7 +296,7 @@ where } fn block_env_at(&self, block_id: &BlockIdOrTag) -> StarknetApiResult { - let provider = &self.inner.storage_provider; + let provider = &self.inner.storage_provider.provider(); let env = match block_id { BlockIdOrTag::PreConfirmed => { @@ -342,7 +342,7 @@ where } fn block_hash_and_number(&self) -> StarknetApiResult { - let provider = &self.inner.storage_provider; + let provider = &self.inner.storage_provider.provider(); let hash = provider.latest_hash()?; let number = provider.latest_number()?; Ok(BlockHashAndNumberResponse::new(hash, number)) @@ -439,7 +439,7 @@ where pub async fn block_tx_count(&self, block_id: BlockIdOrTag) -> StarknetApiResult { let count = self .on_io_blocking_task(move |this| { - let provider = &this.inner.storage_provider; + let provider = &this.inner.storage_provider.provider(); let block_id: BlockHashOrNumber = match block_id { BlockIdOrTag::L1Accepted => return Ok(None), @@ -475,7 +475,7 @@ where async fn latest_block_number(&self) -> StarknetApiResult { self.on_io_blocking_task(move |this| { - let block_number = this.inner.storage_provider.latest_number()?; + let block_number = this.inner.storage_provider.provider().latest_number()?; Ok(BlockNumberResponse { block_number }) }) .await? @@ -515,7 +515,7 @@ where let tx = if BlockIdOrTag::PreConfirmed == block_id { this.inner.pending_block_provider.get_pending_transaction_by_index(index)? } else { - let provider = &this.inner.storage_provider; + let provider = &this.inner.storage_provider.provider(); let block_num = provider .convert_block_id(block_id)? 
@@ -551,6 +551,7 @@ where let tx = this .inner .storage_provider + .provider() .transaction_by_hash(hash)? .map(RpcTxWithHash::from); @@ -577,7 +578,7 @@ where { StarknetApiResult::Ok(pending_receipt) } else { - let provider = &this.inner.storage_provider; + let provider = &this.inner.storage_provider.provider(); StarknetApiResult::Ok(ReceiptBuilder::new(hash, provider).build()?) } }) @@ -595,7 +596,7 @@ where async fn transaction_status(&self, hash: TxHash) -> StarknetApiResult { let status = self .on_io_blocking_task(move |this| { - let provider = &this.inner.storage_provider; + let provider = &this.inner.storage_provider.provider(); let status = provider.transaction_status(hash)?; if let Some(status) = status { @@ -650,7 +651,7 @@ where ) -> StarknetApiResult { let block = self .on_io_blocking_task(move |this| { - let provider = &this.inner.storage_provider; + let provider = &this.inner.storage_provider.provider(); if BlockIdOrTag::PreConfirmed == block_id { if let Some(block) = @@ -687,7 +688,7 @@ where ) -> StarknetApiResult { let block = self .on_io_blocking_task(move |this| { - let provider = &this.inner.storage_provider; + let provider = &this.inner.storage_provider.provider(); if BlockIdOrTag::PreConfirmed == block_id { if let Some(block) = @@ -724,7 +725,7 @@ where ) -> StarknetApiResult { let block = self .on_io_blocking_task(move |this| { - let provider = &this.inner.storage_provider; + let provider = &this.inner.storage_provider.provider(); if BlockIdOrTag::PreConfirmed == block_id { if let Some(block) = @@ -758,7 +759,7 @@ where pub async fn state_update(&self, block_id: BlockIdOrTag) -> StarknetApiResult { let state_update = self .on_io_blocking_task(move |this| { - let provider = &this.inner.storage_provider; + let provider = &this.inner.storage_provider.provider(); let block_id = match block_id { BlockIdOrTag::Number(num) => BlockHashOrNumber::Num(num), @@ -855,7 +856,7 @@ where continuation_token: Option, chunk_size: u64, ) -> StarknetApiResult { 
- let provider = &self.inner.storage_provider; + let provider = &self.inner.storage_provider.provider(); let from = self.resolve_event_block_id_if_forked(from_block)?; let to = self.resolve_event_block_id_if_forked(to_block)?; @@ -1072,7 +1073,7 @@ where &self, id: BlockIdOrTag, ) -> StarknetApiResult { - let provider = &self.inner.storage_provider; + let provider = &self.inner.storage_provider.provider(); let id = match id { BlockIdOrTag::L1Accepted => EventBlockId::Pending, @@ -1112,7 +1113,7 @@ where contracts_storage_keys: Option>, ) -> StarknetApiResult { self.on_io_blocking_task(move |this| { - let provider = &this.inner.storage_provider; + let provider = &this.inner.storage_provider.provider(); let Some(block_num) = provider.convert_block_id(block_id)? else { return Err(StarknetApiError::BlockNotFound); @@ -1208,7 +1209,7 @@ where { async fn blocks(&self, request: GetBlocksRequest) -> StarknetApiResult { self.on_io_blocking_task(move |this| { - let provider = &this.inner.storage_provider; + let provider = &this.inner.storage_provider.provider(); // Parse continuation token to get starting point let start_from = if let Some(token_str) = request.result_page_request.continuation_token @@ -1284,7 +1285,7 @@ where request: GetTransactionsRequest, ) -> StarknetApiResult { self.on_io_blocking_task(move |this| { - let provider = &this.inner.storage_provider; + let provider = &this.inner.storage_provider.provider(); // Resolve the starting point for this query. let start_from = if let Some(token_str) = request.result_page_request.continuation_token @@ -1354,7 +1355,7 @@ where async fn total_transactions(&self) -> StarknetApiResult { self.on_io_blocking_task(move |this| { - let provider = &this.inner.storage_provider; + let provider = &this.inner.storage_provider.provider(); let total = provider.total_transactions()? 
as TxNumber; Ok(total) }) diff --git a/crates/rpc/rpc-server/src/starknet/trace.rs b/crates/rpc/rpc-server/src/starknet/trace.rs index af6789679..d1e8798f5 100644 --- a/crates/rpc/rpc-server/src/starknet/trace.rs +++ b/crates/rpc/rpc-server/src/starknet/trace.rs @@ -97,7 +97,7 @@ where ) -> Result, StarknetApiError> { use StarknetApiError::BlockNotFound; - let provider = &self.inner.storage_provider; + let provider = self.inner.storage_provider.provider(); let block_id: BlockHashOrNumber = match block_id { ConfirmedBlockIdOrTag::L1Accepted => { @@ -131,7 +131,7 @@ where Ok(pending_trace) } else { // If not found in pending block, fallback to the provider - let provider = &self.inner.storage_provider; + let provider = self.inner.storage_provider.provider(); let trace = provider.transaction_execution(tx_hash)?.ok_or(TxnHashNotFound)?; Ok(TxTrace::from(trace)) } From 9298818824fdadebb3c3c778ab5141ae4dd455ff Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Fri, 31 Oct 2025 20:54:14 -0400 Subject: [PATCH 07/26] wip --- crates/node/src/optimistic/mod.rs | 25 +++++------ crates/rpc/rpc-types/src/broadcasted.rs | 57 +++++++++++++++++++++++++ 2 files changed, 68 insertions(+), 14 deletions(-) diff --git a/crates/node/src/optimistic/mod.rs b/crates/node/src/optimistic/mod.rs index a9299778d..d742af616 100644 --- a/crates/node/src/optimistic/mod.rs +++ b/crates/node/src/optimistic/mod.rs @@ -2,8 +2,6 @@ use std::future::IntoFuture; use std::sync::Arc; use anyhow::{Context, Result}; -use config::rpc::RpcModuleKind; -use config::Config; use http::header::CONTENT_TYPE; use http::Method; use jsonrpsee::http_client::HttpClientBuilder; @@ -16,7 +14,7 @@ use katana_core::service::block_producer::BlockProducer; use katana_db::Db; use katana_executor::implementation::blockifier::cache::ClassCache; use katana_executor::implementation::blockifier::BlockifierFactory; -use katana_executor::ExecutionFlags; +use katana_executor::{BlockLimits, ExecutionFlags}; use 
katana_gas_price_oracle::{FixedPriceOracle, GasPriceOracle}; use katana_gateway_server::{GatewayServer, GatewayServerHandle}; use katana_metrics::exporters::prometheus::PrometheusRecorder; @@ -33,6 +31,7 @@ use katana_rpc_api::dev::DevApiServer; use katana_rpc_api::starknet::{StarknetApiServer, StarknetTraceApiServer, StarknetWriteApiServer}; #[cfg(feature = "explorer")] use katana_rpc_api::starknet_ext::StarknetApiExtServer; +use katana_rpc_client::starknet::Client as StarknetClient; use katana_stage::Sequencing; use katana_tasks::TaskManager; use tracing::info; @@ -41,6 +40,7 @@ mod config; mod executor; mod pool; +use crate::config::rpc::RpcModuleKind; use config::Config; use crate::exit::NodeStoppedFuture; @@ -85,9 +85,9 @@ impl Node { let cfg_env = CfgEnv { fee_token_addresses, chain_id: config.chain.id(), - invoke_tx_max_n_steps: config.execution.invocation_max_steps, - validate_max_n_steps: config.execution.validation_max_steps, - max_recursion_depth: config.execution.max_recursion_depth, + invoke_tx_max_n_steps: 10_000_000, + validate_max_n_steps: 10_000_000, + max_recursion_depth: 100, }; let executor_factory = { @@ -105,7 +105,7 @@ impl Node { let factory = BlockifierFactory::new( cfg_env, ExecutionFlags::new(), - config.sequencing.block_limits(), + BlockLimits::default(), global_class_cache, ); @@ -128,10 +128,10 @@ impl Node { ) .await?; - // TODO: it'd bee nice if the client can be shared on both the rpc and forked backend - // side - let rpc_client = HttpClientBuilder::new().build(config.forking.url.as_ref())?; - let forked_client = ForkedClient::new(rpc_client, block_num); + let http_client = HttpClientBuilder::new().build(config.forking.url.as_str())?; + let starknet_client = katana_rpc_client::starknet::Client::new(http_client); + + let forked_client = ForkedClient::new(starknet_client.clone(), block_num); let gpo = GasPriceOracle::sampled_starknet(config.forking.url.clone()); @@ -146,9 +146,6 @@ impl Node { // --- build transaction pool - let 
http_client = HttpClientBuilder::new().build(config.forking.url.as_str())?; - let starknet_client = katana_rpc_client::starknet::Client::new(http_client); - let pool_validator = PoolValidator::new(starknet_client.clone()); let pool = TxPool::new(pool_validator, FiFo::new()); diff --git a/crates/rpc/rpc-types/src/broadcasted.rs b/crates/rpc/rpc-types/src/broadcasted.rs index 7d416aae6..3d8fe921b 100644 --- a/crates/rpc/rpc-types/src/broadcasted.rs +++ b/crates/rpc/rpc-types/src/broadcasted.rs @@ -694,6 +694,63 @@ impl From for ExecutableTxWithHash { } } +impl From for crate::transaction::RpcTxWithHash { + fn from(value: BroadcastedTxWithChainId) -> Self { + use crate::transaction::{ + RpcDeclareTx, RpcDeclareTxV3, RpcDeployAccountTx, RpcDeployAccountTxV3, RpcInvokeTx, + RpcInvokeTxV3, RpcTx, + }; + + let transaction_hash = value.calculate_hash(); + let transaction = match value.tx { + BroadcastedTx::Invoke(tx) => RpcTx::Invoke(RpcInvokeTx::V3(RpcInvokeTxV3 { + sender_address: tx.sender_address, + calldata: tx.calldata, + signature: tx.signature, + nonce: tx.nonce, + resource_bounds: tx.resource_bounds, + tip: tx.tip, + paymaster_data: tx.paymaster_data, + account_deployment_data: tx.account_deployment_data, + nonce_data_availability_mode: tx.nonce_data_availability_mode, + fee_data_availability_mode: tx.fee_data_availability_mode, + })), + BroadcastedTx::Declare(tx) => { + let class_hash = tx.contract_class.hash().expect("failed to compute class hash"); + RpcTx::Declare(RpcDeclareTx::V3(RpcDeclareTxV3 { + sender_address: tx.sender_address, + compiled_class_hash: tx.compiled_class_hash, + signature: tx.signature, + nonce: tx.nonce, + class_hash, + resource_bounds: tx.resource_bounds, + tip: tx.tip, + paymaster_data: tx.paymaster_data, + account_deployment_data: tx.account_deployment_data, + nonce_data_availability_mode: tx.nonce_data_availability_mode, + fee_data_availability_mode: tx.fee_data_availability_mode, + })) + } + BroadcastedTx::DeployAccount(tx) => { + 
RpcTx::DeployAccount(RpcDeployAccountTx::V3(RpcDeployAccountTxV3 { + signature: tx.signature, + nonce: tx.nonce, + contract_address_salt: tx.contract_address_salt, + constructor_calldata: tx.constructor_calldata, + class_hash: tx.class_hash, + paymaster_data: tx.paymaster_data, + tip: tx.tip, + resource_bounds: tx.resource_bounds, + nonce_data_availability_mode: tx.nonce_data_availability_mode, + fee_data_availability_mode: tx.fee_data_availability_mode, + })) + } + }; + + crate::transaction::RpcTxWithHash { transaction_hash, transaction } + } +} + #[cfg(test)] mod tests { use assert_matches::assert_matches; From ab7620d8338324d84bba181d629eafd2ab51ba59 Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Fri, 31 Oct 2025 21:37:55 -0400 Subject: [PATCH 08/26] wip --- crates/core/src/backend/storage.rs | 112 +++++++++++++++ crates/node/src/optimistic/mod.rs | 61 +++++--- crates/primitives/src/transaction.rs | 1 - crates/rpc/rpc-server/src/starknet/forking.rs | 130 ++++++++---------- crates/rpc/rpc-server/src/starknet/list.rs | 6 +- crates/rpc/rpc-server/src/starknet/mod.rs | 3 +- .../provider/src/providers/db/cached.rs | 19 +++ 7 files changed, 231 insertions(+), 101 deletions(-) diff --git a/crates/core/src/backend/storage.rs b/crates/core/src/backend/storage.rs index 1ba93096c..e21b83912 100644 --- a/crates/core/src/backend/storage.rs +++ b/crates/core/src/backend/storage.rs @@ -14,6 +14,7 @@ use katana_provider::api::transaction::{ TransactionsProviderExt, }; use katana_provider::api::trie::TrieWriter; +use katana_provider::providers::db::cached::CachedDbProvider; use katana_provider::providers::db::DbProvider; use katana_provider::providers::fork::ForkedProvider; use katana_provider::BlockchainProvider; @@ -195,6 +196,117 @@ impl Blockchain { Ok((Self::new(database), block_num)) } + /// Builds a new blockchain with a forked block. 
+ pub async fn new_optimistic( + db: katana_db::Db, + starknet_client: StarknetClient, + fork_block: Option, + chain: &mut katana_chain_spec::dev::ChainSpec, + ) -> Result { + let chain_id = + starknet_client.chain_id().await.context("failed to fetch forked network id")?; + + // if the id is not in ASCII encoding, we display the chain id as is in hex. + let parsed_id = match parse_cairo_short_string(&chain_id) { + Ok(id) => id, + Err(_) => format!("{chain_id:#x}"), + }; + + // If the fork block number is not specified, we use the latest accepted block on the forked + // network. + let block_id = if let Some(id) = fork_block { id } else { BlockIdOrTag::Latest }; + + info!(chain = %parsed_id, block = %block_id, "Forking chain."); + + let block = starknet_client + .get_block_with_tx_hashes(block_id) + .await + .context("failed to fetch forked block")?; + + let GetBlockWithTxHashesResponse::Block(forked_block) = block else { + bail!("forking a pending block is not allowed") + }; + + let block_num = forked_block.block_number; + + chain.id = chain_id.into(); + + // adjust the genesis to match the forked block + chain.genesis.timestamp = forked_block.timestamp; + chain.genesis.number = forked_block.block_number; + chain.genesis.state_root = forked_block.new_root; + chain.genesis.parent_hash = forked_block.parent_hash; + chain.genesis.sequencer_address = forked_block.sequencer_address; + + // TODO: remove gas price from genesis + let eth_l1_gas_price = + forked_block.l1_gas_price.price_in_wei.to_u128().expect("should fit in u128"); + let strk_l1_gas_price = + forked_block.l1_gas_price.price_in_fri.to_u128().expect("should fit in u128"); + chain.genesis.gas_prices = + unsafe { GasPrices::new_unchecked(eth_l1_gas_price, strk_l1_gas_price) }; + + // TODO: convert this to block number instead of BlockHashOrNumber so that it is easier to + // check if the requested block is within the supported range or not. 
+ let database = CachedDbProvider::new(db, block_id, starknet_client.clone()); + + // initialize parent fork block + // + // NOTE: this is just a workaround for allowing forked genesis block to be initialized using + // `Backend::do_mine_block`. + { + let parent_block_id = BlockIdOrTag::from(forked_block.parent_hash); + let parent_block = starknet_client.get_block_with_tx_hashes(parent_block_id).await?; + + let GetBlockWithTxHashesResponse::Block(parent_block) = parent_block else { + bail!("parent block is a preconfirmed block"); + }; + + let parent_block = SealedBlockWithStatus { + block: SealedBlock { + hash: parent_block.block_hash, + body: Vec::new(), + header: Header { + parent_hash: parent_block.parent_hash, + timestamp: parent_block.timestamp, + number: parent_block.block_number, + state_root: parent_block.new_root, + sequencer_address: parent_block.sequencer_address, + ..Default::default() + }, + }, + status: FinalityStatus::AcceptedOnL2, + }; + + database + .insert_block_with_states_and_receipts( + parent_block, + Default::default(), + Default::default(), + Default::default(), + ) + .context("failed to initialize provider with the parent of the forked block")?; + } + + // update the genesis block with the forked block's data + // we don't update the `l1_gas_price` because it's already done when we set the `gas_prices` in + // genesis. this flow is kinda flawed, we should probably refactor it out of the + // genesis.
+ let mut block = chain.block(); + + let eth_l1_data_gas_price = + forked_block.l1_data_gas_price.price_in_wei.to_u128().expect("should fit in u128"); + let strk_l1_data_gas_price = + forked_block.l1_data_gas_price.price_in_fri.to_u128().expect("should fit in u128"); + + block.header.l1_data_gas_prices = + unsafe { GasPrices::new_unchecked(eth_l1_data_gas_price, strk_l1_data_gas_price) }; + + block.header.l1_da_mode = forked_block.l1_da_mode; + + Ok(Self::new(database)) + } + pub fn provider(&self) -> &BlockchainProvider> { &self.inner } diff --git a/crates/node/src/optimistic/mod.rs b/crates/node/src/optimistic/mod.rs index d742af616..ab5d0e865 100644 --- a/crates/node/src/optimistic/mod.rs +++ b/crates/node/src/optimistic/mod.rs @@ -1,49 +1,40 @@ -use std::future::IntoFuture; use std::sync::Arc; -use anyhow::{Context, Result}; +use anyhow::Result; use http::header::CONTENT_TYPE; use http::Method; use jsonrpsee::http_client::HttpClientBuilder; use jsonrpsee::RpcModule; -use katana_chain_spec::{ChainSpec, SettlementLayer}; +use katana_chain_spec::ChainSpec; use katana_core::backend::storage::Blockchain; use katana_core::backend::Backend; use katana_core::env::BlockContextGenerator; -use katana_core::service::block_producer::BlockProducer; -use katana_db::Db; use katana_executor::implementation::blockifier::cache::ClassCache; use katana_executor::implementation::blockifier::BlockifierFactory; use katana_executor::{BlockLimits, ExecutionFlags}; -use katana_gas_price_oracle::{FixedPriceOracle, GasPriceOracle}; -use katana_gateway_server::{GatewayServer, GatewayServerHandle}; +use katana_gas_price_oracle::GasPriceOracle; use katana_metrics::exporters::prometheus::PrometheusRecorder; use katana_metrics::sys::DiskReporter; use katana_metrics::{Report, Server as MetricsServer}; use katana_pool::ordering::FiFo; +use katana_primitives::block::BlockIdOrTag; use katana_primitives::env::{CfgEnv, FeeTokenAddressses}; use katana_rpc::cors::Cors; -use katana_rpc::dev::DevApi; use 
katana_rpc::starknet::forking::ForkedClient; use katana_rpc::starknet::{StarknetApi, StarknetApiConfig}; use katana_rpc::{RpcServer, RpcServerHandle}; -use katana_rpc_api::dev::DevApiServer; use katana_rpc_api::starknet::{StarknetApiServer, StarknetTraceApiServer, StarknetWriteApiServer}; -#[cfg(feature = "explorer")] -use katana_rpc_api::starknet_ext::StarknetApiExtServer; -use katana_rpc_client::starknet::Client as StarknetClient; -use katana_stage::Sequencing; -use katana_tasks::TaskManager; +use katana_tasks::{JoinHandle, TaskManager}; use tracing::info; mod config; mod executor; mod pool; -use crate::config::rpc::RpcModuleKind; use config::Config; -use crate::exit::NodeStoppedFuture; +use crate::config::rpc::RpcModuleKind; +use crate::optimistic::executor::OptimisticExecutor; use crate::optimistic::pool::{PoolValidator, TxPool}; #[derive(Debug)] @@ -53,6 +44,7 @@ pub struct Node { db: katana_db::Db, rpc_server: RpcServer, task_manager: TaskManager, + executor: OptimisticExecutor, backend: Arc>, } @@ -120,10 +112,10 @@ impl Node { }; let db = katana_db::Db::in_memory()?; - let (blockchain, block_num) = Blockchain::new_from_forked( + let blockchain = Blockchain::new_from_forked( db.clone(), config.forking.url.clone(), - config.forking.block, + Some(BlockIdOrTag::Latest), chain_spec, ) .await?; @@ -149,6 +141,16 @@ impl Node { let pool_validator = PoolValidator::new(starknet_client.clone()); let pool = TxPool::new(pool_validator, FiFo::new()); + // -- build executor + + let executor = OptimisticExecutor::new( + pool.clone(), + blockchain, + optimistic_state, + executor_factory.clone(), + task_spawner.clone(), + ); + // --- build rpc server let mut rpc_modules = RpcModule::new(()); @@ -206,7 +208,7 @@ impl Node { rpc_server = rpc_server.max_response_body_size(max_response_body_size); } - Ok(Node { db, pool, backend, rpc_server, config: config.into(), task_manager }) + Ok(Node { db, pool, backend, rpc_server, config: config.into(), task_manager, executor }) } pub 
async fn launch(self) -> Result { @@ -244,12 +246,29 @@ impl Node { info!(target: "node", "Gas price oracle worker started."); - Ok(LaunchedNode { node: self, rpc: rpc_handle }) + let executor_handle = self.executor.spawn(); + + Ok(LaunchedNode { + rpc: rpc_handle, + backend: self.backend, + config: self.config, + db: self.db, + executor: executor_handle, + task_manager: self.task_manager, + pool: self.pool, + rpc_server: self.rpc_server, + }) } } #[derive(Debug)] pub struct LaunchedNode { - node: Node, + config: Arc, + pool: TxPool, + db: katana_db::Db, + rpc_server: RpcServer, + task_manager: TaskManager, + backend: Arc>, rpc: RpcServerHandle, + executor: JoinHandle<()>, } diff --git a/crates/primitives/src/transaction.rs b/crates/primitives/src/transaction.rs index 69c05c704..0c1748359 100644 --- a/crates/primitives/src/transaction.rs +++ b/crates/primitives/src/transaction.rs @@ -109,7 +109,6 @@ impl Tx { } /// Represents a transaction that has all the necessary data to be executed. -/// #[derive(Debug, Clone, From, PartialEq, Eq)] pub enum ExecutableTx { Invoke(InvokeTx), diff --git a/crates/rpc/rpc-server/src/starknet/forking.rs b/crates/rpc/rpc-server/src/starknet/forking.rs index edc18d879..5535a1ffd 100644 --- a/crates/rpc/rpc-server/src/starknet/forking.rs +++ b/crates/rpc/rpc-server/src/starknet/forking.rs @@ -31,19 +31,19 @@ pub enum Error { #[derive(Debug, Clone)] pub struct ForkedClient { /// The block number where the node is forked from. - block: BlockNumber, + block: BlockIdOrTag, /// The Starknet JSON-RPC client for doing the request to the forked network. client: Client, } impl ForkedClient { /// Creates a new forked client from the given [`Client`] and block number. - pub fn new(client: Client, block: BlockNumber) -> Self { + pub fn new(client: Client, block: BlockIdOrTag) -> Self { Self { block, client } } /// Returns the block number of the forked client. 
- pub fn block(&self) -> &BlockNumber { + pub fn block(&self) -> &BlockIdOrTag { &self.block } } @@ -55,10 +55,15 @@ impl ForkedClient { GetBlockWithTxHashesResponse::PreConfirmed(block) => block.block_number, }; - if number > self.block { - Err(Error::BlockOutOfRange) - } else { - Ok(number) + match self.block { + BlockIdOrTag::Number(fork_num) => { + if number > fork_num { + Err(Error::BlockOutOfRange) + } else { + Ok(number) + } + } + _ => Ok(number), } } @@ -73,8 +78,10 @@ impl ForkedClient { let receipt = self.client.get_transaction_receipt(hash).await?; if let ReceiptBlockInfo::Block { block_number, .. } = receipt.block { - if block_number > self.block { - return Err(Error::BlockOutOfRange); + if let BlockIdOrTag::Number(fork_num) = self.block { + if block_number > fork_num { + return Err(Error::BlockOutOfRange); + } } } @@ -100,8 +107,10 @@ impl ForkedClient { ) -> Result { match block_id { BlockIdOrTag::Number(num) => { - if num > self.block { - return Err(Error::BlockOutOfRange); + if let BlockIdOrTag::Number(fork_num) = self.block { + if num > fork_num { + return Err(Error::BlockOutOfRange); + } } Ok(self.client.get_transaction_by_block_id_and_index(block_id, idx).await?) @@ -120,8 +129,10 @@ impl ForkedClient { } }; - if number > self.block { - return Err(Error::BlockOutOfRange); + if let BlockIdOrTag::Number(fork_num) = self.block { + if number > fork_num { + return Err(Error::BlockOutOfRange); + } } Ok(tx?) 
@@ -138,17 +149,20 @@ impl ForkedClient { block_id: BlockIdOrTag, ) -> Result { let block = self.client.get_block_with_txs(block_id).await?; + Ok(block) - match block { - MaybePreConfirmedBlock::PreConfirmed(_) => Err(Error::UnexpectedPendingData), - MaybePreConfirmedBlock::Confirmed(ref b) => { - if b.block_number > self.block { - Err(Error::BlockOutOfRange) - } else { - Ok(block) - } - } - } + // match block { + // MaybePreConfirmedBlock::PreConfirmed(_) => Err(Error::UnexpectedPendingData), + // MaybePreConfirmedBlock::Confirmed(ref b) => { + // if let BlockIdOrTag::Number(fork_num) = self.block { + // if b.block_number > fork_num { + // return Err(Error::BlockOutOfRange); + // } + // } + + // Ok(block) + // } + // } } pub async fn get_block_with_receipts( @@ -157,16 +171,18 @@ impl ForkedClient { ) -> Result { let block = self.client.get_block_with_receipts(block_id).await?; - match block { - GetBlockWithReceiptsResponse::Block(ref b) => { - if b.block_number > self.block { - return Err(Error::BlockOutOfRange); - } - } - GetBlockWithReceiptsResponse::PreConfirmed(_) => { - return Err(Error::UnexpectedPendingData); - } - } + // match block { + // GetBlockWithReceiptsResponse::Block(ref b) => { + // if let BlockIdOrTag::Number(fork_num) = self.block { + // if b.block_number > fork_num { + // return Err(Error::BlockOutOfRange); + // } + // } + // } + // GetBlockWithReceiptsResponse::PreConfirmed(_) => { + // return Err(Error::UnexpectedPendingData); + // } + // } Ok(block) } @@ -179,8 +195,10 @@ impl ForkedClient { match block { GetBlockWithTxHashesResponse::Block(ref b) => { - if b.block_number > self.block { - return Err(Error::BlockOutOfRange); + if let BlockIdOrTag::Number(fork_num) = self.block { + if b.block_number > fork_num { + return Err(Error::BlockOutOfRange); + } } } GetBlockWithTxHashesResponse::PreConfirmed(_) => { @@ -192,46 +210,10 @@ impl ForkedClient { } pub async fn get_block_transaction_count(&self, block_id: BlockIdOrTag) -> Result { - match 
block_id { - BlockIdOrTag::Number(num) if num > self.block => { - return Err(Error::BlockOutOfRange); - } - BlockIdOrTag::Hash(hash) => { - let block = self.client.get_block_with_tx_hashes(BlockIdOrTag::Hash(hash)).await?; - if let GetBlockWithTxHashesResponse::Block(b) = block { - if b.block_number > self.block { - return Err(Error::BlockOutOfRange); - } - } - } - BlockIdOrTag::L1Accepted | BlockIdOrTag::Latest | BlockIdOrTag::PreConfirmed => { - return Err(Error::BlockTagNotAllowed); - } - BlockIdOrTag::Number(_) => {} - } - Ok(self.client.get_block_transaction_count(block_id).await?) } pub async fn get_state_update(&self, block_id: BlockIdOrTag) -> Result { - match block_id { - BlockIdOrTag::Number(num) if num > self.block => { - return Err(Error::BlockOutOfRange); - } - BlockIdOrTag::Hash(hash) => { - let block = self.client.get_block_with_tx_hashes(BlockIdOrTag::Hash(hash)).await?; - if let GetBlockWithTxHashesResponse::Block(b) = block { - if b.block_number > self.block { - return Err(Error::BlockOutOfRange); - } - } - } - BlockIdOrTag::L1Accepted | BlockIdOrTag::Latest | BlockIdOrTag::PreConfirmed => { - return Err(Error::BlockTagNotAllowed); - } - BlockIdOrTag::Number(_) => {} - } - Ok(self.client.get_state_update(block_id).await?) 
} @@ -247,9 +229,9 @@ impl ForkedClient { continuation_token: Option, chunk_size: u64, ) -> Result { - if from > self.block || to > self.block { - return Err(Error::BlockOutOfRange); - } + // if from > self.block || to > self.block { + // return Err(Error::BlockOutOfRange); + // } let from_block = Some(BlockIdOrTag::Number(from)); let to_block = Some(BlockIdOrTag::Number(to)); diff --git a/crates/rpc/rpc-server/src/starknet/list.rs b/crates/rpc/rpc-server/src/starknet/list.rs index 3d12c416a..94134373f 100644 --- a/crates/rpc/rpc-server/src/starknet/list.rs +++ b/crates/rpc/rpc-server/src/starknet/list.rs @@ -5,10 +5,10 @@ use katana_executor::ExecutorFactory; use katana_pool::TransactionPool; use katana_primitives::transaction::TxNumber; use katana_rpc_api::starknet_ext::StarknetApiExtServer; -use katana_rpc_types::{ - list::{GetBlocksRequest, GetBlocksResponse, GetTransactionsRequest, GetTransactionsResponse}, - RpcTxWithHash, +use katana_rpc_types::list::{ + GetBlocksRequest, GetBlocksResponse, GetTransactionsRequest, GetTransactionsResponse, }; +use katana_rpc_types::RpcTxWithHash; use super::StarknetApi; use crate::starknet::pending::PendingBlockProvider; diff --git a/crates/rpc/rpc-server/src/starknet/mod.rs b/crates/rpc/rpc-server/src/starknet/mod.rs index c4ea874f0..e87b8e968 100644 --- a/crates/rpc/rpc-server/src/starknet/mod.rs +++ b/crates/rpc/rpc-server/src/starknet/mod.rs @@ -13,7 +13,7 @@ use katana_primitives::class::{ClassHash, CompiledClass}; use katana_primitives::contract::{ContractAddress, Nonce, StorageKey, StorageValue}; use katana_primitives::env::BlockEnv; use katana_primitives::event::MaybeForkedContinuationToken; -use katana_primitives::transaction::{ExecutableTxWithHash, TxHash, TxNumber, TxWithHash}; +use katana_primitives::transaction::{ExecutableTxWithHash, TxHash, TxNumber}; use katana_primitives::Felt; use katana_provider::api::block::{BlockHashProvider, BlockIdReader, BlockNumberProvider}; use 
katana_provider::api::contract::ContractClassProvider; @@ -23,7 +23,6 @@ use katana_provider::api::transaction::{ ReceiptProvider, TransactionProvider, TransactionStatusProvider, TransactionsProviderExt, }; use katana_provider::api::ProviderError; -use katana_provider::BlockchainProvider; use katana_rpc_api::error::starknet::{ CompilationErrorData, PageSizeTooBigData, ProofLimitExceededData, StarknetApiError, }; diff --git a/crates/storage/provider/provider/src/providers/db/cached.rs b/crates/storage/provider/provider/src/providers/db/cached.rs index 420502ef7..6cf0604b5 100644 --- a/crates/storage/provider/provider/src/providers/db/cached.rs +++ b/crates/storage/provider/provider/src/providers/db/cached.rs @@ -30,6 +30,7 @@ use katana_provider_api::transaction::{ ReceiptProvider, TransactionProvider, TransactionStatusProvider, TransactionTraceProvider, TransactionsProviderExt, }; +use katana_provider_api::trie::TrieWriter; use parking_lot::RwLock; use crate::providers::fork::state::HistoricalStateProvider as ForkHistoricalStateProvider; @@ -566,3 +567,21 @@ impl StateProofProvider for CachedStateProvider {} impl StateRootProvider for CachedStateProvider {} impl StateProofProvider for CachedHistoricalStateProvider {} impl StateRootProvider for CachedHistoricalStateProvider {} + +impl TrieWriter for CachedDbProvider { + fn trie_insert_contract_updates( + &self, + block_number: BlockNumber, + state_updates: &StateUpdates, + ) -> ProviderResult { + todo!() + } + + fn trie_insert_declared_classes( + &self, + block_number: BlockNumber, + updates: &BTreeMap, + ) -> ProviderResult { + todo!() + } +} From f87bd07dfe715f53650c895347ef31a4579ba239 Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Mon, 3 Nov 2025 11:37:42 -0500 Subject: [PATCH 09/26] wip --- crates/core/src/backend/storage.rs | 111 ----- crates/node/src/lib.rs | 2 +- crates/node/src/optimistic/mod.rs | 27 +- crates/rpc/rpc-server/src/starknet/forking.rs | 15 +- crates/rpc/rpc-server/src/starknet/mod.rs | 425 
+++++++++--------- 5 files changed, 233 insertions(+), 347 deletions(-) diff --git a/crates/core/src/backend/storage.rs b/crates/core/src/backend/storage.rs index e21b83912..961b19e22 100644 --- a/crates/core/src/backend/storage.rs +++ b/crates/core/src/backend/storage.rs @@ -196,117 +196,6 @@ impl Blockchain { Ok((Self::new(database), block_num)) } - /// Builds a new blockchain with a forked block. - pub async fn new_optimistic( - db: katana_db::Db, - starknet_client: StarknetClient, - fork_block: Option, - chain: &mut katana_chain_spec::dev::ChainSpec, - ) -> Result { - let chain_id = - starknet_client.chain_id().await.context("failed to fetch forked network id")?; - - // if the id is not in ASCII encoding, we display the chain id as is in hex. - let parsed_id = match parse_cairo_short_string(&chain_id) { - Ok(id) => id, - Err(_) => format!("{chain_id:#x}"), - }; - - // If the fork block number is not specified, we use the latest accepted block on the forked - // network. - let block_id = if let Some(id) = fork_block { id } else { BlockIdOrTag::Latest }; - - info!(chain = %parsed_id, block = %block_id, "Forking chain."); - - let block = starknet_client - .get_block_with_tx_hashes(block_id) - .await - .context("failed to fetch forked block")?; - - let GetBlockWithTxHashesResponse::Block(forked_block) = block else { - bail!("forking a pending block is not allowed") - }; - - let block_num = forked_block.block_number; - - chain.id = chain_id.into(); - - // adjust the genesis to match the forked block - chain.genesis.timestamp = forked_block.timestamp; - chain.genesis.number = forked_block.block_number; - chain.genesis.state_root = forked_block.new_root; - chain.genesis.parent_hash = forked_block.parent_hash; - chain.genesis.sequencer_address = forked_block.sequencer_address; - - // TODO: remove gas price from genesis - let eth_l1_gas_price = - forked_block.l1_gas_price.price_in_wei.to_u128().expect("should fit in u128"); - let strk_l1_gas_price = - 
forked_block.l1_gas_price.price_in_fri.to_u128().expect("should fit in u128"); - chain.genesis.gas_prices = - unsafe { GasPrices::new_unchecked(eth_l1_gas_price, strk_l1_gas_price) }; - - // TODO: convert this to block number instead of BlockHashOrNumber so that it is easier to - // check if the requested block is within the supported range or not. - let database = CachedDbProvider::new(db, block_id, starknet_client.clone()); - - // initialize parent fork block - // - // NOTE: this is just a workaround for allowing forked genesis block to be initialize using - // `Backend::do_mine_block`. - { - let parent_block_id = BlockIdOrTag::from(forked_block.parent_hash); - let parent_block = starknet_client.get_block_with_tx_hashes(parent_block_id).await?; - - let GetBlockWithTxHashesResponse::Block(parent_block) = parent_block else { - bail!("parent block is a preconfirmed block"); - }; - - let parent_block = SealedBlockWithStatus { - block: SealedBlock { - hash: parent_block.block_hash, - body: Vec::new(), - header: Header { - parent_hash: parent_block.parent_hash, - timestamp: parent_block.timestamp, - number: parent_block.block_number, - state_root: parent_block.new_root, - sequencer_address: parent_block.sequencer_address, - ..Default::default() - }, - }, - status: FinalityStatus::AcceptedOnL2, - }; - - database - .insert_block_with_states_and_receipts( - parent_block, - Default::default(), - Default::default(), - Default::default(), - ) - .context("failed to initialize provider with the parent of the forked block")?; - } - - // update the genesis block with the forked block's data - // we dont update the `l1_gas_price` bcs its already done when we set the `gas_prices` in - // genesis. this flow is kinda flawed, we should probably refactor it out of the - // genesis. 
- let mut block = chain.block(); - - let eth_l1_data_gas_price = - forked_block.l1_data_gas_price.price_in_wei.to_u128().expect("should fit in u128"); - let strk_l1_data_gas_price = - forked_block.l1_data_gas_price.price_in_fri.to_u128().expect("should fit in u128"); - - block.header.l1_data_gas_prices = - unsafe { GasPrices::new_unchecked(eth_l1_data_gas_price, strk_l1_data_gas_price) }; - - block.header.l1_da_mode = forked_block.l1_da_mode; - - Ok(Self::new(database)) - } - pub fn provider(&self) -> &BlockchainProvider> { &self.inner } diff --git a/crates/node/src/lib.rs b/crates/node/src/lib.rs index 12c202835..528af0d68 100644 --- a/crates/node/src/lib.rs +++ b/crates/node/src/lib.rs @@ -153,7 +153,7 @@ impl Node { // side let http_client = HttpClientBuilder::new().build(cfg.url.as_ref())?; let rpc_client = StarknetClient::new(http_client); - let forked_client = ForkedClient::new(rpc_client, block_num); + let forked_client = ForkedClient::new(rpc_client, block_num.into()); (bc, db, Some(forked_client)) } else if let Some(db_path) = &config.db.dir { diff --git a/crates/node/src/optimistic/mod.rs b/crates/node/src/optimistic/mod.rs index ab5d0e865..69ea5c0f6 100644 --- a/crates/node/src/optimistic/mod.rs +++ b/crates/node/src/optimistic/mod.rs @@ -19,6 +19,7 @@ use katana_metrics::{Report, Server as MetricsServer}; use katana_pool::ordering::FiFo; use katana_primitives::block::BlockIdOrTag; use katana_primitives::env::{CfgEnv, FeeTokenAddressses}; +use katana_provider::providers::db::cached::CachedDbProvider; use katana_rpc::cors::Cors; use katana_rpc::starknet::forking::ForkedClient; use katana_rpc::starknet::{StarknetApi, StarknetApiConfig}; @@ -106,24 +107,16 @@ impl Node { // --- build backend - let chain_spec = Arc::get_mut(&mut config.chain).expect("get mut Arc"); - let ChainSpec::Dev(chain_spec) = chain_spec else { - return Err(anyhow::anyhow!("Forking is only supported in dev mode for now")); - }; + let http_client = 
HttpClientBuilder::new().build(config.forking.url.as_str())?; + let starknet_client = katana_rpc_client::starknet::Client::new(http_client); let db = katana_db::Db::in_memory()?; - let blockchain = Blockchain::new_from_forked( - db.clone(), - config.forking.url.clone(), - Some(BlockIdOrTag::Latest), - chain_spec, - ) - .await?; + let forked_block_id = BlockIdOrTag::Latest; - let http_client = HttpClientBuilder::new().build(config.forking.url.as_str())?; - let starknet_client = katana_rpc_client::starknet::Client::new(http_client); + let database = CachedDbProvider::new(db.clone(), forked_block_id, starknet_client.clone()); + let blockchain = Blockchain::new(database.clone()); - let forked_client = ForkedClient::new(starknet_client.clone(), block_num); + let forked_client = ForkedClient::new(starknet_client.clone(), forked_block_id); let gpo = GasPriceOracle::sampled_starknet(config.forking.url.clone()); @@ -131,7 +124,7 @@ impl Node { let backend = Arc::new(Backend { gas_oracle: gpo.clone(), blockchain: blockchain.clone(), - executor_factory, + executor_factory: executor_factory.clone(), block_context_generator, chain_spec: config.chain.clone(), }); @@ -145,8 +138,8 @@ impl Node { let executor = OptimisticExecutor::new( pool.clone(), - blockchain, - optimistic_state, + blockchain.clone(), + database.clone(), executor_factory.clone(), task_spawner.clone(), ); diff --git a/crates/rpc/rpc-server/src/starknet/forking.rs b/crates/rpc/rpc-server/src/starknet/forking.rs index 5535a1ffd..e431f9954 100644 --- a/crates/rpc/rpc-server/src/starknet/forking.rs +++ b/crates/rpc/rpc-server/src/starknet/forking.rs @@ -222,20 +222,15 @@ impl ForkedClient { // BlockId in some way? 
pub async fn get_events( &self, - from: BlockNumber, - to: BlockNumber, + from: BlockIdOrTag, + to: BlockIdOrTag, address: Option, keys: Option>>, continuation_token: Option, chunk_size: u64, ) -> Result { - // if from > self.block || to > self.block { - // return Err(Error::BlockOutOfRange); - // } - - let from_block = Some(BlockIdOrTag::Number(from)); - let to_block = Some(BlockIdOrTag::Number(to)); - + let from_block = Some(from); + let to_block = Some(to); let event_filter = EventFilter { address, from_block, to_block, keys }; Ok(self.client.get_events(event_filter, continuation_token, chunk_size).await?) @@ -269,7 +264,7 @@ mod tests { async fn get_block_hash() { let http_client = HttpClientBuilder::new().build(SEPOLIA_URL).unwrap(); let rpc_client = Client::new(http_client); - let client = ForkedClient::new(rpc_client, FORK_BLOCK_NUMBER); + let client = ForkedClient::new(rpc_client, FORK_BLOCK_NUMBER.into()); // ----------------------------------------------------------------------- // Block before the forked block diff --git a/crates/rpc/rpc-server/src/starknet/mod.rs b/crates/rpc/rpc-server/src/starknet/mod.rs index e87b8e968..f9550ea76 100644 --- a/crates/rpc/rpc-server/src/starknet/mod.rs +++ b/crates/rpc/rpc-server/src/starknet/mod.rs @@ -855,214 +855,223 @@ where continuation_token: Option, chunk_size: u64, ) -> StarknetApiResult { - let provider = &self.inner.storage_provider.provider(); - - let from = self.resolve_event_block_id_if_forked(from_block)?; - let to = self.resolve_event_block_id_if_forked(to_block)?; - - // reserved buffer to fill up with events to avoid reallocations - let mut events = Vec::with_capacity(chunk_size as usize); - let filter = utils::events::Filter { address, keys: keys.clone() }; - - match (from, to) { - (EventBlockId::Num(from), EventBlockId::Num(to)) => { - // 1. check if the from and to block is lower than the forked block - // 2. 
if both are lower, then we can fetch the events from the provider - - // first determine whether the continuation token is from the forked client - let from_after_forked_if_any = if let Some(client) = &self.inner.forked_client { - let forked_block = *client.block(); - - // if the from block is lower than the forked block, we fetch events from the - // forked client - if from <= forked_block { - // if the to_block is greater than the forked block, we limit the to_block - // up until the forked block - let to = if to <= forked_block { to } else { forked_block }; - - // basically this is to determine that if the token is a katana native - // token, then we can skip fetching from the forked - // network. but if theres no token at all, or the - // token is a forked token, then we need to fetch from the forked network. - // - // TODO: simplify this - let forked_token = Some(continuation_token.clone()).and_then(|t| match t { - None => Some(None), - Some(t) => match t { - MaybeForkedContinuationToken::Token(_) => None, - MaybeForkedContinuationToken::Forked(t) => { - Some(Some(t.to_string())) - } - }, - }); - - // check if the continuation token is a forked continuation token - // if not we skip fetching from forked network - if let Some(token) = forked_token { - let forked_result = futures::executor::block_on( - client.get_events(from, to, address, keys, token, chunk_size), - )?; - - events.extend(forked_result.events); - - // return early if a token is present - if let Some(token) = forked_result.continuation_token { - let token = MaybeForkedContinuationToken::Forked(token); - let continuation_token = Some(token.to_string()); - return Ok(GetEventsResponse { events, continuation_token }); - } - } - } - - // we start from block + 1 because we dont have the events locally and we may - // have fetched it from the forked network earlier - *client.block() + 1 - } else { - from - }; - - let cursor = continuation_token.and_then(|t| t.to_token().map(|t| t.into())); - let 
block_range = from_after_forked_if_any..=to; - - let cursor = utils::events::fetch_events_at_blocks( - provider, - block_range, - &filter, - chunk_size, - cursor, - &mut events, - )?; - - let continuation_token = cursor.map(|c| c.into_rpc_cursor().to_string()); - let events_page = GetEventsResponse { events, continuation_token }; - - Ok(events_page) - } - - (EventBlockId::Num(from), EventBlockId::Pending) => { - // 1. check if the from and to block is lower than the forked block - // 2. if both are lower, then we can fetch the events from the provider - - // first determine whether the continuation token is from the forked client - let from_after_forked_if_any = if let Some(client) = &self.inner.forked_client { - let forked_block = *client.block(); - - // if the from block is lower than the forked block, we fetch events from the - // forked client - if from <= forked_block { - // we limit the to_block up until the forked block bcs pending block is - // pointing to a locally block - let to = forked_block; - - // basically this is to determine that if the token is a katana native - // token, then we can skip fetching from the forked - // network. but if theres no token at all, or the - // token is a forked token, then we need to fetch from the forked network. 
- // - // TODO: simplify this - let forked_token = Some(continuation_token.clone()).and_then(|t| match t { - None => Some(None), - Some(t) => match t { - MaybeForkedContinuationToken::Token(_) => None, - MaybeForkedContinuationToken::Forked(t) => { - Some(Some(t.to_string())) - } - }, - }); - - // check if the continuation token is a forked continuation token - // if not we skip fetching from forked network - if let Some(token) = forked_token { - let forked_result = futures::executor::block_on( - client.get_events(from, to, address, keys, token, chunk_size), - )?; - - events.extend(forked_result.events); - - // return early if a token is present - if let Some(token) = forked_result.continuation_token { - let token = MaybeForkedContinuationToken::Forked(token); - let continuation_token = Some(token.to_string()); - return Ok(GetEventsResponse { events, continuation_token }); - } - } - } - - // we start from block + 1 because we dont have the events locally and we may - // have fetched it from the forked network earlier - *client.block() + 1 - } else { - from - }; - - let cursor = continuation_token.and_then(|t| t.to_token().map(|t| t.into())); - let latest = provider.latest_number()?; - let block_range = from_after_forked_if_any..=latest; - - let int_cursor = utils::events::fetch_events_at_blocks( - provider, - block_range, - &filter, - chunk_size, - cursor.clone(), - &mut events, - )?; - - // if the internal cursor is Some, meaning the buffer is full and we havent - // reached the latest block. - if let Some(c) = int_cursor { - let continuation_token = Some(c.into_rpc_cursor().to_string()); - return Ok(GetEventsResponse { events, continuation_token }); - } - - if let Some(block) = - self.inner.pending_block_provider.get_pending_block_with_receipts()? 
- { - let cursor = utils::events::fetch_pending_events( - &block, - &filter, - chunk_size, - cursor, - &mut events, - )?; - - let continuation_token = Some(cursor.into_rpc_cursor().to_string()); - Ok(GetEventsResponse { events, continuation_token }) - } else { - let cursor = Cursor::new_block(latest + 1); - let continuation_token = Some(cursor.into_rpc_cursor().to_string()); - Ok(GetEventsResponse { events, continuation_token }) - } - } - - (EventBlockId::Pending, EventBlockId::Pending) => { - if let Some(block) = - self.inner.pending_block_provider.get_pending_block_with_receipts()? - { - let cursor = continuation_token.and_then(|t| t.to_token().map(|t| t.into())); - let new_cursor = utils::events::fetch_pending_events( - &block, - &filter, - chunk_size, - cursor, - &mut events, - )?; - - let continuation_token = Some(new_cursor.into_rpc_cursor().to_string()); - Ok(GetEventsResponse { events, continuation_token }) - } else { - let latest = provider.latest_number()?; - let new_cursor = Cursor::new_block(latest); - - let continuation_token = Some(new_cursor.into_rpc_cursor().to_string()); - Ok(GetEventsResponse { events, continuation_token }) - } - } - - (EventBlockId::Pending, EventBlockId::Num(_)) => Err(StarknetApiError::unexpected( - "Invalid block range; `from` block must be lower than `to`", - )), - } + let client = self.inner.forked_client.as_ref().unwrap(); + let token = continuation_token.map(|token| token.to_string()); + + let result = futures::executor::block_on( + client.get_events(from_block, to_block, address, keys, token, chunk_size), + )?; + + Ok(result) + + // let provider = &self.inner.storage_provider.provider(); + + // let from = self.resolve_event_block_id_if_forked(from_block)?; + // let to = self.resolve_event_block_id_if_forked(to_block)?; + + // // reserved buffer to fill up with events to avoid reallocations + // let mut events = Vec::with_capacity(chunk_size as usize); + // let filter = utils::events::Filter { address, keys: keys.clone() 
}; + + // match (from, to) { + // (EventBlockId::Num(from), EventBlockId::Num(to)) => { + // // 1. check if the from and to block is lower than the forked block + // // 2. if both are lower, then we can fetch the events from the provider + + // // first determine whether the continuation token is from the forked client + // let from_after_forked_if_any = if let Some(client) = &self.inner.forked_client { + // let forked_block = *client.block(); + + // // if the from block is lower than the forked block, we fetch events from the + // // forked client + // if from <= forked_block { + // // if the to_block is greater than the forked block, we limit the to_block + // // up until the forked block + // let to = if to <= forked_block { to } else { forked_block }; + + // // basically this is to determine that if the token is a katana native + // // token, then we can skip fetching from the forked + // // network. but if theres no token at all, or the + // // token is a forked token, then we need to fetch from the forked network. 
+ // // + // // TODO: simplify this + // let forked_token = Some(continuation_token.clone()).and_then(|t| match t { + // None => Some(None), + // Some(t) => match t { + // MaybeForkedContinuationToken::Token(_) => None, + // MaybeForkedContinuationToken::Forked(t) => { + // Some(Some(t.to_string())) + // } + // }, + // }); + + // // check if the continuation token is a forked continuation token + // // if not we skip fetching from forked network + // if let Some(token) = forked_token { + // let forked_result = futures::executor::block_on( + // client.get_events(from, to, address, keys, token, chunk_size), + // )?; + + // events.extend(forked_result.events); + + // // return early if a token is present + // if let Some(token) = forked_result.continuation_token { + // let token = MaybeForkedContinuationToken::Forked(token); + // let continuation_token = Some(token.to_string()); + // return Ok(GetEventsResponse { events, continuation_token }); + // } + // } + // } + + // // we start from block + 1 because we dont have the events locally and we may + // // have fetched it from the forked network earlier + // *client.block() + 1 + // } else { + // from + // }; + + // let cursor = continuation_token.and_then(|t| t.to_token().map(|t| t.into())); + // let block_range = from_after_forked_if_any..=to; + + // let cursor = utils::events::fetch_events_at_blocks( + // provider, + // block_range, + // &filter, + // chunk_size, + // cursor, + // &mut events, + // )?; + + // let continuation_token = cursor.map(|c| c.into_rpc_cursor().to_string()); + // let events_page = GetEventsResponse { events, continuation_token }; + + // Ok(events_page) + // } + + // (EventBlockId::Num(from), EventBlockId::Pending) => { + // // 1. check if the from and to block is lower than the forked block + // // 2. 
if both are lower, then we can fetch the events from the provider + + // // first determine whether the continuation token is from the forked client + // let from_after_forked_if_any = if let Some(client) = &self.inner.forked_client { + // let forked_block = *client.block(); + + // // if the from block is lower than the forked block, we fetch events from the + // // forked client + // if from <= forked_block { + // // we limit the to_block up until the forked block bcs pending block is + // // pointing to a locally block + // let to = forked_block; + + // // basically this is to determine that if the token is a katana native + // // token, then we can skip fetching from the forked + // // network. but if theres no token at all, or the + // // token is a forked token, then we need to fetch from the forked network. + // // + // // TODO: simplify this + // let forked_token = Some(continuation_token.clone()).and_then(|t| match t { + // None => Some(None), + // Some(t) => match t { + // MaybeForkedContinuationToken::Token(_) => None, + // MaybeForkedContinuationToken::Forked(t) => { + // Some(Some(t.to_string())) + // } + // }, + // }); + + // // check if the continuation token is a forked continuation token + // // if not we skip fetching from forked network + // if let Some(token) = forked_token { + // let forked_result = futures::executor::block_on( + // client.get_events(from, to, address, keys, token, chunk_size), + // )?; + + // events.extend(forked_result.events); + + // // return early if a token is present + // if let Some(token) = forked_result.continuation_token { + // let token = MaybeForkedContinuationToken::Forked(token); + // let continuation_token = Some(token.to_string()); + // return Ok(GetEventsResponse { events, continuation_token }); + // } + // } + // } + + // // we start from block + 1 because we dont have the events locally and we may + // // have fetched it from the forked network earlier + // *client.block() + 1 + // } else { + // from + // }; 
+ + // let cursor = continuation_token.and_then(|t| t.to_token().map(|t| t.into())); + // let latest = provider.latest_number()?; + // let block_range = from_after_forked_if_any..=latest; + + // let int_cursor = utils::events::fetch_events_at_blocks( + // provider, + // block_range, + // &filter, + // chunk_size, + // cursor.clone(), + // &mut events, + // )?; + + // // if the internal cursor is Some, meaning the buffer is full and we havent + // // reached the latest block. + // if let Some(c) = int_cursor { + // let continuation_token = Some(c.into_rpc_cursor().to_string()); + // return Ok(GetEventsResponse { events, continuation_token }); + // } + + // if let Some(block) = + // self.inner.pending_block_provider.get_pending_block_with_receipts()? + // { + // let cursor = utils::events::fetch_pending_events( + // &block, + // &filter, + // chunk_size, + // cursor, + // &mut events, + // )?; + + // let continuation_token = Some(cursor.into_rpc_cursor().to_string()); + // Ok(GetEventsResponse { events, continuation_token }) + // } else { + // let cursor = Cursor::new_block(latest + 1); + // let continuation_token = Some(cursor.into_rpc_cursor().to_string()); + // Ok(GetEventsResponse { events, continuation_token }) + // } + // } + + // (EventBlockId::Pending, EventBlockId::Pending) => { + // if let Some(block) = + // self.inner.pending_block_provider.get_pending_block_with_receipts()? 
+ // { + // let cursor = continuation_token.and_then(|t| t.to_token().map(|t| t.into())); + // let new_cursor = utils::events::fetch_pending_events( + // &block, + // &filter, + // chunk_size, + // cursor, + // &mut events, + // )?; + + // let continuation_token = Some(new_cursor.into_rpc_cursor().to_string()); + // Ok(GetEventsResponse { events, continuation_token }) + // } else { + // let latest = provider.latest_number()?; + // let new_cursor = Cursor::new_block(latest); + + // let continuation_token = Some(new_cursor.into_rpc_cursor().to_string()); + // Ok(GetEventsResponse { events, continuation_token }) + // } + // } + + // (EventBlockId::Pending, EventBlockId::Num(_)) => Err(StarknetApiError::unexpected( + // "Invalid block range; `from` block must be lower than `to`", + // )), + // } } // Determine the block number based on its Id. In the case where the block id is a hash, we need From e90515607ba971d43184f11893f49c8b5f46e9fb Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Mon, 3 Nov 2025 14:44:58 -0500 Subject: [PATCH 10/26] wip --- Cargo.lock | 23 +++ Cargo.toml | 3 +- crates/node/Cargo.toml | 2 + crates/node/src/lib.rs | 2 + crates/node/src/optimistic/mod.rs | 13 +- crates/optimistic/Cargo.toml | 22 +++ .../optimistic => optimistic/src}/executor.rs | 135 +++++++++++++----- crates/optimistic/src/lib.rs | 2 + .../src/optimistic => optimistic/src}/pool.rs | 1 - crates/rpc/rpc-server/Cargo.toml | 1 + crates/rpc/rpc-server/src/starknet/mod.rs | 55 ++++++- crates/rpc/rpc-server/src/utils/events.rs | 10 +- 12 files changed, 211 insertions(+), 58 deletions(-) create mode 100644 crates/optimistic/Cargo.toml rename crates/{node/src/optimistic => optimistic/src}/executor.rs (51%) create mode 100644 crates/optimistic/src/lib.rs rename crates/{node/src/optimistic => optimistic/src}/pool.rs (97%) diff --git a/Cargo.lock b/Cargo.lock index 5374fe15d..624e2834f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6307,6 +6307,7 @@ dependencies = [ "katana-gateway-server", 
"katana-messaging", "katana-metrics", + "katana-optimistic", "katana-pipeline", "katana-pool", "katana-pool-api", @@ -6320,6 +6321,7 @@ dependencies = [ "katana-starknet", "katana-tasks", "katana-tracing", + "parking_lot", "serde", "serde_json", "strum 0.25.0", @@ -6353,6 +6355,26 @@ dependencies = [ "url", ] +[[package]] +name = "katana-optimistic" +version = "1.7.0" +dependencies = [ + "anyhow", + "futures", + "katana-core", + "katana-db", + "katana-executor", + "katana-pool", + "katana-pool-api", + "katana-primitives", + "katana-provider", + "katana-rpc-client", + "katana-rpc-types", + "katana-tasks", + "parking_lot", + "tracing", +] + [[package]] name = "katana-pipeline" version = "1.7.0" @@ -6537,6 +6559,7 @@ dependencies = [ "katana-messaging", "katana-metrics", "katana-node", + "katana-optimistic", "katana-pool", "katana-primitives", "katana-provider", diff --git a/Cargo.toml b/Cargo.toml index 085ea4b03..2e3805f77 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -47,7 +47,7 @@ members = [ "crates/utils", "tests/db-compat", "tests/reverse-proxy", - "tests/snos", + "tests/snos", "crates/optimistic", ] [workspace.package] @@ -73,6 +73,7 @@ serde-utils = { path = "crates/serde-utils" } katana-chain-spec = { path = "crates/chain-spec" } katana-cli = { path = "crates/cli" } +katana-optimistic = { path = "crates/optimistic" } katana-codecs = { path = "crates/storage/codecs" } katana-codecs-derive = { path = "crates/storage/codecs/derive" } katana-contracts = { path = "crates/contracts" } diff --git a/crates/node/Cargo.toml b/crates/node/Cargo.toml index e0a90daa5..11dc4562c 100644 --- a/crates/node/Cargo.toml +++ b/crates/node/Cargo.toml @@ -17,6 +17,7 @@ katana-messaging.workspace = true katana-metrics.workspace = true katana-pipeline.workspace = true katana-pool.workspace = true +katana-optimistic.workspace = true katana-pool-api.workspace = true katana-primitives.workspace = true katana-provider.workspace = true @@ -31,6 +32,7 @@ katana-tracing.workspace = true 
anyhow.workspace = true futures.workspace = true http.workspace = true +parking_lot.workspace = true jsonrpsee.workspace = true serde.workspace = true serde_json.workspace = true diff --git a/crates/node/src/lib.rs b/crates/node/src/lib.rs index 528af0d68..14bc15d72 100644 --- a/crates/node/src/lib.rs +++ b/crates/node/src/lib.rs @@ -270,6 +270,7 @@ impl Node { starknet_api_cfg, block_producer.clone(), blockchain, + None, ) } else { StarknetApi::new( @@ -279,6 +280,7 @@ impl Node { starknet_api_cfg, block_producer.clone(), blockchain, + None, ) }; diff --git a/crates/node/src/optimistic/mod.rs b/crates/node/src/optimistic/mod.rs index 69ea5c0f6..1321cb6aa 100644 --- a/crates/node/src/optimistic/mod.rs +++ b/crates/node/src/optimistic/mod.rs @@ -16,6 +16,8 @@ use katana_gas_price_oracle::GasPriceOracle; use katana_metrics::exporters::prometheus::PrometheusRecorder; use katana_metrics::sys::DiskReporter; use katana_metrics::{Report, Server as MetricsServer}; +use katana_optimistic::executor::{OptimisticExecutor, OptimisticState}; +use katana_optimistic::pool::{PoolValidator, TxPool}; use katana_pool::ordering::FiFo; use katana_primitives::block::BlockIdOrTag; use katana_primitives::env::{CfgEnv, FeeTokenAddressses}; @@ -29,14 +31,10 @@ use katana_tasks::{JoinHandle, TaskManager}; use tracing::info; mod config; -mod executor; -mod pool; use config::Config; use crate::config::rpc::RpcModuleKind; -use crate::optimistic::executor::OptimisticExecutor; -use crate::optimistic::pool::{PoolValidator, TxPool}; #[derive(Debug)] pub struct Node { @@ -51,8 +49,6 @@ pub struct Node { impl Node { pub async fn build(config: Config) -> Result { - let mut config = config; - if config.metrics.is_some() { // Metrics recorder must be initialized before calling any of the metrics macros, in // order for it to be registered. 
@@ -136,10 +132,12 @@ impl Node { // -- build executor + let optimistic_state = OptimisticState::new(database.clone()); + let executor = OptimisticExecutor::new( pool.clone(), blockchain.clone(), - database.clone(), + optimistic_state.clone(), executor_factory.clone(), task_spawner.clone(), ); @@ -173,6 +171,7 @@ impl Node { starknet_api_cfg, starknet_client.clone(), blockchain, + optimistic_state.clone(), ); if config.rpc.apis.contains(&RpcModuleKind::Starknet) { diff --git a/crates/optimistic/Cargo.toml b/crates/optimistic/Cargo.toml new file mode 100644 index 000000000..80a7cdbff --- /dev/null +++ b/crates/optimistic/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "katana-optimistic" +edition.workspace = true +license.workspace = true +repository.workspace = true +version.workspace = true + +[dependencies] +futures.workspace = true +anyhow.workspace = true +katana-core.workspace = true +katana-executor.workspace = true +katana-pool.workspace = true +katana-primitives.workspace = true +katana-provider.workspace = true +katana-rpc-types.workspace = true +katana-rpc-client.workspace = true +katana-tasks.workspace = true +katana-pool-api.workspace = true +parking_lot.workspace = true +tracing.workspace = true +katana-db.workspace = true diff --git a/crates/node/src/optimistic/executor.rs b/crates/optimistic/src/executor.rs similarity index 51% rename from crates/node/src/optimistic/executor.rs rename to crates/optimistic/src/executor.rs index d08ea9fc3..2a525ce6a 100644 --- a/crates/node/src/optimistic/executor.rs +++ b/crates/optimistic/src/executor.rs @@ -4,25 +4,40 @@ use std::sync::Arc; use std::task::{Context, Poll}; use futures::stream::StreamExt; +use futures::FutureExt; use katana_core::backend::storage::Blockchain; use katana_executor::implementation::blockifier::BlockifierFactory; -use katana_executor::ExecutorFactory; +use katana_executor::{ExecutionResult, ExecutorFactory}; use katana_pool::ordering::FiFo; use katana_pool::{PendingTransactions, 
PoolTransaction, TransactionPool}; +use katana_primitives::transaction::TxWithHash; use katana_provider::api::state::StateFactoryProvider; use katana_provider::providers::db::cached::CachedDbProvider; use katana_rpc_types::BroadcastedTxWithChainId; -use katana_tasks::{JoinHandle, TaskSpawner}; +use katana_tasks::{CpuBlockingJoinHandle, JoinHandle, Result as TaskResult, TaskSpawner}; +use parking_lot::RwLock; use tracing::{debug, error, info, trace}; -use crate::optimistic::pool::TxPool; +use crate::pool::TxPool; const LOG_TARGET: &str = "optimistic_executor"; +#[derive(Debug, Clone)] +pub struct OptimisticState { + pub state: CachedDbProvider, + pub transactions: Arc>>, +} + +impl OptimisticState { + pub fn new(state: CachedDbProvider) -> Self { + Self { state, transactions: Arc::new(RwLock::new(Vec::new())) } + } +} + #[derive(Debug)] pub struct OptimisticExecutor { pool: TxPool, - optimistic_state: CachedDbProvider, + optimistic_state: OptimisticState, executor_factory: Arc, storage: Blockchain, task_spawner: TaskSpawner, @@ -39,7 +54,7 @@ impl OptimisticExecutor { pub fn new( pool: TxPool, storage: Blockchain, - optimistic_state: CachedDbProvider, + optimistic_state: OptimisticState, executor_factory: Arc, task_spawner: TaskSpawner, ) -> Self { @@ -61,6 +76,7 @@ impl OptimisticExecutor { self.storage, self.optimistic_state, self.executor_factory, + self.task_spawner.clone(), ), ) } @@ -69,11 +85,12 @@ impl OptimisticExecutor { #[derive(Debug)] struct OptimisticExecutorActor { pool: TxPool, - optimistic_state: CachedDbProvider, - /// Stream of pending transactions from the pool + optimistic_state: OptimisticState, pending_txs: PendingTransactions>, storage: Blockchain, executor_factory: Arc, + task_spawner: TaskSpawner, + ongoing_execution: Option>>, } impl OptimisticExecutorActor { @@ -81,17 +98,31 @@ impl OptimisticExecutorActor { fn new( pool: TxPool, storage: Blockchain, - optimistic_state: CachedDbProvider, + optimistic_state: OptimisticState, 
executor_factory: Arc, + task_spawner: TaskSpawner, ) -> Self { let pending_txs = pool.pending_transactions(); - Self { pool, optimistic_state, pending_txs, storage, executor_factory } + Self { + pool, + optimistic_state, + pending_txs, + storage, + executor_factory, + task_spawner, + ongoing_execution: None, + } } /// Execute a single transaction optimistically against the latest state. - fn execute_transaction(&self, tx: BroadcastedTxWithChainId) -> anyhow::Result<()> { - let latest_state = self.optimistic_state.latest().unwrap(); - let mut executor = self.executor_factory.with_state(latest_state); + fn execute_transaction( + pool: TxPool, + optimistic_state: OptimisticState, + executor_factory: Arc, + tx: BroadcastedTxWithChainId, + ) -> anyhow::Result<()> { + let latest_state = optimistic_state.state.latest().unwrap(); + let mut executor = executor_factory.with_state(latest_state); // Execute the transaction let tx_hash = tx.hash(); @@ -99,8 +130,10 @@ impl OptimisticExecutorActor { let _ = executor.execute_transactions(vec![tx.into()]).unwrap(); let output = executor.take_execution_output().unwrap(); - self.optimistic_state.merge_state_updates(&output.states); - self.pool.remove_transactions(&[tx_hash]); + optimistic_state.state.merge_state_updates(&output.states); + optimistic_state.transactions; + + pool.remove_transactions(&[tx_hash]); Ok(()) } @@ -112,13 +145,42 @@ impl Future for OptimisticExecutorActor { fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); - // Drain all available transactions from the stream until it's exhausted (Poll::Pending) - // or the stream ends (Poll::Ready(None)). - // - // This ensures we process all pending transactions in a batch before yielding control - // back to the executor, which is more efficient than processing one transaction at a - // time. 
loop { + // First, poll any ongoing execution to completion before processing new transactions + if let Some(mut execution) = this.ongoing_execution.take() { + match execution.poll_unpin(cx) { + Poll::Ready(result) => { + match result { + TaskResult::Ok(Ok(())) => { + // Execution completed successfully, continue to next transaction + trace!(target: LOG_TARGET, "Transaction execution completed successfully"); + } + TaskResult::Ok(Err(e)) => { + error!( + target: LOG_TARGET, + error = %e, + "Error executing transaction" + ); + } + TaskResult::Err(e) => { + if e.is_cancelled() { + error!(target: LOG_TARGET, "Transaction execution task cancelled"); + } else { + std::panic::resume_unwind(e.into_panic()); + } + } + } + // Continue to process next transaction + } + Poll::Pending => { + // Execution is still ongoing, restore it and yield + this.ongoing_execution = Some(execution); + return Poll::Pending; + } + } + } + + // Process new transactions from the stream match this.pending_txs.poll_next_unpin(cx) { Poll::Ready(Some(pending_tx)) => { let tx = pending_tx.tx.as_ref().clone(); @@ -138,23 +200,24 @@ impl Future for OptimisticExecutorActor { debug!( target: LOG_TARGET, tx_hash = format!("{:#x}", tx_hash), - "Executing transaction optimistically" + "Spawning transaction execution on blocking pool" ); - // Execute the transaction optimistically - match this.execute_transaction(tx) { - Ok(()) => {} - Err(e) => { - error!( - target: LOG_TARGET, - tx_hash = format!("{:#x}", tx_hash), - error = %e, - "Error executing transaction" - ); - } - } + // Spawn the transaction execution on the blocking CPU pool + let pool = this.pool.clone(); + let optimistic_state = this.optimistic_state.clone(); + let executor_factory = this.executor_factory.clone(); + + let execution_future = this.task_spawner.cpu_bound().spawn(move || { + Self::execute_transaction(pool, optimistic_state, executor_factory, tx) + }); + + this.ongoing_execution = Some(execution_future); - // Continue the loop to 
process the next transaction + // Wake the task to poll the execution immediately + cx.waker().wake_by_ref(); + + // Continue the loop to poll the execution continue; } @@ -173,7 +236,3 @@ impl Future for OptimisticExecutorActor { } } } - -// Tests are intentionally omitted as they would require a full backend setup with -// blockchain state. Integration tests should be written separately to properly test -// the optimistic executor with a real backend instance. diff --git a/crates/optimistic/src/lib.rs b/crates/optimistic/src/lib.rs new file mode 100644 index 000000000..4e194790b --- /dev/null +++ b/crates/optimistic/src/lib.rs @@ -0,0 +1,2 @@ +pub mod executor; +pub mod pool; diff --git a/crates/node/src/optimistic/pool.rs b/crates/optimistic/src/pool.rs similarity index 97% rename from crates/node/src/optimistic/pool.rs rename to crates/optimistic/src/pool.rs index ad017ebfc..835e4795a 100644 --- a/crates/node/src/optimistic/pool.rs +++ b/crates/optimistic/src/pool.rs @@ -5,7 +5,6 @@ use katana_pool::pool::Pool; use katana_pool_api::validation::{ Error as ValidationError, InvalidTransactionError, ValidationOutcome, Validator, }; -use katana_primitives::utils::get_contract_address; use katana_rpc_client::starknet::Client; use katana_rpc_types::{BroadcastedTx, BroadcastedTxWithChainId}; diff --git a/crates/rpc/rpc-server/Cargo.toml b/crates/rpc/rpc-server/Cargo.toml index 34cc98096..c8a712a19 100644 --- a/crates/rpc/rpc-server/Cargo.toml +++ b/crates/rpc/rpc-server/Cargo.toml @@ -7,6 +7,7 @@ repository.workspace = true version.workspace = true [dependencies] +katana-optimistic.workspace = true katana-core.workspace = true katana-executor.workspace = true katana-explorer = { workspace = true, features = [ "jsonrpsee" ], optional = true } diff --git a/crates/rpc/rpc-server/src/starknet/mod.rs b/crates/rpc/rpc-server/src/starknet/mod.rs index f9550ea76..be228c69e 100644 --- a/crates/rpc/rpc-server/src/starknet/mod.rs +++ b/crates/rpc/rpc-server/src/starknet/mod.rs @@ 
-4,9 +4,10 @@ use std::fmt::Debug; use std::future::Future; use std::sync::Arc; -use katana_core::backend::storage::{Blockchain, Database}; +use katana_core::backend::storage::Blockchain; use katana_core::backend::Backend; use katana_executor::ExecutorFactory; +use katana_optimistic::executor::OptimisticState; use katana_pool::TransactionPool; use katana_primitives::block::{BlockHashOrNumber, BlockIdOrTag, FinalityStatus, GasPrices}; use katana_primitives::class::{ClassHash, CompiledClass}; @@ -49,7 +50,7 @@ use katana_tasks::{Result as TaskResult, TaskSpawner}; use crate::permit::Permits; use crate::starknet::pending::PendingBlockProvider; -use crate::utils::events::{Cursor, EventBlockId}; +use crate::utils::events::{fetch_events_at_blocks, fetch_tx_events, Cursor, EventBlockId, Filter}; use crate::{utils, DEFAULT_ESTIMATE_FEE_MAX_CONCURRENT_REQUESTS}; mod blockifier; @@ -99,6 +100,7 @@ where estimate_fee_permit: Permits, config: StarknetApiConfig, pending_block_provider: PP, + optimistic_state: Option, } impl StarknetApi @@ -114,6 +116,7 @@ where config: StarknetApiConfig, pending_block_provider: PP, storage_provider: Blockchain, + optimistic_state: Option, ) -> Self { Self::new_inner( backend, @@ -123,6 +126,7 @@ where task_spawner, config, pending_block_provider, + optimistic_state, ) } @@ -134,6 +138,7 @@ where config: StarknetApiConfig, pending_block_provider: PP, storage_provider: Blockchain, + optimistic_state: Option, ) -> Self { Self::new_inner( backend, @@ -143,6 +148,7 @@ where task_spawner, config, pending_block_provider, + optimistic_state, ) } @@ -154,6 +160,7 @@ where task_spawner: TaskSpawner, config: StarknetApiConfig, pending_block_provider: PP, + optimistic_state: Option, ) -> Self { let total_permits = config .max_concurrent_estimate_fee_requests @@ -169,6 +176,7 @@ where estimate_fee_permit, config, pending_block_provider, + optimistic_state, }; Self { inner: Arc::new(inner) } @@ -858,9 +866,44 @@ where let client = 
self.inner.forked_client.as_ref().unwrap(); let token = continuation_token.map(|token| token.to_string()); - let result = futures::executor::block_on( - client.get_events(from_block, to_block, address, keys, token, chunk_size), - )?; + let mut result = futures::executor::block_on(client.get_events( + from_block, + to_block, + address, + keys.clone(), + token, + chunk_size, + ))?; + + if let BlockIdOrTag::PreConfirmed = to_block { + // Handle pre-confirmed block logic here + if let Some(optimistic_state) = self.inner.optimistic_state.as_ref() { + let optimistic_txs = optimistic_state.transactions.read(); + let tx_filter = Filter { address, keys }; + + for (idx, tx, receipt) in + optimistic_txs.iter().enumerate().filter_map(|(idx, (tx, result))| { + if let Some(receipt) = result.receipt() { + Some((idx, tx, receipt)) + } else { + None + } + }) + { + let partial_cursor = fetch_tx_events( + 0, + None, + None, + idx, + tx.hash, + receipt.events(), + &tx_filter, + chunk_size as usize, + &mut result.events, + )?; + } + } + } Ok(result) @@ -873,7 +916,7 @@ where // let mut events = Vec::with_capacity(chunk_size as usize); // let filter = utils::events::Filter { address, keys: keys.clone() }; - // match (from, to) { + // match (from, to) ick th // (EventBlockId::Num(from), EventBlockId::Num(to)) => { // // 1. check if the from and to block is lower than the forked block // // 2. if both are lower, then we can fetch the events from the provider diff --git a/crates/rpc/rpc-server/src/utils/events.rs b/crates/rpc/rpc-server/src/utils/events.rs index a36e70e44..562d5c360 100644 --- a/crates/rpc/rpc-server/src/utils/events.rs +++ b/crates/rpc/rpc-server/src/utils/events.rs @@ -72,15 +72,15 @@ impl Cursor { /// A partial cursor that points to a specific event WITHIN a transaction. #[derive(Debug, Clone, PartialEq, Default)] -struct PartialCursor { +pub struct PartialCursor { /// The transaction index within a block. 
- idx: usize, + pub idx: usize, /// The event index within a transaction. - event: usize, + pub event: usize, } impl PartialCursor { - fn into_full(self, block: BlockNumber) -> Cursor { + pub fn into_full(self, block: BlockNumber) -> Cursor { Cursor { block, txn: self } } } @@ -292,7 +292,7 @@ impl<'a, I: Iterator> Iterator for FilteredEvents<'a, I> { /// * `chunk_size` - Maximum number of events that can be taken, based on user-specified chunk size /// * `buffer` - Buffer to store the matched events #[allow(clippy::too_many_arguments)] -fn fetch_tx_events( +pub fn fetch_tx_events( next_event_idx: usize, block_number: Option, block_hash: Option, From 40a7e4754abbee19bd9309642b6d7bd6abc8fc9d Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Mon, 3 Nov 2025 17:57:29 -0500 Subject: [PATCH 11/26] wip --- Cargo.lock | 1 + crates/cli/src/lib.rs | 6 + crates/cli/src/optimistic.rs | 127 ++++++ crates/node/src/optimistic/config.rs | 14 +- crates/node/src/optimistic/mod.rs | 6 +- crates/optimistic/Cargo.toml | 1 + crates/optimistic/src/executor.rs | 90 +++- crates/primitives/src/event.rs | 64 ++- crates/rpc/rpc-server/src/starknet/mod.rs | 502 +++++++++++----------- crates/rpc/rpc-server/src/utils/events.rs | 1 + 10 files changed, 544 insertions(+), 268 deletions(-) create mode 100644 crates/cli/src/optimistic.rs diff --git a/Cargo.lock b/Cargo.lock index 624e2834f..cd8eca183 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6372,6 +6372,7 @@ dependencies = [ "katana-rpc-types", "katana-tasks", "parking_lot", + "tokio", "tracing", ] diff --git a/crates/cli/src/lib.rs b/crates/cli/src/lib.rs index cb017de55..f919a2cb8 100644 --- a/crates/cli/src/lib.rs +++ b/crates/cli/src/lib.rs @@ -6,6 +6,7 @@ use clap::{Args, Subcommand}; pub mod args; pub mod file; pub mod full; +pub mod optimistic; pub mod options; pub mod utils; @@ -13,6 +14,7 @@ pub use args::SequencerNodeArgs; pub use options::*; use crate::full::FullNodeArgs; +use crate::optimistic::OptimisticNodeArgs; #[derive(Debug, Args, 
PartialEq)] pub struct NodeCli { @@ -27,6 +29,9 @@ pub enum NodeSubcommand { #[command(about = "Launch a sequencer node")] Sequencer(Box), + + #[command(about = "Launch an optimistic node")] + Optimistic(Box), } impl NodeCli { @@ -34,6 +39,7 @@ impl NodeCli { match self.command { NodeSubcommand::Full(args) => args.execute().await, NodeSubcommand::Sequencer(args) => args.with_config_file()?.execute().await, + NodeSubcommand::Optimistic(args) => args.execute().await, } } } diff --git a/crates/cli/src/optimistic.rs b/crates/cli/src/optimistic.rs new file mode 100644 index 000000000..f7e15227c --- /dev/null +++ b/crates/cli/src/optimistic.rs @@ -0,0 +1,127 @@ +use std::sync::Arc; + +use anyhow::Result; +pub use clap::Parser; +use katana_chain_spec::ChainSpec; +use serde::{Deserialize, Serialize}; +use tracing::info; +use url::Url; + +use crate::options::*; + +pub(crate) const LOG_TARGET: &str = "katana::cli::optimistic"; + +#[derive(Parser, Debug, Serialize, Deserialize, Clone, PartialEq)] +#[command(next_help_heading = "Optimistic node options")] +pub struct OptimisticNodeArgs { + /// Don't print anything on startup. + #[arg(long)] + #[serde(default)] + pub silent: bool, + + /// The Starknet RPC provider to fork from. 
+ #[arg(long, value_name = "URL", alias = "rpc-url")] + #[arg(help = "The Starknet RPC provider to fork from.")] + pub fork_provider_url: Url, + + #[command(flatten)] + #[serde(default)] + pub logging: LoggingOptions, + + #[command(flatten)] + #[serde(default)] + pub tracer: TracerOptions, + + #[cfg(feature = "server")] + #[command(flatten)] + #[serde(default)] + pub metrics: MetricsOptions, + + #[cfg(feature = "server")] + #[command(flatten)] + #[serde(default)] + pub server: ServerOptions, +} + +impl OptimisticNodeArgs { + pub async fn execute(&self) -> Result<()> { + let config = self.config()?; + + #[cfg(feature = "server")] + let rpc_addr = config.rpc.socket_addr(); + + if !self.silent { + info!(target: LOG_TARGET, "Starting optimistic node..."); + } + + let node = katana_node::optimistic::Node::build(config).await?; + let _handle = node.launch().await?; + + #[cfg(feature = "server")] + { + info!(target: LOG_TARGET, %rpc_addr, "JSON-RPC server started."); + } + + // Wait indefinitely + tokio::signal::ctrl_c().await?; + + Ok(()) + } + + fn config(&self) -> Result { + let chain = self.chain_spec()?; + let rpc = self.rpc_config()?; + let forking = self.forking_config(); + let metrics = self.metrics_config()?; + Ok(katana_node::optimistic::config::Config { chain, rpc, forking, metrics }) + } + + fn chain_spec(&self) -> Result> { + // Always use dev chain spec for optimistic node + Ok(Arc::new(ChainSpec::Dev(Default::default()))) + } + + fn forking_config(&self) -> katana_node::optimistic::config::ForkingConfig { + use katana_node::optimistic::config::ForkingConfig; + ForkingConfig { url: self.fork_provider_url.clone(), block: None } + } + + fn rpc_config(&self) -> Result { + use katana_node::optimistic::config::{RpcConfig, RpcModuleKind, RpcModulesList}; + #[cfg(feature = "server")] + { + let mut apis = RpcModulesList::new(); + apis.add(RpcModuleKind::Starknet); + + Ok(RpcConfig { + addr: self.server.http_addr, + port: self.server.http_port, + apis, + 
max_connections: self.server.max_connections, + cors_origins: self.server.http_cors_origins.clone(), + ..Default::default() + }) + } + + #[cfg(not(feature = "server"))] + Ok(RpcConfig::default()) + } + + fn metrics_config(&self) -> Result> { + use katana_node::optimistic::config::MetricsConfig; + #[cfg(feature = "server")] + { + if self.metrics.metrics { + Ok(Some(MetricsConfig { + addr: self.metrics.metrics_addr, + port: self.metrics.metrics_port, + })) + } else { + Ok(None) + } + } + + #[cfg(not(feature = "server"))] + Ok(None) + } +} diff --git a/crates/node/src/optimistic/config.rs b/crates/node/src/optimistic/config.rs index ed4f26752..bf417b642 100644 --- a/crates/node/src/optimistic/config.rs +++ b/crates/node/src/optimistic/config.rs @@ -2,12 +2,11 @@ use std::sync::Arc; use katana_chain_spec::ChainSpec; -use crate::config::db::DbConfig; -use crate::config::fork::ForkingConfig; -use crate::config::metrics::MetricsConfig; -#[cfg(feature = "cartridge")] -use crate::config::paymaster; -use crate::config::rpc::RpcConfig; +pub use crate::config::db::DbConfig; +pub use crate::config::execution::ExecutionConfig; +pub use crate::config::fork::ForkingConfig; +pub use crate::config::metrics::MetricsConfig; +pub use crate::config::rpc::{RpcConfig, RpcModuleKind, RpcModulesList}; /// Node configurations. /// @@ -17,9 +16,6 @@ pub struct Config { /// The chain specification. pub chain: Arc, - /// Database options. - pub db: DbConfig, - /// Forking options. 
pub forking: ForkingConfig, diff --git a/crates/node/src/optimistic/mod.rs b/crates/node/src/optimistic/mod.rs index 1321cb6aa..1fbe48a3c 100644 --- a/crates/node/src/optimistic/mod.rs +++ b/crates/node/src/optimistic/mod.rs @@ -30,10 +30,11 @@ use katana_rpc_api::starknet::{StarknetApiServer, StarknetTraceApiServer, Starkn use katana_tasks::{JoinHandle, TaskManager}; use tracing::info; -mod config; +pub mod config; use config::Config; +pub use self::config::*; use crate::config::rpc::RpcModuleKind; #[derive(Debug)] @@ -140,6 +141,7 @@ impl Node { optimistic_state.clone(), executor_factory.clone(), task_spawner.clone(), + starknet_client.clone(), ); // --- build rpc server @@ -171,7 +173,7 @@ impl Node { starknet_api_cfg, starknet_client.clone(), blockchain, - optimistic_state.clone(), + Some(optimistic_state.clone()), ); if config.rpc.apis.contains(&RpcModuleKind::Starknet) { diff --git a/crates/optimistic/Cargo.toml b/crates/optimistic/Cargo.toml index 80a7cdbff..b26794588 100644 --- a/crates/optimistic/Cargo.toml +++ b/crates/optimistic/Cargo.toml @@ -20,3 +20,4 @@ katana-pool-api.workspace = true parking_lot.workspace = true tracing.workspace = true katana-db.workspace = true +tokio.workspace = true diff --git a/crates/optimistic/src/executor.rs b/crates/optimistic/src/executor.rs index 2a525ce6a..ba8519f52 100644 --- a/crates/optimistic/src/executor.rs +++ b/crates/optimistic/src/executor.rs @@ -2,6 +2,7 @@ use std::future::Future; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; +use std::time::Duration; use futures::stream::StreamExt; use futures::FutureExt; @@ -10,12 +11,15 @@ use katana_executor::implementation::blockifier::BlockifierFactory; use katana_executor::{ExecutionResult, ExecutorFactory}; use katana_pool::ordering::FiFo; use katana_pool::{PendingTransactions, PoolTransaction, TransactionPool}; +use katana_primitives::block::BlockIdOrTag; use katana_primitives::transaction::TxWithHash; use 
katana_provider::api::state::StateFactoryProvider; use katana_provider::providers::db::cached::CachedDbProvider; +use katana_rpc_client::starknet::Client; use katana_rpc_types::BroadcastedTxWithChainId; use katana_tasks::{CpuBlockingJoinHandle, JoinHandle, Result as TaskResult, TaskSpawner}; use parking_lot::RwLock; +use tokio::time::sleep; use tracing::{debug, error, info, trace}; use crate::pool::TxPool; @@ -41,6 +45,7 @@ pub struct OptimisticExecutor { executor_factory: Arc, storage: Blockchain, task_spawner: TaskSpawner, + client: Client, } impl OptimisticExecutor { @@ -51,14 +56,16 @@ impl OptimisticExecutor { /// * `pool` - The transaction pool to monitor for new transactions /// * `backend` - The backend containing the executor factory and blockchain state /// * `task_spawner` - The task spawner used to run the executor actor + /// * `client` - The RPC client used to poll for confirmed blocks pub fn new( pool: TxPool, storage: Blockchain, optimistic_state: OptimisticState, executor_factory: Arc, task_spawner: TaskSpawner, + client: Client, ) -> Self { - Self { pool, optimistic_state, executor_factory, task_spawner, storage } + Self { pool, optimistic_state, executor_factory, task_spawner, storage, client } } /// Spawns the optimistic executor actor task. @@ -70,15 +77,84 @@ impl OptimisticExecutor { /// /// A `JoinHandle` to the spawned executor task. 
pub fn spawn(self) -> JoinHandle<()> { - self.task_spawner.build_task().name("Optimistic Executor").spawn( + // Spawn the transaction execution task + let executor_handle = self.task_spawner.build_task().name("Optimistic Executor").spawn( OptimisticExecutorActor::new( self.pool, self.storage, - self.optimistic_state, + self.optimistic_state.clone(), self.executor_factory, self.task_spawner.clone(), ), - ) + ); + + // Spawn the block polling task + let client = self.client; + let optimistic_state = self.optimistic_state; + self.task_spawner.build_task().name("Block Polling").spawn(async move { + Self::poll_confirmed_blocks(client, optimistic_state).await; + }); + + executor_handle + } + + /// Polls for confirmed blocks every 2 seconds and removes transactions from the optimistic + /// state when they appear in confirmed blocks. + async fn poll_confirmed_blocks(client: Client, optimistic_state: OptimisticState) { + loop { + sleep(Duration::from_secs(2)).await; + + match client.get_block_with_tx_hashes(BlockIdOrTag::Latest).await { + Ok(block_response) => { + use katana_rpc_types::block::GetBlockWithTxHashesResponse; + + let (block_number, block_tx_hashes) = match block_response { + GetBlockWithTxHashesResponse::Block(block) => { + (block.block_number, block.transactions) + } + GetBlockWithTxHashesResponse::PreConfirmed(block) => { + (block.block_number, block.transactions) + } + }; + + if block_tx_hashes.is_empty() { + continue; + } + + trace!( + target: LOG_TARGET, + block_number = block_number, + tx_count = block_tx_hashes.len(), + "Polling confirmed block" + ); + + // Get the current optimistic transactions + let mut optimistic_txs = optimistic_state.transactions.write(); + + // Filter out transactions that are confirmed in this block + let initial_count = optimistic_txs.len(); + optimistic_txs.retain(|(tx, _)| !block_tx_hashes.contains(&tx.hash)); + + let removed_count = initial_count - optimistic_txs.len(); + if removed_count > 0 { + info!( + target: LOG_TARGET, 
+ block_number = block_number, + removed_count = removed_count, + remaining_count = optimistic_txs.len(), + "Removed confirmed transactions from optimistic state" + ); + } + } + Err(e) => { + error!( + target: LOG_TARGET, + error = %e, + "Error polling for confirmed blocks" + ); + } + } + } } } @@ -131,7 +207,11 @@ impl OptimisticExecutorActor { let output = executor.take_execution_output().unwrap(); optimistic_state.state.merge_state_updates(&output.states); - optimistic_state.transactions; + + // Add the executed transactions to the optimistic state + for (tx, result) in output.transactions { + optimistic_state.transactions.write().push((tx, result)); + } pool.remove_transactions(&[tx_hash]); diff --git a/crates/primitives/src/event.rs b/crates/primitives/src/event.rs index b19cd77f8..e25181a5b 100644 --- a/crates/primitives/src/event.rs +++ b/crates/primitives/src/event.rs @@ -1,6 +1,9 @@ use core::fmt; use std::num::ParseIntError; +use crate::transaction::TxHash; +use crate::Felt; + /// Represents a continuation token for implementing paging in event queries. /// /// This struct stores the necessary information to resume fetching events @@ -17,6 +20,8 @@ pub struct ContinuationToken { pub txn_n: u64, /// The event number within the transaction to continue from. pub event_n: u64, + /// The transaction hash to continue from. Used for optimistic transactions. 
+ pub transaction_hash: Option, } #[derive(PartialEq, Eq, Debug, thiserror::Error)] @@ -30,7 +35,7 @@ pub enum ContinuationTokenError { impl ContinuationToken { pub fn parse(token: &str) -> Result { let arr: Vec<&str> = token.split(',').collect(); - if arr.len() != 3 { + if arr.len() != 3 && arr.len() != 4 { return Err(ContinuationTokenError::InvalidToken); } let block_n = @@ -40,13 +45,29 @@ impl ContinuationToken { let event_n = u64::from_str_radix(arr[2], 16).map_err(ContinuationTokenError::ParseFailed)?; - Ok(ContinuationToken { block_n, txn_n: receipt_n, event_n }) + // Parse optional transaction hash (4th field) + let transaction_hash = if arr.len() == 4 { + let hash_str = arr[3]; + if hash_str.is_empty() { + None + } else { + Some(Felt::from_hex(hash_str).map_err(|_| ContinuationTokenError::InvalidToken)?) + } + } else { + None + }; + + Ok(ContinuationToken { block_n, txn_n: receipt_n, event_n, transaction_hash }) } } impl fmt::Display for ContinuationToken { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:x},{:x},{:x}", self.block_n, self.txn_n, self.event_n) + if let Some(tx_hash) = &self.transaction_hash { + write!(f, "{:x},{:x},{:x},{:#x}", self.block_n, self.txn_n, self.event_n, tx_hash) + } else { + write!(f, "{:x},{:x},{:x}", self.block_n, self.txn_n, self.event_n) + } } } @@ -108,11 +129,17 @@ mod test { #[test] fn to_string_works() { fn helper(block_n: u64, txn_n: u64, event_n: u64) -> String { - ContinuationToken { block_n, txn_n, event_n }.to_string() + ContinuationToken { block_n, txn_n, event_n, transaction_hash: None }.to_string() } assert_eq!(helper(0, 0, 0), "0,0,0"); assert_eq!(helper(30, 255, 4), "1e,ff,4"); + + // Test with transaction hash + let tx_hash = Felt::from_hex("0x123abc").unwrap(); + let token = + ContinuationToken { block_n: 0, txn_n: 0, event_n: 0, transaction_hash: Some(tx_hash) }; + assert_eq!(token.to_string(), "0,0,0,0x123abc"); } #[test] @@ -120,8 +147,22 @@ mod test { fn helper(token: &str) -> 
ContinuationToken { ContinuationToken::parse(token).unwrap() } - assert_eq!(helper("0,0,0"), ContinuationToken { block_n: 0, txn_n: 0, event_n: 0 }); - assert_eq!(helper("1e,ff,4"), ContinuationToken { block_n: 30, txn_n: 255, event_n: 4 }); + assert_eq!( + helper("0,0,0"), + ContinuationToken { block_n: 0, txn_n: 0, event_n: 0, transaction_hash: None } + ); + assert_eq!( + helper("1e,ff,4"), + ContinuationToken { block_n: 30, txn_n: 255, event_n: 4, transaction_hash: None } + ); + + // Test parsing with transaction hash + let tx_hash = Felt::from_hex("0x123abc").unwrap(); + let token = helper("0,0,0,0x123abc"); + assert_eq!( + token, + ContinuationToken { block_n: 0, txn_n: 0, event_n: 0, transaction_hash: Some(tx_hash) } + ); } #[test] @@ -170,6 +211,17 @@ mod test { assert_eq!(t.block_n, 30); assert_eq!(t.txn_n, 255); assert_eq!(t.event_n, 4); + assert_eq!(t.transaction_hash, None); + }); + + // Test with transaction hash + let regular_token_with_hash = "1e,ff,4,0x123abc"; + let parsed = MaybeForkedContinuationToken::parse(regular_token_with_hash).unwrap(); + assert_matches!(parsed, MaybeForkedContinuationToken::Token(t) => { + assert_eq!(t.block_n, 30); + assert_eq!(t.txn_n, 255); + assert_eq!(t.event_n, 4); + assert_eq!(t.transaction_hash, Some(Felt::from_hex("0x123abc").unwrap())); }); } } diff --git a/crates/rpc/rpc-server/src/starknet/mod.rs b/crates/rpc/rpc-server/src/starknet/mod.rs index be228c69e..792fad7ae 100644 --- a/crates/rpc/rpc-server/src/starknet/mod.rs +++ b/crates/rpc/rpc-server/src/starknet/mod.rs @@ -32,7 +32,9 @@ use katana_rpc_types::block::{ GetBlockWithTxHashesResponse, MaybePreConfirmedBlock, }; use katana_rpc_types::class::Class; -use katana_rpc_types::event::{EventFilterWithPage, GetEventsResponse, ResultPageRequest}; +use katana_rpc_types::event::{ + EmittedEvent, EventFilterWithPage, GetEventsResponse, ResultPageRequest, +}; use katana_rpc_types::list::{ ContinuationToken as ListContinuationToken, GetBlocksRequest, 
GetBlocksResponse, GetTransactionsRequest, GetTransactionsResponse, TransactionListItem, @@ -47,6 +49,7 @@ use katana_rpc_types::trie::{ use katana_rpc_types::{FeeEstimate, TxStatus}; use katana_rpc_types_builder::{BlockBuilder, ReceiptBuilder}; use katana_tasks::{Result as TaskResult, TaskSpawner}; +use tracing::trace; use crate::permit::Permits; use crate::starknet::pending::PendingBlockProvider; @@ -853,6 +856,122 @@ where .await? } + /// Extracts and filters events from the optimistic state transactions. + /// Returns a continuation token if there are more events to fetch. + fn fetch_optimistic_events( + &self, + address: Option, + keys: &Option>>, + events_buffer: &mut Vec, + chunk_size: u64, + continuation_token: Option<&katana_primitives::event::ContinuationToken>, + ) -> StarknetApiResult> { + if let Some(optimistic_state) = &self.inner.optimistic_state { + let transactions = optimistic_state.transactions.read(); + + // Determine starting position from continuation token + let (start_txn_idx, start_event_idx) = if let Some(token) = continuation_token { + // If transaction hash is present, use it to find the transaction + if let Some(tx_hash) = &token.transaction_hash { + // Find the transaction by hash + if let Some(idx) = transactions.iter().position(|(tx, _)| &tx.hash == tx_hash) { + (idx, token.event_n as usize) + } else { + // Transaction not found (likely removed by poll_confirmed_blocks) + // Start from the beginning + trace!( + target: "rpc::starknet", + tx_hash = format!("{:#x}", tx_hash), + "Transaction from continuation token not found in optimistic state, starting from beginning" + ); + (0, 0) + } + } else { + // // Use txn_n index if no hash is provided (backward compatibility) + // (token.txn_n as usize, token.event_n as usize) + unimplemented!() + } + } else { + (0, 0) + }; + + for (tx_idx, (tx, result)) in transactions.iter().enumerate() { + // Skip transactions before the continuation token + if tx_idx < start_txn_idx { + continue; + } + + 
// Stop if we've reached the chunk size limit + if events_buffer.len() >= chunk_size as usize { + break; + } + + // Only process successful executions + if let katana_executor::ExecutionResult::Success { receipt, .. } = result { + for (event_idx, event) in receipt.events().iter().enumerate() { + // Skip events before the continuation token in the current transaction + if tx_idx == start_txn_idx && event_idx < start_event_idx { + continue; + } + // Apply address filter + if let Some(filter_address) = address { + if event.from_address != filter_address { + continue; + } + } + + // Apply keys filter + if let Some(filter_keys) = keys { + let mut matches = true; + for (i, key_set) in filter_keys.iter().enumerate() { + if !key_set.is_empty() { + if let Some(event_key) = event.keys.get(i) { + if !key_set.contains(event_key) { + matches = false; + break; + } + } else { + matches = false; + break; + } + } + } + + if !matches { + continue; + } + } + + // Event matches the filter, add it to the buffer + events_buffer.push(EmittedEvent { + from_address: event.from_address, + keys: event.keys.clone(), + data: event.data.clone(), + block_hash: None, // Optimistic transactions don't have a block hash yet + block_number: None, /* Optimistic transactions don't have a block + * number yet */ + transaction_hash: tx.hash, + }); + + // Stop if we've reached the chunk size limit + if events_buffer.len() >= chunk_size as usize { + // Return a continuation token with the current position + let next_event_idx = event_idx + 1; + let token = katana_primitives::event::ContinuationToken { + block_n: 0, // Not used for optimistic transactions + txn_n: tx_idx as u64, + event_n: next_event_idx as u64, + transaction_hash: Some(tx.hash), + }; + return Ok(Some(token)); + } + } + } + } + } + Ok(None) + } + // TODO: should document more and possible find a simpler solution(?) 
fn events_inner( &self, @@ -863,258 +982,149 @@ where continuation_token: Option, chunk_size: u64, ) -> StarknetApiResult { - let client = self.inner.forked_client.as_ref().unwrap(); - let token = continuation_token.map(|token| token.to_string()); - - let mut result = futures::executor::block_on(client.get_events( - from_block, - to_block, - address, - keys.clone(), - token, - chunk_size, - ))?; - - if let BlockIdOrTag::PreConfirmed = to_block { - // Handle pre-confirmed block logic here - if let Some(optimistic_state) = self.inner.optimistic_state.as_ref() { - let optimistic_txs = optimistic_state.transactions.read(); - let tx_filter = Filter { address, keys }; - - for (idx, tx, receipt) in - optimistic_txs.iter().enumerate().filter_map(|(idx, (tx, result))| { - if let Some(receipt) = result.receipt() { - Some((idx, tx, receipt)) - } else { - None - } - }) - { - let partial_cursor = fetch_tx_events( - 0, - None, - None, - idx, - tx.hash, - receipt.events(), - &tx_filter, - chunk_size as usize, - &mut result.events, - )?; + let provider = self.inner.backend.blockchain.provider(); + + let from = self.resolve_event_block_id_if_forked(from_block)?; + let to = self.resolve_event_block_id_if_forked(to_block)?; + + // reserved buffer to fill up with events to avoid reallocations + let mut events = Vec::with_capacity(chunk_size as usize); + // let filter = utils::events::Filter { address, keys: keys.clone() }; + + match (from, to) { + (EventBlockId::Num(from), EventBlockId::Num(to)) => { + // Check if continuation token is a native (non-forked) token + let is_native_token = continuation_token + .as_ref() + .map_or(false, |t| matches!(t, MaybeForkedContinuationToken::Token(_))); + + // Only fetch from forked client if we don't have a native continuation token + if !is_native_token { + let client = &self.inner.forked_client.as_ref().unwrap(); + // Extract forked token if present + let forked_token = continuation_token.as_ref().and_then(|t| match t { + 
MaybeForkedContinuationToken::Forked(token) => Some(token.clone()), + _ => None, + }); + + let forked_result = futures::executor::block_on(client.get_events( + BlockIdOrTag::Number(from), + BlockIdOrTag::Number(to), + address, + keys.clone(), + forked_token, + chunk_size, + ))?; + + events.extend(forked_result.events); + + // Return early if there's a continuation token from forked network + if let Some(token) = forked_result.continuation_token { + let token = Some(MaybeForkedContinuationToken::Forked(token).to_string()); + return Ok(GetEventsResponse { events, continuation_token: token }); + } } + + // Fetch events from optimistic state transactions + // Extract native token if present + let native_token = continuation_token.as_ref().and_then(|t| match t { + MaybeForkedContinuationToken::Token(token) => Some(token), + _ => None, + }); + let opt_token = self.fetch_optimistic_events( + address, + &keys, + &mut events, + chunk_size, + native_token, + )?; + + let continuation_token = + opt_token.map(|t| MaybeForkedContinuationToken::Token(t).to_string()); + Ok(GetEventsResponse { events, continuation_token }) } - } - Ok(result) + (EventBlockId::Num(from), EventBlockId::Pending) => { + // Check if continuation token is a native (non-forked) token + let is_native_token = continuation_token + .as_ref() + .map_or(false, |t| matches!(t, MaybeForkedContinuationToken::Token(_))); + + // Only fetch from forked client if we don't have a native continuation token + if !is_native_token { + let client = &self.inner.forked_client.as_ref().unwrap(); + // Extract forked token if present + let forked_token = continuation_token.as_ref().and_then(|t| match t { + MaybeForkedContinuationToken::Forked(token) => Some(token.clone()), + _ => None, + }); + + let forked_result = futures::executor::block_on(client.get_events( + BlockIdOrTag::Number(from), + BlockIdOrTag::Latest, + address, + keys.clone(), + forked_token, + chunk_size, + ))?; + + events.extend(forked_result.events); + + // 
Return early if there's a continuation token from forked network + if let Some(token) = forked_result.continuation_token { + let token = MaybeForkedContinuationToken::Forked(token); + return Ok(GetEventsResponse { + events, + continuation_token: Some(token.to_string()), + }); + } + } + + // Fetch events from optimistic state transactions (which serve as "pending" + // transactions) + // Extract native token if present + let native_token = continuation_token.as_ref().and_then(|t| match t { + MaybeForkedContinuationToken::Token(token) => Some(token), + _ => None, + }); + let opt_token = self.fetch_optimistic_events( + address, + &keys, + &mut events, + chunk_size, + native_token, + )?; - // let provider = &self.inner.storage_provider.provider(); + let continuation_token = + opt_token.map(|t| MaybeForkedContinuationToken::Token(t).to_string()); + Ok(GetEventsResponse { events, continuation_token }) + } - // let from = self.resolve_event_block_id_if_forked(from_block)?; - // let to = self.resolve_event_block_id_if_forked(to_block)?; + (EventBlockId::Pending, EventBlockId::Pending) => { + // Fetch events from optimistic state transactions (which represent pending + // transactions) + // Extract native token if present + let native_token = continuation_token.as_ref().and_then(|t| match t { + MaybeForkedContinuationToken::Token(token) => Some(token), + _ => None, + }); + let opt_token = self.fetch_optimistic_events( + address, + &keys, + &mut events, + chunk_size, + native_token, + )?; - // // reserved buffer to fill up with events to avoid reallocations - // let mut events = Vec::with_capacity(chunk_size as usize); - // let filter = utils::events::Filter { address, keys: keys.clone() }; + let continuation_token = + opt_token.map(|t| MaybeForkedContinuationToken::Token(t).to_string()); + Ok(GetEventsResponse { events, continuation_token }) + } - // match (from, to) ick th - // (EventBlockId::Num(from), EventBlockId::Num(to)) => { - // // 1. 
check if the from and to block is lower than the forked block - // // 2. if both are lower, then we can fetch the events from the provider - - // // first determine whether the continuation token is from the forked client - // let from_after_forked_if_any = if let Some(client) = &self.inner.forked_client { - // let forked_block = *client.block(); - - // // if the from block is lower than the forked block, we fetch events from the - // // forked client - // if from <= forked_block { - // // if the to_block is greater than the forked block, we limit the to_block - // // up until the forked block - // let to = if to <= forked_block { to } else { forked_block }; - - // // basically this is to determine that if the token is a katana native - // // token, then we can skip fetching from the forked - // // network. but if theres no token at all, or the - // // token is a forked token, then we need to fetch from the forked network. - // // - // // TODO: simplify this - // let forked_token = Some(continuation_token.clone()).and_then(|t| match t { - // None => Some(None), - // Some(t) => match t { - // MaybeForkedContinuationToken::Token(_) => None, - // MaybeForkedContinuationToken::Forked(t) => { - // Some(Some(t.to_string())) - // } - // }, - // }); - - // // check if the continuation token is a forked continuation token - // // if not we skip fetching from forked network - // if let Some(token) = forked_token { - // let forked_result = futures::executor::block_on( - // client.get_events(from, to, address, keys, token, chunk_size), - // )?; - - // events.extend(forked_result.events); - - // // return early if a token is present - // if let Some(token) = forked_result.continuation_token { - // let token = MaybeForkedContinuationToken::Forked(token); - // let continuation_token = Some(token.to_string()); - // return Ok(GetEventsResponse { events, continuation_token }); - // } - // } - // } - - // // we start from block + 1 because we dont have the events locally and we may - 
// // have fetched it from the forked network earlier - // *client.block() + 1 - // } else { - // from - // }; - - // let cursor = continuation_token.and_then(|t| t.to_token().map(|t| t.into())); - // let block_range = from_after_forked_if_any..=to; - - // let cursor = utils::events::fetch_events_at_blocks( - // provider, - // block_range, - // &filter, - // chunk_size, - // cursor, - // &mut events, - // )?; - - // let continuation_token = cursor.map(|c| c.into_rpc_cursor().to_string()); - // let events_page = GetEventsResponse { events, continuation_token }; - - // Ok(events_page) - // } - - // (EventBlockId::Num(from), EventBlockId::Pending) => { - // // 1. check if the from and to block is lower than the forked block - // // 2. if both are lower, then we can fetch the events from the provider - - // // first determine whether the continuation token is from the forked client - // let from_after_forked_if_any = if let Some(client) = &self.inner.forked_client { - // let forked_block = *client.block(); - - // // if the from block is lower than the forked block, we fetch events from the - // // forked client - // if from <= forked_block { - // // we limit the to_block up until the forked block bcs pending block is - // // pointing to a locally block - // let to = forked_block; - - // // basically this is to determine that if the token is a katana native - // // token, then we can skip fetching from the forked - // // network. but if theres no token at all, or the - // // token is a forked token, then we need to fetch from the forked network. 
- // // - // // TODO: simplify this - // let forked_token = Some(continuation_token.clone()).and_then(|t| match t { - // None => Some(None), - // Some(t) => match t { - // MaybeForkedContinuationToken::Token(_) => None, - // MaybeForkedContinuationToken::Forked(t) => { - // Some(Some(t.to_string())) - // } - // }, - // }); - - // // check if the continuation token is a forked continuation token - // // if not we skip fetching from forked network - // if let Some(token) = forked_token { - // let forked_result = futures::executor::block_on( - // client.get_events(from, to, address, keys, token, chunk_size), - // )?; - - // events.extend(forked_result.events); - - // // return early if a token is present - // if let Some(token) = forked_result.continuation_token { - // let token = MaybeForkedContinuationToken::Forked(token); - // let continuation_token = Some(token.to_string()); - // return Ok(GetEventsResponse { events, continuation_token }); - // } - // } - // } - - // // we start from block + 1 because we dont have the events locally and we may - // // have fetched it from the forked network earlier - // *client.block() + 1 - // } else { - // from - // }; - - // let cursor = continuation_token.and_then(|t| t.to_token().map(|t| t.into())); - // let latest = provider.latest_number()?; - // let block_range = from_after_forked_if_any..=latest; - - // let int_cursor = utils::events::fetch_events_at_blocks( - // provider, - // block_range, - // &filter, - // chunk_size, - // cursor.clone(), - // &mut events, - // )?; - - // // if the internal cursor is Some, meaning the buffer is full and we havent - // // reached the latest block. - // if let Some(c) = int_cursor { - // let continuation_token = Some(c.into_rpc_cursor().to_string()); - // return Ok(GetEventsResponse { events, continuation_token }); - // } - - // if let Some(block) = - // self.inner.pending_block_provider.get_pending_block_with_receipts()? 
- // { - // let cursor = utils::events::fetch_pending_events( - // &block, - // &filter, - // chunk_size, - // cursor, - // &mut events, - // )?; - - // let continuation_token = Some(cursor.into_rpc_cursor().to_string()); - // Ok(GetEventsResponse { events, continuation_token }) - // } else { - // let cursor = Cursor::new_block(latest + 1); - // let continuation_token = Some(cursor.into_rpc_cursor().to_string()); - // Ok(GetEventsResponse { events, continuation_token }) - // } - // } - - // (EventBlockId::Pending, EventBlockId::Pending) => { - // if let Some(block) = - // self.inner.pending_block_provider.get_pending_block_with_receipts()? - // { - // let cursor = continuation_token.and_then(|t| t.to_token().map(|t| t.into())); - // let new_cursor = utils::events::fetch_pending_events( - // &block, - // &filter, - // chunk_size, - // cursor, - // &mut events, - // )?; - - // let continuation_token = Some(new_cursor.into_rpc_cursor().to_string()); - // Ok(GetEventsResponse { events, continuation_token }) - // } else { - // let latest = provider.latest_number()?; - // let new_cursor = Cursor::new_block(latest); - - // let continuation_token = Some(new_cursor.into_rpc_cursor().to_string()); - // Ok(GetEventsResponse { events, continuation_token }) - // } - // } - - // (EventBlockId::Pending, EventBlockId::Num(_)) => Err(StarknetApiError::unexpected( - // "Invalid block range; `from` block must be lower than `to`", - // )), - // } + (EventBlockId::Pending, EventBlockId::Num(_)) => Err(StarknetApiError::unexpected( + "Invalid block range; `from` block must be lower than `to`", + )), + } } // Determine the block number based on its Id. 
In the case where the block id is a hash, we need diff --git a/crates/rpc/rpc-server/src/utils/events.rs b/crates/rpc/rpc-server/src/utils/events.rs index 562d5c360..e36c64d6d 100644 --- a/crates/rpc/rpc-server/src/utils/events.rs +++ b/crates/rpc/rpc-server/src/utils/events.rs @@ -66,6 +66,7 @@ impl Cursor { block_n: self.block, txn_n: self.txn.idx as u64, event_n: self.txn.event as u64, + transaction_hash: None, } } } From f4a754b94234fe628d4b9716199fa6593ba1f579 Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Tue, 4 Nov 2025 13:00:41 -0500 Subject: [PATCH 12/26] wip --- .tool-versions | 1 + crates/cli/src/optimistic.rs | 10 + crates/core/src/backend/storage.rs | 1 - crates/node/src/optimistic/mod.rs | 25 +- crates/optimistic/src/executor.rs | 47 +- crates/rpc/rpc-server/src/starknet/mod.rs | 85 +++- crates/rpc/rpc-server/src/starknet/pending.rs | 124 ++++- .../provider/src/providers/db/cached.rs | 427 +----------------- 8 files changed, 281 insertions(+), 439 deletions(-) diff --git a/.tool-versions b/.tool-versions index cc60fd625..1a66fa783 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1 +1,2 @@ scarb 2.8.2 +katana 1.7.0 diff --git a/crates/cli/src/optimistic.rs b/crates/cli/src/optimistic.rs index f7e15227c..bae8f66ed 100644 --- a/crates/cli/src/optimistic.rs +++ b/crates/cli/src/optimistic.rs @@ -45,6 +45,12 @@ pub struct OptimisticNodeArgs { impl OptimisticNodeArgs { pub async fn execute(&self) -> Result<()> { + let tracer_config = self.tracer_config(); + katana_tracing::init(self.logging.log_format, tracer_config).await?; + self.start_node().await + } + + async fn start_node(&self) -> Result<()> { let config = self.config()?; #[cfg(feature = "server")] @@ -76,6 +82,10 @@ impl OptimisticNodeArgs { Ok(katana_node::optimistic::config::Config { chain, rpc, forking, metrics }) } + fn tracer_config(&self) -> Option { + self.tracer.config() + } + fn chain_spec(&self) -> Result> { // Always use dev chain spec for optimistic node 
Ok(Arc::new(ChainSpec::Dev(Default::default()))) diff --git a/crates/core/src/backend/storage.rs b/crates/core/src/backend/storage.rs index 961b19e22..1ba93096c 100644 --- a/crates/core/src/backend/storage.rs +++ b/crates/core/src/backend/storage.rs @@ -14,7 +14,6 @@ use katana_provider::api::transaction::{ TransactionsProviderExt, }; use katana_provider::api::trie::TrieWriter; -use katana_provider::providers::db::cached::CachedDbProvider; use katana_provider::providers::db::DbProvider; use katana_provider::providers::fork::ForkedProvider; use katana_provider::BlockchainProvider; diff --git a/crates/node/src/optimistic/mod.rs b/crates/node/src/optimistic/mod.rs index 1fbe48a3c..b63df8de4 100644 --- a/crates/node/src/optimistic/mod.rs +++ b/crates/node/src/optimistic/mod.rs @@ -21,10 +21,10 @@ use katana_optimistic::pool::{PoolValidator, TxPool}; use katana_pool::ordering::FiFo; use katana_primitives::block::BlockIdOrTag; use katana_primitives::env::{CfgEnv, FeeTokenAddressses}; -use katana_provider::providers::db::cached::CachedDbProvider; +use katana_provider::providers::fork::ForkedProvider; use katana_rpc::cors::Cors; use katana_rpc::starknet::forking::ForkedClient; -use katana_rpc::starknet::{StarknetApi, StarknetApiConfig}; +use katana_rpc::starknet::{OptimisticPendingBlockProvider, StarknetApi, StarknetApiConfig}; use katana_rpc::{RpcServer, RpcServerHandle}; use katana_rpc_api::starknet::{StarknetApiServer, StarknetTraceApiServer, StarknetWriteApiServer}; use katana_tasks::{JoinHandle, TaskManager}; @@ -110,14 +110,13 @@ impl Node { let db = katana_db::Db::in_memory()?; let forked_block_id = BlockIdOrTag::Latest; - let database = CachedDbProvider::new(db.clone(), forked_block_id, starknet_client.clone()); - let blockchain = Blockchain::new(database.clone()); - + let storage_p = ForkedProvider::new(db.clone(), forked_block_id, starknet_client.clone()); let forked_client = ForkedClient::new(starknet_client.clone(), forked_block_id); + let blockchain = 
Blockchain::new(storage_p.clone()); let gpo = GasPriceOracle::sampled_starknet(config.forking.url.clone()); - let block_context_generator = BlockContextGenerator::default().into(); + let backend = Arc::new(Backend { gas_oracle: gpo.clone(), blockchain: blockchain.clone(), @@ -133,8 +132,9 @@ impl Node { // -- build executor - let optimistic_state = OptimisticState::new(database.clone()); + let optimistic_state = OptimisticState::new(); + // this is the component that will populate the optimistic state let executor = OptimisticExecutor::new( pool.clone(), blockchain.clone(), @@ -165,13 +165,20 @@ impl Node { paymaster: None, }; + // Create the optimistic pending block provider + let pending_block_provider = OptimisticPendingBlockProvider::new( + optimistic_state.clone(), + starknet_client.clone(), + blockchain.clone(), + ); + let starknet_api = StarknetApi::new_forked( backend.clone(), pool.clone(), forked_client, task_spawner.clone(), starknet_api_cfg, - starknet_client.clone(), + pending_block_provider, blockchain, Some(optimistic_state.clone()), ); @@ -202,6 +209,8 @@ impl Node { rpc_server = rpc_server.max_response_body_size(max_response_body_size); } + info!("Build complete."); + Ok(Node { db, pool, backend, rpc_server, config: config.into(), task_manager, executor }) } diff --git a/crates/optimistic/src/executor.rs b/crates/optimistic/src/executor.rs index ba8519f52..8ac8556a9 100644 --- a/crates/optimistic/src/executor.rs +++ b/crates/optimistic/src/executor.rs @@ -13,9 +13,10 @@ use katana_pool::ordering::FiFo; use katana_pool::{PendingTransactions, PoolTransaction, TransactionPool}; use katana_primitives::block::BlockIdOrTag; use katana_primitives::transaction::TxWithHash; -use katana_provider::api::state::StateFactoryProvider; -use katana_provider::providers::db::cached::CachedDbProvider; +use katana_provider::api::state::{StateFactoryProvider, StateProvider}; +use katana_provider::providers::db::cached::{CachedStateProvider, SharedStateCache}; use 
katana_rpc_client::starknet::Client; +use katana_rpc_types::block::GetBlockWithTxHashesResponse; use katana_rpc_types::BroadcastedTxWithChainId; use katana_tasks::{CpuBlockingJoinHandle, JoinHandle, Result as TaskResult, TaskSpawner}; use parking_lot::RwLock; @@ -28,13 +29,17 @@ const LOG_TARGET: &str = "optimistic_executor"; #[derive(Debug, Clone)] pub struct OptimisticState { - pub state: CachedDbProvider, + pub state: SharedStateCache, pub transactions: Arc>>, } impl OptimisticState { - pub fn new(state: CachedDbProvider) -> Self { - Self { state, transactions: Arc::new(RwLock::new(Vec::new())) } + pub fn new() -> Self { + Self { state: SharedStateCache::default(), transactions: Arc::new(RwLock::new(Vec::new())) } + } + + pub fn get_optimistic_state(&self, base: Box) -> Box { + Box::new(CachedStateProvider::new(base, self.state.clone())) } } @@ -101,13 +106,13 @@ impl OptimisticExecutor { /// Polls for confirmed blocks every 2 seconds and removes transactions from the optimistic /// state when they appear in confirmed blocks. 
async fn poll_confirmed_blocks(client: Client, optimistic_state: OptimisticState) { + let mut last_block_number = None; + loop { sleep(Duration::from_secs(2)).await; match client.get_block_with_tx_hashes(BlockIdOrTag::Latest).await { Ok(block_response) => { - use katana_rpc_types::block::GetBlockWithTxHashesResponse; - let (block_number, block_tx_hashes) = match block_response { GetBlockWithTxHashesResponse::Block(block) => { (block.block_number, block.transactions) @@ -117,6 +122,18 @@ impl OptimisticExecutor { } }; + // Check if this is a new block + if let Some(last_num) = last_block_number { + if block_number <= last_num { + // Same block, skip processing + continue; + } + } + + // Update the last seen block number + last_block_number = Some(block_number); + info!(%block_number, "New block received."); + if block_tx_hashes.is_empty() { continue; } @@ -193,12 +210,15 @@ impl OptimisticExecutorActor { /// Execute a single transaction optimistically against the latest state. fn execute_transaction( pool: TxPool, + storage: Blockchain, optimistic_state: OptimisticState, executor_factory: Arc, tx: BroadcastedTxWithChainId, ) -> anyhow::Result<()> { - let latest_state = optimistic_state.state.latest().unwrap(); - let mut executor = executor_factory.with_state(latest_state); + let latest_state = storage.provider().latest()?; + let state = optimistic_state.get_optimistic_state(latest_state); + + let mut executor = executor_factory.with_state(state); // Execute the transaction let tx_hash = tx.hash(); @@ -285,11 +305,18 @@ impl Future for OptimisticExecutorActor { // Spawn the transaction execution on the blocking CPU pool let pool = this.pool.clone(); + let storage = this.storage.clone(); let optimistic_state = this.optimistic_state.clone(); let executor_factory = this.executor_factory.clone(); let execution_future = this.task_spawner.cpu_bound().spawn(move || { - Self::execute_transaction(pool, optimistic_state, executor_factory, tx) + Self::execute_transaction( + 
pool, + storage, + optimistic_state, + executor_factory, + tx, + ) }); this.ongoing_execution = Some(execution_future); diff --git a/crates/rpc/rpc-server/src/starknet/mod.rs b/crates/rpc/rpc-server/src/starknet/mod.rs index 792fad7ae..037bc956f 100644 --- a/crates/rpc/rpc-server/src/starknet/mod.rs +++ b/crates/rpc/rpc-server/src/starknet/mod.rs @@ -69,6 +69,7 @@ mod write; pub use config::PaymasterConfig; pub use config::StarknetApiConfig; use forking::ForkedClient; +pub use pending::OptimisticPendingBlockProvider; type StarknetApiResult = Result; @@ -553,6 +554,18 @@ where async fn transaction(&self, hash: TxHash) -> StarknetApiResult { let tx = self .on_io_blocking_task(move |this| { + // First, check optimistic state for the transaction + if let Some(optimistic_state) = &this.inner.optimistic_state { + let transactions = optimistic_state.transactions.read(); + if let Some((tx, _result)) = transactions.iter().find(|(tx, _)| tx.hash == hash) + { + return Result::<_, StarknetApiError>::Ok(Some(RpcTxWithHash::from( + tx.clone(), + ))); + } + } + + // Check pending block provider if let pending_tx @ Some(..) = this.inner.pending_block_provider.get_pending_transaction(hash)? { @@ -583,6 +596,35 @@ where async fn receipt(&self, hash: Felt) -> StarknetApiResult { let receipt = self .on_io_blocking_task(move |this| { + // First, check optimistic state for the receipt + if let Some(optimistic_state) = &this.inner.optimistic_state { + let transactions = optimistic_state.transactions.read(); + if let Some((_tx, result)) = transactions.iter().find(|(tx, _)| tx.hash == hash) + { + if let katana_executor::ExecutionResult::Success { receipt, .. 
} = result { + // Get the latest block number to use as reference + let provider = &this.inner.storage_provider.provider(); + let latest_num = provider.latest_number()?; + + // Create block info as PreConfirmed (optimistic tx not yet in a block) + let block = katana_rpc_types::receipt::ReceiptBlockInfo::PreConfirmed { + block_number: latest_num + 1, + }; + + // Create receipt with block info + let receipt_with_block = TxReceiptWithBlockInfo::new( + block, + hash, + FinalityStatus::PreConfirmed, + receipt.clone(), + ); + + return StarknetApiResult::Ok(Some(receipt_with_block)); + } + } + } + + // Check pending block provider if let pending_receipt @ Some(..) = this.inner.pending_block_provider.get_pending_receipt(hash)? { @@ -606,6 +648,31 @@ where async fn transaction_status(&self, hash: TxHash) -> StarknetApiResult { let status = self .on_io_blocking_task(move |this| { + // First, check optimistic state for the transaction + if let Some(optimistic_state) = &this.inner.optimistic_state { + let transactions = optimistic_state.transactions.read(); + if let Some((_tx, result)) = transactions.iter().find(|(tx, _)| tx.hash == hash) + { + let exec_status = match result { + katana_executor::ExecutionResult::Success { receipt, .. } => { + if let Some(reason) = receipt.revert_reason() { + katana_rpc_types::ExecutionResult::Reverted { + reason: reason.to_string(), + } + } else { + katana_rpc_types::ExecutionResult::Succeeded + } + } + katana_executor::ExecutionResult::Failed { error } => { + katana_rpc_types::ExecutionResult::Reverted { + reason: error.to_string(), + } + } + }; + return Ok(Some(TxStatus::PreConfirmed(exec_status))); + } + } + let provider = &this.inner.storage_provider.provider(); let status = provider.transaction_status(hash)?; @@ -745,15 +812,17 @@ where } } - if let Some(num) = provider.convert_block_id(block_id)? { - let block = katana_rpc_types_builder::BlockBuilder::new(num.into(), provider) - .build_with_tx_hash()? 
- .map(GetBlockWithTxHashesResponse::Block); + // if let Some(num) = provider.convert_block_id(block_id)? { + // let block = katana_rpc_types_builder::BlockBuilder::new(num.into(), provider) + // .build_with_tx_hash()? + // .map(GetBlockWithTxHashesResponse::Block); - StarknetApiResult::Ok(block) - } else { - StarknetApiResult::Ok(None) - } + // StarknetApiResult::Ok(block) + // } else { + // StarknetApiResult::Ok(None) + // } + + StarknetApiResult::Ok(None) }) .await??; diff --git a/crates/rpc/rpc-server/src/starknet/pending.rs b/crates/rpc/rpc-server/src/starknet/pending.rs index 271e49a9b..febb4ecf7 100644 --- a/crates/rpc/rpc-server/src/starknet/pending.rs +++ b/crates/rpc/rpc-server/src/starknet/pending.rs @@ -7,7 +7,10 @@ use katana_primitives::da::L1DataAvailabilityMode; use katana_primitives::execution::TypedTransactionExecutionInfo; use katana_primitives::transaction::{TxHash, TxNumber}; use katana_primitives::version::CURRENT_STARKNET_VERSION; -use katana_provider::api::state::StateProvider; +use katana_provider::api::block::BlockNumberProvider; +use katana_provider::api::state::{StateFactoryProvider, StateProvider}; +use katana_provider::providers::db::cached::CachedStateProvider; +use katana_rpc_client::starknet::Client; use katana_rpc_types::{ FinalityStatus, PreConfirmedBlockWithReceipts, PreConfirmedBlockWithTxHashes, PreConfirmedBlockWithTxs, PreConfirmedStateUpdate, ReceiptBlockInfo, RpcTxWithHash, @@ -273,6 +276,125 @@ impl PendingBlockProvider for BlockProducer { } } +/// A pending block provider that checks the optimistic state for transactions/receipts, +/// then falls back to the client for all queries. 
+#[derive(Debug, Clone)] +pub struct OptimisticPendingBlockProvider { + optimistic_state: katana_optimistic::executor::OptimisticState, + client: Client, + storage: katana_core::backend::storage::Blockchain, +} + +impl OptimisticPendingBlockProvider { + pub fn new( + optimistic_state: katana_optimistic::executor::OptimisticState, + client: Client, + provider: katana_core::backend::storage::Blockchain, + ) -> Self { + Self { optimistic_state, client, storage: provider } + } +} + +impl PendingBlockProvider for OptimisticPendingBlockProvider { + fn pending_state(&self) -> StarknetApiResult>> { + let latest_state = self.storage.provider().latest()?; + Ok(Some(self.optimistic_state.get_optimistic_state(latest_state))) + } + + fn get_pending_state_update(&self) -> StarknetApiResult> { + self.client.get_pending_state_update() + } + + fn get_pending_block_with_txs(&self) -> StarknetApiResult> { + self.client.get_pending_block_with_txs() + } + + fn get_pending_block_with_receipts( + &self, + ) -> StarknetApiResult> { + self.client.get_pending_block_with_receipts() + } + + fn get_pending_block_with_tx_hashes( + &self, + ) -> StarknetApiResult> { + self.client.get_pending_block_with_tx_hashes() + } + + fn get_pending_transaction(&self, hash: TxHash) -> StarknetApiResult> { + // First, check optimistic state + let transactions = self.optimistic_state.transactions.read(); + if let Some((tx, _result)) = transactions.iter().find(|(tx, _)| tx.hash == hash) { + return Ok(Some(RpcTxWithHash::from(tx.clone()))); + } + + // Fall back to client + self.client.get_pending_transaction(hash) + } + + fn get_pending_receipt( + &self, + hash: TxHash, + ) -> StarknetApiResult> { + // First, check optimistic state + let transactions = self.optimistic_state.transactions.read(); + if let Some((_tx, result)) = transactions.iter().find(|(tx, _)| tx.hash == hash) { + if let katana_executor::ExecutionResult::Success { receipt, .. 
} = result { + // Get the latest block number to use as reference + let latest_num = self.storage.provider().latest_number().map_err(|e| { + crate::starknet::StarknetApiError::unexpected(format!( + "Failed to get latest block number: {e}" + )) + })?; + + // Create block info as PreConfirmed (optimistic tx not yet in a block) + let block = ReceiptBlockInfo::PreConfirmed { block_number: latest_num + 1 }; + + // Create receipt with block info + let receipt_with_block = TxReceiptWithBlockInfo::new( + block, + hash, + FinalityStatus::PreConfirmed, + receipt.clone(), + ); + + return Ok(Some(receipt_with_block)); + } + } + + // Fall back to client + self.client.get_pending_receipt(hash) + } + + fn get_pending_trace(&self, hash: TxHash) -> StarknetApiResult> { + // First, check optimistic state + let transactions = self.optimistic_state.transactions.read(); + if let Some((tx, result)) = transactions.iter().find(|(tx, _)| tx.hash == hash) { + if let katana_executor::ExecutionResult::Success { trace, .. 
} = result { + let typed_trace = TypedTransactionExecutionInfo::new(tx.r#type(), trace.clone()); + return Ok(Some(TxTrace::from(typed_trace))); + } + } + + // Fall back to client + self.client.get_pending_trace(hash) + } + + fn get_pending_transaction_by_index( + &self, + index: TxNumber, + ) -> StarknetApiResult> { + // Check optimistic state by index + let transactions = self.optimistic_state.transactions.read(); + if let Some((tx, _result)) = transactions.get(index as usize) { + return Ok(Some(RpcTxWithHash::from(tx.clone()))); + } + + // Fall back to client + self.client.get_pending_transaction_by_index(index) + } +} + impl PendingBlockProvider for katana_rpc_client::starknet::Client { fn get_pending_state_update(&self) -> StarknetApiResult> { let result = futures::executor::block_on(async { diff --git a/crates/storage/provider/provider/src/providers/db/cached.rs b/crates/storage/provider/provider/src/providers/db/cached.rs index 6cf0604b5..47e6ce170 100644 --- a/crates/storage/provider/provider/src/providers/db/cached.rs +++ b/crates/storage/provider/provider/src/providers/db/cached.rs @@ -1,40 +1,13 @@ -use std::collections::{BTreeMap, HashMap}; -use std::ops::{Range, RangeInclusive}; +use std::collections::HashMap; use std::sync::Arc; -use katana_db::abstraction::Database; -use katana_db::models::block::StoredBlockBodyIndices; -use katana_primitives::block::{ - Block, BlockHash, BlockHashOrNumber, BlockIdOrTag, BlockNumber, BlockWithTxHashes, - FinalityStatus, Header, SealedBlockWithStatus, -}; use katana_primitives::class::{ClassHash, CompiledClassHash, ContractClass}; use katana_primitives::contract::{ContractAddress, Nonce, StorageKey, StorageValue}; -use katana_primitives::env::BlockEnv; -use katana_primitives::execution::TypedTransactionExecutionInfo; -use katana_primitives::receipt::Receipt; -use katana_primitives::state::{StateUpdates, StateUpdatesWithClasses}; -use katana_primitives::transaction::{TxHash, TxNumber, TxWithHash}; -use 
katana_provider_api::block::{ - BlockHashProvider, BlockNumberProvider, BlockProvider, BlockStatusProvider, BlockWriter, - HeaderProvider, -}; -use katana_provider_api::contract::{ContractClassProvider, ContractClassWriter}; -use katana_provider_api::env::BlockEnvProvider; -use katana_provider_api::stage::StageCheckpointProvider; -use katana_provider_api::state::{ - StateFactoryProvider, StateProofProvider, StateProvider, StateRootProvider, StateWriter, -}; -use katana_provider_api::state_update::StateUpdateProvider; -use katana_provider_api::transaction::{ - ReceiptProvider, TransactionProvider, TransactionStatusProvider, TransactionTraceProvider, - TransactionsProviderExt, -}; -use katana_provider_api::trie::TrieWriter; +use katana_primitives::state::StateUpdatesWithClasses; +use katana_provider_api::contract::ContractClassProvider; +use katana_provider_api::state::{StateProofProvider, StateProvider, StateRootProvider}; use parking_lot::RwLock; -use crate::providers::fork::state::HistoricalStateProvider as ForkHistoricalStateProvider; -use crate::providers::fork::ForkedProvider; use crate::ProviderResult; /// Inner cache data protected by a single lock for consistent snapshots. @@ -57,17 +30,17 @@ struct StateCacheInner { /// Uses a single read-write lock to ensure consistent snapshots across all cached data. /// This prevents reading inconsistent state that could occur with multiple independent locks. #[derive(Debug, Clone)] -pub struct StateCache { +pub struct SharedStateCache { inner: Arc>, } -impl Default for StateCache { +impl Default for SharedStateCache { fn default() -> Self { Self::new() } } -impl StateCache { +impl SharedStateCache { fn new() -> Self { Self { inner: Arc::new(RwLock::new(StateCacheInner::default())) } } @@ -101,53 +74,10 @@ impl StateCache { cache.classes.clear(); cache.compiled_class_hashes.clear(); } -} - -/// A cached version of provider that wraps the underlying provider with an in-memory cache -/// for state data. 
-/// -/// The cache is used to store frequently accessed state information such as nonces, storage values, -/// class hashes, and contract classes. When querying state through the [`StateProvider`] interface, -/// the cache is checked first before falling back to the underlying database. -#[derive(Debug, Clone)] -pub struct CachedDbProvider { - /// The underlying provider - inner: ForkedProvider, - /// The in-memory cache for state data - cache: StateCache, -} - -impl CachedDbProvider { - /// Creates a new [`CachedDbProvider`] wrapping the given [`ForkedProvider`]. - pub fn new( - db: Db, - block_id: BlockIdOrTag, - starknet_client: katana_rpc_client::starknet::Client, - ) -> Self { - let inner = ForkedProvider::new(db, block_id, starknet_client); - Self { inner, cache: StateCache::new() } - } - - /// Returns a reference to the underlying [`ForkedProvider`]. - pub fn inner(&self) -> &ForkedProvider { - &self.inner - } -} - -impl CachedDbProvider { - /// Returns a reference to the cache. - pub fn cache(&self) -> &StateCache { - &self.cache - } - - /// Clears all cached data. - pub fn clear_cache(&self) { - self.cache.clear(); - } /// Merges state updates into the cache. pub fn merge_state_updates(&self, updates: &StateUpdatesWithClasses) { - let mut cache = self.cache.inner.write(); + let mut cache = self.inner.write(); let state = &updates.state_updates; for (address, nonce) in &state.nonce_updates { @@ -178,261 +108,18 @@ impl CachedDbProvider { } } -impl StateFactoryProvider for CachedDbProvider { - fn latest(&self) -> ProviderResult> { - Ok(Box::new(CachedStateProvider { state: self.inner.latest()?, cache: self.cache.clone() })) - } - - fn historical( - &self, - block_id: BlockHashOrNumber, - ) -> ProviderResult>> { - if let Some(state) = self.inner.historical(block_id)? 
{ - Ok(Some(Box::new(CachedStateProvider { state, cache: self.cache.clone() }))) - } else { - Ok(None) - } - } -} - -impl BlockNumberProvider for CachedDbProvider { - fn block_number_by_hash(&self, hash: BlockHash) -> ProviderResult> { - self.inner.block_number_by_hash(hash) - } - - fn latest_number(&self) -> ProviderResult { - self.inner.latest_number() - } -} - -impl BlockHashProvider for CachedDbProvider { - fn latest_hash(&self) -> ProviderResult { - self.inner.latest_hash() - } - - fn block_hash_by_num(&self, num: BlockNumber) -> ProviderResult> { - self.inner.block_hash_by_num(num) - } -} - -impl HeaderProvider for CachedDbProvider { - fn header(&self, id: BlockHashOrNumber) -> ProviderResult> { - self.inner.header(id) - } -} - -impl BlockProvider for CachedDbProvider { - fn block_body_indices( - &self, - id: BlockHashOrNumber, - ) -> ProviderResult> { - self.inner.block_body_indices(id) - } - - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { - self.inner.block(id) - } - - fn block_with_tx_hashes( - &self, - id: BlockHashOrNumber, - ) -> ProviderResult> { - self.inner.block_with_tx_hashes(id) - } - - fn blocks_in_range(&self, range: RangeInclusive) -> ProviderResult> { - self.inner.blocks_in_range(range) - } -} - -impl BlockStatusProvider for CachedDbProvider { - fn block_status(&self, id: BlockHashOrNumber) -> ProviderResult> { - self.inner.block_status(id) - } -} - -impl StateUpdateProvider for CachedDbProvider { - fn state_update(&self, block_id: BlockHashOrNumber) -> ProviderResult> { - self.inner.state_update(block_id) - } - - fn declared_classes( - &self, - block_id: BlockHashOrNumber, - ) -> ProviderResult>> { - self.inner.declared_classes(block_id) - } - - fn deployed_contracts( - &self, - block_id: BlockHashOrNumber, - ) -> ProviderResult>> { - self.inner.deployed_contracts(block_id) - } -} - -impl TransactionProvider for CachedDbProvider { - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { - 
self.inner.transaction_by_hash(hash) - } - - fn transactions_by_block( - &self, - block_id: BlockHashOrNumber, - ) -> ProviderResult>> { - self.inner.transactions_by_block(block_id) - } - - fn transaction_in_range(&self, range: Range) -> ProviderResult> { - self.inner.transaction_in_range(range) - } - - fn transaction_block_num_and_hash( - &self, - hash: TxHash, - ) -> ProviderResult> { - self.inner.transaction_block_num_and_hash(hash) - } - - fn transaction_by_block_and_idx( - &self, - block_id: BlockHashOrNumber, - idx: u64, - ) -> ProviderResult> { - self.inner.transaction_by_block_and_idx(block_id, idx) - } - - fn transaction_count_by_block( - &self, - block_id: BlockHashOrNumber, - ) -> ProviderResult> { - self.inner.transaction_count_by_block(block_id) - } -} - -impl TransactionsProviderExt for CachedDbProvider { - fn transaction_hashes_in_range(&self, range: Range) -> ProviderResult> { - self.inner.transaction_hashes_in_range(range) - } - - fn total_transactions(&self) -> ProviderResult { - self.inner.total_transactions() - } -} - -impl TransactionStatusProvider for CachedDbProvider { - fn transaction_status(&self, hash: TxHash) -> ProviderResult> { - self.inner.transaction_status(hash) - } -} - -impl TransactionTraceProvider for CachedDbProvider { - fn transaction_execution( - &self, - hash: TxHash, - ) -> ProviderResult> { - self.inner.transaction_execution(hash) - } - - fn transaction_executions_by_block( - &self, - block_id: BlockHashOrNumber, - ) -> ProviderResult>> { - self.inner.transaction_executions_by_block(block_id) - } - - fn transaction_executions_in_range( - &self, - range: Range, - ) -> ProviderResult> { - self.inner.transaction_executions_in_range(range) - } -} - -impl ReceiptProvider for CachedDbProvider { - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { - self.inner.receipt_by_hash(hash) - } - - fn receipts_by_block( - &self, - block_id: BlockHashOrNumber, - ) -> ProviderResult>> { - self.inner.receipts_by_block(block_id) - } 
-} - -impl BlockEnvProvider for CachedDbProvider { - fn block_env_at(&self, block_id: BlockHashOrNumber) -> ProviderResult> { - self.inner.block_env_at(block_id) - } -} - -impl BlockWriter for CachedDbProvider { - fn insert_block_with_states_and_receipts( - &self, - block: SealedBlockWithStatus, - states: StateUpdatesWithClasses, - receipts: Vec, - executions: Vec, - ) -> ProviderResult<()> { - self.inner.insert_block_with_states_and_receipts(block, states, receipts, executions) - } -} - -impl StageCheckpointProvider for CachedDbProvider { - fn checkpoint(&self, id: &str) -> ProviderResult> { - self.inner.checkpoint(id) - } - - fn set_checkpoint(&self, id: &str, block_number: BlockNumber) -> ProviderResult<()> { - self.inner.set_checkpoint(id, block_number) - } -} - -impl StateWriter for CachedDbProvider { - fn set_nonce(&self, address: ContractAddress, nonce: Nonce) -> ProviderResult<()> { - self.inner.set_nonce(address, nonce) - } - - fn set_storage( - &self, - address: ContractAddress, - storage_key: StorageKey, - storage_value: StorageValue, - ) -> ProviderResult<()> { - self.inner.set_storage(address, storage_key, storage_value) - } - - fn set_class_hash_of_contract( - &self, - address: ContractAddress, - class_hash: ClassHash, - ) -> ProviderResult<()> { - self.inner.set_class_hash_of_contract(address, class_hash) - } -} - -impl ContractClassWriter for CachedDbProvider { - fn set_class(&self, hash: ClassHash, class: ContractClass) -> ProviderResult<()> { - self.inner.set_class(hash, class) - } - - fn set_compiled_class_hash_of_class_hash( - &self, - hash: ClassHash, - compiled_hash: CompiledClassHash, - ) -> ProviderResult<()> { - self.inner.set_compiled_class_hash_of_class_hash(hash, compiled_hash) - } -} - /// A cached version of fork [`LatestStateProvider`] that checks the cache before querying the /// database. 
#[derive(Debug)] -struct CachedStateProvider { +pub struct CachedStateProvider { state: S, - cache: StateCache, + cache: SharedStateCache, +} + +impl CachedStateProvider { + pub fn new(state: S, cache: SharedStateCache) -> Self { + Self { state, cache } + } } impl ContractClassProvider for CachedStateProvider { @@ -501,87 +188,5 @@ impl StateProvider for CachedStateProvider { } } -/// A cached version of fork [`HistoricalStateProvider`] that checks the cache before querying the -/// database. -#[derive(Debug)] -struct CachedHistoricalStateProvider { - inner: ForkHistoricalStateProvider, - cache: StateCache, -} - -impl ContractClassProvider for CachedHistoricalStateProvider { - fn class(&self, hash: ClassHash) -> ProviderResult> { - if let Some(class) = self.cache.get_class(hash) { - Ok(Some(class)) - } else { - Ok(self.inner.class(hash)?) - } - } - - fn compiled_class_hash_of_class_hash( - &self, - hash: ClassHash, - ) -> ProviderResult> { - if let Some(compiled_hash) = self.cache.get_compiled_class_hash(hash) { - Ok(Some(compiled_hash)) - } else { - Ok(self.inner.compiled_class_hash_of_class_hash(hash)?) - } - } -} - -impl StateProvider for CachedHistoricalStateProvider { - fn nonce(&self, address: ContractAddress) -> ProviderResult> { - if let Some(nonce) = self.cache.get_nonce(address) { - Ok(Some(nonce)) - } else { - Ok(self.inner.nonce(address)?) - } - } - - fn storage( - &self, - address: ContractAddress, - storage_key: StorageKey, - ) -> ProviderResult> { - if let Some(value) = self.cache.get_storage(address, storage_key) { - Ok(Some(value)) - } else { - Ok(self.inner.storage(address, storage_key)?) - } - } - - fn class_hash_of_contract( - &self, - address: ContractAddress, - ) -> ProviderResult> { - if let Some(class_hash) = self.cache.get_class_hash(address) { - Ok(Some(class_hash)) - } else { - Ok(self.inner.class_hash_of_contract(address)?) 
- } - } -} - impl StateProofProvider for CachedStateProvider {} impl StateRootProvider for CachedStateProvider {} -impl StateProofProvider for CachedHistoricalStateProvider {} -impl StateRootProvider for CachedHistoricalStateProvider {} - -impl TrieWriter for CachedDbProvider { - fn trie_insert_contract_updates( - &self, - block_number: BlockNumber, - state_updates: &StateUpdates, - ) -> ProviderResult { - todo!() - } - - fn trie_insert_declared_classes( - &self, - block_number: BlockNumber, - updates: &BTreeMap, - ) -> ProviderResult { - todo!() - } -} From 5cecd8248e68616d3aa041821bd4e83aab485f5b Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Tue, 4 Nov 2025 15:17:30 -0500 Subject: [PATCH 13/26] wip --- crates/node/src/optimistic/mod.rs | 13 ++++++++ crates/optimistic/src/executor.rs | 50 +++++++++++++++++++++++++++---- 2 files changed, 57 insertions(+), 6 deletions(-) diff --git a/crates/node/src/optimistic/mod.rs b/crates/node/src/optimistic/mod.rs index b63df8de4..7f28ddc9b 100644 --- a/crates/node/src/optimistic/mod.rs +++ b/crates/node/src/optimistic/mod.rs @@ -21,6 +21,8 @@ use katana_optimistic::pool::{PoolValidator, TxPool}; use katana_pool::ordering::FiFo; use katana_primitives::block::BlockIdOrTag; use katana_primitives::env::{CfgEnv, FeeTokenAddressses}; +use katana_provider::api::block::BlockNumberProvider; +use katana_provider::api::env::BlockEnvProvider; use katana_provider::providers::fork::ForkedProvider; use katana_rpc::cors::Cors; use katana_rpc::starknet::forking::ForkedClient; @@ -134,6 +136,16 @@ impl Node { let optimistic_state = OptimisticState::new(); + // Get the initial block environment from the latest block + let latest_num = blockchain.provider().latest_number()?; + let mut initial_block_env = blockchain + .provider() + .block_env_at(latest_num.into())? 
+ .ok_or_else(|| anyhow::anyhow!("Failed to get initial block environment"))?; + + // Update the block environment to the next block + backend.update_block_env(&mut initial_block_env); + // this is the component that will populate the optimistic state let executor = OptimisticExecutor::new( pool.clone(), @@ -142,6 +154,7 @@ impl Node { executor_factory.clone(), task_spawner.clone(), starknet_client.clone(), + initial_block_env, ); // --- build rpc server diff --git a/crates/optimistic/src/executor.rs b/crates/optimistic/src/executor.rs index 8ac8556a9..d7a5d2103 100644 --- a/crates/optimistic/src/executor.rs +++ b/crates/optimistic/src/executor.rs @@ -12,7 +12,9 @@ use katana_executor::{ExecutionResult, ExecutorFactory}; use katana_pool::ordering::FiFo; use katana_pool::{PendingTransactions, PoolTransaction, TransactionPool}; use katana_primitives::block::BlockIdOrTag; +use katana_primitives::env::BlockEnv; use katana_primitives::transaction::TxWithHash; +use katana_provider::api::env::BlockEnvProvider; use katana_provider::api::state::{StateFactoryProvider, StateProvider}; use katana_provider::providers::db::cached::{CachedStateProvider, SharedStateCache}; use katana_rpc_client::starknet::Client; @@ -51,6 +53,7 @@ pub struct OptimisticExecutor { storage: Blockchain, task_spawner: TaskSpawner, client: Client, + block_env: Arc>, } impl OptimisticExecutor { @@ -62,6 +65,7 @@ impl OptimisticExecutor { /// * `backend` - The backend containing the executor factory and blockchain state /// * `task_spawner` - The task spawner used to run the executor actor /// * `client` - The RPC client used to poll for confirmed blocks + /// * `block_env` - The initial block environment pub fn new( pool: TxPool, storage: Blockchain, @@ -69,8 +73,17 @@ impl OptimisticExecutor { executor_factory: Arc, task_spawner: TaskSpawner, client: Client, + block_env: BlockEnv, ) -> Self { - Self { pool, optimistic_state, executor_factory, task_spawner, storage, client } + Self { + pool, + 
optimistic_state, + executor_factory, + task_spawner, + storage, + client, + block_env: Arc::new(RwLock::new(block_env)), + } } /// Spawns the optimistic executor actor task. @@ -86,26 +99,34 @@ impl OptimisticExecutor { let executor_handle = self.task_spawner.build_task().name("Optimistic Executor").spawn( OptimisticExecutorActor::new( self.pool, - self.storage, + self.storage.clone(), self.optimistic_state.clone(), self.executor_factory, self.task_spawner.clone(), + self.block_env.clone(), ), ); // Spawn the block polling task let client = self.client; let optimistic_state = self.optimistic_state; + let block_env = self.block_env; + let storage = self.storage; self.task_spawner.build_task().name("Block Polling").spawn(async move { - Self::poll_confirmed_blocks(client, optimistic_state).await; + Self::poll_confirmed_blocks(client, optimistic_state, block_env, storage).await; }); executor_handle } /// Polls for confirmed blocks every 2 seconds and removes transactions from the optimistic - /// state when they appear in confirmed blocks. - async fn poll_confirmed_blocks(client: Client, optimistic_state: OptimisticState) { + /// state when they appear in confirmed blocks. Also updates the block environment. 
+ async fn poll_confirmed_blocks( + client: Client, + optimistic_state: OptimisticState, + block_env: Arc>, + storage: Blockchain, + ) { let mut last_block_number = None; loop { @@ -134,6 +155,14 @@ impl OptimisticExecutor { last_block_number = Some(block_number); info!(%block_number, "New block received."); + // Update the block environment for the next optimistic execution + if let Ok(provider) = storage.provider().block_env_at(block_number.into()) { + if let Some(new_block_env) = provider { + *block_env.write() = new_block_env; + trace!(target: LOG_TARGET, block_number, "Updated block environment"); + } + } + if block_tx_hashes.is_empty() { continue; } @@ -184,6 +213,7 @@ struct OptimisticExecutorActor { executor_factory: Arc, task_spawner: TaskSpawner, ongoing_execution: Option>>, + block_env: Arc>, } impl OptimisticExecutorActor { @@ -194,6 +224,7 @@ impl OptimisticExecutorActor { optimistic_state: OptimisticState, executor_factory: Arc, task_spawner: TaskSpawner, + block_env: Arc>, ) -> Self { let pending_txs = pool.pending_transactions(); Self { @@ -204,6 +235,7 @@ impl OptimisticExecutorActor { executor_factory, task_spawner, ongoing_execution: None, + block_env, } } @@ -213,12 +245,16 @@ impl OptimisticExecutorActor { storage: Blockchain, optimistic_state: OptimisticState, executor_factory: Arc, + block_env: Arc>, tx: BroadcastedTxWithChainId, ) -> anyhow::Result<()> { let latest_state = storage.provider().latest()?; let state = optimistic_state.get_optimistic_state(latest_state); - let mut executor = executor_factory.with_state(state); + // Get the current block environment + let current_block_env = block_env.read().clone(); + + let mut executor = executor_factory.with_state_and_block_env(state, current_block_env); // Execute the transaction let tx_hash = tx.hash(); @@ -308,6 +344,7 @@ impl Future for OptimisticExecutorActor { let storage = this.storage.clone(); let optimistic_state = this.optimistic_state.clone(); let executor_factory = 
this.executor_factory.clone(); + let block_env = this.block_env.clone(); let execution_future = this.task_spawner.cpu_bound().spawn(move || { Self::execute_transaction( @@ -315,6 +352,7 @@ impl Future for OptimisticExecutorActor { storage, optimistic_state, executor_factory, + block_env, tx, ) }); From 20efc34908e333722ddfaf9c74655dfe6b8e13c1 Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Wed, 5 Nov 2025 17:16:48 -0500 Subject: [PATCH 14/26] wip --- Cargo.lock | 1 + crates/cli/src/optimistic.rs | 6 +- .../src/implementation/blockifier/utils.rs | 2 +- crates/explorer/ui | 2 +- crates/gateway/gateway-client/src/lib.rs | 6 +- crates/gateway/gateway-types/Cargo.toml | 1 + .../gateway/gateway-types/src/transaction.rs | 93 ++++++++++++ crates/node/src/optimistic/mod.rs | 12 +- crates/optimistic/Cargo.toml | 1 + crates/optimistic/src/executor.rs | 61 ++++++-- crates/optimistic/src/pool.rs | 21 ++- crates/pool/pool/src/pool.rs | 2 +- crates/rpc/rpc-server/src/logger.rs | 3 +- crates/rpc/rpc-server/src/starknet/forking.rs | 24 +-- crates/rpc/rpc-server/src/starknet/mod.rs | 141 +++++++----------- crates/rpc/rpc-server/src/starknet/pending.rs | 13 +- crates/rpc/rpc-server/src/starknet/read.rs | 2 +- .../provider/src/providers/db/cached.rs | 19 +-- 18 files changed, 247 insertions(+), 163 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cd8eca183..59cd38435 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6364,6 +6364,7 @@ dependencies = [ "katana-core", "katana-db", "katana-executor", + "katana-gateway-client", "katana-pool", "katana-pool-api", "katana-primitives", diff --git a/crates/cli/src/optimistic.rs b/crates/cli/src/optimistic.rs index bae8f66ed..65c80a6e1 100644 --- a/crates/cli/src/optimistic.rs +++ b/crates/cli/src/optimistic.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use anyhow::Result; pub use clap::Parser; use katana_chain_spec::ChainSpec; +use katana_primitives::chain::ChainId; use serde::{Deserialize, Serialize}; use tracing::info; use url::Url; @@ -87,8 +88,9 
@@ impl OptimisticNodeArgs { } fn chain_spec(&self) -> Result> { - // Always use dev chain spec for optimistic node - Ok(Arc::new(ChainSpec::Dev(Default::default()))) + let mut dev_chain_spec = katana_chain_spec::dev::ChainSpec::default(); + dev_chain_spec.id = ChainId::SEPOLIA; + Ok(Arc::new(ChainSpec::Dev(dev_chain_spec))) } fn forking_config(&self) -> katana_node::optimistic::config::ForkingConfig { diff --git a/crates/executor/src/implementation/blockifier/utils.rs b/crates/executor/src/implementation/blockifier/utils.rs index 5fa4efbed..1e5183da0 100644 --- a/crates/executor/src/implementation/blockifier/utils.rs +++ b/crates/executor/src/implementation/blockifier/utils.rs @@ -469,7 +469,7 @@ pub fn block_context_from_envs(block_env: &BlockEnv, cfg_env: &CfgEnv) -> BlockC block_timestamp: BlockTimestamp(block_env.timestamp), sequencer_address: to_blk_address(block_env.sequencer_address), gas_prices, - use_kzg_da: false, + use_kzg_da: true, }; let chain_info = ChainInfo { fee_token_addresses, chain_id: to_blk_chain_id(cfg_env.chain_id) }; diff --git a/crates/explorer/ui b/crates/explorer/ui index 2be6bfc5e..5d03c18ce 160000 --- a/crates/explorer/ui +++ b/crates/explorer/ui @@ -1 +1 @@ -Subproject commit 2be6bfc5e6530756b20381257b1043ac599c396f +Subproject commit 5d03c18ce96a4e0aa0c2773a948285dbdd6c1acb diff --git a/crates/gateway/gateway-client/src/lib.rs b/crates/gateway/gateway-client/src/lib.rs index d5eeeb6de..c2808b28a 100644 --- a/crates/gateway/gateway-client/src/lib.rs +++ b/crates/gateway/gateway-client/src/lib.rs @@ -132,21 +132,21 @@ impl Client { pub async fn add_invoke_transaction( &self, - transaction: BroadcastedInvokeTx, + transaction: katana_rpc_types::broadcasted::BroadcastedInvokeTx, ) -> Result { self.gateway("add_transaction").json(&transaction).send().await } pub async fn add_declare_transaction( &self, - transaction: BroadcastedDeclareTx, + transaction: katana_rpc_types::broadcasted::BroadcastedDeclareTx, ) -> Result { 
self.gateway("add_transaction").json(&transaction).send().await } pub async fn add_deploy_account_transaction( &self, - transaction: BroadcastedDeployAccountTx, + transaction: katana_rpc_types::broadcasted::BroadcastedDeployAccountTx, ) -> Result { self.gateway("add_transaction").json(&transaction).send().await } diff --git a/crates/gateway/gateway-types/Cargo.toml b/crates/gateway/gateway-types/Cargo.toml index bb56b9f81..35e95acd3 100644 --- a/crates/gateway/gateway-types/Cargo.toml +++ b/crates/gateway/gateway-types/Cargo.toml @@ -16,6 +16,7 @@ cairo-lang-starknet-classes.workspace = true flate2.workspace = true serde_json.workspace = true serde.workspace = true +serde_json.workspace = true starknet.workspace = true thiserror.workspace = true diff --git a/crates/gateway/gateway-types/src/transaction.rs b/crates/gateway/gateway-types/src/transaction.rs index ae24a88ef..933d54837 100644 --- a/crates/gateway/gateway-types/src/transaction.rs +++ b/crates/gateway/gateway-types/src/transaction.rs @@ -659,6 +659,99 @@ impl From for katana_primitives::da::DataAvailabilityMode } } +// Custom serialization for contract class with gzip + base64 encoded sierra program +fn serialize_contract_class( + class: &std::sync::Arc, + serializer: S, +) -> Result { + use std::io::Write; + + use base64::Engine; + use flate2::write::GzEncoder; + use flate2::Compression; + use serde::ser::SerializeStruct; + + let mut state = serializer.serialize_struct("GatewaySierraClass", 4)?; + + // Convert sierra_program (Vec) to JSON array, then gzip compress, then base64 encode + let program_json = + serde_json::to_string(&class.sierra_program).map_err(serde::ser::Error::custom)?; + + // Gzip compress the JSON + let mut encoder = GzEncoder::new(Vec::new(), Compression::default()); + encoder + .write_all(program_json.as_bytes()) + .map_err(|e| serde::ser::Error::custom(format!("gzip compression failed: {e}")))?; + let compressed_bytes = encoder + .finish() + .map_err(|e| 
serde::ser::Error::custom(format!("gzip finish failed: {e}")))?; + + // Base64 encode + let program_base64 = base64::engine::general_purpose::STANDARD.encode(&compressed_bytes); + + state.serialize_field("sierra_program", &program_base64)?; + state.serialize_field("contract_class_version", &class.contract_class_version)?; + state.serialize_field("entry_points_by_type", &class.entry_points_by_type)?; + + // Serialize ABI - it's already in pythonic JSON format via SierraClassAbi's Serialize impl + let abi_str = class.abi.to_string(); + state.serialize_field("abi", &abi_str)?; + + state.end() +} + +fn deserialize_contract_class<'de, D: serde::Deserializer<'de>>( + deserializer: D, +) -> Result, D::Error> { + use std::io::Read; + + use base64::Engine; + use flate2::read::GzDecoder; + use serde::de; + + #[derive(Deserialize)] + struct GatewaySierraClass { + sierra_program: String, + contract_class_version: String, + entry_points_by_type: cairo_lang_starknet_classes::contract_class::ContractEntryPoints, + abi: String, + } + + let gateway_class = GatewaySierraClass::deserialize(deserializer)?; + + // Base64 decode + let compressed_bytes = base64::engine::general_purpose::STANDARD + .decode(&gateway_class.sierra_program) + .map_err(|e| de::Error::custom(format!("failed to decode base64 sierra_program: {e}")))?; + + // Gzip decompress + let mut decoder = GzDecoder::new(&compressed_bytes[..]); + let mut decompressed = String::new(); + decoder + .read_to_string(&mut decompressed) + .map_err(|e| de::Error::custom(format!("failed to decompress sierra_program: {e}")))?; + + // Parse JSON array to Vec + let sierra_program: Vec = serde_json::from_str(&decompressed) + .map_err(|e| de::Error::custom(format!("failed to parse sierra_program JSON: {e}")))?; + + // Deserialize ABI from pythonic JSON string + let abi: katana_rpc_types::class::SierraClassAbi = + if gateway_class.abi.is_empty() || gateway_class.abi == "[]" { + Default::default() + } else { + 
serde_json::from_str(&gateway_class.abi) + .map_err(|e| de::Error::custom(format!("invalid abi: {e}")))? + }; + + Ok(std::sync::Arc::new(katana_rpc_types::class::SierraClass { + sierra_program, + contract_class_version: gateway_class.contract_class_version, + entry_points_by_type: gateway_class.entry_points_by_type, + abi, + })) +} + fn deserialize_resource_bounds_mapping<'de, D: Deserializer<'de>>( deserializer: D, ) -> Result { diff --git a/crates/node/src/optimistic/mod.rs b/crates/node/src/optimistic/mod.rs index 7f28ddc9b..6eb9b63a8 100644 --- a/crates/node/src/optimistic/mod.rs +++ b/crates/node/src/optimistic/mod.rs @@ -136,16 +136,6 @@ impl Node { let optimistic_state = OptimisticState::new(); - // Get the initial block environment from the latest block - let latest_num = blockchain.provider().latest_number()?; - let mut initial_block_env = blockchain - .provider() - .block_env_at(latest_num.into())? - .ok_or_else(|| anyhow::anyhow!("Failed to get initial block environment"))?; - - // Update the block environment to the next block - backend.update_block_env(&mut initial_block_env); - // this is the component that will populate the optimistic state let executor = OptimisticExecutor::new( pool.clone(), @@ -154,7 +144,7 @@ impl Node { executor_factory.clone(), task_spawner.clone(), starknet_client.clone(), - initial_block_env, + Default::default(), ); // --- build rpc server diff --git a/crates/optimistic/Cargo.toml b/crates/optimistic/Cargo.toml index b26794588..137df1f24 100644 --- a/crates/optimistic/Cargo.toml +++ b/crates/optimistic/Cargo.toml @@ -11,6 +11,7 @@ anyhow.workspace = true katana-core.workspace = true katana-executor.workspace = true katana-pool.workspace = true +katana-gateway-client.workspace = true katana-primitives.workspace = true katana-provider.workspace = true katana-rpc-types.workspace = true diff --git a/crates/optimistic/src/executor.rs b/crates/optimistic/src/executor.rs index d7a5d2103..c9bd3745e 100644 --- 
a/crates/optimistic/src/executor.rs +++ b/crates/optimistic/src/executor.rs @@ -11,9 +11,10 @@ use katana_executor::implementation::blockifier::BlockifierFactory; use katana_executor::{ExecutionResult, ExecutorFactory}; use katana_pool::ordering::FiFo; use katana_pool::{PendingTransactions, PoolTransaction, TransactionPool}; -use katana_primitives::block::BlockIdOrTag; +use katana_primitives::block::{BlockIdOrTag, GasPrices}; use katana_primitives::env::BlockEnv; use katana_primitives::transaction::TxWithHash; +use katana_primitives::version::StarknetVersion; use katana_provider::api::env::BlockEnvProvider; use katana_provider::api::state::{StateFactoryProvider, StateProvider}; use katana_provider::providers::db::cached::{CachedStateProvider, SharedStateCache}; @@ -111,9 +112,8 @@ impl OptimisticExecutor { let client = self.client; let optimistic_state = self.optimistic_state; let block_env = self.block_env; - let storage = self.storage; self.task_spawner.build_task().name("Block Polling").spawn(async move { - Self::poll_confirmed_blocks(client, optimistic_state, block_env, storage).await; + Self::poll_confirmed_blocks(client, optimistic_state, block_env).await; }); executor_handle @@ -125,7 +125,6 @@ impl OptimisticExecutor { client: Client, optimistic_state: OptimisticState, block_env: Arc>, - storage: Blockchain, ) { let mut last_block_number = None; @@ -134,12 +133,50 @@ impl OptimisticExecutor { match client.get_block_with_tx_hashes(BlockIdOrTag::Latest).await { Ok(block_response) => { - let (block_number, block_tx_hashes) = match block_response { + let (block_number, block_tx_hashes, new_block_env) = match &block_response { GetBlockWithTxHashesResponse::Block(block) => { - (block.block_number, block.transactions) + let env = BlockEnv { + number: block.block_number, + timestamp: block.timestamp, + l2_gas_prices: GasPrices { + eth: block.l2_gas_price.price_in_wei.try_into().unwrap(), + strk: block.l2_gas_price.price_in_fri.try_into().unwrap(), + }, + 
l1_gas_prices: GasPrices { + eth: block.l1_gas_price.price_in_wei.try_into().unwrap(), + strk: block.l1_gas_price.price_in_fri.try_into().unwrap(), + }, + l1_data_gas_prices: GasPrices { + eth: block.l1_data_gas_price.price_in_wei.try_into().unwrap(), + strk: block.l1_data_gas_price.price_in_fri.try_into().unwrap(), + }, + sequencer_address: block.sequencer_address, + starknet_version: StarknetVersion::parse(&block.starknet_version) + .unwrap_or_default(), + }; + (block.block_number, block.transactions.clone(), env) } GetBlockWithTxHashesResponse::PreConfirmed(block) => { - (block.block_number, block.transactions) + let env = BlockEnv { + number: block.block_number, + timestamp: block.timestamp, + l2_gas_prices: GasPrices { + eth: block.l2_gas_price.price_in_wei.try_into().unwrap(), + strk: block.l2_gas_price.price_in_fri.try_into().unwrap(), + }, + l1_gas_prices: GasPrices { + eth: block.l1_gas_price.price_in_wei.try_into().unwrap(), + strk: block.l1_gas_price.price_in_fri.try_into().unwrap(), + }, + l1_data_gas_prices: GasPrices { + eth: block.l1_data_gas_price.price_in_wei.try_into().unwrap(), + strk: block.l1_data_gas_price.price_in_fri.try_into().unwrap(), + }, + sequencer_address: block.sequencer_address, + starknet_version: StarknetVersion::parse(&block.starknet_version) + .unwrap_or_default(), + }; + (block.block_number, block.transactions.clone(), env) } }; @@ -156,12 +193,8 @@ impl OptimisticExecutor { info!(%block_number, "New block received."); // Update the block environment for the next optimistic execution - if let Ok(provider) = storage.provider().block_env_at(block_number.into()) { - if let Some(new_block_env) = provider { - *block_env.write() = new_block_env; - trace!(target: LOG_TARGET, block_number, "Updated block environment"); - } - } + *block_env.write() = new_block_env; + trace!(target: LOG_TARGET, block_number, "Updated block environment"); if block_tx_hashes.is_empty() { continue; @@ -289,7 +322,7 @@ impl Future for OptimisticExecutorActor 
{ match result { TaskResult::Ok(Ok(())) => { // Execution completed successfully, continue to next transaction - trace!(target: LOG_TARGET, "Transaction execution completed successfully"); + info!(target: LOG_TARGET, "Transaction execution completed successfully"); } TaskResult::Ok(Err(e)) => { error!( diff --git a/crates/optimistic/src/pool.rs b/crates/optimistic/src/pool.rs index 835e4795a..7862e585d 100644 --- a/crates/optimistic/src/pool.rs +++ b/crates/optimistic/src/pool.rs @@ -7,6 +7,7 @@ use katana_pool_api::validation::{ }; use katana_rpc_client::starknet::Client; use katana_rpc_types::{BroadcastedTx, BroadcastedTxWithChainId}; +use tracing::{debug, info}; pub type TxPool = Pool>; @@ -14,15 +15,16 @@ pub type TxPool = Pool, + gateway_client: katana_gateway_client::Client, } impl PoolValidator { pub fn new(client: Client) -> Self { - Self { client: Arc::new(client) } + Self { client: Arc::new(client), gateway_client: katana_gateway_client::Client::sepolia() } } pub fn new_shared(client: Arc) -> Self { - Self { client } + Self { client, gateway_client: katana_gateway_client::Client::sepolia() } } } @@ -36,21 +38,28 @@ impl Validator for PoolValidator { // Forward the transaction to the remote node let result = match &tx.tx { BroadcastedTx::Invoke(invoke_tx) => { - self.client.add_invoke_transaction(invoke_tx.clone()).await.map(|_| ()) + self.gateway_client.add_invoke_transaction(invoke_tx.clone()).await.map(|_| ()) + // self.client.add_invoke_transaction(invoke_tx.clone()).await.map(|_| ()) } BroadcastedTx::Declare(declare_tx) => { - self.client.add_declare_transaction(declare_tx.clone()).await.map(|_| ()) + self.gateway_client.add_declare_transaction(declare_tx.clone()).await.map(|_| ()) + // self.client.add_declare_transaction(declare_tx.clone()).await.map(|_| ()) } BroadcastedTx::DeployAccount(deploy_account_tx) => self - .client + .gateway_client .add_deploy_account_transaction(deploy_account_tx.clone()) .await - .map(|_| ()), + .map(|_| ()), /* self + * 
.client + * .add_deploy_account_transaction(deploy_account_tx.clone()) + * .await + * .map(|_| ()), */ }; match result { Ok(_) => Ok(ValidationOutcome::Valid(tx)), Err(err) => { + info!(error = ?err, "Gateway validation failure."); let error = InvalidTransactionError::ValidationFailure { address: match &tx.tx { BroadcastedTx::Invoke(tx) => tx.sender_address, diff --git a/crates/pool/pool/src/pool.rs b/crates/pool/pool/src/pool.rs index 3f60f2c53..8c5b72788 100644 --- a/crates/pool/pool/src/pool.rs +++ b/crates/pool/pool/src/pool.rs @@ -160,7 +160,7 @@ where // TODO: create a small cache for rejected transactions to respect the rpc spec // `getTransactionStatus` ValidationOutcome::Invalid { error, .. } => { - warn!(target: "pool", %error, "Invalid transaction."); + warn!(target: "pool", ?error, "Invalid transaction."); Err(PoolError::InvalidTransaction(Box::new(error))) } diff --git a/crates/rpc/rpc-server/src/logger.rs b/crates/rpc/rpc-server/src/logger.rs index 04a530b8e..26acd5ef4 100644 --- a/crates/rpc/rpc-server/src/logger.rs +++ b/crates/rpc/rpc-server/src/logger.rs @@ -3,7 +3,7 @@ use std::future::Future; use jsonrpsee::core::middleware; use jsonrpsee::core::middleware::{Batch, Notification}; use jsonrpsee::types::Request; -use tracing::Instrument; +use tracing::{info, Instrument}; /// RPC logger layer. 
#[derive(Copy, Clone, Debug)] @@ -41,6 +41,7 @@ where #[inline] #[tracing::instrument(target = "rpc", level = "trace", name = "rpc_call", skip_all, fields(method = req.method_name()))] fn call<'a>(&self, req: Request<'a>) -> impl Future + Send + 'a { + info!(method = %req.method, "Rpc called."); self.service.call(req).in_current_span() } diff --git a/crates/rpc/rpc-server/src/starknet/forking.rs b/crates/rpc/rpc-server/src/starknet/forking.rs index e431f9954..84e36b996 100644 --- a/crates/rpc/rpc-server/src/starknet/forking.rs +++ b/crates/rpc/rpc-server/src/starknet/forking.rs @@ -193,18 +193,18 @@ impl ForkedClient { ) -> Result { let block = self.client.get_block_with_tx_hashes(block_id).await?; - match block { - GetBlockWithTxHashesResponse::Block(ref b) => { - if let BlockIdOrTag::Number(fork_num) = self.block { - if b.block_number > fork_num { - return Err(Error::BlockOutOfRange); - } - } - } - GetBlockWithTxHashesResponse::PreConfirmed(_) => { - return Err(Error::UnexpectedPendingData); - } - } + // match block { + // GetBlockWithTxHashesResponse::Block(ref b) => { + // if let BlockIdOrTag::Number(fork_num) = self.block { + // if b.block_number > fork_num { + // return Err(Error::BlockOutOfRange); + // } + // } + // } + // GetBlockWithTxHashesResponse::PreConfirmed(_) => { + // return Err(Error::UnexpectedPendingData); + // } + // } Ok(block) } diff --git a/crates/rpc/rpc-server/src/starknet/mod.rs b/crates/rpc/rpc-server/src/starknet/mod.rs index 037bc956f..e867a42fd 100644 --- a/crates/rpc/rpc-server/src/starknet/mod.rs +++ b/crates/rpc/rpc-server/src/starknet/mod.rs @@ -485,11 +485,28 @@ where } async fn latest_block_number(&self) -> StarknetApiResult { - self.on_io_blocking_task(move |this| { - let block_number = this.inner.storage_provider.provider().latest_number()?; - Ok(BlockNumberResponse { block_number }) - }) - .await? 
+ let result = self + .inner + .forked_client + .as_ref() + .unwrap() + .get_block_with_tx_hashes(BlockIdOrTag::PreConfirmed) + .await?; + + match result { + GetBlockWithTxHashesResponse::Block(block) => { + Ok(BlockNumberResponse { block_number: block.block_number }) + } + GetBlockWithTxHashesResponse::PreConfirmed(block) => { + Ok(BlockNumberResponse { block_number: block.block_number }) + } + } + + // self.on_io_blocking_task(move |this| { + // let block_number = this.inner.storage_provider.provider().latest_number()?; + // Ok(BlockNumberResponse { block_number }) + // }) + // .await? } pub async fn nonce_at( @@ -594,36 +611,9 @@ where } async fn receipt(&self, hash: Felt) -> StarknetApiResult { + println!("requesting receipt for tx {hash:#x}"); let receipt = self .on_io_blocking_task(move |this| { - // First, check optimistic state for the receipt - if let Some(optimistic_state) = &this.inner.optimistic_state { - let transactions = optimistic_state.transactions.read(); - if let Some((_tx, result)) = transactions.iter().find(|(tx, _)| tx.hash == hash) - { - if let katana_executor::ExecutionResult::Success { receipt, .. } = result { - // Get the latest block number to use as reference - let provider = &this.inner.storage_provider.provider(); - let latest_num = provider.latest_number()?; - - // Create block info as PreConfirmed (optimistic tx not yet in a block) - let block = katana_rpc_types::receipt::ReceiptBlockInfo::PreConfirmed { - block_number: latest_num + 1, - }; - - // Create receipt with block info - let receipt_with_block = TxReceiptWithBlockInfo::new( - block, - hash, - FinalityStatus::PreConfirmed, - receipt.clone(), - ); - - return StarknetApiResult::Ok(Some(receipt_with_block)); - } - } - } - // Check pending block provider if let pending_receipt @ Some(..) = this.inner.pending_block_provider.get_pending_receipt(hash)? 
@@ -648,66 +638,43 @@ where async fn transaction_status(&self, hash: TxHash) -> StarknetApiResult { let status = self .on_io_blocking_task(move |this| { - // First, check optimistic state for the transaction - if let Some(optimistic_state) = &this.inner.optimistic_state { - let transactions = optimistic_state.transactions.read(); - if let Some((_tx, result)) = transactions.iter().find(|(tx, _)| tx.hash == hash) - { - let exec_status = match result { - katana_executor::ExecutionResult::Success { receipt, .. } => { - if let Some(reason) = receipt.revert_reason() { - katana_rpc_types::ExecutionResult::Reverted { - reason: reason.to_string(), - } - } else { - katana_rpc_types::ExecutionResult::Succeeded - } - } - katana_executor::ExecutionResult::Failed { error } => { - katana_rpc_types::ExecutionResult::Reverted { - reason: error.to_string(), - } - } - }; - return Ok(Some(TxStatus::PreConfirmed(exec_status))); - } - } - - let provider = &this.inner.storage_provider.provider(); - let status = provider.transaction_status(hash)?; - - if let Some(status) = status { - // TODO: this might not work once we allow querying for 'failed' transactions - // from the provider - let Some(receipt) = provider.receipt_by_hash(hash)? 
else { - let error = StarknetApiError::unexpected( - "Transaction hash exist, but the receipt is missing", - ); - return Err(error); - }; - - let exec_status = if let Some(reason) = receipt.revert_reason() { - katana_rpc_types::ExecutionResult::Reverted { reason: reason.to_string() } - } else { - katana_rpc_types::ExecutionResult::Succeeded - }; - - let status = match status { - FinalityStatus::AcceptedOnL1 => TxStatus::AcceptedOnL1(exec_status), - FinalityStatus::AcceptedOnL2 => TxStatus::AcceptedOnL2(exec_status), - FinalityStatus::PreConfirmed => TxStatus::PreConfirmed(exec_status), - }; - - return Ok(Some(status)); - } - // seach in the pending block if the transaction is not found if let Some(receipt) = this.inner.pending_block_provider.get_pending_receipt(hash)? { Ok(Some(TxStatus::PreConfirmed(receipt.receipt.execution_result().clone()))) } else { - Ok(None) + let provider = &this.inner.storage_provider.provider(); + let status = provider.transaction_status(hash)?; + + if let Some(status) = status { + // TODO: this might not work once we allow querying for 'failed' transactions + // from the provider + let Some(receipt) = provider.receipt_by_hash(hash)? 
else { + let error = StarknetApiError::unexpected( + "Transaction hash exist, but the receipt is missing", + ); + return Err(error); + }; + + let exec_status = if let Some(reason) = receipt.revert_reason() { + katana_rpc_types::ExecutionResult::Reverted { + reason: reason.to_string(), + } + } else { + katana_rpc_types::ExecutionResult::Succeeded + }; + + let status = match status { + FinalityStatus::AcceptedOnL1 => TxStatus::AcceptedOnL1(exec_status), + FinalityStatus::AcceptedOnL2 => TxStatus::AcceptedOnL2(exec_status), + FinalityStatus::PreConfirmed => TxStatus::PreConfirmed(exec_status), + }; + + Ok(Some(status)) + } else { + Ok(None) + } } }) .await??; diff --git a/crates/rpc/rpc-server/src/starknet/pending.rs b/crates/rpc/rpc-server/src/starknet/pending.rs index febb4ecf7..62ac2d9a0 100644 --- a/crates/rpc/rpc-server/src/starknet/pending.rs +++ b/crates/rpc/rpc-server/src/starknet/pending.rs @@ -340,15 +340,9 @@ impl PendingBlockProvider for OptimisticPendingBlockProvider { let transactions = self.optimistic_state.transactions.read(); if let Some((_tx, result)) = transactions.iter().find(|(tx, _)| tx.hash == hash) { if let katana_executor::ExecutionResult::Success { receipt, .. } = result { - // Get the latest block number to use as reference - let latest_num = self.storage.provider().latest_number().map_err(|e| { - crate::starknet::StarknetApiError::unexpected(format!( - "Failed to get latest block number: {e}" - )) - })?; + println!("receipt found in optimsitic state. 
hash: {hash:#x}"); - // Create block info as PreConfirmed (optimistic tx not yet in a block) - let block = ReceiptBlockInfo::PreConfirmed { block_number: latest_num + 1 }; + let block = ReceiptBlockInfo::PreConfirmed { block_number: 0 }; // Create receipt with block info let receipt_with_block = TxReceiptWithBlockInfo::new( @@ -360,9 +354,12 @@ impl PendingBlockProvider for OptimisticPendingBlockProvider { return Ok(Some(receipt_with_block)); } + } else { + println!("receipt not found in optimsitic state. hash: {hash:#x}"); } // Fall back to client + println!("falling back to forked client to find receipt hash: {hash:#x}"); self.client.get_pending_receipt(hash) } diff --git a/crates/rpc/rpc-server/src/starknet/read.rs b/crates/rpc/rpc-server/src/starknet/read.rs index 0d0f9638e..2b5c941d7 100644 --- a/crates/rpc/rpc-server/src/starknet/read.rs +++ b/crates/rpc/rpc-server/src/starknet/read.rs @@ -147,7 +147,7 @@ where async fn call(&self, request: FunctionCall, block_id: BlockIdOrTag) -> RpcResult { self.on_io_blocking_task(move |this| { // get the state and block env at the specified block for function call execution - let state = this.state(&block_id)?; + let state = this.state(dbg!(&block_id))?; let env = this.block_env_at(&block_id)?; let cfg_env = this.inner.backend.executor_factory.cfg().clone(); let max_call_gas = this.inner.config.max_call_gas.unwrap_or(1_000_000_000); diff --git a/crates/storage/provider/provider/src/providers/db/cached.rs b/crates/storage/provider/provider/src/providers/db/cached.rs index 47e6ce170..e987d4bb2 100644 --- a/crates/storage/provider/provider/src/providers/db/cached.rs +++ b/crates/storage/provider/provider/src/providers/db/cached.rs @@ -126,13 +126,9 @@ impl ContractClassProvider for CachedStateProvider { fn class(&self, hash: ClassHash) -> ProviderResult> { if let Some(class) = self.cache.get_class(hash) { return Ok(Some(class)); + } else { + Ok(self.state.class(hash)?) 
} - - let class = self.state.class(hash)?; - if let Some(ref c) = &class { - self.cache.inner.write().classes.insert(hash, c.clone()); - } - Ok(class) } fn compiled_class_hash_of_class_hash( @@ -144,9 +140,6 @@ impl ContractClassProvider for CachedStateProvider { } let compiled_hash = self.state.compiled_class_hash_of_class_hash(hash)?; - if let Some(ch) = compiled_hash { - self.cache.inner.write().compiled_class_hashes.insert(hash, ch); - } Ok(compiled_hash) } } @@ -178,13 +171,9 @@ impl StateProvider for CachedStateProvider { ) -> ProviderResult> { if let Some(class_hash) = self.cache.get_class_hash(address) { return Ok(Some(class_hash)); + } else { + Ok(dbg!(self.state.class_hash_of_contract(address)?)) } - - let class_hash = self.state.class_hash_of_contract(address)?; - if let Some(ch) = class_hash { - self.cache.inner.write().class_hashes.insert(address, ch); - } - Ok(class_hash) } } From 4abd725081e4ef89be45e08f16c0c279fb98d26b Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Thu, 6 Nov 2025 16:18:28 -0500 Subject: [PATCH 15/26] wip --- crates/core/src/backend/storage.rs | 2 +- crates/rpc/rpc-server/src/starknet/forking.rs | 6 +++++- crates/rpc/rpc-server/src/starknet/mod.rs | 12 +++++++----- crates/rpc/rpc-server/src/starknet/read.rs | 2 +- 4 files changed, 14 insertions(+), 8 deletions(-) diff --git a/crates/core/src/backend/storage.rs b/crates/core/src/backend/storage.rs index 1ba93096c..74aa09267 100644 --- a/crates/core/src/backend/storage.rs +++ b/crates/core/src/backend/storage.rs @@ -104,7 +104,7 @@ impl Blockchain { // network. 
let block_id = if let Some(id) = fork_block { id } else { BlockIdOrTag::Latest }; - info!(chain = %parsed_id, block = %block_id, "Forking chain."); + info!(chain = %parsed_id, block = ?block_id, "Forking chain."); let block = provider .get_block_with_tx_hashes(block_id) diff --git a/crates/rpc/rpc-server/src/starknet/forking.rs b/crates/rpc/rpc-server/src/starknet/forking.rs index 84e36b996..426a004f1 100644 --- a/crates/rpc/rpc-server/src/starknet/forking.rs +++ b/crates/rpc/rpc-server/src/starknet/forking.rs @@ -11,7 +11,7 @@ use katana_rpc_types::event::{EventFilter, GetEventsResponse}; use katana_rpc_types::receipt::{ReceiptBlockInfo, TxReceiptWithBlockInfo}; use katana_rpc_types::state_update::StateUpdate; use katana_rpc_types::transaction::RpcTxWithHash; -use katana_rpc_types::TxStatus; +use katana_rpc_types::{BlockHashAndNumberResponse, TxStatus}; #[derive(Debug, thiserror::Error)] pub enum Error { @@ -49,6 +49,10 @@ impl ForkedClient { } impl ForkedClient { + pub async fn block_hash_and_number(&self) -> Result { + Ok(self.client.block_hash_and_number().await?) + } + pub async fn get_block_number_by_hash(&self, hash: BlockHash) -> Result { let number = match self.client.get_block_with_tx_hashes(BlockIdOrTag::Hash(hash)).await? 
{ GetBlockWithTxHashesResponse::Block(block) => block.block_number, diff --git a/crates/rpc/rpc-server/src/starknet/mod.rs b/crates/rpc/rpc-server/src/starknet/mod.rs index e867a42fd..5e7d6f11f 100644 --- a/crates/rpc/rpc-server/src/starknet/mod.rs +++ b/crates/rpc/rpc-server/src/starknet/mod.rs @@ -352,11 +352,13 @@ where env.ok_or(StarknetApiError::BlockNotFound) } - fn block_hash_and_number(&self) -> StarknetApiResult { - let provider = &self.inner.storage_provider.provider(); - let hash = provider.latest_hash()?; - let number = provider.latest_number()?; - Ok(BlockHashAndNumberResponse::new(hash, number)) + pub async fn get_block_hash_and_number(&self) -> StarknetApiResult { + // let provider = &self.inner.storage_provider.provider(); + // let hash = provider.latest_hash()?; + // let number = provider.latest_number()?; + // Ok(BlockHashAndNumberResponse::new(hash, number)) + + Ok(self.inner.forked_client.as_ref().unwrap().block_hash_and_number().await?) } pub async fn class_at_hash( diff --git a/crates/rpc/rpc-server/src/starknet/read.rs b/crates/rpc/rpc-server/src/starknet/read.rs index 2b5c941d7..9af4757ea 100644 --- a/crates/rpc/rpc-server/src/starknet/read.rs +++ b/crates/rpc/rpc-server/src/starknet/read.rs @@ -80,7 +80,7 @@ where } async fn block_hash_and_number(&self) -> RpcResult { - self.on_io_blocking_task(move |this| Ok(this.block_hash_and_number()?)).await? + Ok(self.get_block_hash_and_number().await?) 
} async fn get_block_with_tx_hashes( From 73e677d819f00b4b4abef45d7228dbdaf8815d14 Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Thu, 6 Nov 2025 17:40:24 -0500 Subject: [PATCH 16/26] wip --- crates/optimistic/src/executor.rs | 22 ++++--------------- crates/rpc/rpc-server/src/starknet/read.rs | 2 +- .../provider/src/providers/db/cached.rs | 2 +- crates/tracing/src/lib.rs | 2 +- 4 files changed, 7 insertions(+), 21 deletions(-) diff --git a/crates/optimistic/src/executor.rs b/crates/optimistic/src/executor.rs index c9bd3745e..7af367b3a 100644 --- a/crates/optimistic/src/executor.rs +++ b/crates/optimistic/src/executor.rs @@ -15,7 +15,6 @@ use katana_primitives::block::{BlockIdOrTag, GasPrices}; use katana_primitives::env::BlockEnv; use katana_primitives::transaction::TxWithHash; use katana_primitives::version::StarknetVersion; -use katana_provider::api::env::BlockEnvProvider; use katana_provider::api::state::{StateFactoryProvider, StateProvider}; use katana_provider::providers::db::cached::{CachedStateProvider, SharedStateCache}; use katana_rpc_client::starknet::Client; @@ -28,7 +27,7 @@ use tracing::{debug, error, info, trace}; use crate::pool::TxPool; -const LOG_TARGET: &str = "optimistic_executor"; +const LOG_TARGET: &str = "optimistic"; #[derive(Debug, Clone)] pub struct OptimisticState { @@ -190,7 +189,7 @@ impl OptimisticExecutor { // Update the last seen block number last_block_number = Some(block_number); - info!(%block_number, "New block received."); + debug!(target: LOG_TARGET, %block_number, "New block received."); // Update the block environment for the next optimistic execution *block_env.write() = new_block_env; @@ -200,13 +199,6 @@ impl OptimisticExecutor { continue; } - trace!( - target: LOG_TARGET, - block_number = block_number, - tx_count = block_tx_hashes.len(), - "Polling confirmed block" - ); - // Get the current optimistic transactions let mut optimistic_txs = optimistic_state.transactions.write(); @@ -216,7 +208,7 @@ impl OptimisticExecutor { 
let removed_count = initial_count - optimistic_txs.len(); if removed_count > 0 { - info!( + debug!( target: LOG_TARGET, block_number = block_number, removed_count = removed_count, @@ -322,7 +314,7 @@ impl Future for OptimisticExecutorActor { match result { TaskResult::Ok(Ok(())) => { // Execution completed successfully, continue to next transaction - info!(target: LOG_TARGET, "Transaction execution completed successfully"); + trace!(target: LOG_TARGET, "Transaction execution completed successfully"); } TaskResult::Ok(Err(e)) => { error!( @@ -366,12 +358,6 @@ impl Future for OptimisticExecutorActor { "Received transaction from pool" ); - debug!( - target: LOG_TARGET, - tx_hash = format!("{:#x}", tx_hash), - "Spawning transaction execution on blocking pool" - ); - // Spawn the transaction execution on the blocking CPU pool let pool = this.pool.clone(); let storage = this.storage.clone(); diff --git a/crates/rpc/rpc-server/src/starknet/read.rs b/crates/rpc/rpc-server/src/starknet/read.rs index 9af4757ea..4ab54ad3b 100644 --- a/crates/rpc/rpc-server/src/starknet/read.rs +++ b/crates/rpc/rpc-server/src/starknet/read.rs @@ -147,7 +147,7 @@ where async fn call(&self, request: FunctionCall, block_id: BlockIdOrTag) -> RpcResult { self.on_io_blocking_task(move |this| { // get the state and block env at the specified block for function call execution - let state = this.state(dbg!(&block_id))?; + let state = this.state(&block_id)?; let env = this.block_env_at(&block_id)?; let cfg_env = this.inner.backend.executor_factory.cfg().clone(); let max_call_gas = this.inner.config.max_call_gas.unwrap_or(1_000_000_000); diff --git a/crates/storage/provider/provider/src/providers/db/cached.rs b/crates/storage/provider/provider/src/providers/db/cached.rs index e987d4bb2..a0dc60910 100644 --- a/crates/storage/provider/provider/src/providers/db/cached.rs +++ b/crates/storage/provider/provider/src/providers/db/cached.rs @@ -172,7 +172,7 @@ impl StateProvider for CachedStateProvider { if let 
Some(class_hash) = self.cache.get_class_hash(address) { return Ok(Some(class_hash)); } else { - Ok(dbg!(self.state.class_hash_of_contract(address)?)) + Ok(self.state.class_hash_of_contract(address)?) } } } diff --git a/crates/tracing/src/lib.rs b/crates/tracing/src/lib.rs index 3ce83f193..af5e52318 100644 --- a/crates/tracing/src/lib.rs +++ b/crates/tracing/src/lib.rs @@ -47,7 +47,7 @@ pub async fn init(format: LogFormat, telemetry_config: Option) -> const DEFAULT_LOG_FILTER: &str = "katana_db::mdbx=trace,cairo_native::compiler=off,pipeline=debug,stage=debug,tasks=debug,\ executor=trace,forking::backend=trace,blockifier=off,jsonrpsee_server=off,hyper=off,\ - messaging=debug,node=error,explorer=info,rpc=trace,pool=trace,info"; + messaging=debug,node=error,explorer=info,rpc=trace,pool=trace,optimistic=debug,info"; let default_filter = EnvFilter::try_new(DEFAULT_LOG_FILTER); let filter = EnvFilter::try_from_default_env().or(default_filter)?; From 64efdf942f82947fd9b327c926f527aac5bb49ea Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Thu, 6 Nov 2025 19:41:23 -0500 Subject: [PATCH 17/26] wip --- crates/explorer/ui | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/explorer/ui b/crates/explorer/ui index 5d03c18ce..2be6bfc5e 160000 --- a/crates/explorer/ui +++ b/crates/explorer/ui @@ -1 +1 @@ -Subproject commit 5d03c18ce96a4e0aa0c2773a948285dbdd6c1acb +Subproject commit 2be6bfc5e6530756b20381257b1043ac599c396f From 158c0fdf9ec763f26907cb9da78c6037ea666bbe Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Fri, 7 Nov 2025 16:03:23 -0500 Subject: [PATCH 18/26] wip --- crates/rpc/rpc-server/src/starknet/forking.rs | 6 +- crates/rpc/rpc-server/src/starknet/mod.rs | 93 +++++++++++-------- 2 files changed, 60 insertions(+), 39 deletions(-) diff --git a/crates/rpc/rpc-server/src/starknet/forking.rs b/crates/rpc/rpc-server/src/starknet/forking.rs index 426a004f1..54c29c6a2 100644 --- a/crates/rpc/rpc-server/src/starknet/forking.rs +++ 
b/crates/rpc/rpc-server/src/starknet/forking.rs @@ -11,7 +11,7 @@ use katana_rpc_types::event::{EventFilter, GetEventsResponse}; use katana_rpc_types::receipt::{ReceiptBlockInfo, TxReceiptWithBlockInfo}; use katana_rpc_types::state_update::StateUpdate; use katana_rpc_types::transaction::RpcTxWithHash; -use katana_rpc_types::{BlockHashAndNumberResponse, TxStatus}; +use katana_rpc_types::{BlockHashAndNumberResponse, BlockNumberResponse, TxStatus}; #[derive(Debug, thiserror::Error)] pub enum Error { @@ -49,6 +49,10 @@ impl ForkedClient { } impl ForkedClient { + pub async fn block_number(&self) -> Result { + Ok(self.client.block_number().await?.block_number) + } + pub async fn block_hash_and_number(&self) -> Result { Ok(self.client.block_hash_and_number().await?) } diff --git a/crates/rpc/rpc-server/src/starknet/mod.rs b/crates/rpc/rpc-server/src/starknet/mod.rs index 5e7d6f11f..583825dae 100644 --- a/crates/rpc/rpc-server/src/starknet/mod.rs +++ b/crates/rpc/rpc-server/src/starknet/mod.rs @@ -933,6 +933,7 @@ where (0, 0) }; + dbg!(transactions.len()); for (tx_idx, (tx, result)) in transactions.iter().enumerate() { // Skip transactions before the continuation token if tx_idx < start_txn_idx { @@ -943,7 +944,6 @@ where if events_buffer.len() >= chunk_size as usize { break; } - // Only process successful executions if let katana_executor::ExecutionResult::Success { receipt, .. 
} = result { for (event_idx, event) in receipt.events().iter().enumerate() { @@ -1006,7 +1006,21 @@ where } } } + + // if we already exhaust all the optimistic transactions then we return a continuation token pointing to the next optimistic transaction + return Ok(Some(katana_primitives::event::ContinuationToken { + block_n: 0, // Not used for optimistic transactions + txn_n: transactions.len() as u64, + event_n: transactions + .last() + .and_then(|(.., result)| { + result.receipt().map(|receipt| receipt.events().len() as u64) + }) + .unwrap_or(0), + transaction_hash: transactions.last().map(|(tx, ..)| tx.hash), + })); } + Ok(None) } @@ -1020,16 +1034,13 @@ where continuation_token: Option, chunk_size: u64, ) -> StarknetApiResult { - let provider = self.inner.backend.blockchain.provider(); - let from = self.resolve_event_block_id_if_forked(from_block)?; let to = self.resolve_event_block_id_if_forked(to_block)?; // reserved buffer to fill up with events to avoid reallocations let mut events = Vec::with_capacity(chunk_size as usize); - // let filter = utils::events::Filter { address, keys: keys.clone() }; - match (from, to) { + match dbg!((from, to)) { (EventBlockId::Num(from), EventBlockId::Num(to)) => { // Check if continuation token is a native (non-forked) token let is_native_token = continuation_token @@ -1063,34 +1074,24 @@ where } } - // Fetch events from optimistic state transactions - // Extract native token if present - let native_token = continuation_token.as_ref().and_then(|t| match t { - MaybeForkedContinuationToken::Token(token) => Some(token), - _ => None, + return Ok(GetEventsResponse { + events, + continuation_token: continuation_token.map(|t| t.to_string()), }); - let opt_token = self.fetch_optimistic_events( - address, - &keys, - &mut events, - chunk_size, - native_token, - )?; - - let continuation_token = - opt_token.map(|t| MaybeForkedContinuationToken::Token(t).to_string()); - Ok(GetEventsResponse { events, continuation_token }) } 
(EventBlockId::Num(from), EventBlockId::Pending) => { // Check if continuation token is a native (non-forked) token - let is_native_token = continuation_token + let fetch_from_fork = continuation_token .as_ref() - .map_or(false, |t| matches!(t, MaybeForkedContinuationToken::Token(_))); + // if not token is supplied then we need to fetch from forked client, or + // if token is a forked token + .map_or(true, |t| matches!(t, MaybeForkedContinuationToken::Forked(_))); // Only fetch from forked client if we don't have a native continuation token - if !is_native_token { + if dbg!(fetch_from_fork) { let client = &self.inner.forked_client.as_ref().unwrap(); + // Extract forked token if present let forked_token = continuation_token.as_ref().and_then(|t| match t { MaybeForkedContinuationToken::Forked(token) => Some(token.clone()), @@ -1109,12 +1110,15 @@ where events.extend(forked_result.events); // Return early if there's a continuation token from forked network + if let Some(token) = forked_result.continuation_token { - let token = MaybeForkedContinuationToken::Forked(token); - return Ok(GetEventsResponse { - events, - continuation_token: Some(token.to_string()), - }); + if dbg!(events.len() as u64 >= chunk_size) { + let token = MaybeForkedContinuationToken::Forked(token); + return Ok(GetEventsResponse { + events, + continuation_token: Some(token.to_string()), + }); + } } } @@ -1125,6 +1129,8 @@ where MaybeForkedContinuationToken::Token(token) => Some(token), _ => None, }); + + println!("fetching optimistic events"); let opt_token = self.fetch_optimistic_events( address, &keys, @@ -1133,12 +1139,16 @@ where native_token, )?; + dbg!(&opt_token); + let continuation_token = opt_token.map(|t| MaybeForkedContinuationToken::Token(t).to_string()); Ok(GetEventsResponse { events, continuation_token }) } (EventBlockId::Pending, EventBlockId::Pending) => { + println!("fetching optimistic events - pending - pending"); + // Fetch events from optimistic state transactions (which 
represent pending // transactions) // Extract native token if present @@ -1172,25 +1182,32 @@ where &self, id: BlockIdOrTag, ) -> StarknetApiResult { - let provider = &self.inner.storage_provider.provider(); - let id = match id { BlockIdOrTag::L1Accepted => EventBlockId::Pending, BlockIdOrTag::PreConfirmed => EventBlockId::Pending, BlockIdOrTag::Number(num) => EventBlockId::Num(num), BlockIdOrTag::Latest => { - let num = provider.convert_block_id(id)?; - EventBlockId::Num(num.ok_or(StarknetApiError::BlockNotFound)?) + // let num = provider.convert_block_id(id)?; + // EventBlockId::Num(num.ok_or(StarknetApiError::BlockNotFound)?) + if let Some(client) = self.forked_client() { + let num = futures::executor::block_on(client.block_number())?; + EventBlockId::Num(num) + } + // Otherwise the block hash is not found. + else { + return Err(StarknetApiError::BlockNotFound); + } } BlockIdOrTag::Hash(hash) => { - // Check first if the block hash belongs to a local block. - if let Some(num) = provider.convert_block_id(id)? { - EventBlockId::Num(num) - } + // // Check first if the block hash belongs to a local block. + // if let Some(num) = provider.convert_block_id(id)? { + // EventBlockId::Num(num) + // } // If not, check if the block hash belongs to a forked block. 
- else if let Some(client) = self.forked_client() { + // else + if let Some(client) = self.forked_client() { let num = futures::executor::block_on(client.get_block_number_by_hash(hash))?; EventBlockId::Num(num) } From f726066ace73ead77090ef47faeb6892e05375fc Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Fri, 7 Nov 2025 17:10:36 -0500 Subject: [PATCH 19/26] wip --- crates/gateway/gateway-client/src/lib.rs | 6 +++--- crates/gateway/gateway-types/Cargo.toml | 1 - crates/node/src/optimistic/mod.rs | 10 ++++------ crates/optimistic/src/pool.rs | 21 ++++++++------------- crates/rpc/rpc-types/src/broadcasted.rs | 2 +- 5 files changed, 16 insertions(+), 24 deletions(-) diff --git a/crates/gateway/gateway-client/src/lib.rs b/crates/gateway/gateway-client/src/lib.rs index c2808b28a..d5eeeb6de 100644 --- a/crates/gateway/gateway-client/src/lib.rs +++ b/crates/gateway/gateway-client/src/lib.rs @@ -132,21 +132,21 @@ impl Client { pub async fn add_invoke_transaction( &self, - transaction: katana_rpc_types::broadcasted::BroadcastedInvokeTx, + transaction: BroadcastedInvokeTx, ) -> Result { self.gateway("add_transaction").json(&transaction).send().await } pub async fn add_declare_transaction( &self, - transaction: katana_rpc_types::broadcasted::BroadcastedDeclareTx, + transaction: BroadcastedDeclareTx, ) -> Result { self.gateway("add_transaction").json(&transaction).send().await } pub async fn add_deploy_account_transaction( &self, - transaction: katana_rpc_types::broadcasted::BroadcastedDeployAccountTx, + transaction: BroadcastedDeployAccountTx, ) -> Result { self.gateway("add_transaction").json(&transaction).send().await } diff --git a/crates/gateway/gateway-types/Cargo.toml b/crates/gateway/gateway-types/Cargo.toml index 35e95acd3..bb56b9f81 100644 --- a/crates/gateway/gateway-types/Cargo.toml +++ b/crates/gateway/gateway-types/Cargo.toml @@ -16,7 +16,6 @@ cairo-lang-starknet-classes.workspace = true flate2.workspace = true serde_json.workspace = true serde.workspace = true 
-serde_json.workspace = true starknet.workspace = true thiserror.workspace = true diff --git a/crates/node/src/optimistic/mod.rs b/crates/node/src/optimistic/mod.rs index 6eb9b63a8..50a07d245 100644 --- a/crates/node/src/optimistic/mod.rs +++ b/crates/node/src/optimistic/mod.rs @@ -21,14 +21,12 @@ use katana_optimistic::pool::{PoolValidator, TxPool}; use katana_pool::ordering::FiFo; use katana_primitives::block::BlockIdOrTag; use katana_primitives::env::{CfgEnv, FeeTokenAddressses}; -use katana_provider::api::block::BlockNumberProvider; -use katana_provider::api::env::BlockEnvProvider; use katana_provider::providers::fork::ForkedProvider; -use katana_rpc::cors::Cors; -use katana_rpc::starknet::forking::ForkedClient; -use katana_rpc::starknet::{OptimisticPendingBlockProvider, StarknetApi, StarknetApiConfig}; -use katana_rpc::{RpcServer, RpcServerHandle}; use katana_rpc_api::starknet::{StarknetApiServer, StarknetTraceApiServer, StarknetWriteApiServer}; +use katana_rpc_server::cors::Cors; +use katana_rpc_server::starknet::forking::ForkedClient; +use katana_rpc_server::starknet::{OptimisticPendingBlockProvider, StarknetApi, StarknetApiConfig}; +use katana_rpc_server::{RpcServer, RpcServerHandle}; use katana_tasks::{JoinHandle, TaskManager}; use tracing::info; diff --git a/crates/optimistic/src/pool.rs b/crates/optimistic/src/pool.rs index 7862e585d..fc705583e 100644 --- a/crates/optimistic/src/pool.rs +++ b/crates/optimistic/src/pool.rs @@ -38,22 +38,17 @@ impl Validator for PoolValidator { // Forward the transaction to the remote node let result = match &tx.tx { BroadcastedTx::Invoke(invoke_tx) => { - self.gateway_client.add_invoke_transaction(invoke_tx.clone()).await.map(|_| ()) - // self.client.add_invoke_transaction(invoke_tx.clone()).await.map(|_| ()) + let gateway_tx = invoke_tx.clone().into(); + self.gateway_client.add_invoke_transaction(gateway_tx).await.map(|_| ()) } BroadcastedTx::Declare(declare_tx) => { - 
self.gateway_client.add_declare_transaction(declare_tx.clone()).await.map(|_| ()) - // self.client.add_declare_transaction(declare_tx.clone()).await.map(|_| ()) + let gateway_tx = declare_tx.clone().into(); + self.gateway_client.add_declare_transaction(gateway_tx).await.map(|_| ()) + } + BroadcastedTx::DeployAccount(deploy_account_tx) => { + let gateway_tx = deploy_account_tx.clone().into(); + self.gateway_client.add_deploy_account_transaction(gateway_tx).await.map(|_| ()) } - BroadcastedTx::DeployAccount(deploy_account_tx) => self - .gateway_client - .add_deploy_account_transaction(deploy_account_tx.clone()) - .await - .map(|_| ()), /* self - * .client - * .add_deploy_account_transaction(deploy_account_tx.clone()) - * .await - * .map(|_| ()), */ }; match result { diff --git a/crates/rpc/rpc-types/src/broadcasted.rs b/crates/rpc/rpc-types/src/broadcasted.rs index 3d8fe921b..762cf635f 100644 --- a/crates/rpc/rpc-types/src/broadcasted.rs +++ b/crates/rpc/rpc-types/src/broadcasted.rs @@ -716,7 +716,7 @@ impl From for crate::transaction::RpcTxWithHash { fee_data_availability_mode: tx.fee_data_availability_mode, })), BroadcastedTx::Declare(tx) => { - let class_hash = tx.contract_class.hash().expect("failed to compute class hash"); + let class_hash = tx.contract_class.hash(); RpcTx::Declare(RpcDeclareTx::V3(RpcDeclareTxV3 { sender_address: tx.sender_address, compiled_class_hash: tx.compiled_class_hash, From 8f016b855e8b1ebbcd343efbaeb8bd6ffd0d9011 Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Sat, 8 Nov 2025 14:56:00 -0500 Subject: [PATCH 20/26] return optimistic block --- crates/optimistic/src/executor.rs | 2 +- crates/rpc/rpc-server/src/starknet/pending.rs | 53 ++++++++++++++++--- 2 files changed, 48 insertions(+), 7 deletions(-) diff --git a/crates/optimistic/src/executor.rs b/crates/optimistic/src/executor.rs index 7af367b3a..d044870b9 100644 --- a/crates/optimistic/src/executor.rs +++ b/crates/optimistic/src/executor.rs @@ -128,7 +128,7 @@ impl OptimisticExecutor 
{ let mut last_block_number = None; loop { - sleep(Duration::from_secs(2)).await; + sleep(Duration::from_secs(5)).await; match client.get_block_with_tx_hashes(BlockIdOrTag::Latest).await { Ok(block_response) => { diff --git a/crates/rpc/rpc-server/src/starknet/pending.rs b/crates/rpc/rpc-server/src/starknet/pending.rs index 62ac2d9a0..70c478aec 100644 --- a/crates/rpc/rpc-server/src/starknet/pending.rs +++ b/crates/rpc/rpc-server/src/starknet/pending.rs @@ -2,7 +2,7 @@ use std::fmt::Debug; use katana_core::service::block_producer::{BlockProducer, BlockProducerMode}; use katana_executor::ExecutorFactory; -use katana_primitives::block::{BlockIdOrTag, PartialHeader}; +use katana_primitives::block::{BlockIdOrTag, FinalityStatus, PartialHeader}; use katana_primitives::da::L1DataAvailabilityMode; use katana_primitives::execution::TypedTransactionExecutionInfo; use katana_primitives::transaction::{TxHash, TxNumber}; @@ -12,9 +12,9 @@ use katana_provider::api::state::{StateFactoryProvider, StateProvider}; use katana_provider::providers::db::cached::CachedStateProvider; use katana_rpc_client::starknet::Client; use katana_rpc_types::{ - FinalityStatus, PreConfirmedBlockWithReceipts, PreConfirmedBlockWithTxHashes, - PreConfirmedBlockWithTxs, PreConfirmedStateUpdate, ReceiptBlockInfo, RpcTxWithHash, - TxReceiptWithBlockInfo, TxTrace, + PreConfirmedBlockWithReceipts, PreConfirmedBlockWithTxHashes, PreConfirmedBlockWithTxs, + PreConfirmedStateUpdate, ReceiptBlockInfo, RpcTx, RpcTxReceiptWithHash, RpcTxWithHash, + RpcTxWithReceipt, TxReceiptWithBlockInfo, TxTrace, }; use crate::starknet::StarknetApiResult; @@ -306,13 +306,54 @@ impl PendingBlockProvider for OptimisticPendingBlockProvider { } fn get_pending_block_with_txs(&self) -> StarknetApiResult> { - self.client.get_pending_block_with_txs() + if let Some(block) = self.client.get_pending_block_with_txs()? 
{ + let optimistic_transactions = self + .optimistic_state + .transactions + .read() + .iter() + .map(|(tx, ..)| tx.clone()) + .map(RpcTxWithHash::from) + .collect::>(); + + Ok(Some(PreConfirmedBlockWithTxs { transactions: optimistic_transactions, ..block })) + } else { + Ok(None) + } } fn get_pending_block_with_receipts( &self, ) -> StarknetApiResult> { - self.client.get_pending_block_with_receipts() + if let Some(block) = self.client.get_pending_block_with_receipts()? { + let optimistic_transactions = self + .optimistic_state + .transactions + .read() + .iter() + .filter_map(|(tx, result)| { + if let Some(receipt) = result.receipt() { + let transaction = RpcTx::from(tx.transaction.clone()); + let receipt = RpcTxReceiptWithHash::new( + tx.hash, + receipt.clone(), + FinalityStatus::PreConfirmed, + ); + + Some(RpcTxWithReceipt { transaction, receipt }) + } else { + None + } + }) + .collect::>(); + + Ok(Some(PreConfirmedBlockWithReceipts { + transactions: optimistic_transactions, + ..block + })) + } else { + Ok(None) + } } fn get_pending_block_with_tx_hashes( From 664a5d9e456b5c465fc59f4d1b02234a7f5a93f7 Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Sun, 9 Nov 2025 15:42:26 -0500 Subject: [PATCH 21/26] wip --- crates/optimistic/src/executor.rs | 6 ++-- crates/primitives/src/block.rs | 2 +- crates/rpc/rpc-server/src/logger.rs | 1 + crates/rpc/rpc-server/src/starknet/mod.rs | 28 +++++++++---------- crates/rpc/rpc-server/src/starknet/read.rs | 27 +++++++++++++++++- crates/rpc/rpc-types/src/event.rs | 6 ++-- .../provider/src/providers/fork/state.rs | 15 +++++----- 7 files changed, 56 insertions(+), 29 deletions(-) diff --git a/crates/optimistic/src/executor.rs b/crates/optimistic/src/executor.rs index d044870b9..4c07608e3 100644 --- a/crates/optimistic/src/executor.rs +++ b/crates/optimistic/src/executor.rs @@ -111,9 +111,9 @@ impl OptimisticExecutor { let client = self.client; let optimistic_state = self.optimistic_state; let block_env = self.block_env; - 
self.task_spawner.build_task().name("Block Polling").spawn(async move { - Self::poll_confirmed_blocks(client, optimistic_state, block_env).await; - }); + // self.task_spawner.build_task().name("Block Polling").spawn(async move { + // Self::poll_confirmed_blocks(client, optimistic_state, block_env).await; + // }); executor_handle } diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 64e4f929b..b4f7018c2 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -17,7 +17,7 @@ pub type BlockNumber = u64; /// Block hash type. pub type BlockHash = Felt; -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum BlockIdOrTag { Hash(BlockHash), Number(BlockNumber), diff --git a/crates/rpc/rpc-server/src/logger.rs b/crates/rpc/rpc-server/src/logger.rs index 26acd5ef4..af217293c 100644 --- a/crates/rpc/rpc-server/src/logger.rs +++ b/crates/rpc/rpc-server/src/logger.rs @@ -48,6 +48,7 @@ where #[inline] #[tracing::instrument(target = "rpc", level = "trace", name = "rpc_batch", skip_all, fields(batch_size = batch.len()) )] fn batch<'a>(&self, batch: Batch<'a>) -> impl Future + Send + 'a { + info!(batch_size = batch.len(), "Batch rpc called."); self.service.batch(batch).in_current_span() } diff --git a/crates/rpc/rpc-server/src/starknet/mod.rs b/crates/rpc/rpc-server/src/starknet/mod.rs index 583825dae..536ec779b 100644 --- a/crates/rpc/rpc-server/src/starknet/mod.rs +++ b/crates/rpc/rpc-server/src/starknet/mod.rs @@ -740,18 +740,21 @@ where if let Some(block) = this.inner.pending_block_provider.get_pending_block_with_receipts()? { - return Ok(Some(GetBlockWithReceiptsResponse::PreConfirmed(block))); + Ok(Some(GetBlockWithReceiptsResponse::PreConfirmed(block))) + } else { + Ok(None) } - } - - if let Some(num) = provider.convert_block_id(block_id)? { - let block = katana_rpc_types_builder::BlockBuilder::new(num.into(), provider) - .build_with_receipts()? 
- .map(GetBlockWithReceiptsResponse::Block); - - StarknetApiResult::Ok(block) } else { + // if let Some(num) = provider.convert_block_id(block_id)? { + // let block = + // katana_rpc_types_builder::BlockBuilder::new(num.into(), provider) + // .build_with_receipts()? + // .map(GetBlockWithReceiptsResponse::Block); + + // StarknetApiResult::Ok(block) + // } else { StarknetApiResult::Ok(None) + // } } }) .await??; @@ -1040,7 +1043,7 @@ where // reserved buffer to fill up with events to avoid reallocations let mut events = Vec::with_capacity(chunk_size as usize); - match dbg!((from, to)) { + match (from, to) { (EventBlockId::Num(from), EventBlockId::Num(to)) => { // Check if continuation token is a native (non-forked) token let is_native_token = continuation_token @@ -1074,10 +1077,7 @@ where } } - return Ok(GetEventsResponse { - events, - continuation_token: continuation_token.map(|t| t.to_string()), - }); + return Ok(GetEventsResponse { events, continuation_token: None }); } (EventBlockId::Num(from), EventBlockId::Pending) => { diff --git a/crates/rpc/rpc-server/src/starknet/read.rs b/crates/rpc/rpc-server/src/starknet/read.rs index 4ab54ad3b..66de4d0e3 100644 --- a/crates/rpc/rpc-server/src/starknet/read.rs +++ b/crates/rpc/rpc-server/src/starknet/read.rs @@ -141,7 +141,32 @@ where } async fn get_events(&self, filter: EventFilterWithPage) -> RpcResult { - Ok(self.events(filter).await?) 
+ use std::collections::HashMap; + use std::sync::LazyLock; + use std::sync::Mutex; + + // Function-local static cache for events + static EVENTS_CACHE: LazyLock>> = + LazyLock::new(|| Mutex::new(HashMap::new())); + + // Check cache first + { + let cache = EVENTS_CACHE.lock().unwrap(); + if let Some(cached_result) = cache.get(&filter) { + return Ok(cached_result.clone()); + } + } + + // If not in cache, fetch the events + let result = self.events(filter.clone()).await?; + + // Store in cache + { + let mut cache = EVENTS_CACHE.lock().unwrap(); + cache.insert(filter, result.clone()); + } + + Ok(result) } async fn call(&self, request: FunctionCall, block_id: BlockIdOrTag) -> RpcResult { diff --git a/crates/rpc/rpc-types/src/event.rs b/crates/rpc/rpc-types/src/event.rs index d5fadd1a3..f53e330d0 100644 --- a/crates/rpc/rpc-types/src/event.rs +++ b/crates/rpc/rpc-types/src/event.rs @@ -4,7 +4,7 @@ use katana_primitives::{ContractAddress, Felt}; use serde::{Deserialize, Serialize}; /// Events request. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct EventFilterWithPage { #[serde(flatten)] pub event_filter: EventFilter, @@ -15,7 +15,7 @@ pub struct EventFilterWithPage { /// Event filter. /// /// An event filter/query. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct EventFilter { /// From block #[serde(skip_serializing_if = "Option::is_none")] @@ -32,7 +32,7 @@ pub struct EventFilter { } /// Result page request. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct ResultPageRequest { /// The token returned from the previous query. If no token is provided the first page is /// returned. 
diff --git a/crates/storage/provider/provider/src/providers/fork/state.rs b/crates/storage/provider/provider/src/providers/fork/state.rs index 83e8ce6e8..f66064012 100644 --- a/crates/storage/provider/provider/src/providers/fork/state.rs +++ b/crates/storage/provider/provider/src/providers/fork/state.rs @@ -41,13 +41,14 @@ where BlockHashOrNumber::Hash(hash) => self.provider.block_number_by_hash(hash)?, BlockHashOrNumber::Num(num) => { - let latest_num = self.provider.latest_number()?; - - match num.cmp(&latest_num) { - Ordering::Less => Some(num), - Ordering::Greater => return Ok(None), - Ordering::Equal => return self.latest().map(Some), - } + // let latest_num = self.provider.latest_number()?; + + // match num.cmp(&latest_num) { + // Ordering::Less => Some(num), + // Ordering::Greater => return Ok(None), + // Ordering::Equal => return self.latest().map(Some), + // } + Some(num) } }; From 5aaa8696915206ee273e57be1d3325a1f71a5d1e Mon Sep 17 00:00:00 2001 From: glihm Date: Mon, 10 Nov 2025 17:59:24 -0600 Subject: [PATCH 22/26] chore(ci): add release workflow for optimistic version --- .github/workflows/custom-release.yml | 275 ++++++++++++++++++++++++++- 1 file changed, 265 insertions(+), 10 deletions(-) diff --git a/.github/workflows/custom-release.yml b/.github/workflows/custom-release.yml index 90893eaf5..3f8928eb0 100644 --- a/.github/workflows/custom-release.yml +++ b/.github/workflows/custom-release.yml @@ -1,10 +1,265 @@ -# Due to workflow limitation of github, this file is merely here -# to support triggering the workflow on a dev branch, without having -# to merge it into main. -# -# It is useful to make releases which publish a container to the github container registry. -# CAUTION: it is recommended that you always enforce those custom releases to contain a preview tag, with `-custom.X` suffix where -# custom is a descriptive name for the release and X is the version number. 
-# -# gh workflow run custom-release.yml --ref feat/your-branch -f preview=v1.7.0-custom.1 -# \ No newline at end of file +name: manual release for Optimistic Katana + +on: + workflow_dispatch: + inputs: + preview: + description: "Preview tag for Optimistic Katana. Must be in format vX.Y.Z-optimistic.X" + type: string + required: true + +env: + RUST_VERSION: 1.89.0 + CARGO_TERM_COLOR: always + REGISTRY_IMAGE: ghcr.io/${{ github.repository }} + +jobs: + prepare: + runs-on: ubuntu-latest + outputs: + tag_name: ${{ steps.release_info.outputs.tag_name }} + steps: + - uses: actions/checkout@v4 + - name: Get version + id: release_info + run: | + # Validate that the tag follows the vX.Y.Z-optimistic.X format + if [[ ! "${{ inputs.preview }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+-optimistic\.[0-9]+$ ]]; then + echo "Invalid tag format. Must be in format vX.Y.Z-optimistic.X (e.g., v1.0.0-optimistic.1)" + exit 1 + fi + echo "tag_name=${{ inputs.preview }}" >> $GITHUB_OUTPUT + + build-contracts: + runs-on: ubuntu-latest + needs: prepare + container: + image: ghcr.io/dojoengine/katana-dev:latest + steps: + - uses: actions/checkout@v4 + + - name: Build contracts + run: make contracts + + - name: Upload contract artifacts + uses: actions/upload-artifact@v4 + with: + name: contract-artifacts + path: ./crates/contracts/build + retention-days: 1 + + release: + name: ${{ matrix.job.target }} (${{ matrix.job.os }}${{ matrix.job.native_build == true && ', native' || '' }}) + needs: [prepare, build-contracts] + runs-on: ${{ matrix.job.os }} + env: + PLATFORM_NAME: ${{ matrix.job.platform }} + TARGET: ${{ matrix.job.target }} + ARCH: ${{ matrix.job.arch }} + NATIVE_BUILD: ${{ matrix.job.native_build }} + strategy: + matrix: + job: + # The OS is used for the runner + # The platform is a generic platform name + # The target is used by Cargo + # The arch is either 386, arm64 or amd64 + # The svm target platform to use for the binary 
https://github.com/roynalnaruto/svm-rs/blob/84cbe0ac705becabdc13168bae28a45ad2299749/svm-builds/build.rs#L4-L24 + # Added native_build dimension to control build type + - os: ubuntu-latest-8-cores + platform: linux + target: x86_64-unknown-linux-gnu + arch: amd64 + native_build: false + - os: ubuntu-latest-8-cores-arm64 + platform: linux + target: aarch64-unknown-linux-gnu + arch: arm64 + svm_target_platform: linux-aarch64 + native_build: false + - os: macos-latest-xlarge + platform: darwin + target: aarch64-apple-darwin + arch: arm64 + native_build: false + + steps: + - uses: actions/checkout@v4 + + - name: Download contract artifacts + uses: actions/download-artifact@v4 + with: + name: contract-artifacts + path: ./crates/contracts/build + + - uses: actions-rust-lang/setup-rust-toolchain@v1 + name: Rust Toolchain Setup + with: + toolchain: ${{ env.RUST_VERSION }} + target: ${{ matrix.job.target }} + cache-on-failure: true + cache-key: ${{ matrix.job.target }} + + - uses: oven-sh/setup-bun@v2 + with: + bun-version: latest + + - name: Install LLVM ( Linux ) + if: ${{ matrix.job.platform == 'linux' && matrix.job.native_build == true }} + run: | + wget https://apt.llvm.org/llvm.sh + chmod +x llvm.sh + sudo ./llvm.sh 19 + sudo apt-get update -y + sudo apt-get install -y g++ llvm-19 llvm-19-dev llvm-19-runtime clang-19 clang-tools-19 lld-19 libpolly-19-dev libmlir-19-dev mlir-19-tools + echo "MLIR_SYS_190_PREFIX=/usr/lib/llvm-19" >> $GITHUB_ENV + echo "LLVM_SYS_191_PREFIX=/usr/lib/llvm-19" >> $GITHUB_ENV + echo "TABLEGEN_190_PREFIX=/usr/lib/llvm-19" >> $GITHUB_ENV + + - name: Install LLVM ( macOS ) + if: ${{ matrix.job.platform == 'darwin' && matrix.job.native_build == true }} + run: | + brew install llvm@19 --quiet + brew install zstd + echo "MLIR_SYS_190_PREFIX=$(brew --prefix llvm@19)" >> $GITHUB_ENV + echo "LLVM_SYS_191_PREFIX=$(brew --prefix llvm@19)" >> $GITHUB_ENV + echo "TABLEGEN_190_PREFIX=$(brew --prefix llvm@19)" >> $GITHUB_ENV + echo "LIBRARY_PATH=$(brew 
--prefix zstd)/lib:$LIBRARY_PATH" >> $GITHUB_ENV + echo "CPATH=$(brew --prefix zstd)/include:$CPATH" >> $GITHUB_ENV + + # - name: Install LLVM ( Windows ) + # if: ${{ matrix.job.platform == 'win32' && matrix.job.native_build == true }} + # run: | + # $llvmUrl = "https://github.com/llvm/llvm-project/releases/download/llvmorg-19.1.7/clang+llvm-19.1.7-x86_64-pc-windows-msvc.tar.xz" + # $llvmDir = "C:\Program Files (x86)\LLVM" + # $llvmDirBin = "C:\Program Files (x86)\LLVM\bin" + # + # Write-Host "Downloading LLVM from $llvmUrl" + # Invoke-WebRequest -Uri $llvmUrl -OutFile llvm.tar.xz + # + # Write-Host "Creating LLVM directory" + # New-Item -ItemType Directory -Path $llvmDir -Force + # + # Write-Host "Extracting LLVM" + # tar -xf llvm.tar.xz -C $llvmDir --strip-components=1 + # + # Write-Host "LLVM installed successfully to $llvmDir" + # + # Write-Host "Listing files in LLVM directory" + # Get-ChildItem -Path "$llvmDirBin" | ForEach-Object { Write-Host $_.Name } + + # # On Windows, use powershell syntax to write the env var to the file. 
+ # # https://github.com/actions/runner/issues/1636#issuecomment-1024531638 + # - name: Set cairo-native LLVM environment variables ( Windows ) + # if: ${{ matrix.job.platform == 'win32' && matrix.job.native_build == true }} + # run: | + # echo "MLIR_SYS_190_PREFIX=C:\Program Files (x86)\LLVM" | Out-File -FilePath $env:GITHUB_ENV -Append + # echo "LLVM_SYS_191_PREFIX=C:\Program Files (x86)\LLVM" | Out-File -FilePath $env:GITHUB_ENV -Append + # echo "TABLEGEN_190_PREFIX=C:\Program Files (x86)\LLVM" | Out-File -FilePath $env:GITHUB_ENV -Append + + - name: Apple M1 setup + if: ${{ matrix.job.target == 'aarch64-apple-darwin' }} + run: | + echo "SDKROOT=$(xcrun -sdk macosx --show-sdk-path)" >> $GITHUB_ENV + echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx --show-sdk-platform-version)" >> $GITHUB_ENV + + - name: Linux ARM setup + if: ${{ matrix.job.target == 'aarch64-unknown-linux-gnu' }} + run: | + sudo apt-get update -y + sudo apt-get install -y gcc-aarch64-linux-gnu libssl-dev + # We build jemalloc with 64KB pagesize so that it works for all linux/arm64 pagesize variants + # See: https://github.com/jemalloc/jemalloc/issues/467 + echo "JEMALLOC_SYS_WITH_LG_PAGE=16" >> $GITHUB_ENV + + - name: Build binary + if: ${{ matrix.job.native_build == false }} + shell: bash + run: | + cargo build --bin katana --profile performance --target ${{ matrix.job.target }} + + - name: Build binary ( w/ cairo-native ) + if: ${{ matrix.job.native_build == true }} + shell: bash + run: | + cargo build --bin katana --profile performance --features native --target ${{ matrix.job.target }} + + - name: Archive binaries + id: artifacts + env: + VERSION_NAME: ${{ needs.prepare.outputs.tag_name }} + run: | + if [ "$NATIVE_BUILD" == "true" ]; then + SUFFIX="_native" + else + SUFFIX="" + fi + + if [ "$PLATFORM_NAME" == "linux" ]; then + tar -czvf "katana_${VERSION_NAME}_${PLATFORM_NAME}_${ARCH}${SUFFIX}.tar.gz" -C ./target/${TARGET}/performance katana + echo 
"file_name=katana_${VERSION_NAME}_${PLATFORM_NAME}_${ARCH}${SUFFIX}.tar.gz" >> $GITHUB_OUTPUT + elif [ "$PLATFORM_NAME" == "darwin" ]; then + # We need to use gtar here otherwise the archive is corrupt. + # See: https://github.com/actions/virtual-environments/issues/2619 + gtar -czvf "katana_${VERSION_NAME}_${PLATFORM_NAME}_${ARCH}${SUFFIX}.tar.gz" -C ./target/${TARGET}/performance katana + echo "file_name=katana_${VERSION_NAME}_${PLATFORM_NAME}_${ARCH}${SUFFIX}.tar.gz" >> $GITHUB_OUTPUT + fi + shell: bash + + # We move binaries so they match $TARGETPLATFORM in the Docker build + # Only move native binaries for Docker (we want the native version for Docker) + - name: Move binaries for Docker + if: ${{ env.PLATFORM_NAME == 'linux' }} + shell: bash + run: | + mkdir -p $PLATFORM_NAME/$ARCH + mv target/${TARGET}/performance/katana $PLATFORM_NAME/$ARCH + + - name: Upload Docker binaries + if: ${{ env.PLATFORM_NAME == 'linux' }} + uses: actions/upload-artifact@v4 + with: + name: binaries-${{ matrix.job.target }} + path: ${{ env.PLATFORM_NAME }} + retention-days: 1 + + - name: Upload release artifacts + uses: actions/upload-artifact@v4 + with: + name: artifacts-${{ matrix.job.target }}${{ matrix.job.native_build == true && '-native' || '' }} + path: ${{ steps.artifacts.outputs.file_name }} + retention-days: 1 + + docker-build-and-push: + runs-on: ubuntu-latest-8-cores + needs: [prepare, release] + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + + - name: Download binaries + uses: actions/download-artifact@v4 + with: + pattern: binaries-* + path: artifacts/linux + merge-multiple: true + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v1 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push docker image + uses: docker/build-push-action@v3 + with: + push: true + tags: ghcr.io/${{ 
github.repository }}:${{ needs.prepare.outputs.tag_name }} + platforms: linux/amd64,linux/arm64 + build-contexts: | + artifacts=artifacts \ No newline at end of file From 6fd5d204e233713f807bc189ae923f5d00c0ca45 Mon Sep 17 00:00:00 2001 From: glihm Date: Mon, 10 Nov 2025 18:08:37 -0600 Subject: [PATCH 23/26] chore(ci): fix rust version to the same as rust-toolchain --- .github/workflows/custom-release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/custom-release.yml b/.github/workflows/custom-release.yml index 3f8928eb0..80451d8e6 100644 --- a/.github/workflows/custom-release.yml +++ b/.github/workflows/custom-release.yml @@ -9,7 +9,7 @@ on: required: true env: - RUST_VERSION: 1.89.0 + RUST_VERSION: 1.86.0 CARGO_TERM_COLOR: always REGISTRY_IMAGE: ghcr.io/${{ github.repository }} From 5e93a012d88529c9bab7de7bde0b0fb7402595b1 Mon Sep 17 00:00:00 2001 From: glihm Date: Mon, 10 Nov 2025 19:57:40 -0600 Subject: [PATCH 24/26] fix: fix clippy and remove tx once confirmed on network --- crates/cli/src/optimistic.rs | 1 + crates/cli/src/options.rs | 4 ++-- crates/gateway/gateway-types/src/transaction.rs | 5 +++-- crates/node/src/optimistic/mod.rs | 14 ++++++++------ crates/optimistic/src/executor.rs | 12 +++++++++--- crates/optimistic/src/pool.rs | 3 ++- crates/pool/pool-api/src/tx.rs | 2 -- crates/rpc/rpc-server/src/starknet/mod.rs | 16 ++++++++++------ crates/rpc/rpc-server/src/starknet/pending.rs | 2 ++ crates/rpc/rpc-server/src/starknet/read.rs | 3 +-- crates/rpc/rpc-server/src/utils/events.rs | 2 ++ crates/rpc/rpc-types/src/transaction.rs | 2 +- .../provider/provider/src/providers/db/cached.rs | 4 ++-- .../provider/src/providers/fork/state.rs | 1 - 14 files changed, 43 insertions(+), 28 deletions(-) diff --git a/crates/cli/src/optimistic.rs b/crates/cli/src/optimistic.rs index 65c80a6e1..5b10725e9 100644 --- a/crates/cli/src/optimistic.rs +++ b/crates/cli/src/optimistic.rs @@ -87,6 +87,7 @@ impl OptimisticNodeArgs { 
self.tracer.config() } + #[allow(clippy::field_reassign_with_default)] fn chain_spec(&self) -> Result> { let mut dev_chain_spec = katana_chain_spec::dev::ChainSpec::default(); dev_chain_spec.id = ChainId::SEPOLIA; diff --git a/crates/cli/src/options.rs b/crates/cli/src/options.rs index 8e1985788..656989987 100644 --- a/crates/cli/src/options.rs +++ b/crates/cli/src/options.rs @@ -26,7 +26,7 @@ use katana_node::config::rpc::{RpcModulesList, DEFAULT_RPC_MAX_PROOF_KEYS}; use katana_node::config::rpc::{ DEFAULT_RPC_ADDR, DEFAULT_RPC_MAX_CALL_GAS, DEFAULT_RPC_MAX_EVENT_PAGE_SIZE, DEFAULT_RPC_PORT, }; -use katana_primitives::block::{BlockHashOrNumber, BlockIdOrTag, GasPrice}; +use katana_primitives::block::{BlockIdOrTag, GasPrice}; use katana_primitives::chain::ChainId; #[cfg(feature = "server")] use katana_rpc_server::cors::HeaderValue; @@ -37,7 +37,7 @@ use url::Url; #[cfg(feature = "server")] use crate::utils::{deserialize_cors_origins, serialize_cors_origins}; -use crate::utils::{parse_block_hash_or_number, parse_block_id_or_tag, parse_genesis}; +use crate::utils::{parse_block_id_or_tag, parse_genesis}; const DEFAULT_DEV_SEED: &str = "0"; const DEFAULT_DEV_ACCOUNTS: u16 = 10; diff --git a/crates/gateway/gateway-types/src/transaction.rs b/crates/gateway/gateway-types/src/transaction.rs index 933d54837..f3b1733be 100644 --- a/crates/gateway/gateway-types/src/transaction.rs +++ b/crates/gateway/gateway-types/src/transaction.rs @@ -660,7 +660,7 @@ impl From for katana_primitives::da::DataAvailabilityMode } // Custom serialization for contract class with gzip + base64 encoded sierra program -fn serialize_contract_class( +fn _serialize_contract_class( class: &std::sync::Arc, serializer: S, ) -> Result { @@ -700,7 +700,7 @@ fn serialize_contract_class( state.end() } -fn deserialize_contract_class<'de, D: serde::Deserializer<'de>>( +fn _deserialize_contract_class<'de, D: serde::Deserializer<'de>>( deserializer: D, ) -> Result, D::Error> { use std::io::Read; @@ -709,6 +709,7 
@@ fn deserialize_contract_class<'de, D: serde::Deserializer<'de>>( use flate2::read::GzDecoder; use serde::de; + #[allow(dead_code)] #[derive(Deserialize)] struct GatewaySierraClass { sierra_program: String, diff --git a/crates/node/src/optimistic/mod.rs b/crates/node/src/optimistic/mod.rs index 50a07d245..5b85ee822 100644 --- a/crates/node/src/optimistic/mod.rs +++ b/crates/node/src/optimistic/mod.rs @@ -34,7 +34,7 @@ pub mod config; use config::Config; -pub use self::config::*; +// pub use self::config::*; use crate::config::rpc::RpcModuleKind; #[derive(Debug)] @@ -84,11 +84,12 @@ impl Node { #[allow(unused_mut)] let mut class_cache = ClassCache::builder(); - #[cfg(feature = "native")] - { - info!(enabled = config.execution.compile_native, "Cairo native compilation"); - class_cache = class_cache.compile_native(config.execution.compile_native); - } + // Ignore native for now in optimistic node. + // #[cfg(feature = "native")] + // { + // info!(enabled = config.execution.compile_native, "Cairo native compilation"); + // class_cache = class_cache.compile_native(config.execution.compile_native); + // } let global_class_cache = class_cache.build_global()?; @@ -266,6 +267,7 @@ impl Node { } #[derive(Debug)] +#[allow(dead_code)] pub struct LaunchedNode { config: Arc, pool: TxPool, diff --git a/crates/optimistic/src/executor.rs b/crates/optimistic/src/executor.rs index 4c07608e3..1b7b2cbcb 100644 --- a/crates/optimistic/src/executor.rs +++ b/crates/optimistic/src/executor.rs @@ -45,6 +45,12 @@ impl OptimisticState { } } +impl Default for OptimisticState { + fn default() -> Self { + Self::new() + } +} + #[derive(Debug)] pub struct OptimisticExecutor { pool: TxPool, @@ -111,9 +117,9 @@ impl OptimisticExecutor { let client = self.client; let optimistic_state = self.optimistic_state; let block_env = self.block_env; - // self.task_spawner.build_task().name("Block Polling").spawn(async move { - // Self::poll_confirmed_blocks(client, optimistic_state, block_env).await; - // 
}); + self.task_spawner.build_task().name("Block Polling").spawn(async move { + Self::poll_confirmed_blocks(client, optimistic_state, block_env).await; + }); executor_handle } diff --git a/crates/optimistic/src/pool.rs b/crates/optimistic/src/pool.rs index fc705583e..c7b2dcf7b 100644 --- a/crates/optimistic/src/pool.rs +++ b/crates/optimistic/src/pool.rs @@ -7,11 +7,12 @@ use katana_pool_api::validation::{ }; use katana_rpc_client::starknet::Client; use katana_rpc_types::{BroadcastedTx, BroadcastedTxWithChainId}; -use tracing::{debug, info}; +use tracing::info; pub type TxPool = Pool>; /// A validator that forwards transactions to a remote Starknet RPC endpoint. +#[allow(dead_code)] #[derive(Debug, Clone)] pub struct PoolValidator { client: Arc, diff --git a/crates/pool/pool-api/src/tx.rs b/crates/pool/pool-api/src/tx.rs index 974b81f0c..a2c7a0f87 100644 --- a/crates/pool/pool-api/src/tx.rs +++ b/crates/pool/pool-api/src/tx.rs @@ -6,8 +6,6 @@ use katana_primitives::contract::{ContractAddress, Nonce}; use katana_primitives::transaction::{ DeclareTx, DeployAccountTx, ExecutableTx, ExecutableTxWithHash, InvokeTx, TxHash, }; -use katana_primitives::utils::get_contract_address; -use katana_primitives::Felt; use katana_rpc_types::broadcasted::BroadcastedTx; use katana_rpc_types::BroadcastedTxWithChainId; diff --git a/crates/rpc/rpc-server/src/starknet/mod.rs b/crates/rpc/rpc-server/src/starknet/mod.rs index 536ec779b..8cfe243a6 100644 --- a/crates/rpc/rpc-server/src/starknet/mod.rs +++ b/crates/rpc/rpc-server/src/starknet/mod.rs @@ -1,3 +1,6 @@ +#![allow(unused_imports)] +#![allow(clippy::too_many_arguments)] +#![allow(clippy::unnecessary_map_or)] //! Server implementation for the Starknet JSON-RPC API. 
use std::fmt::Debug; @@ -650,8 +653,8 @@ where let status = provider.transaction_status(hash)?; if let Some(status) = status { - // TODO: this might not work once we allow querying for 'failed' transactions - // from the provider + // TODO: this might not work once we allow querying for 'failed' + // transactions from the provider let Some(receipt) = provider.receipt_by_hash(hash)? else { let error = StarknetApiError::unexpected( "Transaction hash exist, but the receipt is missing", @@ -734,7 +737,7 @@ where ) -> StarknetApiResult { let block = self .on_io_blocking_task(move |this| { - let provider = &this.inner.storage_provider.provider(); + let _provider = &this.inner.storage_provider.provider(); if BlockIdOrTag::PreConfirmed == block_id { if let Some(block) = @@ -774,7 +777,7 @@ where ) -> StarknetApiResult { let block = self .on_io_blocking_task(move |this| { - let provider = &this.inner.storage_provider.provider(); + let _provider = &this.inner.storage_provider.provider(); if BlockIdOrTag::PreConfirmed == block_id { if let Some(block) = @@ -1010,7 +1013,8 @@ where } } - // if we already exhaust all the optimistic transactions then we return a continuation token pointing to the next optimistic transaction + // if we already exhaust all the optimistic transactions then we return a continuation + // token pointing to the next optimistic transaction return Ok(Some(katana_primitives::event::ContinuationToken { block_n: 0, // Not used for optimistic transactions txn_n: transactions.len() as u64, @@ -1077,7 +1081,7 @@ where } } - return Ok(GetEventsResponse { events, continuation_token: None }); + Ok(GetEventsResponse { events, continuation_token: None }) } (EventBlockId::Num(from), EventBlockId::Pending) => { diff --git a/crates/rpc/rpc-server/src/starknet/pending.rs b/crates/rpc/rpc-server/src/starknet/pending.rs index 70c478aec..8d222829c 100644 --- a/crates/rpc/rpc-server/src/starknet/pending.rs +++ b/crates/rpc/rpc-server/src/starknet/pending.rs @@ -1,3 +1,5 @@ 
+#![allow(clippy::collapsible_match)] + use std::fmt::Debug; use katana_core::service::block_producer::{BlockProducer, BlockProducerMode}; diff --git a/crates/rpc/rpc-server/src/starknet/read.rs b/crates/rpc/rpc-server/src/starknet/read.rs index 66de4d0e3..819c69ef5 100644 --- a/crates/rpc/rpc-server/src/starknet/read.rs +++ b/crates/rpc/rpc-server/src/starknet/read.rs @@ -142,8 +142,7 @@ where async fn get_events(&self, filter: EventFilterWithPage) -> RpcResult { use std::collections::HashMap; - use std::sync::LazyLock; - use std::sync::Mutex; + use std::sync::{LazyLock, Mutex}; // Function-local static cache for events static EVENTS_CACHE: LazyLock>> = diff --git a/crates/rpc/rpc-server/src/utils/events.rs b/crates/rpc/rpc-server/src/utils/events.rs index e36c64d6d..1bb4788a5 100644 --- a/crates/rpc/rpc-server/src/utils/events.rs +++ b/crates/rpc/rpc-server/src/utils/events.rs @@ -1,3 +1,5 @@ +#![allow(dead_code)] + use std::cmp::Ordering; use std::ops::RangeInclusive; diff --git a/crates/rpc/rpc-types/src/transaction.rs b/crates/rpc/rpc-types/src/transaction.rs index 9b1afd2cc..5f46f965e 100644 --- a/crates/rpc/rpc-types/src/transaction.rs +++ b/crates/rpc/rpc-types/src/transaction.rs @@ -591,7 +591,7 @@ impl From for primitives::Tx { } impl From for RpcTxWithHash { - fn from(tx: primitives::ExecutableTxWithHash) -> Self { + fn from(_tx: primitives::ExecutableTxWithHash) -> Self { todo!() } } diff --git a/crates/storage/provider/provider/src/providers/db/cached.rs b/crates/storage/provider/provider/src/providers/db/cached.rs index a0dc60910..7c034da0d 100644 --- a/crates/storage/provider/provider/src/providers/db/cached.rs +++ b/crates/storage/provider/provider/src/providers/db/cached.rs @@ -125,7 +125,7 @@ impl CachedStateProvider { impl ContractClassProvider for CachedStateProvider { fn class(&self, hash: ClassHash) -> ProviderResult> { if let Some(class) = self.cache.get_class(hash) { - return Ok(Some(class)); + Ok(Some(class)) } else { 
Ok(self.state.class(hash)?) } @@ -170,7 +170,7 @@ impl StateProvider for CachedStateProvider { address: ContractAddress, ) -> ProviderResult> { if let Some(class_hash) = self.cache.get_class_hash(address) { - return Ok(Some(class_hash)); + Ok(Some(class_hash)) } else { Ok(self.state.class_hash_of_contract(address)?) } diff --git a/crates/storage/provider/provider/src/providers/fork/state.rs b/crates/storage/provider/provider/src/providers/fork/state.rs index f66064012..96c77efa3 100644 --- a/crates/storage/provider/provider/src/providers/fork/state.rs +++ b/crates/storage/provider/provider/src/providers/fork/state.rs @@ -1,4 +1,3 @@ -use std::cmp::Ordering; use std::sync::Arc; use katana_db::abstraction::{Database, DbTx, DbTxMut}; From b728773709b466e361eff5c59e4304c6fd32aa1e Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Sat, 3 Jan 2026 18:45:16 +0800 Subject: [PATCH 25/26] update optimistic executor --- crates/node/src/optimistic/mod.rs | 177 +++++++++--------- crates/optimistic/src/executor.rs | 60 ++++-- crates/rpc/rpc-server/Cargo.toml | 4 +- crates/rpc/rpc-server/src/starknet/pending.rs | 24 ++- 4 files changed, 157 insertions(+), 108 deletions(-) diff --git a/crates/node/src/optimistic/mod.rs b/crates/node/src/optimistic/mod.rs index 5b85ee822..a6c6bbc2b 100644 --- a/crates/node/src/optimistic/mod.rs +++ b/crates/node/src/optimistic/mod.rs @@ -6,7 +6,7 @@ use http::Method; use jsonrpsee::http_client::HttpClientBuilder; use jsonrpsee::RpcModule; use katana_chain_spec::ChainSpec; -use katana_core::backend::storage::Blockchain; +use katana_core::backend::storage::ProviderRO; use katana_core::backend::Backend; use katana_core::env::BlockContextGenerator; use katana_executor::implementation::blockifier::cache::ClassCache; @@ -21,8 +21,9 @@ use katana_optimistic::pool::{PoolValidator, TxPool}; use katana_pool::ordering::FiFo; use katana_primitives::block::BlockIdOrTag; use katana_primitives::env::{CfgEnv, FeeTokenAddressses}; -use 
katana_provider::providers::fork::ForkedProvider; +use katana_provider::ProviderFactory; use katana_rpc_api::starknet::{StarknetApiServer, StarknetTraceApiServer, StarknetWriteApiServer}; +use katana_rpc_client::starknet::Client as StarknetClient; use katana_rpc_server::cors::Cors; use katana_rpc_server::starknet::forking::ForkedClient; use katana_rpc_server::starknet::{OptimisticPendingBlockProvider, StarknetApi, StarknetApiConfig}; @@ -38,18 +39,77 @@ use config::Config; use crate::config::rpc::RpcModuleKind; #[derive(Debug)] -pub struct Node { +pub struct Node

+where + P: ProviderFactory, + P::Provider: ProviderRO, +{ config: Arc, pool: TxPool, db: katana_db::Db, rpc_server: RpcServer, task_manager: TaskManager, - executor: OptimisticExecutor, - backend: Arc>, + executor: OptimisticExecutor

, + backend: Arc>, } -impl Node { - pub async fn build(config: Config) -> Result { +impl

Node

+where + P: ProviderFactory + Clone + Unpin, + P::Provider: ProviderRO, +{ + pub async fn launch(self) -> Result> { + let chain = self.backend.chain_spec.id(); + info!(%chain, "Starting node."); + + // TODO: maybe move this to the build stage + if let Some(ref cfg) = self.config.metrics { + let db_metrics = Box::new(self.db.clone()) as Box; + let disk_metrics = Box::new(DiskReporter::new(self.db.path())?) as Box; + let reports: Vec> = vec![db_metrics, disk_metrics]; + + let exporter = PrometheusRecorder::current().expect("qed; should exist at this point"); + let server = MetricsServer::new(exporter).with_process_metrics().with_reports(reports); + + let addr = cfg.socket_addr(); + self.task_manager.task_spawner().build_task().spawn(server.start(addr)); + info!(%addr, "Metrics server started."); + } + + // --- start the rpc server + + let rpc_handle = self.rpc_server.start(self.config.rpc.socket_addr()).await?; + + // --- start the gas oracle worker task + + if let Some(worker) = self.backend.gas_oracle.run_worker() { + self.task_manager + .task_spawner() + .build_task() + .graceful_shutdown() + .name("gas oracle") + .spawn(worker); + } + + info!(target: "node", "Gas price oracle worker started."); + + let executor_handle = self.executor.spawn(); + + Ok(LaunchedNode { + rpc: rpc_handle, + backend: self.backend, + config: self.config, + db: self.db, + executor: executor_handle, + task_manager: self.task_manager, + pool: self.pool, + rpc_server: self.rpc_server, + }) + } +} + +impl Node { + pub async fn build(config: Config) -> Result { if config.metrics.is_some() { // Metrics recorder must be initialized before calling any of the metrics macros, in // order for it to be registered. 
@@ -63,23 +123,6 @@ impl Node { // --- build executor factory - let fee_token_addresses = match config.chain.as_ref() { - ChainSpec::Dev(cs) => { - FeeTokenAddressses { eth: cs.fee_contracts.eth, strk: cs.fee_contracts.strk } - } - ChainSpec::Rollup(cs) => { - FeeTokenAddressses { eth: cs.fee_contract.strk, strk: cs.fee_contract.strk } - } - }; - - let cfg_env = CfgEnv { - fee_token_addresses, - chain_id: config.chain.id(), - invoke_tx_max_n_steps: 10_000_000, - validate_max_n_steps: 10_000_000, - max_recursion_depth: 100, - }; - let executor_factory = { #[allow(unused_mut)] let mut class_cache = ClassCache::builder(); @@ -94,33 +137,40 @@ impl Node { let global_class_cache = class_cache.build_global()?; let factory = BlockifierFactory::new( - cfg_env, + None, ExecutionFlags::new(), BlockLimits::default(), global_class_cache, + config.chain.clone(), ); Arc::new(factory) }; - // --- build backend + // --- build storage provider - let http_client = HttpClientBuilder::new().build(config.forking.url.as_str())?; - let starknet_client = katana_rpc_client::starknet::Client::new(http_client); + let starknet_client = StarknetClient::new(config.forking.url.clone()); let db = katana_db::Db::in_memory()?; - let forked_block_id = BlockIdOrTag::Latest; - let storage_p = ForkedProvider::new(db.clone(), forked_block_id, starknet_client.clone()); + // Get the latest block number from the forked network + let forked_block_num = starknet_client.block_number().await?.block_number; + let forked_block_id = BlockIdOrTag::Number(forked_block_num); + + let provider = katana_provider::ForkProviderFactory::new( + db.clone(), + forked_block_num, + starknet_client.clone(), + ); + let forked_client = ForkedClient::new(starknet_client.clone(), forked_block_id); - let blockchain = Blockchain::new(storage_p.clone()); let gpo = GasPriceOracle::sampled_starknet(config.forking.url.clone()); let block_context_generator = BlockContextGenerator::default().into(); let backend = Arc::new(Backend { 
gas_oracle: gpo.clone(), - blockchain: blockchain.clone(), + storage: provider.clone(), executor_factory: executor_factory.clone(), block_context_generator, chain_spec: config.chain.clone(), @@ -138,7 +188,7 @@ impl Node { // this is the component that will populate the optimistic state let executor = OptimisticExecutor::new( pool.clone(), - blockchain.clone(), + provider.clone(), optimistic_state.clone(), executor_factory.clone(), task_spawner.clone(), @@ -171,7 +221,7 @@ impl Node { let pending_block_provider = OptimisticPendingBlockProvider::new( optimistic_state.clone(), starknet_client.clone(), - blockchain.clone(), + provider.clone(), ); let starknet_api = StarknetApi::new_forked( @@ -181,7 +231,7 @@ impl Node { task_spawner.clone(), starknet_api_cfg, pending_block_provider, - blockchain, + provider.clone(), Some(optimistic_state.clone()), ); @@ -215,66 +265,21 @@ impl Node { Ok(Node { db, pool, backend, rpc_server, config: config.into(), task_manager, executor }) } - - pub async fn launch(self) -> Result { - let chain = self.backend.chain_spec.id(); - info!(%chain, "Starting node."); - - // TODO: maybe move this to the build stage - if let Some(ref cfg) = self.config.metrics { - let db_metrics = Box::new(self.db.clone()) as Box; - let disk_metrics = Box::new(DiskReporter::new(self.db.path())?) 
as Box; - let reports: Vec> = vec![db_metrics, disk_metrics]; - - let exporter = PrometheusRecorder::current().expect("qed; should exist at this point"); - let server = MetricsServer::new(exporter).with_process_metrics().with_reports(reports); - - let addr = cfg.socket_addr(); - self.task_manager.task_spawner().build_task().spawn(server.start(addr)); - info!(%addr, "Metrics server started."); - } - - // --- start the rpc server - - let rpc_handle = self.rpc_server.start(self.config.rpc.socket_addr()).await?; - - // --- start the gas oracle worker task - - if let Some(worker) = self.backend.gas_oracle.run_worker() { - self.task_manager - .task_spawner() - .build_task() - .graceful_shutdown() - .name("gas oracle") - .spawn(worker); - } - - info!(target: "node", "Gas price oracle worker started."); - - let executor_handle = self.executor.spawn(); - - Ok(LaunchedNode { - rpc: rpc_handle, - backend: self.backend, - config: self.config, - db: self.db, - executor: executor_handle, - task_manager: self.task_manager, - pool: self.pool, - rpc_server: self.rpc_server, - }) - } } #[derive(Debug)] #[allow(dead_code)] -pub struct LaunchedNode { +pub struct LaunchedNode

+where + P: ProviderFactory, + P::Provider: ProviderRO, +{ config: Arc, pool: TxPool, db: katana_db::Db, rpc_server: RpcServer, task_manager: TaskManager, - backend: Arc>, + backend: Arc>, rpc: RpcServerHandle, executor: JoinHandle<()>, } diff --git a/crates/optimistic/src/executor.rs b/crates/optimistic/src/executor.rs index 80bc133dc..b8ae4c73e 100644 --- a/crates/optimistic/src/executor.rs +++ b/crates/optimistic/src/executor.rs @@ -6,6 +6,7 @@ use std::time::Duration; use futures::stream::StreamExt; use futures::FutureExt; +use katana_core::backend::storage::ProviderRO; use katana_executor::implementation::blockifier::BlockifierFactory; use katana_executor::{ExecutionResult, ExecutorFactory}; use katana_pool::ordering::FiFo; @@ -52,29 +53,39 @@ impl Default for OptimisticState { } #[derive(Debug)] -pub struct OptimisticExecutor { +pub struct OptimisticExecutor

+where + P: ProviderFactory, + P::Provider: ProviderRO, +{ pool: TxPool, optimistic_state: OptimisticState, executor_factory: Arc, - storage: Blockchain, + storage: P, task_spawner: TaskSpawner, client: Client, block_env: Arc>, } -impl OptimisticExecutor { +impl

OptimisticExecutor

+where + P: ProviderFactory + Clone + Unpin, + P::Provider: ProviderRO, +{ /// Creates a new `OptimisticExecutor` instance. /// /// # Arguments /// /// * `pool` - The transaction pool to monitor for new transactions - /// * `backend` - The backend containing the executor factory and blockchain state + /// * `storage` - The storage provider factory + /// * `optimistic_state` - The optimistic state to track executed transactions + /// * `executor_factory` - The executor factory for transaction execution /// * `task_spawner` - The task spawner used to run the executor actor /// * `client` - The RPC client used to poll for confirmed blocks /// * `block_env` - The initial block environment pub fn new( pool: TxPool, - storage: Blockchain, + storage: P, optimistic_state: OptimisticState, executor_factory: Arc, task_spawner: TaskSpawner, @@ -235,23 +246,40 @@ impl OptimisticExecutor { } } -#[derive(Debug)] -struct OptimisticExecutorActor { +struct OptimisticExecutorActor

+where + P: ProviderFactory, + P::Provider: ProviderRO, +{ pool: TxPool, optimistic_state: OptimisticState, pending_txs: PendingTransactions>, - storage: Blockchain, + storage: P, executor_factory: Arc, task_spawner: TaskSpawner, ongoing_execution: Option>>, block_env: Arc>, } -impl OptimisticExecutorActor { +impl

std::fmt::Debug for OptimisticExecutorActor

+where + P: ProviderFactory, + P::Provider: ProviderRO, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("OptimisticExecutorActor").finish_non_exhaustive() + } +} + +impl

OptimisticExecutorActor

+where + P: ProviderFactory + Clone, + P::Provider: ProviderRO, +{ /// Creates a new executor actor with the given pending transactions stream. fn new( pool: TxPool, - storage: Blockchain, + storage: P, optimistic_state: OptimisticState, executor_factory: Arc, task_spawner: TaskSpawner, @@ -272,12 +300,12 @@ impl OptimisticExecutorActor { /// Execute a single transaction optimistically against the latest state. fn execute_transaction( - pool: TxPool, - storage: Blockchain, + storage: P, optimistic_state: OptimisticState, executor_factory: Arc, block_env: Arc>, tx: BroadcastedTxWithChainId, + pool: TxPool, ) -> anyhow::Result<()> { let latest_state = storage.provider().latest()?; let state = optimistic_state.get_optimistic_state(latest_state); @@ -306,7 +334,11 @@ impl OptimisticExecutorActor { } } -impl Future for OptimisticExecutorActor { +impl

Future for OptimisticExecutorActor

+where + P: ProviderFactory + Clone + Unpin, + P::Provider: ProviderRO, +{ type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { @@ -373,12 +405,12 @@ impl Future for OptimisticExecutorActor { let execution_future = this.task_spawner.cpu_bound().spawn(move || { Self::execute_transaction( - pool, storage, optimistic_state, executor_factory, block_env, tx, + pool, ) }); diff --git a/crates/rpc/rpc-server/Cargo.toml b/crates/rpc/rpc-server/Cargo.toml index 282fbdec2..622ee49af 100644 --- a/crates/rpc/rpc-server/Cargo.toml +++ b/crates/rpc/rpc-server/Cargo.toml @@ -18,6 +18,7 @@ katana-primitives.workspace = true katana-genesis = { workspace = true, optional = true } katana-provider = { workspace = true, features = [ "test-utils" ] } katana-rpc-api = { workspace = true, features = [ "client" ] } +katana-rpc-client.workspace = true katana-rpc-types.workspace = true katana-rpc-types-builder.workspace = true katana-tasks.workspace = true @@ -26,12 +27,12 @@ katana-tracing.workspace = true anyhow.workspace = true auto_impl.workspace = true +futures.workspace = true http.workspace = true jsonrpsee = { workspace = true, features = [ "client", "server" ] } metrics.workspace = true serde_json.workspace = true starknet = { workspace = true, optional = true } -futures = { workspace = true, optional = true } thiserror.workspace = true tokio.workspace = true tower.workspace = true @@ -82,7 +83,6 @@ url.workspace = true [features] cartridge = [ "dep:cainome", - "dep:futures", "dep:cartridge", "dep:katana-genesis", "dep:starknet", diff --git a/crates/rpc/rpc-server/src/starknet/pending.rs b/crates/rpc/rpc-server/src/starknet/pending.rs index cd66b5c6a..a8999832a 100644 --- a/crates/rpc/rpc-server/src/starknet/pending.rs +++ b/crates/rpc/rpc-server/src/starknet/pending.rs @@ -288,23 +288,35 @@ where /// A pending block provider that checks the optimistic state for transactions/receipts, /// then falls back to the client for all queries. 
#[derive(Debug, Clone)] -pub struct OptimisticPendingBlockProvider { +pub struct OptimisticPendingBlockProvider

+where + P: ProviderFactory, + P::Provider: ProviderRO, +{ optimistic_state: katana_optimistic::executor::OptimisticState, client: Client, - storage: katana_core::backend::storage::Blockchain, + storage: P, } -impl OptimisticPendingBlockProvider { +impl

OptimisticPendingBlockProvider

+where + P: ProviderFactory, + P::Provider: ProviderRO, +{ pub fn new( optimistic_state: katana_optimistic::executor::OptimisticState, client: Client, - provider: katana_core::backend::storage::Blockchain, + storage: P, ) -> Self { - Self { optimistic_state, client, storage: provider } + Self { optimistic_state, client, storage } } } -impl PendingBlockProvider for OptimisticPendingBlockProvider { +impl

PendingBlockProvider for OptimisticPendingBlockProvider

+where + P: ProviderFactory + Clone, + P::Provider: ProviderRO, +{ fn pending_state(&self) -> StarknetApiResult>> { let latest_state = self.storage.provider().latest()?; Ok(Some(self.optimistic_state.get_optimistic_state(latest_state))) From a81b5b0bd2eb73933d2ef54a397525f14463fda8 Mon Sep 17 00:00:00 2001 From: Ammar Arif Date: Wed, 7 Jan 2026 13:56:26 -0600 Subject: [PATCH 26/26] fix --- crates/cli/src/optimistic.rs | 11 +- crates/node/src/full/mod.rs | 1 + crates/node/src/lib.rs | 5 +- crates/node/src/optimistic/mod.rs | 26 ++- crates/rpc/rpc-server/src/starknet/mod.rs | 187 +++++++-------------- crates/rpc/rpc-server/src/starknet/read.rs | 2 +- 6 files changed, 82 insertions(+), 150 deletions(-) diff --git a/crates/cli/src/optimistic.rs b/crates/cli/src/optimistic.rs index 5b10725e9..269724c3d 100644 --- a/crates/cli/src/optimistic.rs +++ b/crates/cli/src/optimistic.rs @@ -46,8 +46,17 @@ pub struct OptimisticNodeArgs { impl OptimisticNodeArgs { pub async fn execute(&self) -> Result<()> { + let logging = katana_tracing::LoggingConfig { + stdout_format: self.logging.stdout.stdout_format, + stdout_color: self.logging.stdout.color, + file_enabled: self.logging.file.enabled, + file_format: self.logging.file.file_format, + file_directory: self.logging.file.directory.clone(), + file_max_files: self.logging.file.max_files, + }; + let tracer_config = self.tracer_config(); - katana_tracing::init(self.logging.log_format, tracer_config).await?; + katana_tracing::init(logging, tracer_config).await?; self.start_node().await } diff --git a/crates/node/src/full/mod.rs b/crates/node/src/full/mod.rs index 21e8d4a0a..68375266b 100644 --- a/crates/node/src/full/mod.rs +++ b/crates/node/src/full/mod.rs @@ -181,6 +181,7 @@ impl Node { task_spawner.clone(), preconf_factory, GasPriceOracle::create_for_testing(), + None, // optimistic_state starknet_api_cfg, storage_provider.clone(), ); diff --git a/crates/node/src/lib.rs b/crates/node/src/lib.rs index 05c91c1df..2d53063da 100644 --- 
a/crates/node/src/lib.rs +++ b/crates/node/src/lib.rs @@ -30,7 +30,7 @@ use katana_metrics::sys::DiskReporter; use katana_metrics::{MetricsServer, MetricsServerHandle, Report}; use katana_pool::ordering::FiFo; use katana_pool::TxPool; -use katana_primitives::block::{BlockHashOrNumber, GasPrices}; +use katana_primitives::block::{BlockIdOrTag, GasPrices}; use katana_primitives::cairo::ShortString; use katana_primitives::env::VersionedConstantsOverrides; use katana_provider::{DbProviderFactory, ForkProviderFactory, ProviderFactory}; @@ -254,6 +254,7 @@ where task_spawner.clone(), block_producer.clone(), gas_oracle.clone(), + None, // optimistic_state starknet_api_cfg, provider.clone(), ); @@ -416,7 +417,7 @@ impl Node { id } else { let res = client.block_number().await?; - BlockHashOrNumber::Num(res.block_number) + BlockIdOrTag::Number(res.block_number) }; // if the id is not in ASCII encoding, we display the chain id as is in hex. diff --git a/crates/node/src/optimistic/mod.rs b/crates/node/src/optimistic/mod.rs index a6c6bbc2b..6dc253e56 100644 --- a/crates/node/src/optimistic/mod.rs +++ b/crates/node/src/optimistic/mod.rs @@ -3,9 +3,7 @@ use std::sync::Arc; use anyhow::Result; use http::header::CONTENT_TYPE; use http::Method; -use jsonrpsee::http_client::HttpClientBuilder; use jsonrpsee::RpcModule; -use katana_chain_spec::ChainSpec; use katana_core::backend::storage::ProviderRO; use katana_core::backend::Backend; use katana_core::env::BlockContextGenerator; @@ -15,17 +13,14 @@ use katana_executor::{BlockLimits, ExecutionFlags}; use katana_gas_price_oracle::GasPriceOracle; use katana_metrics::exporters::prometheus::PrometheusRecorder; use katana_metrics::sys::DiskReporter; -use katana_metrics::{Report, Server as MetricsServer}; +use katana_metrics::{MetricsServer, Report}; use katana_optimistic::executor::{OptimisticExecutor, OptimisticState}; use katana_optimistic::pool::{PoolValidator, TxPool}; use katana_pool::ordering::FiFo; -use 
katana_primitives::block::BlockIdOrTag; -use katana_primitives::env::{CfgEnv, FeeTokenAddressses}; use katana_provider::ProviderFactory; use katana_rpc_api::starknet::{StarknetApiServer, StarknetTraceApiServer, StarknetWriteApiServer}; use katana_rpc_client::starknet::Client as StarknetClient; use katana_rpc_server::cors::Cors; -use katana_rpc_server::starknet::forking::ForkedClient; use katana_rpc_server::starknet::{OptimisticPendingBlockProvider, StarknetApi, StarknetApiConfig}; use katana_rpc_server::{RpcServer, RpcServerHandle}; use katana_tasks::{JoinHandle, TaskManager}; @@ -69,10 +64,10 @@ where let reports: Vec> = vec![db_metrics, disk_metrics]; let exporter = PrometheusRecorder::current().expect("qed; should exist at this point"); - let server = MetricsServer::new(exporter).with_process_metrics().with_reports(reports); + let server = MetricsServer::new(exporter).with_process_metrics().reports(reports); let addr = cfg.socket_addr(); - self.task_manager.task_spawner().build_task().spawn(server.start(addr)); + let _metrics_handle = server.start(addr)?; info!(%addr, "Metrics server started."); } @@ -155,7 +150,6 @@ impl Node { // Get the latest block number from the forked network let forked_block_num = starknet_client.block_number().await?.block_number; - let forked_block_id = BlockIdOrTag::Number(forked_block_num); let provider = katana_provider::ForkProviderFactory::new( db.clone(), @@ -163,8 +157,6 @@ impl Node { starknet_client.clone(), ); - let forked_client = ForkedClient::new(starknet_client.clone(), forked_block_id); - let gpo = GasPriceOracle::sampled_starknet(config.forking.url.clone()); let block_context_generator = BlockContextGenerator::default().into(); @@ -213,6 +205,8 @@ impl Node { max_proof_keys: config.rpc.max_proof_keys, max_call_gas: config.rpc.max_call_gas, max_concurrent_estimate_fee_requests: config.rpc.max_concurrent_estimate_fee_requests, + simulation_flags: ExecutionFlags::new(), + versioned_constant_overrides: None, #[cfg(feature = 
"cartridge")] paymaster: None, }; @@ -224,15 +218,15 @@ impl Node { provider.clone(), ); - let starknet_api = StarknetApi::new_forked( - backend.clone(), + let starknet_api = StarknetApi::new( + config.chain.clone(), pool.clone(), - forked_client, task_spawner.clone(), - starknet_api_cfg, pending_block_provider, - provider.clone(), + gpo.clone(), Some(optimistic_state.clone()), + starknet_api_cfg, + provider.clone(), ); if config.rpc.apis.contains(&RpcModuleKind::Starknet) { diff --git a/crates/rpc/rpc-server/src/starknet/mod.rs b/crates/rpc/rpc-server/src/starknet/mod.rs index 5623e24c2..0cc0f990e 100644 --- a/crates/rpc/rpc-server/src/starknet/mod.rs +++ b/crates/rpc/rpc-server/src/starknet/mod.rs @@ -132,9 +132,9 @@ where task_spawner, config, pending_block_provider, - optimistic_state, gas_oracle, storage2, + optimistic_state, ) } @@ -551,45 +551,6 @@ where } } - async fn transaction(&self, hash: TxHash) -> StarknetApiResult { - let tx = self - .on_io_blocking_task(move |this| { - // First, check optimistic state for the transaction - if let Some(optimistic_state) = &this.inner.optimistic_state { - let transactions = optimistic_state.transactions.read(); - if let Some((tx, _result)) = transactions.iter().find(|(tx, _)| tx.hash == hash) - { - return Result::<_, StarknetApiError>::Ok(Some(RpcTxWithHash::from( - tx.clone(), - ))); - } - } - - // Check pending block provider - if let pending_tx @ Some(..) = - this.inner.pending_block_provider.get_pending_transaction(hash)? - { - Result::<_, StarknetApiError>::Ok(pending_tx) - } else { - let tx = this - .storage() - .provider() - .transaction_by_hash(hash)? 
- .map(RpcTxWithHash::from); - - Result::<_, StarknetApiError>::Ok(tx) - } - }) - .await??; - - if let Some(tx) = tx { - Ok(tx) - } else { - let pool_tx = self.inner.pool.get(hash).ok_or(StarknetApiError::TxnHashNotFound)?; - Ok(Into::into(pool_tx.as_ref().clone())) - } - } - async fn receipt(&self, hash: Felt) -> StarknetApiResult { println!("requesting receipt for tx {hash:#x}"); let receipt = self @@ -958,6 +919,8 @@ where block_number: None, /* Optimistic transactions don't have a block * number yet */ transaction_hash: tx.hash, + transaction_index: None, // Optimistic transactions don't have a tx index yet + event_index: Some(event_idx as u64), }); // Stop if we've reached the chunk size limit @@ -1013,84 +976,12 @@ where let mut events = Vec::with_capacity(chunk_size as usize); match (from, to) { - (EventBlockId::Num(from), EventBlockId::Num(to)) => { - // Check if continuation token is a native (non-forked) token - let is_native_token = continuation_token - .as_ref() - .map_or(false, |t| matches!(t, MaybeForkedContinuationToken::Token(_))); - - // Only fetch from forked client if we don't have a native continuation token - if !is_native_token { - let client = &self.inner.forked_client.as_ref().unwrap(); - // Extract forked token if present - let forked_token = continuation_token.as_ref().and_then(|t| match t { - MaybeForkedContinuationToken::Forked(token) => Some(token.clone()), - _ => None, - }); - - let forked_result = futures::executor::block_on(client.get_events( - BlockIdOrTag::Number(from), - BlockIdOrTag::Number(to), - address, - keys.clone(), - forked_token, - chunk_size, - ))?; - - events.extend(forked_result.events); - - // Return early if there's a continuation token from forked network - if let Some(token) = forked_result.continuation_token { - let token = Some(MaybeForkedContinuationToken::Forked(token).to_string()); - return Ok(GetEventsResponse { events, continuation_token: token }); - } - } - + (EventBlockId::Num(_from), 
EventBlockId::Num(_to)) => { + // TODO: implement fetching events from storage for non-pending block ranges Ok(GetEventsResponse { events, continuation_token: None }) } - (EventBlockId::Num(from), EventBlockId::Pending) => { - // Check if continuation token is a native (non-forked) token - let fetch_from_fork = continuation_token - .as_ref() - // if not token is supplied then we need to fetch from forked client, or - // if token is a forked token - .map_or(true, |t| matches!(t, MaybeForkedContinuationToken::Forked(_))); - - // Only fetch from forked client if we don't have a native continuation token - if dbg!(fetch_from_fork) { - let client = &self.inner.forked_client.as_ref().unwrap(); - - // Extract forked token if present - let forked_token = continuation_token.as_ref().and_then(|t| match t { - MaybeForkedContinuationToken::Forked(token) => Some(token.clone()), - _ => None, - }); - - let forked_result = futures::executor::block_on(client.get_events( - BlockIdOrTag::Number(from), - BlockIdOrTag::Latest, - address, - keys.clone(), - forked_token, - chunk_size, - ))?; - - events.extend(forked_result.events); - - // Return early if there's a continuation token from forked network - - if let Some(token) = forked_result.continuation_token { - if dbg!(events.len() as u64 >= chunk_size) { - let token = MaybeForkedContinuationToken::Forked(token); - return Ok(GetEventsResponse { - events, - continuation_token: Some(token.to_string()), - }); - } - } - } - + (EventBlockId::Num(_from), EventBlockId::Pending) => { // Fetch events from optimistic state transactions (which serve as "pending" // transactions) // Extract native token if present @@ -1099,7 +990,6 @@ where _ => None, }); - println!("fetching optimistic events"); let opt_token = self.fetch_optimistic_events( address, &keys, @@ -1108,16 +998,12 @@ where native_token, )?; - dbg!(&opt_token); - let continuation_token = opt_token.map(|t| MaybeForkedContinuationToken::Token(t).to_string()); Ok(GetEventsResponse { 
events, continuation_token }) } (EventBlockId::Pending, EventBlockId::Pending) => { - println!("fetching optimistic events - pending - pending"); - // Fetch events from optimistic state transactions (which represent pending // transactions) // Extract native token if present @@ -1159,16 +1045,8 @@ where BlockIdOrTag::Number(num) => EventBlockId::Num(num), BlockIdOrTag::Latest => { - // let num = provider.convert_block_id(id)?; - // EventBlockId::Num(num.ok_or(StarknetApiError::BlockNotFound)?) - if let Some(client) = self.forked_client() { - let num = futures::executor::block_on(client.block_number())?; - EventBlockId::Num(num) - } - // Otherwise the block hash is not found. - else { - return Err(StarknetApiError::BlockNotFound); - } + let num = provider.latest_number()?; + EventBlockId::Num(num) } BlockIdOrTag::Hash(..) => { @@ -1443,6 +1321,55 @@ where } } +// Separate impl block for methods that require the pool transaction to be convertible to RpcTxWithHash +impl StarknetApi +where + Pool: TransactionPool + 'static, + ::Transaction: Into, + PP: PendingBlockProvider, + PF: ProviderFactory, + ::Provider: ProviderRO, +{ + async fn transaction(&self, hash: TxHash) -> StarknetApiResult { + let tx = self + .on_io_blocking_task(move |this| { + // First, check optimistic state for the transaction + if let Some(optimistic_state) = &this.inner.optimistic_state { + let transactions = optimistic_state.transactions.read(); + if let Some((tx, _result)) = transactions.iter().find(|(tx, _)| tx.hash == hash) + { + return Result::<_, StarknetApiError>::Ok(Some(RpcTxWithHash::from( + tx.clone(), + ))); + } + } + + // Check pending block provider + if let pending_tx @ Some(..) = + this.inner.pending_block_provider.get_pending_transaction(hash)? + { + Result::<_, StarknetApiError>::Ok(pending_tx) + } else { + let tx = this + .storage() + .provider() + .transaction_by_hash(hash)? 
+ .map(RpcTxWithHash::from); + + Result::<_, StarknetApiError>::Ok(tx) + } + }) + .await??; + + if let Some(tx) = tx { + Ok(tx) + } else { + let pool_tx = self.inner.pool.get(hash).ok_or(StarknetApiError::TxnHashNotFound)?; + Ok(Into::into(pool_tx.as_ref().clone())) + } + } +} + impl Clone for StarknetApi where Pool: TransactionPool, diff --git a/crates/rpc/rpc-server/src/starknet/read.rs b/crates/rpc/rpc-server/src/starknet/read.rs index 3e54fc27f..4dcfcc466 100644 --- a/crates/rpc/rpc-server/src/starknet/read.rs +++ b/crates/rpc/rpc-server/src/starknet/read.rs @@ -82,7 +82,7 @@ where } async fn block_hash_and_number(&self) -> RpcResult { - Ok(self.get_block_hash_and_number().await?) + Ok(self.block_hash_and_number()?) } async fn get_block_with_tx_hashes(