diff --git a/crates/circuits/batch-circuit/batch_exe_commit.rs b/crates/circuits/batch-circuit/batch_exe_commit.rs
index 473e04c7..b0cc8b8b 100644
--- a/crates/circuits/batch-circuit/batch_exe_commit.rs
+++ b/crates/circuits/batch-circuit/batch_exe_commit.rs
@@ -1,4 +1,4 @@
 #![cfg_attr(rustfmt, rustfmt_skip)]
 //! Generated by crates/build-guest. DO NOT EDIT!
 
-pub const COMMIT: [u32; 8] = [178747149, 1696854377, 1815890506, 150427545, 401334246, 51510811, 264415608, 824562013];
+pub const COMMIT: [u32; 8] = [1580919329, 265989185, 1224652154, 798504231, 1475122249, 641962579, 1423168083, 1439637552];
diff --git a/crates/circuits/bundle-circuit/bundle_exe_commit.rs b/crates/circuits/bundle-circuit/bundle_exe_commit.rs
index 34ac25b1..5f6782cd 100644
--- a/crates/circuits/bundle-circuit/bundle_exe_commit.rs
+++ b/crates/circuits/bundle-circuit/bundle_exe_commit.rs
@@ -1,4 +1,4 @@
 #![cfg_attr(rustfmt, rustfmt_skip)]
 //! Generated by crates/build-guest. DO NOT EDIT!
 
-pub const COMMIT: [u32; 8] = [802766894, 615817452, 244335787, 1930953923, 1470479700, 1992640945, 75163847, 91379146];
+pub const COMMIT: [u32; 8] = [167178914, 1281927786, 1900739724, 1306836878, 411321851, 48725702, 1627850162, 1478515368];
diff --git a/crates/circuits/bundle-circuit/bundle_exe_euclidv1_commit.rs b/crates/circuits/bundle-circuit/bundle_exe_euclidv1_commit.rs
index df0b9431..537f146a 100644
--- a/crates/circuits/bundle-circuit/bundle_exe_euclidv1_commit.rs
+++ b/crates/circuits/bundle-circuit/bundle_exe_euclidv1_commit.rs
@@ -1,4 +1,4 @@
 #![cfg_attr(rustfmt, rustfmt_skip)]
 //! Generated by crates/build-guest. DO NOT EDIT!
 
-pub const COMMIT: [u32; 8] = [804972774, 1748855553, 1554766501, 27860072, 512941632, 1022941061, 564424735, 930491103];
+pub const COMMIT: [u32; 8] = [1770119848, 1297314873, 700982969, 649617141, 613497409, 1459003052, 6142964, 1766696387];
diff --git a/crates/circuits/chunk-circuit/chunk_exe_commit.rs b/crates/circuits/chunk-circuit/chunk_exe_commit.rs
index 58ae961a..a3732f24 100644
--- a/crates/circuits/chunk-circuit/chunk_exe_commit.rs
+++ b/crates/circuits/chunk-circuit/chunk_exe_commit.rs
@@ -1,4 +1,4 @@
 #![cfg_attr(rustfmt, rustfmt_skip)]
 //! Generated by crates/build-guest. DO NOT EDIT!
 
-pub const COMMIT: [u32; 8] = [226012526, 1982142184, 912178093, 1599974879, 113751034, 1155109745, 24934449, 952145607];
+pub const COMMIT: [u32; 8] = [1249982070, 595924974, 845141753, 1418759924, 1709800350, 356596746, 1875946370, 817863560];
diff --git a/crates/circuits/chunk-circuit/chunk_exe_rv32_commit.rs b/crates/circuits/chunk-circuit/chunk_exe_rv32_commit.rs
index c61a643e..0ce9f14d 100644
--- a/crates/circuits/chunk-circuit/chunk_exe_rv32_commit.rs
+++ b/crates/circuits/chunk-circuit/chunk_exe_rv32_commit.rs
@@ -1,4 +1,4 @@
 #![cfg_attr(rustfmt, rustfmt_skip)]
 //! Generated by crates/build-guest. DO NOT EDIT!
 
-pub const COMMIT: [u32; 8] = [409697356, 716149176, 978069094, 836659519, 1453773851, 1655648793, 1523498889, 979202098];
+pub const COMMIT: [u32; 8] = [985495823, 966783725, 1513817984, 1402759506, 1738397914, 248452579, 1197834671, 731543193];
diff --git a/crates/circuits/types/src/chunk/execute.rs b/crates/circuits/types/src/chunk/execute.rs
index 7943523c..17fad265 100644
--- a/crates/circuits/types/src/chunk/execute.rs
+++ b/crates/circuits/types/src/chunk/execute.rs
@@ -1,4 +1,13 @@
+use crate::{
+    chunk::{
+        ArchivedChunkWitness, ChunkInfo, CodeDb, ForkName, NodesProvider, make_providers,
+        public_inputs::BlockContextV2,
+    },
+    manually_drop_on_zkvm,
+};
+use alloy_primitives::B256;
 use sbv_core::{EvmDatabase, EvmExecutor};
+use sbv_kv::null::NullProvider;
 use sbv_primitives::{
     BlockWitness,
     chainspec::{
@@ -13,13 +22,7 @@ use sbv_primitives::{
             scroll::ChunkInfoBuilder,
         },
     },
 };
-
-use crate::{
-    chunk::{
-        ArchivedChunkWitness, ChunkInfo, ForkName, make_providers, public_inputs::BlockContextV2,
-    },
-    manually_drop_on_zkvm,
-};
+use std::{collections::BTreeMap, sync::Arc};
 
 type Witness = ArchivedChunkWitness;
@@ -33,6 +36,19 @@ pub fn execute(witness: &Witness) -> Result<ChunkInfo, String> {
     if !witness.blocks.has_seq_block_number() {
         return Err("All witnesses must have sequential block numbers in chunk mode".into());
     }
+
+    // Build ChainSpec
+    let fork_name = ForkName::from(&witness.fork_name);
+    let chain = Chain::from_id(witness.blocks[0].chain_id());
+    let chain_spec = build_chain_spec_cheap(chain, fork_name);
+
+    // Get prev_state_root and post_state_root
+    let prev_state_root = witness.blocks[0].pre_state_root();
+    let post_state_root = witness.blocks.last().unwrap().post_state_root();
+
+    // Initialize the providers
+    let providers = make_providers(&witness.blocks);
+
     // Get the blocks to build the basic chunk-info.
     let blocks = manually_drop_on_zkvm!(
         witness
@@ -42,11 +58,66 @@ pub fn execute(witness: &Witness) -> Result<ChunkInfo, String> {
             .collect::<Result<Vec<_>, _>>()
             .map_err(|e| e.to_string())?
     );
 
-    let pre_state_root = witness.blocks[0].pre_state_root;
-    let fork_name = ForkName::from(&witness.fork_name);
-    let chain = Chain::from_id(witness.blocks[0].chain_id());
+    let withdraw_root = if witness.batch_commit {
+        execute_inner_batched(
+            &providers,
+            &blocks,
+            chain_spec.clone(),
+            prev_state_root,
+            post_state_root,
+        )?
+    } else {
+        execute_inner_block_by_block(&providers, &blocks, chain_spec.clone(), witness)?
+    };
+
+    let mut rlp_buffer = manually_drop_on_zkvm!(Vec::with_capacity(2048));
+    let (tx_data_length, tx_data_digest): (usize, B256) = blocks
+        .iter()
+        .flat_map(|b| b.body().transactions.iter())
+        .tx_bytes_hash_in(rlp_buffer.as_mut());
+
+    let sbv_chunk_info = {
+        let mut sbv_chunk_info_builder =
+            ChunkInfoBuilder::new(&chain_spec, prev_state_root, &blocks);
+        if fork_name == ForkName::EuclidV2 {
+            sbv_chunk_info_builder.set_prev_msg_queue_hash(witness.prev_msg_queue_hash.into());
+        }
+        sbv_chunk_info_builder.build(withdraw_root)
+    };
+
+    let chunk_info = ChunkInfo {
+        chain_id: chain.id(),
+        prev_state_root,
+        post_state_root,
+        data_hash: sbv_chunk_info
+            .clone()
+            .into_legacy()
+            .map(|x| x.data_hash)
+            .unwrap_or_default(),
+        withdraw_root,
+        tx_data_digest,
+        tx_data_length: tx_data_length as u64,
+        initial_block_number: witness.blocks[0].number(),
+        prev_msg_queue_hash: witness.prev_msg_queue_hash.into(),
+        post_msg_queue_hash: sbv_chunk_info
+            .into_euclid_v2()
+            .map(|x| x.post_msg_queue_hash)
+            .unwrap_or_default(),
+        block_ctxs: blocks.iter().map(BlockContextV2::from).collect(),
+    };
+
+    openvm::io::println(format!("withdraw_root = {:?}", withdraw_root));
+    openvm::io::println(format!("tx_bytes_hash = {:?}", tx_data_digest));
+
+    // We should never touch that lazy lock... Or else we introduce 40M useless cycles.
+    assert!(std::sync::LazyLock::get(&MAINNET).is_none());
+
+    Ok(chunk_info)
+}
+#[inline(always)]
+fn build_chain_spec_cheap(chain: Chain, fork_name: ForkName) -> Arc<ScrollChainSpec> {
 
     // SCROLL_DEV_HARDFORKS will enable all forks
     let mut hardforks = (*SCROLL_DEV_HARDFORKS).clone();
     if fork_name == ForkName::EuclidV1 {
@@ -66,83 +137,98 @@ pub fn execute(witness: &Witness) -> Result<ChunkInfo, String> {
         base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()),
         prune_delete_limit: 20000,
         blob_params: Default::default(),
+        // We cannot use `..Default::default()` here,
+        // because it will trigger `MAINNET` genesis deserialization.
     };
     let config = ScrollChainConfig::mainnet();
-    let chain_spec: ScrollChainSpec = ScrollChainSpec { inner, config };
-
-    let (code_db, nodes_provider, block_hashes) = make_providers(&witness.blocks);
-    let nodes_provider = manually_drop_on_zkvm!(nodes_provider);
+    Arc::new(ScrollChainSpec { inner, config })
+}
 
-    let prev_state_root = witness.blocks[0].pre_state_root();
+#[inline(always)]
+fn execute_inner_batched(
+    (code_db, nodes_provider): &(CodeDb, NodesProvider),
+    blocks: &[RecoveredBlock<Block>],
+    chain_spec: Arc<ScrollChainSpec>,
+    prev_state_root: B256,
+    post_state_root: B256,
+) -> Result<B256, String> {
     let mut db = manually_drop_on_zkvm!(
-        EvmDatabase::new_from_root(code_db, prev_state_root, &nodes_provider, block_hashes)
+        EvmDatabase::new_from_root(code_db, prev_state_root, nodes_provider, NullProvider)
             .map_err(|e| format!("failed to create EvmDatabase: {}", e))?
     );
     for block in blocks.iter() {
         let output = manually_drop_on_zkvm!(
-            EvmExecutor::new(std::sync::Arc::new(chain_spec.clone()), &db, block)
+            EvmExecutor::new(chain_spec.clone(), &db, block)
                 .execute()
-                .map_err(|e| format!("failed to execute block: {}", e))?
+                .map_err(|e| format!("failed to execute block#{}: {}", block.number, e))?
         );
-        db.update(&nodes_provider, output.state.state.iter())
-            .map_err(|e| format!("failed to update db: {}", e))?;
+        // sort the update by key - Address in ascending order,
+        // using reference to avoid cloning [`BundleAccount`].
+        let state = manually_drop_on_zkvm!(BTreeMap::from_iter(output.state.state.iter()));
+        db.update(nodes_provider, state.iter().map(|(k, v)| (*k, *v)))
+            .map_err(|e| format!("failed to update db for block#{}: {}", block.number, e))?;
+    }
+    let db_post_state_root = db.commit_changes();
+    if post_state_root != db_post_state_root {
+        return Err(format!(
+            "state root mismatch: expected={post_state_root}, found={db_post_state_root}"
+        ));
     }
-
-    let post_state_root = db.commit_changes();
-
     let withdraw_root = db
         .withdraw_root()
         .map_err(|e| format!("failed to get withdraw root: {}", e))?;
+    Ok(withdraw_root)
+}
 
-    let mut rlp_buffer = manually_drop_on_zkvm!(Vec::with_capacity(2048));
-    let (tx_data_length, tx_data_digest) = blocks
-        .iter()
-        .flat_map(|b| b.body().transactions.iter())
-        .tx_bytes_hash_in(rlp_buffer.as_mut());
-    let _ = tx_data_length;
-
-    let sbv_chunk_info = {
-        #[allow(unused_mut)]
-        let mut builder = ChunkInfoBuilder::new(&chain_spec, pre_state_root.into(), &blocks);
-        if fork_name == ForkName::EuclidV2 {
-            builder.set_prev_msg_queue_hash(witness.prev_msg_queue_hash.into());
+#[inline(always)]
+fn execute_inner_block_by_block(
+    (code_db, nodes_provider): &(CodeDb, NodesProvider),
+    blocks: &[RecoveredBlock<Block>],
+    chain_spec: Arc<ScrollChainSpec>,
+    witness: &Witness,
+) -> Result<B256, String> {
+    let mut iter = blocks.iter().zip(witness.blocks.iter()).peekable();
+    while let Some((block, witness)) = iter.next() {
+        // We construct the merkle trie for each block, should have the same behavior as geth stateless.
+        let mut db = manually_drop_on_zkvm!(
+            EvmDatabase::new_from_root(
+                code_db,
+                witness.pre_state_root(),
+                nodes_provider,
+                NullProvider
+            )
+            .map_err(|e| format!(
+                "failed to create EvmDatabase for block#{}: {}",
+                block.number, e
+            ))?
+        );
+        let output = manually_drop_on_zkvm!(
+            EvmExecutor::new(chain_spec.clone(), &db, block)
+                .execute()
+                .map_err(|e| format!("failed to execute block#{}: {}", block.number, e))?
+        );
+        // sort the update by key - Address in ascending order,
+        // using reference to avoid cloning [`BundleAccount`].
+        let state = manually_drop_on_zkvm!(BTreeMap::from_iter(output.state.state.iter()));
+        db.update(nodes_provider, state.iter().map(|(k, v)| (*k, *v)))
+            .map_err(|e| format!("failed to update db for block#{}: {}", block.number, e))?;
+        let post_state_root = db.commit_changes();
+        // state root assertion happens for each block, instead of at the end.
+        if witness.post_state_root() != post_state_root {
+            return Err(format!(
+                "state root mismatch for block#{}: expected={}, found={}",
+                block.number,
+                witness.post_state_root(),
+                post_state_root
+            ));
+        }
+        // We reach the last block, we can return the withdraw root.
+        if iter.peek().is_none() {
+            let withdraw_root = db
+                .withdraw_root()
+                .map_err(|e| format!("failed to get withdraw root: {}", e))?;
+            return Ok(withdraw_root);
         }
-        builder.build(withdraw_root)
-    };
-    if post_state_root != sbv_chunk_info.post_state_root() {
-        return Err(format!(
-            "state root mismatch: expected={}, found={}",
-            sbv_chunk_info.post_state_root(),
-            post_state_root
-        ));
     }
-
-    let chunk_info = ChunkInfo {
-        chain_id: sbv_chunk_info.chain_id(),
-        prev_state_root: sbv_chunk_info.prev_state_root(),
-        post_state_root: sbv_chunk_info.post_state_root(),
-        data_hash: sbv_chunk_info
-            .clone()
-            .into_legacy()
-            .map(|x| x.data_hash)
-            .unwrap_or_default(),
-        withdraw_root,
-        tx_data_digest,
-        tx_data_length: u64::try_from(tx_data_length).expect("tx_data_length: u64"),
-        initial_block_number: blocks[0].header().number,
-        prev_msg_queue_hash: witness.prev_msg_queue_hash.into(),
-        post_msg_queue_hash: sbv_chunk_info
-            .into_euclid_v2()
-            .map(|x| x.post_msg_queue_hash)
-            .unwrap_or_default(),
-        block_ctxs: blocks.iter().map(BlockContextV2::from).collect(),
-    };
-
-    openvm::io::println(format!("withdraw_root = {:?}", withdraw_root));
-    openvm::io::println(format!("tx_bytes_hash = {:?}", tx_data_digest));
-
-    // We should never touch that lazy lock... Or else we introduce 40M useless cycles.
-    assert!(std::sync::LazyLock::get(&MAINNET).is_none());
-
-    Ok(chunk_info)
+    unreachable!()
 }
diff --git a/crates/circuits/types/src/chunk/mod.rs b/crates/circuits/types/src/chunk/mod.rs
index 5123dadb..fb14b46e 100644
--- a/crates/circuits/types/src/chunk/mod.rs
+++ b/crates/circuits/types/src/chunk/mod.rs
@@ -8,7 +8,7 @@
 mod utils;
 mod witness;
 
-pub use utils::make_providers;
+pub use utils::{CodeDb, NodesProvider, make_providers};
 pub use witness::{ArchivedChunkWitness, ChunkWitness};
 
 mod execute;
diff --git a/crates/circuits/types/src/chunk/utils.rs b/crates/circuits/types/src/chunk/utils.rs
index 123d8c2d..3ad81216 100644
--- a/crates/circuits/types/src/chunk/utils.rs
+++ b/crates/circuits/types/src/chunk/utils.rs
@@ -1,32 +1,34 @@
+use crate::manually_drop_on_zkvm;
 use sbv_kv::nohash::NoHashMap;
 use sbv_primitives::{B256, BlockWitness, Bytes, ext::BlockWitnessExt};
 use sbv_trie::{BlockWitnessTrieExt, TrieNode};
 
-type CodeDb = NoHashMap<B256, Bytes>;
+#[cfg(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64"))]
+pub type CodeDb = NoHashMap<B256, Bytes>;
+#[cfg(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64"))]
+pub type NodesProvider = NoHashMap<B256, TrieNode>;
 
-type NodesProvider = NoHashMap<B256, TrieNode>;
+#[cfg(not(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64")))]
+pub type CodeDb = std::mem::ManuallyDrop<NoHashMap<B256, Bytes>>;
+#[cfg(not(any(target_arch = "x86", target_arch = "x86_64", target_arch = "aarch64")))]
+pub type NodesProvider = std::mem::ManuallyDrop<NoHashMap<B256, TrieNode>>;
 
-type BlockHashProvider = sbv_kv::null::NullProvider;
-
-pub fn make_providers<W: BlockWitness>(
-    witnesses: &[W],
-) -> (CodeDb, NodesProvider, BlockHashProvider) {
-    let code_db = {
+pub fn make_providers<W: BlockWitness>(witnesses: &[W]) -> (CodeDb, NodesProvider) {
+    let code_db = manually_drop_on_zkvm!({
         // build code db
         let num_codes = witnesses.iter().map(|w| w.codes_iter().len()).sum();
         let mut code_db =
             NoHashMap::<B256, Bytes>::with_capacity_and_hasher(num_codes, Default::default());
         witnesses.import_codes(&mut code_db);
         code_db
-    };
-    let nodes_provider = {
+    });
+    let nodes_provider = manually_drop_on_zkvm!({
         let num_states = witnesses.iter().map(|w| w.states_iter().len()).sum();
         let mut nodes_provider =
             NoHashMap::<B256, TrieNode>::with_capacity_and_hasher(num_states, Default::default());
         witnesses.import_nodes(&mut nodes_provider).unwrap();
         nodes_provider
-    };
-    let block_hashes = sbv_kv::null::NullProvider;
+    });
 
-    (code_db, nodes_provider, block_hashes)
+    (code_db, nodes_provider)
 }
diff --git a/crates/circuits/types/src/chunk/witness.rs b/crates/circuits/types/src/chunk/witness.rs
index 72d375eb..71b4f33c 100644
--- a/crates/circuits/types/src/chunk/witness.rs
+++ b/crates/circuits/types/src/chunk/witness.rs
@@ -22,6 +22,8 @@ pub struct ChunkWitness {
     pub prev_msg_queue_hash: B256,
     /// The code version specify the chain spec
     pub fork_name: ForkName,
+    /// If `batch_commit` is true, we commit after all the blocks in the chunk.
+    pub batch_commit: bool,
 }
 
 impl ChunkWitness {
@@ -58,6 +60,7 @@ impl ChunkWitness {
             blocks,
             prev_msg_queue_hash,
             fork_name,
+            batch_commit: true,
         }
     }
 
diff --git a/crates/prover/src/prover/chunk.rs b/crates/prover/src/prover/chunk.rs
index 82c0499c..3c72328b 100644
--- a/crates/prover/src/prover/chunk.rs
+++ b/crates/prover/src/prover/chunk.rs
@@ -1,4 +1,6 @@
-use scroll_zkvm_circuit_input_types::chunk::{ArchivedChunkWitness, ChunkWitness, execute};
+use scroll_zkvm_circuit_input_types::chunk::{
+    ArchivedChunkWitness, ChunkInfo, ChunkWitness, execute,
+};
 
 use crate::{
     Error, Prover, ProverType,
@@ -61,29 +63,36 @@ impl ProverType for GenericChunkProverType {
             )));
         }
 
-        let chunk_witness = ChunkWitness::new(
+        let mut chunk_witness = ChunkWitness::new(
             &task.block_witnesses,
             task.prev_msg_queue_hash,
             task.fork_name.as_str().into(),
         );
-        let serialized = rkyv::to_bytes::<rkyv::rancor::Error>(&chunk_witness).map_err(|e| {
-            Error::GenProof(format!(
-                "{}: failed to serialize chunk witness: {}",
-                err_prefix, e
-            ))
-        })?;
-        let chunk_witness = rkyv::access::<ArchivedChunkWitness, rkyv::rancor::Error>(
-            &serialized,
-        )
-        .map_err(|e| {
-            Error::GenProof(format!(
-                "{}: rkyv deserialisation of chunk witness bytes failed: {}",
-                err_prefix, e
-            ))
-        })?;
-
-        let chunk_info = execute(chunk_witness)
-            .map_err(|e| Error::GenProof(format!("{}: {}", err_prefix, e)))?;
+
+        let execute_serialized = |chunk_witness: &ChunkWitness| -> Result<ChunkInfo, Error> {
+            let serialized = rkyv::to_bytes::<rkyv::rancor::Error>(chunk_witness).map_err(|e| {
+                Error::GenProof(format!(
+                    "{err_prefix}: failed to serialize chunk witness: {e}"
+                ))
+            })?;
+            let chunk_witness =
+                rkyv::access::<ArchivedChunkWitness, rkyv::rancor::Error>(&serialized)
+                    .map_err(|e| {
+                        Error::GenProof(format!(
+                            "{err_prefix}: rkyv deserialisation of chunk witness bytes failed: {e}"
+                        ))
+                    })?;
+            execute(chunk_witness).map_err(|e| Error::GenProof(format!("{err_prefix}: {e}")))
+        };
+
+        let chunk_info = match execute_serialized(&chunk_witness) {
+            Ok(chunk_info) => chunk_info,
+            Err(e) => {
+                tracing::warn!("{e}, disable batch commit and retry");
+                chunk_witness.batch_commit = false;
+                execute_serialized(&chunk_witness)?
+            }
+        };
 
         Ok(ChunkProofMetadata { chunk_info })
     }
diff --git a/crates/prover/src/task/chunk.rs b/crates/prover/src/task/chunk.rs
index e9780d5c..f1d2b34d 100644
--- a/crates/prover/src/task/chunk.rs
+++ b/crates/prover/src/task/chunk.rs
@@ -71,16 +71,12 @@ impl ProvingTask for ChunkProvingTask {
         format!("{first}-{last}")
     }
 
-    fn fork_name(&self) -> ForkName {
-        ForkName::from(self.fork_name.as_str())
-    }
-
     fn build_guest_input(&self) -> Result<StdIn, rkyv::rancor::Error> {
-        let witness = ChunkWitness {
-            blocks: self.block_witnesses.to_vec(),
-            prev_msg_queue_hash: self.prev_msg_queue_hash,
-            fork_name: self.fork_name.to_lowercase().as_str().into(),
-        };
+        let witness = ChunkWitness::new(
+            &self.block_witnesses,
+            self.prev_msg_queue_hash,
+            self.fork_name.to_lowercase().as_str().into(),
+        );
 
         let serialized = rkyv::to_bytes::<rkyv::rancor::Error>(&witness)?;
 
@@ -88,4 +84,8 @@ impl ProvingTask for ChunkProvingTask {
         stdin.write_bytes(&serialized);
         Ok(stdin)
     }
+
+    fn fork_name(&self) -> ForkName {
+        ForkName::from(self.fork_name.as_str())
+    }
 }
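
Note on the change in crates/prover/src/prover/chunk.rs: the prover now tries the batched commit first and, if guest execution fails, flips `batch_commit` off and retries block-by-block. The sketch below is only an illustration of that retry pattern in plain std Rust; `DemoWitness` and `demo_execute` are hypothetical stand-ins, not items from this repository.

```rust
#[derive(Debug)]
struct DemoWitness {
    /// Mirrors ChunkWitness::batch_commit: commit once after all blocks when true.
    batch_commit: bool,
}

/// Hypothetical stand-in for the guest-side execute(): here the batched path
/// is made to fail so the fallback branch is exercised.
fn demo_execute(witness: &DemoWitness) -> Result<String, String> {
    if witness.batch_commit {
        Err("state root mismatch".into())
    } else {
        Ok("chunk-info".into())
    }
}

fn main() -> Result<(), String> {
    let mut witness = DemoWitness { batch_commit: true };
    let chunk_info = match demo_execute(&witness) {
        Ok(info) => info,
        Err(e) => {
            // Same shape as the prover change: warn, disable batch commit, retry once.
            eprintln!("{e}, disable batch commit and retry");
            witness.batch_commit = false;
            demo_execute(&witness)?
        }
    };
    println!("chunk_info = {chunk_info:?}");
    Ok(())
}
```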