diff --git a/Cargo.lock b/Cargo.lock index 6fe1d51456..d43c53c040 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1145,7 +1145,7 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "basilisk" -version = "19.1.0" +version = "20.0.0" dependencies = [ "basilisk-runtime", "clap", @@ -1177,7 +1177,7 @@ dependencies = [ "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-service", - "primitives 6.7.0", + "primitives 6.7.1", "sc-basic-authorship", "sc-block-builder", "sc-chain-spec", @@ -1258,7 +1258,7 @@ dependencies = [ "polkadot-parachain-primitives", "pretty_assertions", "primitive-types 0.13.1", - "primitives 6.7.0", + "primitives 6.7.1", "scale-info", "sp-core", "sp-io", @@ -1289,7 +1289,7 @@ dependencies = [ [[package]] name = "basilisk-runtime" -version = "129.0.0" +version = "130.0.0" dependencies = [ "basilisk-adapters", "basilisk-math", @@ -1372,7 +1372,7 @@ dependencies = [ "polkadot-parachain-primitives", "polkadot-runtime-common", "primitive-types 0.13.1", - "primitives 6.7.0", + "primitives 6.7.1", "scale-info", "serde", "smallvec", @@ -8638,7 +8638,7 @@ dependencies = [ "pallet-uniques", "parity-scale-codec", "pretty_assertions", - "primitives 6.7.0", + "primitives 6.7.1", "scale-info", "serde", "sp-arithmetic", @@ -9060,7 +9060,7 @@ dependencies = [ "pallet-currencies", "parity-scale-codec", "pretty_assertions", - "primitives 6.7.0", + "primitives 6.7.1", "scale-info", "serde", "sp-core", @@ -9326,7 +9326,7 @@ dependencies = [ "pallet-utility", "pallet-xyk", "parity-scale-codec", - "primitives 6.7.0", + "primitives 6.7.1", "scale-info", "sp-api", "sp-core", @@ -9572,7 +9572,7 @@ dependencies = [ "pallet-xyk", "parity-scale-codec", "pretty_assertions", - "primitives 6.7.0", + "primitives 6.7.1", "scale-info", "sp-arithmetic", "sp-core", @@ -9601,7 +9601,7 @@ dependencies = [ "pallet-xyk", "pallet-xyk-liquidity-mining", "parity-scale-codec", - "primitives 6.7.0", + "primitives 6.7.1", "scale-info", 
"sp-arithmetic", "sp-core", @@ -11361,7 +11361,7 @@ dependencies = [ [[package]] name = "primitives" -version = "6.7.0" +version = "6.7.1" dependencies = [ "frame-support", "parity-scale-codec", @@ -12380,7 +12380,7 @@ dependencies = [ "polkadot-runtime-parachains", "polkadot-service", "pretty_assertions", - "primitives 6.7.0", + "primitives 6.7.1", "rococo-runtime", "sc-consensus-grandpa", "sp-api", diff --git a/README.md b/README.md index afd51c605b..a7388f22b1 100644 --- a/README.md +++ b/README.md @@ -36,9 +36,12 @@ Prerequisites: * Grab `zombienet` utility used to start network from [releases](https://github.com/paritytech/zombienet/releases) -``` +```bash cd ./launch-configs/zombienet zombienet spawn local.json + +// Enable 2s blocktime +node scripts/assign_cores.js ``` ### Interaction with the node diff --git a/launch-configs/zombienet/local.json b/launch-configs/zombienet/local.json index a784b3d66d..16fadfc6e4 100644 --- a/launch-configs/zombienet/local.json +++ b/launch-configs/zombienet/local.json @@ -15,8 +15,11 @@ "patch": { "configuration": { "config": { + "scheduler_params": { + "num_cores": 3 + }, "async_backing_params": { - "max_candidate_depth": 3, + "max_candidate_depth": 6, "allowed_ancestry_len": 2 } } @@ -28,12 +31,23 @@ { "name": "alice", "ws_port": 9944, + "rpc_port": 9945, "validator": true }, { "name": "bob", "ws_port": 9955, "validator": true + }, + { + "name": "charlie", + "ws_port": 9966, + "validator": true + }, + { + "name": "dave", + "ws_port": 9977, + "validator": true } ] }, diff --git a/node/Cargo.toml b/node/Cargo.toml index 6e3945a1a1..749c2ffa54 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "basilisk" -version = "19.1.0" +version = "20.0.0" description = "Basilisk node" authors = ["GalacticCouncil"] edition = "2021" diff --git a/node/src/service.rs b/node/src/service.rs index 0f6e2201a9..ec2f624482 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -31,6 +31,7 @@ use 
basilisk_runtime::{ // Cumulus Imports use cumulus_client_collator::service::CollatorService; +use cumulus_client_consensus_aura::collators::slot_based::{SlotBasedBlockImport, SlotBasedBlockImportHandle}; use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport; use cumulus_client_consensus_proposer::Proposer; use cumulus_client_service::{ @@ -65,7 +66,8 @@ type ParachainClient = TFullClient< type ParachainBackend = TFullBackend; -type ParachainBlockImport = TParachainBlockImport, ParachainBackend>; +type ParachainBlockImport = + TParachainBlockImport, ParachainClient>, ParachainBackend>; /// Starts a `ServiceBuilder` for a full service. /// @@ -80,7 +82,12 @@ pub fn new_partial( (), sc_consensus::DefaultImportQueue, sc_transaction_pool::TransactionPoolHandle, - (ParachainBlockImport, Option, Option), + ( + ParachainBlockImport, + SlotBasedBlockImportHandle, + Option, + Option, + ), >, sc_service::Error, > { @@ -138,7 +145,8 @@ pub fn new_partial( .build(), ); - let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); + let (slot_based_block_import, block_import_handle) = SlotBasedBlockImport::new(client.clone(), client.clone()); + let block_import = ParachainBlockImport::new(slot_based_block_import, backend.clone()); let import_queue = build_import_queue( client.clone(), @@ -156,7 +164,7 @@ pub fn new_partial( task_manager, transaction_pool, select_chain: (), - other: (block_import, telemetry, telemetry_worker_handle), + other: (block_import, block_import_handle, telemetry, telemetry_worker_handle), }) } @@ -174,7 +182,7 @@ async fn start_node_impl( let parachain_config = prepare_node_config(parachain_config); let params = new_partial(¶chain_config)?; - let (block_import, mut telemetry, telemetry_worker_handle) = params.other; + let (block_import, block_import_handle, mut telemetry, telemetry_worker_handle) = params.other; let prometheus_registry = parachain_config.prometheus_registry().cloned(); let net_config = 
sc_network::config::FullNetworkConfiguration::<_, _, sc_network::NetworkWorker>::new( @@ -329,6 +337,7 @@ async fn start_node_impl( client.clone(), backend.clone(), block_import, + block_import_handle, prometheus_registry.as_ref(), telemetry.as_ref().map(|t| t.handle()), &task_manager, @@ -379,6 +388,7 @@ fn start_consensus( client: Arc, backend: Arc, block_import: ParachainBlockImport, + block_import_handle: SlotBasedBlockImportHandle, prometheus_registry: Option<&Registry>, telemetry: Option, task_manager: &TaskManager, @@ -388,13 +398,10 @@ fn start_consensus( relay_chain_slot_duration: Duration, para_id: ParaId, collator_key: CollatorPair, - overseer_handle: OverseerHandle, + _overseer_handle: OverseerHandle, announce_block: Arc>) + Send + Sync>, ) -> Result<(), sc_service::Error> { - use cumulus_client_consensus_aura::collators::lookahead::{self as aura, Params as AuraParams}; - - // NOTE: because we use Aura here explicitly, we can use `CollatorSybilResistance::Resistant` - // when starting the network. 
+ use cumulus_client_consensus_aura::collators::slot_based::{self as slot_based, Params as SlotBasedParams}; let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( task_manager.spawn_handle(), @@ -413,27 +420,35 @@ fn start_consensus( client.clone(), ); - let params = AuraParams { + let client_for_aura = client.clone(); + let params = SlotBasedParams { create_inherent_data_providers: move |_, ()| async move { Ok(()) }, block_import, para_client: client.clone(), para_backend: backend.clone(), relay_client: relay_chain_interface, - code_hash_provider: move |block_hash| client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()), + code_hash_provider: move |block_hash| { + client_for_aura + .code_at(block_hash) + .ok() + .map(|c| ValidationCode::from(c).hash()) + }, keystore, collator_key, para_id, - overseer_handle, - relay_chain_slot_duration, proposer, collator_service, - authoring_duration: Duration::from_millis(1500), + authoring_duration: Duration::from_millis(2000), reinitialize: false, - max_pov_percentage: None, // Defaults to 85% of max PoV size (safe default) + slot_offset: Duration::from_secs(1), + block_import_handle, + spawner: task_manager.spawn_handle(), + relay_chain_slot_duration, + export_pov: None, + max_pov_percentage: None, }; - let fut = aura::run::(params); - task_manager.spawn_essential_handle().spawn("aura", None, fut); + slot_based::run::(params); Ok(()) } diff --git a/primitives/Cargo.toml b/primitives/Cargo.toml index 023a509d44..a664a1afc2 100644 --- a/primitives/Cargo.toml +++ b/primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "primitives" -version = "6.7.0" +version = "6.7.1" authors = ["GalacticCouncil"] edition = "2021" repository = "https://github.com/galacticcouncil/Basilisk-node" diff --git a/primitives/src/constants.rs b/primitives/src/constants.rs index c2d60f79f5..942e65c6ab 100644 --- a/primitives/src/constants.rs +++ b/primitives/src/constants.rs @@ -44,8 +44,12 @@ pub mod time { 
/// up by `pallet_aura` to implement `fn slot_duration()`. /// /// Change this to adjust the block time. - pub const MILLISECS_PER_BLOCK: u64 = 6000; - pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; + pub const MILLISECS_PER_BLOCK: u64 = 2000; + + // The slot duration determines the length of each author's turn and is decoupled from the block + // production interval. During their slot, authors are allowed to produce multiple blocks. **The + // slot duration is required to be at least 6s (same as on the relay chain).** + pub const SLOT_DURATION: u64 = 6000; // Time is measured by number of blocks. pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); @@ -75,9 +79,9 @@ pub mod chain { /// Minimum pool liquidity pub const MIN_POOL_LIQUIDITY: Balance = 1000; - /// We allow for 2 seconds of compute with a 6 second average block. + /// We allow for 1.5 seconds of compute with a 2 second average block. pub const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts( - WEIGHT_REF_TIME_PER_SECOND.saturating_mul(2), + WEIGHT_REF_TIME_PER_SECOND.saturating_mul(3).saturating_div(2), polkadot_primitives::v8::MAX_POV_SIZE as u64, ); @@ -96,10 +100,10 @@ mod tests { assert_eq!(DAYS / 24, HOURS); // 60 minuts in an hour assert_eq!(HOURS / 60, MINUTES); - // 1 minute = 60s = 10 blocks 6s each - assert_eq!(MINUTES, 10); - // 6s per block - assert_eq!(SECS_PER_BLOCK, 6); + // 1 minute = 60s = 30 blocks 2s each + assert_eq!(MINUTES, 30); + // 2s per block + assert_eq!(SECS_PER_BLOCK, 2); // 1s = 1000ms assert_eq!(MILLISECS_PER_BLOCK / 1000, SECS_PER_BLOCK); // Extra check for epoch time because changing it bricks the block production and requires regenesis diff --git a/runtime/basilisk/Cargo.toml b/runtime/basilisk/Cargo.toml index 8e9cfe1a58..c23e74535b 100644 --- a/runtime/basilisk/Cargo.toml +++ b/runtime/basilisk/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "basilisk-runtime" -version = "129.0.0" +version = "130.0.0" authors = ["GalacticCouncil"] edition
= "2021" homepage = "https://github.com/galacticcouncil/Basilisk-node" diff --git a/runtime/basilisk/src/apis.rs b/runtime/basilisk/src/apis.rs index b2688c03b2..b08b680fba 100644 --- a/runtime/basilisk/src/apis.rs +++ b/runtime/basilisk/src/apis.rs @@ -290,6 +290,21 @@ impl_runtime_apis! { } } + impl cumulus_primitives_core::RelayParentOffsetApi for Runtime { + fn relay_parent_offset() -> u32 { + RELAY_PARENT_OFFSET + } + } + + impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { + fn core_selector() -> ( + cumulus_primitives_core::CoreSelector, + cumulus_primitives_core::ClaimQueueOffset, + ) { + ParachainSystem::core_selector() + } + } + #[cfg(feature = "runtime-benchmarks")] impl frame_benchmarking::Benchmark for Runtime { fn benchmark_metadata(extra: bool) -> ( diff --git a/runtime/basilisk/src/lib.rs b/runtime/basilisk/src/lib.rs index cfd66cbd6d..941f359f6d 100644 --- a/runtime/basilisk/src/lib.rs +++ b/runtime/basilisk/src/lib.rs @@ -103,7 +103,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: Cow::Borrowed("basilisk"), impl_name: Cow::Borrowed("basilisk"), authoring_version: 1, - spec_version: 129, + spec_version: 130, impl_version: 0, apis: apis::RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/runtime/basilisk/src/system.rs b/runtime/basilisk/src/system.rs index 15a8d102fc..b76fd2ce6b 100644 --- a/runtime/basilisk/src/system.rs +++ b/runtime/basilisk/src/system.rs @@ -57,12 +57,14 @@ pub const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_perthousand(25); /// by Operational extrinsics. pub const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -/// Maximum number of blocks simultaneously accepted by the Runtime, not yet included into the -/// relay chain. -pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3; +/// Build with an offset of 1 behind the relay chain best block. +pub const RELAY_PARENT_OFFSET: u32 = 0; /// How many parachain blocks are processed by the relay chain per parent. 
Limits the number of /// blocks authored per slot. -pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; +pub const BLOCK_PROCESSING_VELOCITY: u32 = 3; +/// Maximum number of blocks simultaneously accepted by the Runtime, not yet included into the +/// relay chain. +pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = (3 + RELAY_PARENT_OFFSET) * BLOCK_PROCESSING_VELOCITY; /// Relay chain slot duration, in milliseconds. pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; @@ -481,7 +483,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ConsensusHook = ConsensusHook; type WeightInfo = weights::cumulus_pallet_parachain_system::BasiliskWeight; type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; - type RelayParentOffset = ConstU32<0>; + type RelayParentOffset = ConstU32; } pub type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< @@ -595,7 +597,7 @@ impl pallet_transaction_pause::Config for Runtime { } parameter_types! { - pub const RewardPerCollator: Balance = 15_240_000_000_000_000; // 6.35[BSX/block] * 2400[block] + pub const RewardPerCollator: Balance = 15_240_000_000_000_000; // fixed reward per session //GalacticCouncil collators pub ExcludedCollators: Vec = vec![ // bXn5CfJB2qHvqnuMqTpXn6un9Fjch8mwkb9i3JUsGVD4ChLoe diff --git a/scripts/assign_cores.js b/scripts/assign_cores.js new file mode 100644 index 0000000000..63c18635c3 --- /dev/null +++ b/scripts/assign_cores.js @@ -0,0 +1,165 @@ +#!/usr/bin/env node + +let ApiPromise; +let WsProvider; +let Keyring; +let cryptoWaitReady; + +try { + ({ ApiPromise, WsProvider, Keyring } = require("@polkadot/api")); + ({ cryptoWaitReady } = require("@polkadot/util-crypto")); +} catch (error) { + console.error( + "Missing JS deps. 
Run without installing into the repo via:\n" + + "npm exec --yes --package=@polkadot/api --package=@polkadot/util-crypto -- node scripts/assign_cores.js", + ); + process.exit(1); +} + +function parseArgs(argv) { + const defaults = { + ws: "ws://127.0.0.1:9945", + suri: "//Alice", + paraId: 2090, + cores: [0, 1, 2], + begin: 0, + finalized: true, + }; + + for (let i = 0; i < argv.length; i += 1) { + const arg = argv[i]; + if (arg === "--ws") { + defaults.ws = argv[++i]; + } else if (arg === "--suri") { + defaults.suri = argv[++i]; + } else if (arg === "--para") { + defaults.paraId = Number(argv[++i]); + } else if (arg === "--cores") { + defaults.cores = argv[++i] + .split(",") + .filter(Boolean) + .map((value) => Number(value.trim())); + } else if (arg === "--begin") { + defaults.begin = Number(argv[++i]); + } else if (arg === "--in-block") { + defaults.finalized = false; + } else if (arg === "--help" || arg === "-h") { + printHelp(); + process.exit(0); + } else { + throw new Error(`Unknown argument: ${arg}`); + } + } + + if (!defaults.cores.length || defaults.cores.some((core) => Number.isNaN(core))) { + throw new Error("Expected --cores to contain a comma-separated list of integers"); + } + + if (Number.isNaN(defaults.paraId)) { + throw new Error("Expected --para to be an integer"); + } + + if (Number.isNaN(defaults.begin)) { + throw new Error("Expected --begin to be an integer"); + } + + return defaults; +} + +function printHelp() { + console.log(`Assign relay-chain cores to a parachain on a local Zombienet relay node. 
+ +Usage: + node scripts/assign_cores.js [options] + +Options: + --ws Relay-chain websocket endpoint (default: ws://127.0.0.1:9945) + --suri Signing account SURI (default: //Alice) + --para Parachain id to assign cores to (default: 2090) + --cores Comma-separated core indexes (default: 0,1,2) + --begin Relay block number to start assignment from (default: 0) + --in-block Exit once included in a block instead of waiting for finalization + --help, -h Show this message +`); +} + +async function main() { + const { ws, suri, paraId, cores, begin, finalized } = parseArgs(process.argv.slice(2)); + + await cryptoWaitReady(); + + const provider = new WsProvider(ws); + const api = await ApiPromise.create({ provider }); + const keyring = new Keyring({ type: "sr25519" }); + const signer = keyring.addFromUri(suri); + + const calls = cores.map((core) => + api.tx.coretime.assignCore( + core, + begin, + [[{ Task: paraId }, 57600]], + null, + ), + ); + + const tx = api.tx.sudo.sudo(api.tx.utility.batch(calls)); + + console.log( + `Submitting assign_core for para ${paraId} on cores [${cores.join(", ")}] via ${ws} as ${signer.address}`, + ); + + await new Promise(async (resolve, reject) => { + let unsub = null; + + try { + unsub = await tx.signAndSend(signer, ({ status, dispatchError, events }) => { + if (dispatchError) { + if (dispatchError.isModule) { + const decoded = api.registry.findMetaError(dispatchError.asModule); + reject( + new Error( + `${decoded.section}.${decoded.name}: ${decoded.docs.join(" ")}`, + ), + ); + } else { + reject(new Error(dispatchError.toString())); + } + return; + } + + if (status.isInBlock) { + console.log(`Included at ${status.asInBlock.toHex()}`); + if (!finalized) { + if (unsub) { + unsub(); + } + resolve(); + } + } + + if (status.isFinalized) { + console.log(`Finalized at ${status.asFinalized.toHex()}`); + for (const { event } of events) { + console.log(`Event: ${event.section}.${event.method}`); + } + if (unsub) { + unsub(); + } + resolve(); + } + 
}); + } catch (error) { + if (unsub) { + unsub(); + } + reject(error); + } + }); + + await api.disconnect(); +} + +main().catch((error) => { + console.error(error.message || error); + process.exit(1); +});