Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
647 changes: 308 additions & 339 deletions Cargo.lock

Large diffs are not rendered by default.

6 changes: 4 additions & 2 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,8 @@ starknet_api = { git = "https://github.com/karnotxyz/sequencer", rev = "e04617e0
anyhow = { version = "1.0.95", default-features = false }
bigdecimal = { version = "0.3.1", default-features = false }
cairo-vm = "=2.5.0"
celestia-rpc = { version = "0.8.0", default-features = false }
celestia-types = { version = "0.9.0", default-features = false }
celestia-rpc = { version = "0.16.2", default-features = false }
celestia-types = { version = "0.20.0", default-features = false }
ciborium = { version = "0.2.2", default-features = false }
clap = { version = "4.5.23", default-features = false, features = ["derive", "env", "std"] }
env_logger = {version = "0.11.6",features = ["unstable-kv"]}
Expand Down Expand Up @@ -54,3 +54,5 @@ tokio-util = { version = "0.7.13", default-features = false }
url = { version = "2.5.4", default-features = false }
zip = { version = "2.2.2", default-features = false, features = ["deflate"] }
sqlx = { version = "0.8.2", features = [ "chrono", "macros", "regexp", "runtime-async-std", "runtime-tokio", "sqlite", "uuid" ] }
piltover = {package = "piltover", git = "https://github.com/cartridge-gg/piltover.git", rev = "ab34aaa" }
cainome = { version = "0.10.1", features = ["abigen-rs"] }
87 changes: 87 additions & 0 deletions bin/saya/src/any.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,12 @@
use anyhow::Result;
use saya_core::{
block_ingestor::BlockInfo,
data_availability::{
CelestiaDataAvailabilityBackend, CelestiaDataAvailabilityBackendBuilder,
DataAvailabilityBackend, DataAvailabilityBackendBuilder, DataAvailabilityCursor,
DataAvailabilityPayload, DataAvailabilityPointer, NoopDataAvailabilityBackend,
NoopDataAvailabilityBackendBuilder,
},
prover::{
AtlanticLayoutBridgeProver, AtlanticLayoutBridgeProverBuilder, MockLayoutBridgeProver,
MockLayoutBridgeProverBuilder, Prover, ProverBuilder, SnosProof,
Expand All @@ -22,6 +28,87 @@ pub enum AnyLayoutBridgeProverBuilder<DB> {
Mock(MockLayoutBridgeProverBuilder<DB>),
}

/// Runtime-selected data-availability backend: either a real Celestia
/// backend or a no-op backend (see the builder selection in
/// `persistent.rs`, where Celestia is chosen only when both an RPC URL
/// and an auth token are configured).
// NOTE(review): the Celestia variant is boxed — presumably to keep the
// enum itself small, since the Celestia backend carries client/config
// state; confirm against `CelestiaDataAvailabilityBackend`'s size.
#[derive(Debug)]
pub enum AnyDataAvailabilityLayer<P> {
    /// Publishes payloads to a Celestia node.
    Celestia(Box<CelestiaDataAvailabilityBackend<P>>),
    /// Discards payloads; used when no DA layer is configured.
    Noop(NoopDataAvailabilityBackend<P>),
}

/// Builder counterpart of [`AnyDataAvailabilityLayer`]; each variant wraps
/// the concrete builder and `build()` produces the matching backend variant.
#[derive(Debug)]
pub enum AnyDataAvailabilityLayerBuilder<P> {
    /// Builds a Celestia-backed DA layer (boxed, mirroring the backend enum).
    Celestia(Box<CelestiaDataAvailabilityBackendBuilder<P>>),
    /// Builds the no-op DA layer.
    Noop(NoopDataAvailabilityBackendBuilder<P>),
}

/// Both variants carry the same payload type `P`, so the combined backend's
/// payload type is simply `P`. No methods are overridden here; the trait's
/// behavior comes from the `Daemon` impl below plus this associated type.
impl<P> DataAvailabilityBackend for AnyDataAvailabilityLayer<P>
where
    P: DataAvailabilityPayload + 'static,
{
    type Payload = P;
}

/// Daemon lifecycle for the combined DA layer: every call fans out to
/// whichever concrete backend this enum currently wraps.
impl<P> Daemon for AnyDataAvailabilityLayer<P>
where
    P: DataAvailabilityPayload + 'static,
{
    /// Obtain a handle that can be used to request shutdown of the
    /// underlying backend.
    fn shutdown_handle(&self) -> ShutdownHandle {
        match self {
            Self::Celestia(backend) => backend.shutdown_handle(),
            Self::Noop(backend) => backend.shutdown_handle(),
        }
    }

    /// Consume `self` and start the underlying backend's run loop.
    fn start(self) {
        match self {
            Self::Celestia(backend) => backend.start(),
            Self::Noop(backend) => backend.start(),
        }
    }
}

/// Builder plumbing for the combined DA layer. Every method delegates to
/// the wrapped concrete builder and re-wraps the result in the same enum
/// variant, so variant choice is preserved through the whole builder chain.
impl<P> DataAvailabilityBackendBuilder for AnyDataAvailabilityLayerBuilder<P>
where
    P: DataAvailabilityPayload + 'static,
{
    type Backend = AnyDataAvailabilityLayer<P>;

    /// Finalize the builder into the matching backend variant. Errors from
    /// the underlying builder are propagated unchanged.
    fn build(self) -> Result<Self::Backend> {
        match self {
            Self::Celestia(builder) => {
                Ok(AnyDataAvailabilityLayer::Celestia(Box::new(builder.build()?)))
            }
            Self::Noop(builder) => Ok(AnyDataAvailabilityLayer::Noop(builder.build()?)),
        }
    }

    /// Seed the builder with the last known DA pointer (if any).
    fn last_pointer(self, last_pointer: Option<DataAvailabilityPointer>) -> Self {
        match self {
            Self::Celestia(builder) => {
                Self::Celestia(Box::new(builder.last_pointer(last_pointer)))
            }
            Self::Noop(builder) => Self::Noop(builder.last_pointer(last_pointer)),
        }
    }

    /// Attach the channel on which proofs to be published are received.
    fn proof_channel(
        self,
        proof_channel: Receiver<<Self::Backend as DataAvailabilityBackend>::Payload>,
    ) -> Self {
        match self {
            Self::Celestia(builder) => {
                Self::Celestia(Box::new(builder.proof_channel(proof_channel)))
            }
            Self::Noop(builder) => Self::Noop(builder.proof_channel(proof_channel)),
        }
    }

    /// Attach the channel on which publication cursors are emitted.
    fn cursor_channel(
        self,
        cursor_channel: Sender<
            DataAvailabilityCursor<<Self::Backend as DataAvailabilityBackend>::Payload>,
        >,
    ) -> Self {
        match self {
            Self::Celestia(builder) => {
                Self::Celestia(Box::new(builder.cursor_channel(cursor_channel)))
            }
            Self::Noop(builder) => Self::Noop(builder.cursor_channel(cursor_channel)),
        }
    }
}

impl<DB> Prover for AnyLayoutBridgeProver<DB>
where
DB: PersistantStorage + Send + Sync + Clone + 'static,
Expand Down
46 changes: 43 additions & 3 deletions bin/saya/src/persistent.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,9 @@ use anyhow::Result;
use clap::{Parser, Subcommand};
use saya_core::{
block_ingestor::PollingBlockIngestorBuilder,
data_availability::NoopDataAvailabilityBackendBuilder,
data_availability::{
CelestiaDataAvailabilityBackendBuilder, NoopDataAvailabilityBackendBuilder,
},
orchestrator::PersistentOrchestratorBuilder,
prover::{
AtlanticLayoutBridgeProverBuilder, AtlanticSnosProverBuilder,
Expand All @@ -23,8 +25,9 @@ use starknet_types_core::felt::Felt;
use url::Url;

use crate::{
any::AnyLayoutBridgeProverBuilder,
any::{AnyDataAvailabilityLayerBuilder, AnyLayoutBridgeProverBuilder},
common::{calculate_workers_per_stage, NUMBER_OF_STAGES, SAYA_DB_PATH},
sovereign::validate_non_empty,
};

/// 10 seconds.
Expand Down Expand Up @@ -82,6 +85,27 @@ struct Start {
/// Configuration for OS pie generation
#[clap(flatten)]
hints: HintsConfiguration,
/// Celestia configuration
#[clap(flatten)]
celestia: CelestiaConfiguration,
}

// CLI flag group for the optional Celestia data-availability layer.
// NOTE(review): the Celestia backend is only enabled when BOTH
// `celestia_rpc` and `celestia_token` are provided (see the selection in
// `Start::run` below); `celestia_key_name` and `celestia_namespace` are
// ignored otherwise.
// The `///` doc comments below double as `clap` help text, so they are
// left untouched here.
#[derive(Debug, Parser, Clone)]
struct CelestiaConfiguration {
    /// Celestia RPC endpoint URL
    #[clap(long, env)]
    celestia_rpc: Option<Url>,
    /// Celestia RPC node auth token
    #[clap(long, env)]
    celestia_token: Option<String>,
    /// Celestia key name
    #[clap(long, env)]
    celestia_key_name: Option<String>,
    /// Celestia namespace
    // Defaults to "sayaproofs"; rejected at parse time if empty/whitespace
    // via `validate_non_empty` (re-exported from `sovereign.rs`).
    #[clap(long, env)]
    #[clap(default_value = "sayaproofs")]
    #[clap(value_parser = validate_non_empty)]
    celestia_namespace: String,
}

#[derive(Debug, Parser, Clone)]
Expand Down Expand Up @@ -185,7 +209,23 @@ impl Start {
),
layout_bridge_prover_builder,
);
let da_builder = NoopDataAvailabilityBackendBuilder::new();

let da_builder = if let (Some(celestia_rpc), Some(celestia_token)) =
(self.celestia.celestia_rpc, self.celestia.celestia_token)
{
AnyDataAvailabilityLayerBuilder::Celestia(Box::new(
CelestiaDataAvailabilityBackendBuilder::new(
celestia_rpc,
celestia_token,
self.celestia.celestia_namespace,
self.celestia.celestia_key_name,
)
.unwrap(),
))
} else {
AnyDataAvailabilityLayerBuilder::Noop(NoopDataAvailabilityBackendBuilder::new())
};

let settlement_builder = PiltoverSettlementBackendBuilder::new(
self.settlement_rpc,
self.settlement_piltover_address,
Expand Down
23 changes: 13 additions & 10 deletions bin/saya/src/sovereign.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use std::{io::Read, path::PathBuf, time::Duration};
use std::{path::PathBuf, time::Duration};

use anyhow::Result;
use clap::{Parser, Subcommand};
Expand All @@ -11,6 +11,10 @@ use saya_core::{
storage::{InMemoryStorageBackend, SqliteDb},
ChainId, OsHintsConfiguration,
};
use starknet::{
core::utils::parse_cairo_short_string,
providers::{jsonrpc::HttpTransport, JsonRpcClient, Provider},
};
use url::Url;

use crate::common::{calculate_workers_per_stage, SAYA_DB_PATH};
Expand All @@ -35,9 +39,6 @@ struct Start {
/// Starknet JSON-RPC URL (v0.7.1)
#[clap(long, env)]
starknet_rpc: Url,
/// Path to the compiled Starknet OS program
#[clap(long, env)]
snos_program: PathBuf,
/// Whether to mock the SNOS proof by extracting the output from the PIE and using it from a proof.
#[clap(long)]
mock_snos_from_pie: bool,
Expand Down Expand Up @@ -70,7 +71,7 @@ struct Start {
}

/// Validate that the value is not empty.
fn validate_non_empty(s: &str) -> Result<String, String> {
pub fn validate_non_empty(s: &str) -> Result<String, String> {
if s.trim().is_empty() {
Err("Value cannot be empty".to_string())
} else {
Expand All @@ -97,10 +98,6 @@ impl Sovereign {

impl Start {
pub async fn run(self) -> Result<()> {
let mut snos_file = std::fs::File::open(self.snos_program)?;
let mut snos = Vec::with_capacity(snos_file.metadata()?.len() as usize);
snos_file.read_to_end(&mut snos)?;

let saya_path = self
.db_dir
.map(|db_dir| format!("{}/{}", db_dir.display(), SAYA_DB_PATH))
Expand All @@ -112,6 +109,12 @@ impl Start {
let [snos_worker_count, _layout_bridge_workers_count, ingestor_worker_count] =
workers_distribution;

let chain_id = parse_cairo_short_string(
&JsonRpcClient::new(HttpTransport::new(self.starknet_rpc.clone()))
.chain_id()
.await?,
)?;

let block_ingestor_builder = PollingBlockIngestorBuilder::new(
self.starknet_rpc,
db.clone(),
Expand All @@ -121,7 +124,7 @@ impl Start {
full_output: false,
use_kzg_da: false,
},
ChainId::Other("KATANA3".to_string()),
ChainId::Other(chain_id),
);

let prover_builder = AtlanticSnosProverBuilder::new(
Expand Down
2 changes: 2 additions & 0 deletions saya/core/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -38,3 +38,5 @@ sqlx.workspace = true
cairo-vm.workspace = true
generate-pie.workspace = true
starknet_api.workspace = true
piltover.workspace = true
cainome.workspace = true
2 changes: 2 additions & 0 deletions saya/core/src/block_ingestor/mod.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
use anyhow::Result;

use starknet::core::types::StateUpdate;
use tokio::sync::mpsc::Sender;

mod polling;
Expand All @@ -24,4 +25,5 @@ pub trait BlockIngestor: Daemon {}
pub struct BlockInfo {
pub number: u64,
pub status: BlockStatus,
pub state_update: Option<StateUpdate>,
}
24 changes: 23 additions & 1 deletion saya/core/src/block_ingestor/polling.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,10 @@ use generate_pie::{
types::{ChainConfig, OsHintsConfiguration},
};
use log::{debug, error, info, trace};
use starknet::providers::{jsonrpc::HttpTransport, JsonRpcClient, Provider};
use starknet::{
core::types::BlockId,
providers::{jsonrpc::HttpTransport, JsonRpcClient, Provider},
};
use starknet_api::{contract_address, core::ChainId};
use tokio::{
sync::{
Expand Down Expand Up @@ -106,6 +109,23 @@ where
db.initialize_block(block_number.try_into().unwrap())
.await
.unwrap();
let state_update = &JsonRpcClient::new(HttpTransport::new(rpc_url.clone()))
.get_state_update(BlockId::Number(block_number))
.await
.unwrap();
let state_update = match state_update {
starknet::core::types::MaybePreConfirmedStateUpdate::Update(state_update) => {
state_update
}
//TODO: handle this case properly
starknet::core::types::MaybePreConfirmedStateUpdate::PreConfirmedUpdate(_) => {
panic!("PreConfirmedStateUpdate not supported")
}
};

db.add_state_update(block_number.try_into().unwrap(), state_update.clone())
.await
.unwrap();
Comment on lines +112 to +128
Copy link

Copilot AI Feb 4, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Multiple panic paths in the worker function will crash the process on errors. The RPC call to get_state_update (line 115) and the add_state_update database operation (line 128) use unwrap(), and the match on the state update (line 116) calls panic! outright when a PreConfirmedUpdate is returned. Consider proper error handling to prevent process crashes on network or database errors.

Copilot uses AI. Check for mistakes.

match db
.get_pie(block_number.try_into().unwrap(), Step::Snos)
Expand All @@ -116,6 +136,7 @@ where
let new_block = BlockInfo {
number: block_number,
status: BlockStatus::SnosPieGenerated,
state_update: Some(state_update.clone()),
};
trace!(block_number; "Pie generated");

Expand Down Expand Up @@ -159,6 +180,7 @@ where
let new_block = BlockInfo {
number: block_number,
status: BlockStatus::SnosPieGenerated,
state_update: Some(state_update.clone()),
};

let pie_bytes = compress_pie(pie.clone()).await.unwrap();
Expand Down
17 changes: 9 additions & 8 deletions saya/core/src/data_availability/celestia.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
use anyhow::Result;
use celestia_rpc::{BlobClient, Client};
use celestia_types::{nmt::Namespace, AppVersion, Blob, TxConfig};
use celestia_rpc::{BlobClient, Client, TxConfig};
use celestia_types::{nmt::Namespace, AppVersion, Blob};
use log::{debug, info};
use tokio::sync::mpsc::{Receiver, Sender};
use url::Url;
Expand Down Expand Up @@ -53,7 +53,7 @@ where
debug!("Received new proof");

// TODO: error handling
let client = Client::new(self.rpc_url.as_ref(), Some(&self.auth_token))
let client = Client::new(self.rpc_url.as_ref(), Some(&self.auth_token), None, None)
.await
.unwrap();

Expand All @@ -68,8 +68,9 @@ where
ciborium::into_writer(&packet, &mut serialized_packet).unwrap();

// TODO: error handling
Copy link

Copilot AI Feb 4, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The third parameter (None) passed to Blob::new appears to be undocumented in the diff. Consider adding a comment explaining what this parameter represents in the updated Celestia API, as it wasn't present in the previous version and its purpose is unclear.

Suggested change
// TODO: error handling
// TODO: error handling
// The third argument is the NMT share version (Option<u8>); `None` means
// "use the default share version" as defined by the Celestia node/API.

Copilot uses AI. Check for mistakes.
let blob = Blob::new(self.namespace, serialized_packet, AppVersion::V3).unwrap();
let commitment = blob.commitment.0;
let blob = Blob::new(self.namespace, serialized_packet, None, AppVersion::V7).unwrap();
let commitment = blob.clone().commitment;
let commitment = commitment.hash();
Comment on lines +72 to +73
Copy link

Copilot AI Feb 4, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The blob is cloned before accessing the commitment field. Consider accessing commitment directly from blob without cloning to improve performance, as cloning the entire blob (which may contain large data) just to access the commitment is inefficient.

Suggested change
let commitment = blob.clone().commitment;
let commitment = commitment.hash();
let commitment = blob.commitment.hash();

Copilot uses AI. Check for mistakes.

let tx_config = TxConfig {
key_name: self.key_name.clone(),
Expand All @@ -88,7 +89,7 @@ where

self.last_pointer = Some(DataAvailabilityPointer {
height: celestia_block,
commitment,
commitment: *commitment,
});

info!(
Expand All @@ -102,7 +103,7 @@ where
block_number: new_proof.block_number(),
pointer: Some(DataAvailabilityPointer {
height: celestia_block,
commitment,
commitment: *commitment,
}),
full_payload: new_proof,
};
Expand Down Expand Up @@ -131,7 +132,7 @@ impl<P> CelestiaDataAvailabilityBackendBuilder<P> {
auth_token,
namespace: Namespace::new_v0(namespace.as_bytes())?,
key_name,
last_pointer: None,
last_pointer: Some(None),
proof_channel: None,
cursor_channel: None,
})
Expand Down
Loading
Loading