From 06c5295b8804fbef523035e1e3c7806e762a95ba Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Tue, 16 Sep 2025 10:18:26 +0200 Subject: [PATCH 01/91] cli: support send-note --- .../trusted_base_cli/commands/get_notes.rs | 21 +++++- cli/src/trusted_base_cli/commands/mod.rs | 1 + .../trusted_base_cli/commands/send_note.rs | 71 +++++++++++++++++++ cli/src/trusted_base_cli/mod.rs | 59 ++++++++------- 4 files changed, 123 insertions(+), 29 deletions(-) create mode 100644 cli/src/trusted_base_cli/commands/send_note.rs diff --git a/cli/src/trusted_base_cli/commands/get_notes.rs b/cli/src/trusted_base_cli/commands/get_notes.rs index 4d0249259..ef259e390 100644 --- a/cli/src/trusted_base_cli/commands/get_notes.rs +++ b/cli/src/trusted_base_cli/commands/get_notes.rs @@ -26,7 +26,7 @@ use ita_stf::{ TrustedGetter, }; use itp_stf_primitives::types::{KeyPair, TrustedOperation}; -use itp_types::Moment; +use itp_types::{AccountId, Moment}; use log::error; use pallet_notes::{BucketIndex, TimestampedTrustedNote, TrustedNote}; use sp_core::Pair; @@ -42,8 +42,9 @@ pub struct GetNotesCommand { impl GetNotesCommand { pub(crate) fn run(&self, cli: &Cli, trusted_args: &TrustedCli) -> CliResult { let who = get_pair_from_str(cli, trusted_args, self.account.as_str()); + let who_accountid: AccountId = who.public().into(); let top = TrustedOperation::::get(Getter::trusted( - TrustedGetter::notes_for(who.public().into(), self.bucket_index) + TrustedGetter::notes_for(who_accountid.clone(), self.bucket_index) .sign(&KeyPair::Sr25519(Box::new(who))), )); let notes = perform_trusted_operation::>>( @@ -104,6 +105,22 @@ impl GetNotesCommand { sender, guess, ); }, + TrustedCall::send_note(from, to, note) => + if from == who_accountid { + println!( + "[{}] Message to: {:?}: {}", + datetime_str, + to, + String::from_utf8_lossy(note.as_ref()) + ); + } else { + println!( + "[{}] Message from: {:?}: {}", + datetime_str, + from, + String::from_utf8_lossy(note.as_ref()) + ); + }, _ => println!("[{}] {:?}", datetime_str, call), } } else { diff --git a/cli/src/trusted_base_cli/commands/mod.rs b/cli/src/trusted_base_cli/commands/mod.rs index ff2a872b5..e0d6864e0 100644 --- a/cli/src/trusted_base_cli/commands/mod.rs +++ b/cli/src/trusted_base_cli/commands/mod.rs @@ -14,6 +14,7 @@ pub mod get_total_issuance; pub mod get_undistributed_fees; pub mod nonce; pub mod note_bloat; +pub mod send_note; pub mod spam_extrinsics; pub mod transfer; pub mod unshield_funds; diff --git a/cli/src/trusted_base_cli/commands/send_note.rs b/cli/src/trusted_base_cli/commands/send_note.rs new file mode 100644 index 000000000..594849d8d --- /dev/null +++ b/cli/src/trusted_base_cli/commands/send_note.rs @@ -0,0 +1,71 @@ +/* + Copyright 2021 Integritee AG and Supercomputing Systems AG + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ + +use crate::{ + get_basic_signing_info_from_args, + trusted_cli::TrustedCli, + trusted_command_utils::{get_accountid_from_str, get_trusted_account_info}, + trusted_operation::{perform_trusted_operation, send_direct_request}, + Cli, CliResult, CliResultOk, +}; +use ita_stf::{Getter, TrustedCall, TrustedCallSigned}; +use itp_stf_primitives::{ + traits::TrustedCallSigning, + types::{KeyPair, TrustedOperation}, +}; +use log::*; +use std::boxed::Box; + +#[derive(Parser)] +pub struct SendNoteCommand { + /// sender's account. AccountId in ss58check format, mnemonic or hex seed. + sender: String, + /// recipient of note. AccountId in ss58check format. + recipient: String, + + /// plain message body in UTF8 encoding + message: String, + + /// session proxy who can sign on behalf of the account + #[clap(long)] + session_proxy: Option, +} + +impl SendNoteCommand { + pub(crate) fn run(&self, cli: &Cli, trusted_args: &TrustedCli) -> CliResult { + let (sender, signer, mrenclave, shard) = + get_basic_signing_info_from_args!(self.sender, self.session_proxy, cli, trusted_args); + + let to = get_accountid_from_str(&self.recipient); + println!("send trusted call send-note to {}: {}", to, self.message); + + let nonce = get_trusted_account_info(cli, trusted_args, &sender, &signer) + .map(|info| info.nonce) + .unwrap_or_default(); + let top: TrustedOperation = + TrustedCall::send_note(sender, to, self.message.as_bytes().to_vec()) + .sign(&KeyPair::Sr25519(Box::new(signer)), nonce, &mrenclave, &shard) + .into_trusted_operation(trusted_args.direct); + + if trusted_args.direct { + Ok(send_direct_request(cli, trusted_args, &top).map(|_| CliResultOk::None)?) + } else { + Ok(perform_trusted_operation::<()>(cli, trusted_args, &top) + .map(|_| CliResultOk::None)?) + } + } +} diff --git a/cli/src/trusted_base_cli/mod.rs b/cli/src/trusted_base_cli/mod.rs index d452601c9..404419f81 100644 --- a/cli/src/trusted_base_cli/mod.rs +++ b/cli/src/trusted_base_cli/mod.rs @@ -27,8 +27,9 @@ use crate::{ get_shard_info::GetShardInfoCommand, get_shard_vault::GetShardVaultCommand, get_total_issuance::GetTotalIssuanceCommand, get_undistributed_fees::GetUndistributedFeesCommand, nonce::NonceCommand, - note_bloat::NoteBloatCommand, spam_extrinsics::SpamExtrinsicsCommand, - transfer::TransferCommand, unshield_funds::UnshieldFundsCommand, version::VersionCommand, + note_bloat::NoteBloatCommand, send_note::SendNoteCommand, + spam_extrinsics::SpamExtrinsicsCommand, transfer::TransferCommand, + unshield_funds::UnshieldFundsCommand, version::VersionCommand, waste_time::WasteTimeCommand, watchdog::WatchdogCommand, }, trusted_cli::TrustedCli, @@ -118,6 +119,9 @@ pub enum TrustedBaseCommand { /// run a chatbot service Chatbot(ChatbotCommand), + /// send a private note to someone + SendNote(SendNoteCommand), + /// get a version string for the enclave Version(VersionCommand), } @@ -125,32 +129,33 @@ pub enum TrustedBaseCommand { impl TrustedBaseCommand { pub fn run(&self, cli: &Cli, trusted_cli: &TrustedCli) -> CliResult { match self { - TrustedBaseCommand::NewAccount => new_account(cli, trusted_cli), - TrustedBaseCommand::ListAccounts => list_accounts(cli, trusted_cli), - TrustedBaseCommand::Transfer(cmd) => cmd.run(cli, trusted_cli), + Self::NewAccount => new_account(cli, trusted_cli), + Self::ListAccounts => list_accounts(cli, trusted_cli), + Self::Transfer(cmd) => cmd.run(cli, trusted_cli), #[cfg(feature = "test")] - TrustedBaseCommand::SetBalance(cmd) => cmd.run(cli, trusted_cli), - TrustedBaseCommand::Balance(cmd) => cmd.run(cli, 
trusted_cli), - TrustedBaseCommand::UnshieldFunds(cmd) => cmd.run(cli, trusted_cli), - TrustedBaseCommand::Nonce(cmd) => cmd.run(cli, trusted_cli), - TrustedBaseCommand::GetFingerprint(cmd) => cmd.run(cli, trusted_cli), - TrustedBaseCommand::GetParentchainsInfo(cmd) => cmd.run(cli, trusted_cli), - TrustedBaseCommand::GetNoteBucketsInfo(cmd) => cmd.run(cli, trusted_cli), - TrustedBaseCommand::GetNotes(cmd) => cmd.run(cli, trusted_cli), - TrustedBaseCommand::GetShard(cmd) => cmd.run(cli, trusted_cli), - TrustedBaseCommand::GetShardInfo(cmd) => cmd.run(cli, trusted_cli), - TrustedBaseCommand::GetShardVault(cmd) => cmd.run(cli, trusted_cli), - TrustedBaseCommand::GetSidechainHeader(cmd) => cmd.run(cli, trusted_cli), - TrustedBaseCommand::GetTotalIssuance(cmd) => cmd.run(cli, trusted_cli), - TrustedBaseCommand::GetUndistributedFees(cmd) => cmd.run(cli, trusted_cli), - TrustedBaseCommand::AddSessionProxy(cmd) => cmd.run(cli, trusted_cli), - TrustedBaseCommand::GetSessionProxies(cmd) => cmd.run(cli, trusted_cli), - TrustedBaseCommand::NoteBloat(cmd) => cmd.run(cli, trusted_cli), - TrustedBaseCommand::WasteTime(cmd) => cmd.run(cli, trusted_cli), - TrustedBaseCommand::SpamExtrinsics(cmd) => cmd.run(cli, trusted_cli), - TrustedBaseCommand::Watchdog(cmd) => cmd.run(cli, trusted_cli), - TrustedBaseCommand::Chatbot(cmd) => cmd.run(cli, trusted_cli), - TrustedBaseCommand::Version(cmd) => cmd.run(cli, trusted_cli), + Self::SetBalance(cmd) => cmd.run(cli, trusted_cli), + Self::Balance(cmd) => cmd.run(cli, trusted_cli), + Self::UnshieldFunds(cmd) => cmd.run(cli, trusted_cli), + Self::Nonce(cmd) => cmd.run(cli, trusted_cli), + Self::GetFingerprint(cmd) => cmd.run(cli, trusted_cli), + Self::GetParentchainsInfo(cmd) => cmd.run(cli, trusted_cli), + Self::GetNoteBucketsInfo(cmd) => cmd.run(cli, trusted_cli), + Self::GetNotes(cmd) => cmd.run(cli, trusted_cli), + Self::GetShard(cmd) => cmd.run(cli, trusted_cli), + Self::GetShardInfo(cmd) => cmd.run(cli, trusted_cli), + Self::GetShardVault(cmd) => cmd.run(cli, trusted_cli), + Self::GetSidechainHeader(cmd) => cmd.run(cli, trusted_cli), + Self::GetTotalIssuance(cmd) => cmd.run(cli, trusted_cli), + Self::GetUndistributedFees(cmd) => cmd.run(cli, trusted_cli), + Self::AddSessionProxy(cmd) => cmd.run(cli, trusted_cli), + Self::GetSessionProxies(cmd) => cmd.run(cli, trusted_cli), + Self::NoteBloat(cmd) => cmd.run(cli, trusted_cli), + Self::SendNote(cmd) => cmd.run(cli, trusted_cli), + Self::WasteTime(cmd) => cmd.run(cli, trusted_cli), + Self::SpamExtrinsics(cmd) => cmd.run(cli, trusted_cli), + Self::Watchdog(cmd) => cmd.run(cli, trusted_cli), + Self::Chatbot(cmd) => cmd.run(cli, trusted_cli), + Self::Version(cmd) => cmd.run(cli, trusted_cli), } } } From e080066f128395097b0ec69e41090499cf757eee Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Tue, 16 Sep 2025 11:58:27 +0200 Subject: [PATCH 02/91] add new TOPs for relayed notes --- app-libs/stf/src/lib.rs | 1 + app-libs/stf/src/relayed_note.rs | 29 ++++++++++++++++++ app-libs/stf/src/trusted_call.rs | 52 ++++++++++++++++++++++++++++---- 3 files changed, 76 insertions(+), 6 deletions(-) create mode 100644 app-libs/stf/src/relayed_note.rs diff --git a/app-libs/stf/src/lib.rs b/app-libs/stf/src/lib.rs index ab3d8d82f..ad589439d 100644 --- a/app-libs/stf/src/lib.rs +++ b/app-libs/stf/src/lib.rs @@ -39,6 +39,7 @@ pub mod guess_the_number; pub mod hash; pub mod helpers; pub mod parentchain_mirror; +pub mod relayed_note; pub mod stf_sgx; pub mod stf_sgx_primitives; #[cfg(all(feature = "test", feature = "sgx"))] diff --git 
a/app-libs/stf/src/relayed_note.rs b/app-libs/stf/src/relayed_note.rs new file mode 100644 index 000000000..c99a9e6fd --- /dev/null +++ b/app-libs/stf/src/relayed_note.rs @@ -0,0 +1,29 @@ +use codec::{Decode, Encode}; +use itp_types::IpfsHash; +use sp_std::vec::Vec; +pub type ConversationId = u32; + +#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)] +pub struct RelayedNote { + pub conversation_id: ConversationId, + pub retreival_info: RelayedNoteRetreivalInfo, +} + +/// Necessary information for recipient to retrieve and potentially decrypt a relayed note +#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)] +pub enum RelayedNoteRetreivalInfo { + /// the message is included within and not actually relayed + Here { msg: Vec }, + /// the message is stored on ipfs, encrypted with the provided key + Ipfs { cid: IpfsHash, encryption_key: [u8; 32] }, + /// the message is relayed through an undeclared channel which is assumed to be + /// known by the recipient, but the encryption key is provided + Undeclared { encryption_key: [u8; 32] }, +} + +/// A user request to relay a note to a specific conversation. +#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)] +pub struct RelayedNoteRequest { + pub conversation_id: ConversationId, + pub msg: Vec, +} diff --git a/app-libs/stf/src/trusted_call.rs b/app-libs/stf/src/trusted_call.rs index 98397969c..875a40d08 100644 --- a/app-libs/stf/src/trusted_call.rs +++ b/app-libs/stf/src/trusted_call.rs @@ -30,6 +30,7 @@ use crate::{ enclave_signer_account, ensure_enclave_signer_account, ensure_maintainer_account, get_mortality, shard_vault, shielding_target_genesis_hash, store_note, wrap_bytes, }, + relayed_note::{ConversationId, RelayedNoteRequest, RelayedNoteRetreivalInfo}, Getter, STF_BYTE_FEE_UNIT_DIVIDER, STF_SESSION_PROXY_DEPOSIT_DIVIDER, STF_SHIELDING_FEE_AMOUNT_DIVIDER, STF_TX_FEE_UNIT_DIVIDER, }; @@ -43,9 +44,9 @@ use ita_parentchain_specs::MinimalChainSpec; #[cfg(feature = "evm")] use ita_sgx_runtime::{AddressMapping, HashedAddressMapping}; use ita_sgx_runtime::{ - Assets, ParentchainInstanceIntegritee, ParentchainInstanceTargetA, ParentchainInstanceTargetB, - ParentchainIntegritee, Runtime, SessionProxyCredentials, SessionProxyRole, ShardManagement, - System, + Assets, MaxNoteSize, ParentchainInstanceIntegritee, ParentchainInstanceTargetA, + ParentchainInstanceTargetB, ParentchainIntegritee, Runtime, SessionProxyCredentials, + SessionProxyRole, ShardManagement, System, }; pub use ita_sgx_runtime::{Balance, Index}; use itp_node_api::metadata::{provider::AccessNodeMetadata, NodeMetadataTrait}; @@ -75,7 +76,7 @@ use sp_core::{ ed25519, }; use sp_runtime::{traits::Verify, MultiAddress, MultiSignature}; -use std::{format, prelude::v1::*, sync::Arc, vec}; +use std::{cmp::min, format, prelude::v1::*, sync::Arc, vec}; #[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)] #[allow(non_camel_case_types)] @@ -95,6 +96,8 @@ pub enum TrustedCall { waste_time(AccountId, u32) = 11, spam_extrinsics(AccountId, u32, ParentchainId) = 12, send_note(AccountId, AccountId, Vec) = 20, + send_relayed_note(AccountId, AccountId, ConversationId, RelayedNoteRequest) = 21, + send_relayed_note_stripped(AccountId, AccountId, ConversationId, RelayedNoteRetreivalInfo) = 22, // without payload add_session_proxy(AccountId, AccountId, SessionProxyCredentials) = 30, assets_transfer(AccountId, AccountId, AssetId, Balance) = 42, assets_unshield(AccountId, AccountId, AssetId, Balance, ShardIdentifier) = 43, @@ -165,6 +168,8 @@ impl TrustedCall { sender_account, 
Self::timestamp_set(sender_account, ..) => sender_account, Self::send_note(sender_account, ..) => sender_account, + Self::send_relayed_note(sender_account, ..) => sender_account, + Self::send_relayed_note_stripped(sender_account, ..) => sender_account, Self::spam_extrinsics(sender_account, ..) => sender_account, Self::add_session_proxy(sender_account, ..) => sender_account, Self::note_bloat(sender_account, ..) => sender_account, @@ -619,11 +624,28 @@ where Ok(()) }, TrustedCall::send_note(from, to, _note) => { - let _origin = ita_sgx_runtime::RuntimeOrigin::signed(from.clone()); std::println!("⣿STF⣿ 🔄 send_note from ⣿⣿⣿ to ⣿⣿⣿ with note ⣿⣿⣿"); store_note(&from, self.call, vec![from.clone(), to])?; Ok(()) }, + TrustedCall::send_relayed_note(from, to, conversation_id, _blob) => { + std::println!("⣿STF⣿ 🔄 send_relayed_note from ⣿⣿⣿ to ⣿⣿⣿ with note ⣿⣿⣿"); + let retreival_info = + RelayedNoteRetreivalInfo::Undeclared { encryption_key: [0u8; 32] }; + let stripped_call = TrustedCall::send_relayed_note_stripped( + from.clone(), + to.clone(), + conversation_id, + retreival_info, + ); + store_note(&from, stripped_call, vec![from.clone(), to])?; + Ok(()) + }, + TrustedCall::send_relayed_note_stripped(from, to, _conversation_id, _retreival) => { + std::println!("⣿STF⣿ 🔄 send_relayed_note_stripped from ⣿⣿⣿ to ⣿⣿⣿ with note ⣿⣿⣿"); + store_note(&from, self.call, vec![from.clone(), to])?; + Ok(()) + }, TrustedCall::add_session_proxy(delegator, delegate, credentials) => { let origin = ita_sgx_runtime::RuntimeOrigin::signed(delegator.clone()); std::println!("⣿STF⣿ 🔄 add_proxy delegator ⣿⣿⣿ delegate ⣿⣿⣿"); @@ -915,7 +937,8 @@ where let unshield_amount = balance.saturating_sub( MinimalChainSpec::one_unit( shielding_target_genesis_hash().unwrap_or_default(), - ) / STF_TX_FEE_UNIT_DIVIDER * 3, + ) / STF_TX_FEE_UNIT_DIVIDER + * 3, ); let parentchain_call = parentchain_vault_proxy_call( unshield_native_from_vault_parentchain_call( @@ -988,6 +1011,23 @@ fn get_fee_for(tc: &TrustedCallSigned, fee_asset: Option) -> Fee { TrustedCall::send_note(_, _, note) => one / STF_TX_FEE_UNIT_DIVIDER + (one.saturating_mul(Balance::from(note.len() as u32))) / STF_BYTE_FEE_UNIT_DIVIDER, + TrustedCall::send_relayed_note(_, _, _, blob) => + one / STF_TX_FEE_UNIT_DIVIDER + + one.saturating_mul(Balance::from(min( + MaxNoteSize::get(), + blob.encoded_size() as u32, + ))) / STF_BYTE_FEE_UNIT_DIVIDER, + TrustedCall::send_relayed_note_stripped(_, _, _, retrieval_info) => { + let byte_fee = match retrieval_info { + RelayedNoteRetreivalInfo::Undeclared { .. } => 32 * one / STF_BYTE_FEE_UNIT_DIVIDER, // flat fee for undeclared + RelayedNoteRetreivalInfo::Ipfs { .. } => + (46 + 32) * one / STF_BYTE_FEE_UNIT_DIVIDER, // flat fee for ipfs + RelayedNoteRetreivalInfo::Here { msg } => + (one.saturating_mul(Balance::from(msg.len() as u32))) + / STF_BYTE_FEE_UNIT_DIVIDER, + }; + byte_fee + one / STF_TX_FEE_UNIT_DIVIDER + }, #[cfg(feature = "evm")] TrustedCall::evm_call(..) 
=> one / STF_TX_FEE_UNIT_DIVIDER, #[cfg(feature = "evm")] From 5d7cfd956b8fd5648fc9df222a0fe019c3ede7d3 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Tue, 16 Sep 2025 11:58:53 +0200 Subject: [PATCH 03/91] add explicit rpc call size limit before a TOP enters the pool --- core-primitives/top-pool-author/src/author.rs | 3 +-- sidechain/rpc-handler/src/constants.rs | 1 + sidechain/rpc-handler/src/direct_top_pool_api.rs | 9 ++++++++- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/core-primitives/top-pool-author/src/author.rs b/core-primitives/top-pool-author/src/author.rs index eb4e9befc..a64f7dd0b 100644 --- a/core-primitives/top-pool-author/src/author.rs +++ b/core-primitives/top-pool-author/src/author.rs @@ -170,8 +170,7 @@ where Err(_) => return Box::pin(ready(Err(ClientError::BadFormat.into()))), }; - trace!("decrypted indirect invocation: {:?}", trusted_operation); - + trace!("decrypted TOP: {:?}", trusted_operation); // apply top filter - return error if this specific type of trusted operation // is not allowed by the filter if !self.top_filter.filter(&trusted_operation) { diff --git a/sidechain/rpc-handler/src/constants.rs b/sidechain/rpc-handler/src/constants.rs index b3b5659b8..a75e4b6e0 100644 --- a/sidechain/rpc-handler/src/constants.rs +++ b/sidechain/rpc-handler/src/constants.rs @@ -20,3 +20,4 @@ // RPC method names. pub const RPC_METHOD_NAME_IMPORT_BLOCKS: &str = "sidechain_importBlock"; pub const RPC_METHOD_NAME_FETCH_BLOCKS_FROM_PEER: &str = "sidechain_fetchBlocksFromPeer"; +pub const MAX_TOP_SIZE_TO_ENTER_POOL: usize = 102_400; // 100 KB diff --git a/sidechain/rpc-handler/src/direct_top_pool_api.rs b/sidechain/rpc-handler/src/direct_top_pool_api.rs index 820a52d8e..fc8b62f3e 100644 --- a/sidechain/rpc-handler/src/direct_top_pool_api.rs +++ b/sidechain/rpc-handler/src/direct_top_pool_api.rs @@ -25,6 +25,7 @@ use rust_base58::base58::FromBase58; #[cfg(feature = "sgx")] use base58::FromBase58; +use crate::constants::MAX_TOP_SIZE_TO_ENTER_POOL; use codec::{Decode, Encode}; use itp_enclave_metrics::EnclaveMetric; use itp_ocall_api::EnclaveMetricsOCallApi; @@ -154,7 +155,6 @@ where G: PartialEq + Encode + Decode + Debug + Send + Sync + 'static, { debug!("Author submit and watch trusted operation.."); - let hex_encoded_params = params.parse::<Vec<String>>().map_err(|e| format!("{:?}", e))?; let request = @@ -162,6 +162,13 @@ where let shard: ShardIdentifier = request.shard; let encrypted_trusted_call: Vec<u8> = request.cyphertext; + + if encrypted_trusted_call.len() > MAX_TOP_SIZE_TO_ENTER_POOL { + let error_msg = format!("Trusted operation too large: {} bytes exceeds the limit of {} bytes", encrypted_trusted_call.len(), MAX_TOP_SIZE_TO_ENTER_POOL); + error!("{}", error_msg); + return Err(error_msg) + } + let result = async { author.watch_top(encrypted_trusted_call, shard).await }; let response: Result = executor::block_on(result); From 69d356758c9c5f6187cb2ec281751e66ed576f3a Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Tue, 16 Sep 2025 13:53:42 +0200 Subject: [PATCH 04/91] generalize ParentchainCall to TrustedCallSideEffect and introduce Ipfs option without implementing it --- app-libs/stf/src/guess_the_number.rs | 4 +- app-libs/stf/src/stf_sgx.rs | 19 +++--- app-libs/stf/src/trusted_call.rs | 19 +++--- core-primitives/stf-executor/src/executor.rs | 50 +++++++++------- core-primitives/stf-executor/src/lib.rs | 19 +++--- core-primitives/stf-interface/src/lib.rs | 10 ++-- core-primitives/stf-interface/src/mocks.rs | 10 ++-- core-primitives/test/src/mock/stf_mock.rs | 12 ++-- core-primitives/types/src/lib.rs | 10 +++- core-primitives/types/src/parentchain.rs | 4
+- core/offchain-worker-executor/src/executor.rs | 58 ++++++++++++++----- .../src/test/sidechain_aura_tests.rs | 8 +-- .../src/test/sidechain_event_tests.rs | 8 +-- enclave-runtime/src/top_pool_execution.rs | 52 ++++++++++++----- sidechain/consensus/aura/src/slot_proposer.rs | 4 +- sidechain/consensus/common/src/lib.rs | 7 ++- sidechain/consensus/slots/src/lib.rs | 12 ++-- 17 files changed, 190 insertions(+), 116 deletions(-) diff --git a/app-libs/stf/src/guess_the_number.rs b/app-libs/stf/src/guess_the_number.rs index 6612d52d9..033f41514 100644 --- a/app-libs/stf/src/guess_the_number.rs +++ b/app-libs/stf/src/guess_the_number.rs @@ -32,7 +32,7 @@ use itp_node_api_metadata::NodeMetadataTrait; use itp_sgx_runtime_primitives::types::{Balance, Moment, ShardIdentifier}; use itp_stf_interface::{ExecuteCall, ExecuteGetter}; use itp_stf_primitives::error::StfError; -use itp_types::{parentchain::ParentchainCall, AccountId}; +use itp_types::{AccountId, TrustedCallSideEffect}; use itp_utils::stringify::account_id_to_string; use log::*; use sp_std::{sync::Arc, vec, vec::Vec}; @@ -85,7 +85,7 @@ where fn execute( self, - _calls: &mut Vec, + _side_effects: &mut Vec, _shard: &ShardIdentifier, _node_metadata_repo: Arc, ) -> Result<(), Self::Error> { diff --git a/app-libs/stf/src/stf_sgx.rs b/app-libs/stf/src/stf_sgx.rs index 8749cf80b..be88b169d 100644 --- a/app-libs/stf/src/stf_sgx.rs +++ b/app-libs/stf/src/stf_sgx.rs @@ -55,7 +55,10 @@ use itp_stf_primitives::{ types::{ShardIdentifier, Signature}, }; use itp_storage::storage_value_key; -use itp_types::parentchain::{AccountId, BlockNumber, Hash, Index, ParentchainCall, ParentchainId}; +use itp_types::{ + parentchain::{AccountId, BlockNumber, Hash, Index, ParentchainId}, + TrustedCallSideEffect, +}; use itp_utils::{hex::hex_encode, stringify::account_id_to_string}; use log::*; use sp_runtime::traits::StaticLookup; @@ -225,10 +228,10 @@ where state: &mut State, shard: &ShardIdentifier, call: TCS, - calls: &mut Vec, + side_effects: &mut Vec, node_metadata_repo: Arc, ) -> Result<(), Self::Error> { - state.execute_with(|| call.execute(calls, shard, node_metadata_repo)) + state.execute_with(|| call.execute(side_effects, shard, node_metadata_repo)) } fn on_initialize( @@ -269,7 +272,7 @@ where state: &mut State, shard: &ShardIdentifier, integritee_block_number: BlockNumber, - calls: &mut Vec, + side_effects: &mut Vec, node_metadata_repo: Arc, ) -> Result<(), Self::Error> { state.execute_with(|| { @@ -314,7 +317,7 @@ where retire_account( account, &mut enclave_nonce, - calls, + side_effects, shard, node_metadata_repo.clone(), ); @@ -342,7 +345,7 @@ where fn retire_account( account: AccountId, enclave_nonce: &mut Index, - calls: &mut Vec, + side_effects: &mut Vec, shard: &ShardIdentifier, node_metadata_repo: Arc, ) where @@ -368,7 +371,7 @@ fn retire_account( signature: fake_signature.clone(), }; // Replace with `inspect_err` once it's stable. - tcs.execute(calls, shard, node_metadata_repo.clone()) + tcs.execute(side_effects, shard, node_metadata_repo.clone()) .map_err(|e| { error!( "Failed to force-unshield {:?} for {}: {:?}", @@ -390,7 +393,7 @@ fn retire_account( signature: fake_signature, }; // Replace with `inspect_err` once it's stable. 
- tcs.execute(calls, shard, node_metadata_repo) + tcs.execute(side_effects, shard, node_metadata_repo) .map_err(|e| { error!( "Failed to force-unshield native for {:?}: {:?}", diff --git a/app-libs/stf/src/trusted_call.rs b/app-libs/stf/src/trusted_call.rs index 875a40d08..45a621404 100644 --- a/app-libs/stf/src/trusted_call.rs +++ b/app-libs/stf/src/trusted_call.rs @@ -65,7 +65,7 @@ use itp_stf_primitives::{ }; use itp_types::{ parentchain::{GenericMortality, ParentchainCall, ParentchainId, ProxyType}, - Address, Moment, OpaqueCall, + Address, Moment, OpaqueCall, TrustedCallSideEffect, }; use itp_utils::stringify::account_id_to_string; use log::*; @@ -302,7 +302,7 @@ where fn execute( self, - calls: &mut Vec, + side_effects: &mut Vec, shard: &ShardIdentifier, node_metadata_repo: Arc, ) -> Result<(), Self::Error> { @@ -415,7 +415,7 @@ where self.call, vec![account_incognito.clone(), beneficiary], ); - calls.push(parentchain_call); + side_effects.push(TrustedCallSideEffect::ParentchainCall(parentchain_call)); Ok(()) }, TrustedCall::balance_unshield_through_enclave_bridge_pallet( @@ -463,7 +463,7 @@ where let mortality = get_mortality(ParentchainId::Integritee, 32) .unwrap_or_else(GenericMortality::immortal); let parentchain_call = ParentchainCall::Integritee { call, mortality }; - calls.push(parentchain_call); + side_effects.push(TrustedCallSideEffect::ParentchainCall(parentchain_call)); Ok(()) }, TrustedCall::balance_shield(enclave_account, who, value, parentchain_id) => { @@ -619,7 +619,7 @@ where ParentchainId::TargetB => ParentchainCall::TargetB { call, mortality: mortality.clone() }, }; - calls.push(pcall); + side_effects.push(TrustedCallSideEffect::ParentchainCall(pcall)); } Ok(()) }, @@ -739,7 +739,7 @@ where self.call, vec![account_incognito.clone(), beneficiary], )?; - calls.push(parentchain_call); + side_effects.push(TrustedCallSideEffect::ParentchainCall(parentchain_call)); Ok(()) }, TrustedCall::assets_shield(enclave_account, who, asset_id, value, parentchain_id) => { @@ -883,7 +883,8 @@ where info!("Trying to create evm contract with address {:?}", contract_address); Ok(()) }, - TrustedCall::guess_the_number(call) => call.execute(calls, shard, node_metadata_repo), + TrustedCall::guess_the_number(call) => + call.execute(side_effects, shard, node_metadata_repo), TrustedCall::force_unshield_all(enclave_account, who, maybe_asset_id) => { ensure_enclave_signer_account(&enclave_account)?; if let Some(asset_id) = maybe_asset_id { @@ -909,7 +910,7 @@ where store_note(&who, self.call, vec![who.clone()])?; burn_assets(&who, balance, asset_id)?; if unshield_amount > 0 { - calls.push(parentchain_call); + side_effects.push(TrustedCallSideEffect::ParentchainCall(parentchain_call)); } } else { let info = System::account(&who); @@ -964,7 +965,7 @@ where Self::Error::Dispatch(format!("Balance burn balance error: {:?}", e.error)) })?; if unshield_amount > 0 { - calls.push(parentchain_call); + side_effects.push(TrustedCallSideEffect::ParentchainCall(parentchain_call)); } } Ok(()) diff --git a/core-primitives/stf-executor/src/executor.rs b/core-primitives/stf-executor/src/executor.rs index 8487694ee..4087d53c8 100644 --- a/core-primitives/stf-executor/src/executor.rs +++ b/core-primitives/stf-executor/src/executor.rs @@ -42,7 +42,7 @@ use itp_time_utils::{duration_now, now_as_millis}; use itp_types::{ parentchain::{BlockNumber, Header as ParentchainHeader, ParentchainCall, ParentchainId}, storage::StorageEntryVerified, - Balance, ShardConfig, UpgradableShardConfig, H256, + Balance, ShardConfig, 
TrustedCallSideEffect, UpgradableShardConfig, H256, }; use log::*; use sp_runtime::{traits::Header as HeaderTrait, SaturatedConversion}; @@ -124,12 +124,12 @@ where } debug!("execute on STF, call with nonce {}", trusted_call.nonce()); - let mut extrinsic_call_backs: Vec = Vec::new(); + let mut trusted_call_side_effects: Vec = Vec::new(); if let Err(e) = Stf::execute_call( state, shard, trusted_call.clone(), - &mut extrinsic_call_backs, + &mut trusted_call_side_effects, self.node_metadata_repo.clone(), ) { error!("Stf execute failed: {:?}", e); @@ -143,23 +143,29 @@ where state.prune_state_diff(); } - for call in extrinsic_call_backs.clone() { - match call { - ParentchainCall::Integritee { call, mortality } => trace!( - "trusted_call wants to send encoded call to [Integritee] parentchain: 0x{} with mortality {:?}", - hex::encode(call.encode()), mortality - ), - ParentchainCall::TargetA { call, mortality } => trace!( - "trusted_call wants to send encoded call to [TargetA] parentchain: 0x{} with mortality {:?}", - hex::encode(call.encode()), mortality - ), - ParentchainCall::TargetB { call, mortality } => trace!( - "trusted_call wants to send encoded call to [TargetB] parentchain: 0x{} with mortality {:?}", - hex::encode(call.encode()), mortality - ), + for side_effect in trusted_call_side_effects.clone() { + match side_effect { + TrustedCallSideEffect::ParentchainCall(call) => match call { + ParentchainCall::Integritee { call, mortality } => trace!( + "trusted_call wants to send encoded call to [Integritee] parentchain: 0x{} with mortality {:?}", + hex::encode(call.encode()), mortality + ), + ParentchainCall::TargetA { call, mortality } => trace!( + "trusted_call wants to send encoded call to [TargetA] parentchain: 0x{} with mortality {:?}", + hex::encode(call.encode()), mortality + ), + ParentchainCall::TargetB { call, mortality } => trace!( + "trusted_call wants to send encoded call to [TargetB] parentchain: 0x{} with mortality {:?}", + hex::encode(call.encode()), mortality + ), + }, + TrustedCallSideEffect::IpfsAdd(blob) => trace!( + "trusted_call wants to add blob of size {} to ipfs", + blob.len() + ), } } - Ok(ExecutedOperation::success(operation_hash, top_or_hash, extrinsic_call_backs)) + Ok(ExecutedOperation::success(operation_hash, top_or_hash, trusted_call_side_effects)) } } @@ -318,25 +324,25 @@ where // the risk of overdue block production is minimal as all user calls are filtered during maintenance mode anyway if maintenance_mode { info!("Maintenance mode is active."); - let mut extrinsic_call_backs: Vec = Vec::new(); + let mut trusted_call_side_effects: Vec = Vec::new(); Stf::maintenance_mode_tasks( &mut state, &shard, *header.number(), - &mut extrinsic_call_backs, + &mut trusted_call_side_effects, self.node_metadata_repo.clone(), ) .map_err(|e| error!("maintenance_mode tasks failed: {:?}", e)) .ok(); info!( "maintenance tasks have triggered {} parentchain calls", - extrinsic_call_backs.len() + trusted_call_side_effects.len() ); // we're hacking our unshielding calls into the queue executed_and_failed_calls.push(ExecutedOperation::success( H256::default(), TrustedOperationOrHash::Hash(H256::default()), - extrinsic_call_backs, + trusted_call_side_effects, )); } diff --git a/core-primitives/stf-executor/src/lib.rs b/core-primitives/stf-executor/src/lib.rs index a3e22f4de..9da6fc993 100644 --- a/core-primitives/stf-executor/src/lib.rs +++ b/core-primitives/stf-executor/src/lib.rs @@ -28,7 +28,7 @@ use codec::{Decode, Encode}; use core::fmt::Debug; use 
itp_sgx_externalities::SgxExternalitiesTrait; use itp_stf_primitives::types::TrustedOperationOrHash; -use itp_types::{parentchain::ParentchainCall, H256}; +use itp_types::{TrustedCallSideEffect, H256}; use std::vec::Vec; // re-export module to properly feature gate sgx and regular std environment @@ -60,12 +60,12 @@ pub mod mocks; /// any extrinsic callbacks (e.g. unshield extrinsics) that need to be executed on-chain #[derive(Clone, Debug, PartialEq)] pub enum ExecutionStatus { - Success(H256, Vec), + Success(H256, Vec), Failure, } impl ExecutionStatus { - pub fn get_extrinsic_callbacks(&self) -> Vec { + pub fn get_extrinsic_callbacks(&self) -> Vec { match self { ExecutionStatus::Success(_, opaque_calls) => opaque_calls.clone(), _ => Vec::new(), @@ -102,7 +102,7 @@ where pub fn success( operation_hash: H256, trusted_operation_or_hash: TrustedOperationOrHash, - extrinsic_call_backs: Vec, + extrinsic_call_backs: Vec, ) -> Self { ExecutedOperation { status: ExecutionStatus::Success(operation_hash, extrinsic_call_backs), @@ -141,7 +141,7 @@ where TCS: PartialEq + Encode + Decode + Debug + Clone + Send + Sync, G: PartialEq + Encode + Decode + Debug + Clone + Send + Sync, { - pub fn get_extrinsic_callbacks(&self) -> Vec { + pub fn get_extrinsic_callbacks(&self) -> Vec { self.executed_operations .iter() .flat_map(|e| e.status.get_extrinsic_callbacks()) @@ -235,10 +235,11 @@ mod tests { int: u8, ) -> (ExecutedOperation, H256) { let hash = H256::from([int; 32]); - let opaque_call: Vec = vec![ParentchainCall::Integritee { - call: OpaqueCall(vec![int; 10]), - mortality: GenericMortality { era: Era::mortal(0, 0), mortality_checkpoint: None }, - }]; + let opaque_call: Vec = + vec![TrustedCallSideEffect::ParentchainCall(ParentchainCall::Integritee { + call: OpaqueCall(vec![int; 10]), + mortality: GenericMortality { era: Era::mortal(0, 0), mortality_checkpoint: None }, + })]; let operation = ExecutedOperation::success(hash, TrustedOperationOrHash::Hash(hash), opaque_call); (operation, hash) diff --git a/core-primitives/stf-interface/src/lib.rs b/core-primitives/stf-interface/src/lib.rs index 82ccdb8ba..65171ae7e 100644 --- a/core-primitives/stf-interface/src/lib.rs +++ b/core-primitives/stf-interface/src/lib.rs @@ -29,8 +29,8 @@ use itp_node_api_metadata::NodeMetadataTrait; use itp_node_api_metadata_provider::AccessNodeMetadata; use itp_stf_primitives::traits::TrustedCallVerification; use itp_types::{ - parentchain::{AccountId, BlockHash, BlockNumber, ParentchainCall, ParentchainId}, - Moment, ShardIdentifier, + parentchain::{AccountId, BlockHash, BlockNumber, ParentchainId}, + Moment, ShardIdentifier, TrustedCallSideEffect, }; #[cfg(feature = "mocks")] @@ -83,7 +83,7 @@ where state: &mut State, shard: &ShardIdentifier, call: TCS, - calls: &mut Vec, + side_effects: &mut Vec, node_metadata_repo: Arc, ) -> Result<(), Self::Error>; @@ -100,7 +100,7 @@ where state: &mut State, shard: &itp_stf_primitives::types::ShardIdentifier, integritee_block_number: BlockNumber, - calls: &mut Vec, + side_effects: &mut Vec, node_metadata_repo: Arc, ) -> Result<(), Self::Error>; @@ -131,7 +131,7 @@ where /// Execute a call. Callbacks are added as an `OpaqueCall`. 
fn execute( self, - calls: &mut Vec, + side_effects: &mut Vec, shard: &ShardIdentifier, node_metadata_repo: Arc, ) -> Result<(), Self::Error>; diff --git a/core-primitives/stf-interface/src/mocks.rs b/core-primitives/stf-interface/src/mocks.rs index 3707a991c..542f3fd84 100644 --- a/core-primitives/stf-interface/src/mocks.rs +++ b/core-primitives/stf-interface/src/mocks.rs @@ -29,8 +29,8 @@ use itp_node_api_metadata::metadata_mocks::NodeMetadataMock; use itp_node_api_metadata_provider::NodeMetadataRepository; use itp_stf_primitives::traits::TrustedCallVerification; use itp_types::{ - parentchain::{BlockNumber, ParentchainCall, ParentchainId}, - AccountId, Index, Moment, ShardIdentifier, + parentchain::{BlockNumber, ParentchainId}, + AccountId, Index, Moment, ShardIdentifier, TrustedCallSideEffect, }; #[derive(Default)] @@ -71,7 +71,7 @@ where _state: &mut State, _shard: &ShardIdentifier, _call: TCS, - _calls: &mut Vec, + _side_effects: &mut Vec, _node_metadata_repo: Arc>, ) -> Result<(), Self::Error> { unimplemented!() @@ -90,7 +90,7 @@ where _state: &mut State, _shard: &itp_stf_primitives::types::ShardIdentifier, _integritee_block_number: BlockNumber, - _calls: &mut Vec, + _side_effects: &mut Vec, _node_metadata_repo: Arc>, ) -> Result<(), Self::Error> { todo!() @@ -138,7 +138,7 @@ impl ExecuteCall> for CallExecutorMock fn execute( self, - _calls: &mut Vec, + _side_effects: &mut Vec, _shard: &ShardIdentifier, _node_metadata_repo: Arc>, ) -> Result<(), Self::Error> { diff --git a/core-primitives/test/src/mock/stf_mock.rs b/core-primitives/test/src/mock/stf_mock.rs index aeda7d017..4772f82d3 100644 --- a/core-primitives/test/src/mock/stf_mock.rs +++ b/core-primitives/test/src/mock/stf_mock.rs @@ -31,8 +31,8 @@ use itp_stf_primitives::{ types::{KeyPair, Nonce, TrustedOperation}, }; use itp_types::{ - parentchain::{BlockNumber, ParentchainCall, ParentchainId}, - AccountId, Balance, Index, Moment, ShardIdentifier, Signature, + parentchain::{BlockNumber, ParentchainId}, + AccountId, Balance, Index, Moment, ShardIdentifier, Signature, TrustedCallSideEffect, }; use log::*; use sp_core::{sr25519, Pair}; @@ -75,10 +75,10 @@ impl StateCallInterface, + side_effects: &mut Vec, node_metadata_repo: Arc, ) -> Result<(), Self::Error> { - state.execute_with(|| call.execute(calls, shard, node_metadata_repo)) + state.execute_with(|| call.execute(side_effects, shard, node_metadata_repo)) } fn on_initialize( @@ -95,7 +95,7 @@ impl StateCallInterface, + _calls: &mut Vec, _node_metadata_repo: Arc, ) -> Result<(), Self::Error> { todo!() @@ -202,7 +202,7 @@ impl ExecuteCall for TrustedCallSignedMock { fn execute( self, - _calls: &mut Vec, + _calls: &mut Vec, _shard: &ShardIdentifier, _node_metadata_repo: Arc, ) -> Result<(), Self::Error> { diff --git a/core-primitives/types/src/lib.rs b/core-primitives/types/src/lib.rs index c3ad5d144..d4b20b0a6 100644 --- a/core-primitives/types/src/lib.rs +++ b/core-primitives/types/src/lib.rs @@ -57,14 +57,16 @@ pub type ShardConfig = enclave_bridge_primitives::ShardConfig; pub type UpgradableShardConfig = enclave_bridge_primitives::UpgradableShardConfig; +use crate::parentchain::ParentchainCall; pub use enclave_bridge_primitives::Request; pub use teerex_primitives::{ EnclaveFingerprint, MultiEnclave, SgxBuildMode, SgxEnclave, SgxReportData, SgxStatus, }; + pub type Enclave = MultiEnclave>; /// Simple blob to hold an encoded call -#[derive(Debug, PartialEq, Eq, Clone, Default)] +#[derive(Decode, Debug, PartialEq, Eq, Clone, Default)] pub struct OpaqueCall(pub Vec); impl 
OpaqueCall { @@ -133,6 +135,12 @@ pub enum WorkerResponse { NextNonce(Option), } +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub enum TrustedCallSideEffect { + ParentchainCall(ParentchainCall), + IpfsAdd(Vec), +} + impl From>> for StorageEntry> { fn from(response: WorkerResponse>) -> Self { match response { diff --git a/core-primitives/types/src/parentchain.rs b/core-primitives/types/src/parentchain.rs index 24b663669..15c5305eb 100644 --- a/core-primitives/types/src/parentchain.rs +++ b/core-primitives/types/src/parentchain.rs @@ -418,7 +418,7 @@ impl From for () { } // All info for additionalParam except tip whi -#[derive(Encode, Debug, Clone, PartialEq, Eq)] +#[derive(Encode, Decode, Debug, Clone, PartialEq, Eq)] pub struct GenericMortality { pub era: Era, pub mortality_checkpoint: Option, @@ -431,7 +431,7 @@ impl GenericMortality { } /// a wrapper to target calls to specific parentchains -#[derive(Encode, Debug, Clone, PartialEq, Eq)] +#[derive(Encode, Decode, Debug, Clone, PartialEq, Eq)] pub enum ParentchainCall { Integritee { call: OpaqueCall, mortality: GenericMortality }, TargetA { call: OpaqueCall, mortality: GenericMortality }, diff --git a/core/offchain-worker-executor/src/executor.rs b/core/offchain-worker-executor/src/executor.rs index 8ad30c38c..ab5532091 100644 --- a/core/offchain-worker-executor/src/executor.rs +++ b/core/offchain-worker-executor/src/executor.rs @@ -29,8 +29,8 @@ use itp_stf_primitives::{traits::TrustedCallVerification, types::TrustedOperatio use itp_stf_state_handler::{handle_state::HandleState, query_shard_state::QueryShardState}; use itp_top_pool_author::traits::AuthorApi; use itp_types::{ - parentchain::{BlockNumber, GenericMortality, ParentchainCall}, - OpaqueCall, ShardIdentifier, H256, + parentchain::{BlockNumber, GenericMortality}, + OpaqueCall, ShardIdentifier, TrustedCallSideEffect, H256, }; use log::*; use sp_runtime::traits::{Block, Header}; @@ -84,7 +84,8 @@ impl< Stf, TCS, G, - > where + > +where ParentchainBlock: Block, ParentchainBlock::Header: Header, StfExecutor: StateUpdateProposer, @@ -118,7 +119,7 @@ impl< let max_duration = Duration::from_secs(5); let latest_parentchain_header = self.get_latest_parentchain_header()?; - let mut parentchain_effects: Vec = Vec::new(); + let mut trusted_call_side_effects: Vec = Vec::new(); let shards = self.state_handler.list_shards()?; trace!("Executing calls on {} shard(s)", shards.len()); @@ -142,7 +143,7 @@ impl< }, )?; - parentchain_effects + trusted_call_side_effects .append(&mut batch_execution_result.get_extrinsic_callbacks().clone()); let failed_operations = batch_execution_result.get_failed_operations(); @@ -164,8 +165,8 @@ impl< // TODO: notify parentchain about executed operations? 
-> add to parentchain effects } - if !parentchain_effects.is_empty() { - self.send_parentchain_effects(parentchain_effects)?; + if !trusted_call_side_effects.is_empty() { + self.execute_trusted_call_side_effects(trusted_call_side_effects)?; } Ok(()) @@ -188,18 +189,40 @@ impl< Ok(()) } - fn send_parentchain_effects(&self, parentchain_effects: Vec) -> Result<()> { - let integritee_calls: Vec<(OpaqueCall, GenericMortality)> = parentchain_effects + fn execute_trusted_call_side_effects( + &self, + side_effects: Vec, + ) -> Result<()> { + let integritee_calls: Vec<(OpaqueCall, GenericMortality)> = side_effects .iter() - .filter_map(|parentchain_call| parentchain_call.as_integritee()) + .filter_map(|side_effect| match side_effect { + TrustedCallSideEffect::ParentchainCall(call) => Some(call.clone()), + _ => None, + }) + .filter_map(|call| call.as_integritee()) .collect(); - let target_a_calls: Vec<(OpaqueCall, GenericMortality)> = parentchain_effects + let target_a_calls: Vec<(OpaqueCall, GenericMortality)> = side_effects .iter() - .filter_map(|parentchain_call| parentchain_call.as_target_a()) + .filter_map(|side_effect| match side_effect { + TrustedCallSideEffect::ParentchainCall(call) => Some(call.clone()), + _ => None, + }) + .filter_map(|call| call.as_target_a()) .collect(); - let target_b_calls: Vec<(OpaqueCall, GenericMortality)> = parentchain_effects + let target_b_calls: Vec<(OpaqueCall, GenericMortality)> = side_effects .iter() - .filter_map(|parentchain_call| parentchain_call.as_target_b()) + .filter_map(|side_effect| match side_effect { + TrustedCallSideEffect::ParentchainCall(call) => Some(call.clone()), + _ => None, + }) + .filter_map(|call| call.as_target_b()) + .collect(); + let ipfs_blobs_to_add: Vec> = side_effects + .iter() + .filter_map(|side_effect| match side_effect { + TrustedCallSideEffect::IpfsAdd(blob) => Some(blob.clone()), + _ => None, + }) .collect(); debug!( "stf wants to send calls to parentchains: Integritee: {} TargetA: {} TargetB: {}", @@ -218,6 +241,13 @@ impl< self.extrinsics_factory.create_extrinsics(integritee_calls.as_slice(), None)?; self.validator_accessor .execute_mut_on_validator(|v| v.send_extrinsics(extrinsics))?; + + if !ipfs_blobs_to_add.is_empty() { + warn!( + "stf wants to add {} blobs to ipfs, which is unimplemented for the OCW", + ipfs_blobs_to_add.len() + ) + } Ok(()) } diff --git a/enclave-runtime/src/test/sidechain_aura_tests.rs b/enclave-runtime/src/test/sidechain_aura_tests.rs index 08f7bca82..f93e4d319 100644 --- a/enclave-runtime/src/test/sidechain_aura_tests.rs +++ b/enclave-runtime/src/test/sidechain_aura_tests.rs @@ -27,7 +27,7 @@ use crate::{ }, mocks::{propose_to_import_call_mock::ProposeToImportOCallApi, types::*}, }, - top_pool_execution::{exec_aura_on_slot, send_blocks_and_extrinsics}, + top_pool_execution::{exec_aura_on_slot, send_blocks_and_execute_side_effects}, }; use codec::Decode; use ita_stf::{ @@ -167,7 +167,7 @@ pub fn produce_sidechain_block_and_import_it() { let state_hash_before_block_production = get_state_hash(state_handler.as_ref(), &shard_id); info!("Executing AURA on slot.."); - let (blocks, opaque_calls) = + let (blocks, side_effects) = exec_aura_on_slot::<_, ParentchainBlock, SignedSidechainBlock, _, _, _, _, _>( slot_info, signer, @@ -201,9 +201,9 @@ pub fn produce_sidechain_block_and_import_it() { let propose_to_block_import_ocall_api = Arc::new(ProposeToImportOCallApi::new(parentchain_header, block_importer)); - send_blocks_and_extrinsics::( + send_blocks_and_execute_side_effects::( blocks, - opaque_calls, + 
side_effects, propose_to_block_import_ocall_api, ) .unwrap(); diff --git a/enclave-runtime/src/test/sidechain_event_tests.rs b/enclave-runtime/src/test/sidechain_event_tests.rs index fcc1e5432..82ae7eb3a 100644 --- a/enclave-runtime/src/test/sidechain_event_tests.rs +++ b/enclave-runtime/src/test/sidechain_event_tests.rs @@ -25,7 +25,7 @@ use crate::{ }, mocks::{propose_to_import_call_mock::ProposeToImportOCallApi, types::*}, }, - top_pool_execution::{exec_aura_on_slot, send_blocks_and_extrinsics}, + top_pool_execution::{exec_aura_on_slot, send_blocks_and_execute_side_effects}, }; use ita_sgx_runtime::Runtime; use ita_stf::{helpers::set_block_number, Getter, TrustedCallSigned}; @@ -139,7 +139,7 @@ pub fn ensure_events_get_reset_upon_block_proposal() { ); info!("Executing AURA on slot.."); - let (blocks, opaque_calls) = + let (blocks, side_effects) = exec_aura_on_slot::<_, ParentchainBlock, SignedSidechainBlock, _, _, _, _, _>( slot_info, signer, @@ -156,9 +156,9 @@ pub fn ensure_events_get_reset_upon_block_proposal() { let propose_to_block_import_ocall_api = Arc::new(ProposeToImportOCallApi::new(parentchain_header, block_importer)); - send_blocks_and_extrinsics::( + send_blocks_and_execute_side_effects::( blocks, - opaque_calls, + side_effects, propose_to_block_import_ocall_api, ) .unwrap(); diff --git a/enclave-runtime/src/top_pool_execution.rs b/enclave-runtime/src/top_pool_execution.rs index fbff20f77..697ff6b71 100644 --- a/enclave-runtime/src/top_pool_execution.rs +++ b/enclave-runtime/src/top_pool_execution.rs @@ -56,8 +56,8 @@ use itp_sgx_crypto::key_repository::AccessKey; use itp_stf_state_handler::query_shard_state::QueryShardState; use itp_time_utils::duration_now; use itp_types::{ - parentchain::{GenericMortality, ParentchainCall, ParentchainId, SidechainBlockConfirmation}, - Block, OpaqueCall, H256, + parentchain::{GenericMortality, ParentchainId, SidechainBlockConfirmation}, + Block, OpaqueCall, TrustedCallSideEffect, H256, }; use its_primitives::{ traits::{ @@ -233,7 +233,7 @@ fn execute_top_pool_trusted_calls_internal() -> Result<()> { block_composer, ); - let (blocks, parentchain_calls) = + let (blocks, side_effects) = exec_aura_on_slot::<_, _, SignedSidechainBlock, _, _, _, _, _>( slot.clone(), authority, @@ -252,7 +252,7 @@ fn execute_top_pool_trusted_calls_internal() -> Result<()> { log_remaining_slot_duration(&slot, SlotStage::AfterAura); - send_blocks_and_extrinsics::(blocks, parentchain_calls, ocall_api)?; + send_blocks_and_execute_side_effects::(blocks, side_effects, ocall_api)?; log_remaining_slot_duration(&slot, SlotStage::AfterBroadcastAndExtrinsics); }, @@ -286,7 +286,7 @@ pub(crate) fn exec_aura_on_slot< maybe_target_b_block_import_trigger: Option>, proposer_environment: PEnvironment, shards: Vec>, -) -> Result<(Vec, Vec)> +) -> Result<(Vec, Vec)> where ParentchainBlock: BlockTrait, SignedSidechainBlock: @@ -321,20 +321,24 @@ where ) .with_claim_strategy(SlotClaimStrategy::RoundRobin); - let (blocks, pxts): (Vec<_>, Vec<_>) = + let (blocks, side_effects): (Vec<_>, Vec<_>) = PerShardSlotWorkerScheduler::on_slot(&mut aura, slot, shards) .into_iter() - .map(|r| (r.block, r.parentchain_effects)) + .map(|r| (r.block, r.side_effects)) .unzip(); - let opaque_calls: Vec = pxts.into_iter().flatten().collect(); + let opaque_calls: Vec = side_effects.into_iter().flatten().collect(); Ok((blocks, opaque_calls)) } /// Broadcasts sidechain blocks to fellow peers and sends opaque calls as extrinsic to the parentchain. 
-pub(crate) fn send_blocks_and_extrinsics( +pub(crate) fn send_blocks_and_execute_side_effects< + ParentchainBlock, + SignedSidechainBlock, + OCallApi, +>( blocks: Vec, - parentchain_calls: Vec, + side_effects: Vec, ocall_api: Arc, ) -> Result<()> where @@ -346,8 +350,12 @@ where debug!("Proposing {} sidechain block(s) (broadcasting to peers)", blocks.len()); ocall_api.propose_sidechain_blocks(blocks)?; - let calls: Vec<(OpaqueCall, GenericMortality)> = parentchain_calls + let calls: Vec<(OpaqueCall, GenericMortality)> = side_effects .iter() + .filter_map(|side_effect| match side_effect { + TrustedCallSideEffect::ParentchainCall(call) => Some(call.clone()), + _ => None, + }) .filter_map(|parentchain_call| parentchain_call.as_integritee()) .collect(); debug!("Enclave wants to send {} extrinsics to Integritee Parentchain", calls.len()); @@ -357,8 +365,12 @@ where let validator_access = get_validator_accessor_from_integritee_solo_or_parachain()?; validator_access.execute_mut_on_validator(|v| v.send_extrinsics(xts))?; } - let calls: Vec<(OpaqueCall, GenericMortality)> = parentchain_calls + let calls: Vec<(OpaqueCall, GenericMortality)> = side_effects .iter() + .filter_map(|side_effect| match side_effect { + TrustedCallSideEffect::ParentchainCall(call) => Some(call.clone()), + _ => None, + }) .filter_map(|parentchain_call| parentchain_call.as_target_a()) .collect(); debug!("Enclave wants to send {} extrinsics to TargetA Parentchain", calls.len()); @@ -368,8 +380,12 @@ where let validator_access = get_validator_accessor_from_target_a_solo_or_parachain()?; validator_access.execute_mut_on_validator(|v| v.send_extrinsics(xts))?; } - let calls: Vec<(OpaqueCall, GenericMortality)> = parentchain_calls + let calls: Vec<(OpaqueCall, GenericMortality)> = side_effects .iter() + .filter_map(|side_effect| match side_effect { + TrustedCallSideEffect::ParentchainCall(call) => Some(call.clone()), + _ => None, + }) .filter_map(|parentchain_call| parentchain_call.as_target_b()) .collect(); debug!("Enclave wants to send {} extrinsics to TargetB Parentchain", calls.len()); @@ -379,6 +395,16 @@ where let validator_access = get_validator_accessor_from_target_b_solo_or_parachain()?; validator_access.execute_mut_on_validator(|v| v.send_extrinsics(xts))?; } + let ipfs_blobs_to_add: Vec> = side_effects + .iter() + .filter_map(|side_effect| match side_effect { + TrustedCallSideEffect::IpfsAdd(blob) => Some(blob.clone()), + _ => None, + }) + .collect(); + if !ipfs_blobs_to_add.is_empty() { + warn!("Storing data on IPFS from within AURA is not yet supported."); + } Ok(()) } diff --git a/sidechain/consensus/aura/src/slot_proposer.rs b/sidechain/consensus/aura/src/slot_proposer.rs index 617cb22ec..667f5ddc8 100644 --- a/sidechain/consensus/aura/src/slot_proposer.rs +++ b/sidechain/consensus/aura/src/slot_proposer.rs @@ -116,7 +116,7 @@ where ) .map_err(|e| ConsensusError::Other(e.to_string().into()))?; - let parentchain_extrinsics = batch_execution_result.get_extrinsic_callbacks(); + let side_effects = batch_execution_result.get_extrinsic_callbacks(); let executed_operation_hashes: Vec<_> = batch_execution_result.get_executed_operation_hashes().to_vec(); @@ -157,6 +157,6 @@ where max_duration.as_millis(), ); - Ok(Proposal { block: sidechain_block, parentchain_effects: parentchain_extrinsics }) + Ok(Proposal { block: sidechain_block, side_effects }) } } diff --git a/sidechain/consensus/common/src/lib.rs b/sidechain/consensus/common/src/lib.rs index 64391c1bf..c7861d5c1 100644 --- a/sidechain/consensus/common/src/lib.rs +++ 
b/sidechain/consensus/common/src/lib.rs @@ -50,7 +50,7 @@ pub use block_import::*; pub use block_import_confirmation_handler::*; pub use block_import_queue_worker::*; pub use error::*; -use itp_types::parentchain::ParentchainCall; +use itp_types::TrustedCallSideEffect; pub use peer_block_sync::*; pub trait Verifier: Send + Sync @@ -107,9 +107,10 @@ pub trait Proposer< pub struct Proposal { /// The sidechain block that was build. pub block: SignedSidechainBlock, - /// Parentchain state transitions triggered by sidechain state transitions. + /// Side effects of the trusted calls included in the block. + /// e.g. Parentchain state transitions triggered by sidechain state transitions. /// /// Any sidechain stf that invokes a parentchain stf must not commit its state change /// before the parentchain effect has been finalized. - pub parentchain_effects: Vec, + pub side_effects: Vec, } diff --git a/sidechain/consensus/slots/src/lib.rs b/sidechain/consensus/slots/src/lib.rs index 47fd911bc..7a897c87d 100644 --- a/sidechain/consensus/slots/src/lib.rs +++ b/sidechain/consensus/slots/src/lib.rs @@ -55,7 +55,7 @@ mod mocks; #[cfg(test)] mod per_shard_slot_worker_tests; -use itp_types::parentchain::ParentchainCall; +use itp_types::TrustedCallSideEffect; #[cfg(feature = "std")] pub use slot_stream::*; pub use slots::*; @@ -65,11 +65,12 @@ pub use slots::*; pub struct SlotResult { /// The result of a slot operation. pub block: SignedSidechainBlock, - /// Parentchain state transitions triggered by sidechain state transitions. + /// Side effects of the trusted calls included in the block. + /// e.g. Parentchain state transitions triggered by sidechain state transitions. /// /// Any sidechain stf that invokes a parentchain stf must not commit its state change /// before the parentchain effect has been finalized. - pub parentchain_effects: Vec, + pub side_effects: Vec, } /// A worker that should be invoked at every new slot for a specific shard. 
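The filter_map chains that split a Vec<TrustedCallSideEffect> into per-parentchain calls and IPFS blobs are repeated almost verbatim in the offchain-worker executor and in send_blocks_and_execute_side_effects. Below is a minimal sketch of a shared helper that performs the partitioning once; SideEffectBuckets and partition_side_effects are hypothetical names and are not part of this patch series.

// Illustrative sketch only, not part of these patches: partition the side
// effects once instead of repeating the filter_map chains at every consumer.
use itp_types::{
	parentchain::{GenericMortality, ParentchainCall},
	OpaqueCall, TrustedCallSideEffect,
};

pub struct SideEffectBuckets {
	pub integritee_calls: Vec<(OpaqueCall, GenericMortality)>,
	pub target_a_calls: Vec<(OpaqueCall, GenericMortality)>,
	pub target_b_calls: Vec<(OpaqueCall, GenericMortality)>,
	pub ipfs_blobs: Vec<Vec<u8>>,
}

pub fn partition_side_effects(side_effects: Vec<TrustedCallSideEffect>) -> SideEffectBuckets {
	let mut buckets = SideEffectBuckets {
		integritee_calls: Vec::new(),
		target_a_calls: Vec::new(),
		target_b_calls: Vec::new(),
		ipfs_blobs: Vec::new(),
	};
	for side_effect in side_effects {
		match side_effect {
			// as_integritee/as_target_a/as_target_b return Some((OpaqueCall, GenericMortality))
			// only for the matching parentchain, as used by the existing consumers in this series.
			TrustedCallSideEffect::ParentchainCall(call) => {
				if let Some(c) = call.as_integritee() {
					buckets.integritee_calls.push(c);
				} else if let Some(c) = call.as_target_a() {
					buckets.target_a_calls.push(c);
				} else if let Some(c) = call.as_target_b() {
					buckets.target_b_calls.push(c);
				}
			},
			TrustedCallSideEffect::IpfsAdd(blob) => buckets.ipfs_blobs.push(blob),
		}
	}
	buckets
}

A helper along these lines could sit next to the enum in itp-types so the enclave slot worker, the offchain-worker executor, and the tests all consume side effects the same way.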
@@ -387,10 +388,7 @@ pub trait SimpleSlotWorker { latest_integritee_parentchain_header.number(), latest_integritee_parentchain_header.hash() ); - Some(SlotResult { - block: proposing.block, - parentchain_effects: proposing.parentchain_effects, - }) + Some(SlotResult { block: proposing.block, side_effects: proposing.side_effects }) } } From 6452f58485a58016033fe00e9e28b4dd2e989751 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Tue, 16 Sep 2025 16:26:46 +0200 Subject: [PATCH 05/91] connect Ipfs OcallApi --- core-primitives/ocall-api/src/lib.rs | 4 ++-- enclave-runtime/src/ocall/ipfs_ocall.rs | 9 +++++---- .../src/test/mocks/propose_to_import_call_mock.rs | 13 ++++++++++++- enclave-runtime/src/top_pool_execution.rs | 12 ++++++++---- 4 files changed, 27 insertions(+), 11 deletions(-) diff --git a/core-primitives/ocall-api/src/lib.rs b/core-primitives/ocall-api/src/lib.rs index c9890bf5a..1efbe35e7 100644 --- a/core-primitives/ocall-api/src/lib.rs +++ b/core-primitives/ocall-api/src/lib.rs @@ -142,11 +142,11 @@ pub trait EnclaveSidechainOCallApi: Clone + Send + Sync { } /// Newtype for IPFS CID +#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] pub struct IpfsCid(pub [u8; 46]); /// trait for o-call related to IPFS pub trait EnclaveIpfsOCallApi: Clone + Send + Sync { fn write_ipfs(&self, encoded_state: &[u8]) -> SgxResult; - - fn read_ipfs(&self, cid: &IpfsCid) -> SgxResult<()>; + fn read_ipfs(&self, cid: &IpfsCid) -> SgxResult>; } diff --git a/enclave-runtime/src/ocall/ipfs_ocall.rs b/enclave-runtime/src/ocall/ipfs_ocall.rs index d1a553085..690d25e66 100644 --- a/enclave-runtime/src/ocall/ipfs_ocall.rs +++ b/enclave-runtime/src/ocall/ipfs_ocall.rs @@ -15,10 +15,11 @@ limitations under the License. */ - use crate::ocall::{ffi, OcallApi}; +use alloc::vec::Vec; use frame_support::ensure; use itp_ocall_api::{EnclaveIpfsOCallApi, IpfsCid}; +use log::warn; use sgx_types::{sgx_status_t, SgxResult}; impl EnclaveIpfsOCallApi for OcallApi { @@ -42,7 +43,7 @@ impl EnclaveIpfsOCallApi for OcallApi { Ok(cid_buf) } - fn read_ipfs(&self, cid: &IpfsCid) -> SgxResult<()> { + fn read_ipfs(&self, cid: &IpfsCid) -> SgxResult> { let mut rt: sgx_status_t = sgx_status_t::SGX_ERROR_UNEXPECTED; let res = unsafe { @@ -51,7 +52,7 @@ impl EnclaveIpfsOCallApi for OcallApi { ensure!(rt == sgx_status_t::SGX_SUCCESS, rt); ensure!(res == sgx_status_t::SGX_SUCCESS, res); - - Ok(()) + warn!("IPFS read not implemented, returning empty vec"); + Ok(vec![]) } } diff --git a/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs b/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs index f6b04e922..cd9c9b533 100644 --- a/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs +++ b/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs @@ -19,7 +19,9 @@ use crate::test::mocks::types::TestBlockImporter; use codec::{Decode, Encode}; use itc_parentchain::primitives::ParentchainId; -use itp_ocall_api::{EnclaveOnChainOCallApi, EnclaveSidechainOCallApi, Result}; +use itp_ocall_api::{ + EnclaveIpfsOCallApi, EnclaveOnChainOCallApi, EnclaveSidechainOCallApi, IpfsCid, Result, +}; use itp_types::{ storage::StorageEntryVerified, BlockHash, Header as ParentchainHeader, ShardIdentifier, WorkerRequest, WorkerResponse, H256, @@ -122,3 +124,12 @@ impl EnclaveSidechainOCallApi for ProposeToImportOCallApi { Ok(Vec::new()) } } + +impl EnclaveIpfsOCallApi for ProposeToImportOCallApi { + fn write_ipfs(&self, _encoded_state: &[u8]) -> SgxResult { + Ok(IpfsCid([0u8; 46])) + } + fn read_ipfs(&self, _cid: &IpfsCid) 
-> SgxResult> { + Ok(vec![]) + } +} diff --git a/enclave-runtime/src/top_pool_execution.rs b/enclave-runtime/src/top_pool_execution.rs index 697ff6b71..01b4c03d0 100644 --- a/enclave-runtime/src/top_pool_execution.rs +++ b/enclave-runtime/src/top_pool_execution.rs @@ -49,7 +49,9 @@ use itc_parentchain::{ use itp_component_container::ComponentGetter; use itp_enclave_metrics::EnclaveMetric; use itp_extrinsics_factory::CreateExtrinsics; -use itp_ocall_api::{EnclaveMetricsOCallApi, EnclaveOnChainOCallApi, EnclaveSidechainOCallApi}; +use itp_ocall_api::{ + EnclaveIpfsOCallApi, EnclaveMetricsOCallApi, EnclaveOnChainOCallApi, EnclaveSidechainOCallApi, +}; use itp_pallet_storage::{SidechainPalletStorage, SidechainPalletStorageKeys}; use itp_settings::sidechain::SLOT_DURATION; use itp_sgx_crypto::key_repository::AccessKey; @@ -344,7 +346,7 @@ pub(crate) fn send_blocks_and_execute_side_effects< where ParentchainBlock: BlockTrait, SignedSidechainBlock: SignedBlock + 'static, - OCallApi: EnclaveSidechainOCallApi, + OCallApi: EnclaveSidechainOCallApi + EnclaveIpfsOCallApi, NumberFor: BlockNumberOps, { debug!("Proposing {} sidechain block(s) (broadcasting to peers)", blocks.len()); @@ -403,9 +405,11 @@ where }) .collect(); if !ipfs_blobs_to_add.is_empty() { - warn!("Storing data on IPFS from within AURA is not yet supported."); + ipfs_blobs_to_add.iter().for_each(|blob| match ocall_api.write_ipfs(blob) { + Ok(cid) => info!("SideEffects: Stored blob on IPFS with CID: {:?}", cid), + Err(e) => error!("SideEffects: Failed to store blob on IPFS: {:?}", e), + }); } - Ok(()) } From dde5c68916935e94f20431c70d2cb4180806219f Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Wed, 17 Sep 2025 12:10:53 +0200 Subject: [PATCH 06/91] refactor Ipfs support and plaintext relay notes to ipfs if requested --- Cargo.lock | 361 ++++++++++++------ app-libs/stf/src/relayed_note.rs | 31 +- app-libs/stf/src/trusted_call.rs | 38 +- .../trusted_base_cli/commands/send_note.rs | 29 +- .../src/pallet_teerex.rs | 6 +- core-primitives/ocall-api/src/lib.rs | 6 +- core-primitives/stf-executor/src/lib.rs | 5 +- core-primitives/types/src/lib.rs | 2 +- core-primitives/utils/Cargo.toml | 7 + core-primitives/utils/src/ipfs.rs | 181 +++++++++ core-primitives/utils/src/lib.rs | 2 + enclave-runtime/Cargo.lock | 140 +++---- enclave-runtime/src/ipfs.rs | 99 ----- enclave-runtime/src/lib.rs | 1 - enclave-runtime/src/ocall/ipfs_ocall.rs | 23 +- enclave-runtime/src/test/ipfs_tests.rs | 19 +- .../test/mocks/propose_to_import_call_mock.rs | 8 +- enclave-runtime/src/test/tests_main.rs | 10 +- service/src/ocall_bridge/bridge_api.rs | 8 +- service/src/ocall_bridge/ffi/ipfs.rs | 31 +- service/src/ocall_bridge/ipfs_ocall.rs | 50 ++- service/src/tests/mock.rs | 3 +- 22 files changed, 684 insertions(+), 376 deletions(-) create mode 100644 core-primitives/utils/src/ipfs.rs delete mode 100644 enclave-runtime/src/ipfs.rs diff --git a/Cargo.lock b/Cargo.lock index 8bc0660e8..e3b9b6be3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18,7 +18,7 @@ version = "0.4.2" source = "git+https://github.com/encointer/substrate-api-client.git?branch=v0.9.42-tag-v0.14.0-integritee-patch#946f3ae82c5d48023107c1890728582561e94725" dependencies = [ "ac-primitives", - "log 0.4.22", + "log 0.4.28", "maybe-async", ] @@ -33,7 +33,7 @@ dependencies = [ "either", "frame-metadata", "hex", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "scale-bits", "scale-decode", @@ -292,6 +292,11 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "base-x" +version = "0.2.6" +source = 
"git+https://github.com/whalelephant/base-x-rs?branch=no_std#906c9ac59282ff5a2eec86efd25d50ad9927b147" + [[package]] name = "base16ct" version = "0.2.0" @@ -361,7 +366,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42#ff24c60ac7d9f87727ecdd0ded9a80c56e4f4b65" dependencies = [ "hash-db 0.16.0", - "log 0.4.22", + "log 0.4.28", ] [[package]] @@ -432,6 +437,17 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "blake2b_simd" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" +dependencies = [ + "arrayref", + "arrayvec 0.5.2", + "constant_time_eq 0.1.5", +] + [[package]] name = "blake2b_simd" version = "1.0.1" @@ -440,7 +456,18 @@ checksum = "3c2f0dc9a68c6317d884f97cc36cf5a3d20ba14ce404227df55e1af708ab04bc" dependencies = [ "arrayref", "arrayvec 0.7.4", - "constant_time_eq", + "constant_time_eq 0.2.6", +] + +[[package]] +name = "blake2s_simd" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e461a7034e85b211a4acb57ee2e6730b32912b06c08cc242243c39fc21ae6a2" +dependencies = [ + "arrayref", + "arrayvec 0.5.2", + "constant_time_eq 0.1.5", ] [[package]] @@ -449,7 +476,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ - "block-padding", + "block-padding 0.1.5", "byte-tools", "byteorder 1.4.3", "generic-array 0.12.4", @@ -461,6 +488,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ + "block-padding 0.2.1", "generic-array 0.14.7", ] @@ -482,13 +510,19 @@ dependencies = [ "byte-tools", ] +[[package]] +name = "block-padding" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" + [[package]] name = "bounded-collections" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb5b05133427c07c4776906f673ccf36c21b102c9829c641a5b56bd151d44fd6" dependencies = [ - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "scale-info", "serde 1.0.193", @@ -648,6 +682,16 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "cid" +version = "0.5.1" +source = "git+https://github.com/whalelephant/rust-cid?branch=nstd#cca87467c46106c801ca3727500477258b0f13b0" +dependencies = [ + "multibase", + "multihash 0.11.4", + "unsigned-varint 0.5.1", +] + [[package]] name = "cipher" version = "0.2.5" @@ -757,6 +801,12 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "795bc6e66a8e340f075fcf6227e417a2dc976b92b91f3cdc778bb858778b6747" +[[package]] +name = "constant_time_eq" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" + [[package]] name = "constant_time_eq" version = "0.2.6" @@ -1226,7 +1276,7 @@ version = "0.1.0" source = "git+https://github.com/integritee-network/pallets.git?branch=sdk-v0.13.0-polkadot-v0.9.42#abf29acd41a0fca9cd7025b297b6a9fa272a122f" dependencies = [ "common-primitives", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "scale-info", "serde 1.0.193", @@ -1253,7 +1303,7 @@ checksum = 
"a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" dependencies = [ "atty", "humantime", - "log 0.4.22", + "log 0.4.28", "regex 1.9.5", "termcolor", ] @@ -1266,7 +1316,7 @@ checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" dependencies = [ "humantime", "is-terminal", - "log 0.4.22", + "log 0.4.28", "regex 1.9.5", "termcolor", ] @@ -1340,7 +1390,7 @@ dependencies = [ "rlp", "scale-info", "serde 1.0.193", - "sha3", + "sha3 0.10.8", "triehash", ] @@ -1372,13 +1422,13 @@ dependencies = [ "evm-core", "evm-gasometer", "evm-runtime", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "primitive-types", "rlp", "scale-info", "serde 1.0.193", - "sha3", + "sha3 0.10.8", ] [[package]] @@ -1415,7 +1465,7 @@ dependencies = [ "environmental 1.1.4", "evm-core", "primitive-types", - "sha3", + "sha3 0.10.8", ] [[package]] @@ -1490,7 +1540,7 @@ dependencies = [ "either", "futures 0.3.28", "futures-timer", - "log 0.4.22", + "log 0.4.28", "num-traits 0.2.16", "parity-scale-codec", "parking_lot 0.12.1", @@ -1588,7 +1638,7 @@ dependencies = [ "hex", "impl-serde", "libsecp256k1", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "scale-info", "serde 1.0.193", @@ -1628,7 +1678,7 @@ dependencies = [ "frame-support-procedural", "frame-system", "linregress", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "paste", "scale-info", @@ -1683,7 +1733,7 @@ dependencies = [ "frame-support-procedural", "impl-trait-for-tuples", "k256", - "log 0.4.22", + "log 0.4.28", "once_cell 1.18.0", "parity-scale-codec", "paste", @@ -1749,7 +1799,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42#ff24c60ac7d9f87727ecdd0ded9a80c56e4f4b65" dependencies = [ "frame-support", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "scale-info", "serde 1.0.193", @@ -2066,7 +2116,7 @@ dependencies = [ "aho-corasick", "bstr", "fnv 1.0.7", - "log 0.4.22", + "log 0.4.28", "regex 1.9.5", ] @@ -2329,7 +2379,7 @@ name = "http_req" version = "0.8.1" source = "git+https://github.com/integritee-network/http_req?branch=master#3723e88235f2b29bc1a31835853b072ffd0455fd" dependencies = [ - "log 0.4.22", + "log 0.4.28", "rustls 0.19.1", "unicase 2.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "webpki 0.21.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2341,7 +2391,7 @@ name = "http_req" version = "0.8.1" source = "git+https://github.com/integritee-network/http_req#3723e88235f2b29bc1a31835853b072ffd0455fd" dependencies = [ - "log 0.4.22", + "log 0.4.28", "rustls 0.19.0 (git+https://github.com/mesalock-linux/rustls?branch=mesalock_sgx)", "sgx_tstd", "unicase 2.6.0 (git+https://github.com/mesalock-linux/unicase-sgx)", @@ -2421,7 +2471,7 @@ dependencies = [ "ct-logs", "futures-util 0.3.28", "hyper", - "log 0.4.22", + "log 0.4.28", "rustls 0.19.1", "rustls-native-certs", "tokio", @@ -2604,7 +2654,7 @@ dependencies = [ "itp-types", "itp-utils", "its-primitives", - "log 0.4.22", + "log 0.4.28", "pallet-assets", "pallet-balances", "pallet-enclave-bridge", @@ -2680,7 +2730,7 @@ dependencies = [ "its-test", "jsonrpsee", "lazy_static", - "log 0.4.22", + "log 0.4.28", "mockall", "pallet-balances", "parity-scale-codec", @@ -2762,6 +2812,18 @@ dependencies = [ "walkdir", ] +[[package]] +name = "ipfs-unixfs" +version = "0.0.1" +source = "git+https://github.com/whalelephant/rust-ipfs?branch=w-nstd#52f84dceea7065bb4ee2c24da53b3bedf162241a" +dependencies = [ + "cid", + "either", + "multihash 0.11.4", + "quick-protobuf", + "sha2 0.9.9", 
+] + [[package]] name = "ipnet" version = "2.7.2" @@ -2800,7 +2862,7 @@ dependencies = [ "itp-enclave-metrics", "itp-ocall-api", "lazy_static", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "serde 1.0.193", "sgx_tstd", @@ -2833,7 +2895,7 @@ dependencies = [ "itp-top-pool-author", "itp-types", "itp-utils", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "regex 1.9.5", "sgx_tstd", @@ -2849,7 +2911,7 @@ version = "0.1.0" dependencies = [ "hex-literal 0.4.1", "itp-types", - "log 0.4.22", + "log 0.4.28", ] [[package]] @@ -2904,7 +2966,7 @@ dependencies = [ "itp-storage", "itp-types", "itp-utils", - "log 0.4.22", + "log 0.4.28", "pallet-assets", "pallet-balances", "pallet-notes", @@ -2915,7 +2977,7 @@ dependencies = [ "parity-scale-codec", "rlp", "sgx_tstd", - "sha3", + "sha3 0.10.8", "sp-core", "sp-io 7.0.0", "sp-keyring", @@ -2933,7 +2995,7 @@ dependencies = [ "itp-utils", "jsonrpc-core 18.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 18.0.0 (git+https://github.com/scs/jsonrpc?branch=no_std_v18)", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "serde_json 1.0.103", "sgx_tstd", @@ -2956,7 +3018,7 @@ dependencies = [ "itp-test", "itp-top-pool-author", "itp-types", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sp-io 7.0.0", @@ -2985,7 +3047,7 @@ dependencies = [ "itc-parentchain-block-importer", "itp-import-queue", "itp-types", - "log 0.4.22", + "log 0.4.28", "sgx_tstd", "sgx_types", "thiserror 1.0.44", @@ -3003,7 +3065,7 @@ dependencies = [ "itp-stf-executor", "itp-stf-interface", "itp-types", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sgx_types", @@ -3031,7 +3093,7 @@ dependencies = [ "itp-test", "itp-top-pool-author", "itp-types", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sgx_types", @@ -3053,7 +3115,7 @@ dependencies = [ "itp-storage", "itp-test", "itp-types", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sgx_types", @@ -3080,7 +3142,7 @@ dependencies = [ "http 0.2.9", "http_req 0.8.1 (git+https://github.com/integritee-network/http_req?branch=master)", "http_req 0.8.1 (git+https://github.com/integritee-network/http_req)", - "log 0.4.22", + "log 0.4.28", "serde 1.0.193", "serde_json 1.0.103", "sgx_tstd", @@ -3102,7 +3164,7 @@ dependencies = [ "itp-rpc", "itp-types", "itp-utils", - "log 0.4.22", + "log 0.4.28", "openssl", "parity-scale-codec", "parking_lot 0.12.1", @@ -3129,7 +3191,7 @@ dependencies = [ "its-storage", "its-test", "jsonrpsee", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "sp-core", "tokio", @@ -3142,7 +3204,7 @@ dependencies = [ "bit-vec", "chrono 0.4.26", "env_logger 0.9.3", - "log 0.4.22", + "log 0.4.28", "mio 0.6.21", "mio 0.6.23", "mio-extras 2.0.6 (git+https://github.com/integritee-network/mio-extras-sgx?rev=963234b)", @@ -3200,7 +3262,7 @@ version = "0.9.0" dependencies = [ "itp-api-client-types", "itp-types", - "log 0.4.22", + "log 0.4.28", "sp-consensus-grandpa", "sp-runtime", "substrate-api-client", @@ -3233,7 +3295,7 @@ dependencies = [ "itp-sgx-crypto", "itp-sgx-io", "itp-time-utils", - "log 0.4.22", + "log 0.4.28", "num-bigint 0.2.5", "parity-scale-codec", "rustls 0.19.0 (git+https://github.com/mesalock-linux/rustls?rev=sgx_1.1.3)", @@ -3284,7 +3346,7 @@ dependencies = [ "itp-stf-interface", "itp-storage", "itp-types", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "serde_json 1.0.103", "sgx_crypto_helper", @@ -3320,7 +3382,7 @@ dependencies = [ "itp-node-api", "itp-nonce-cache", "itp-types", - "log 0.4.22", + 
"log 0.4.28", "parity-scale-codec", "sgx_tstd", "sgx_types", @@ -3471,7 +3533,7 @@ dependencies = [ "derive_more", "itp-sgx-io", "itp-sgx-temp-dir", - "log 0.4.22", + "log 0.4.28", "ofb", "parity-scale-codec", "serde_json 1.0.103", @@ -3490,7 +3552,7 @@ dependencies = [ "derive_more", "environmental 1.1.3", "itp-hashing", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "postcard", "serde 1.0.193", @@ -3546,7 +3608,7 @@ dependencies = [ "itp-top-pool", "itp-top-pool-author", "itp-types", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sgx_types", @@ -3593,7 +3655,7 @@ dependencies = [ "itp-stf-state-observer", "itp-time-utils", "itp-types", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "rust-base58 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "rust-base58 0.0.4 (git+https://github.com/mesalock-linux/rust-base58-sgx?rev=sgx_1.1.3)", @@ -3609,7 +3671,7 @@ name = "itp-stf-state-observer" version = "0.9.0" dependencies = [ "itp-types", - "log 0.4.22", + "log 0.4.28", "sgx_tstd", "thiserror 1.0.44", "thiserror 1.0.9", @@ -3652,7 +3714,7 @@ dependencies = [ "itp-time-utils", "itp-types", "jsonrpc-core 18.0.0 (git+https://github.com/scs/jsonrpc?branch=no_std_v18)", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "sgx_crypto_helper", "sgx_tstd", @@ -3685,7 +3747,7 @@ dependencies = [ "jsonrpc-core 18.0.0 (git+https://github.com/scs/jsonrpc?branch=no_std_v18)", "linked-hash-map 0.5.2", "linked-hash-map 0.5.6", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "parity-util-mem", "serde 1.0.193", @@ -3711,7 +3773,7 @@ dependencies = [ "itp-types", "jsonrpc-core 18.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 18.0.0 (git+https://github.com/scs/jsonrpc?branch=no_std_v18)", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "sgx_crypto_helper", "sgx_tstd", @@ -3747,7 +3809,11 @@ dependencies = [ name = "itp-utils" version = "0.9.0" dependencies = [ + "cid", "hex", + "ipfs-unixfs", + "log 0.4.28", + "multibase", "parity-scale-codec", ] @@ -3766,7 +3832,7 @@ dependencies = [ "itp-types", "its-primitives", "its-state", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sgx_types", @@ -3796,7 +3862,7 @@ dependencies = [ "itp-utils", "its-primitives", "its-test", - "log 0.4.22", + "log 0.4.28", "sgx_tstd", "sp-consensus-slots", "sp-core", @@ -3838,7 +3904,7 @@ dependencies = [ "its-state", "its-test", "its-validateer-fetch", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sp-core", @@ -3870,7 +3936,7 @@ dependencies = [ "its-primitives", "its-state", "its-test", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sgx_types", @@ -3895,7 +3961,7 @@ dependencies = [ "its-primitives", "its-test", "lazy_static", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sp-consensus-slots", @@ -3918,7 +3984,7 @@ dependencies = [ "its-storage", "its-test", "jsonrpsee", - "log 0.4.22", + "log 0.4.28", "serde 1.0.193", "serde_json 1.0.103", "thiserror 1.0.44", @@ -3953,7 +4019,7 @@ dependencies = [ "its-primitives", "jsonrpc-core 18.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 18.0.0 (git+https://github.com/scs/jsonrpc?branch=no_std_v18)", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "rust-base58 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "rust-base58 0.0.4 (git+https://github.com/mesalock-linux/rust-base58-sgx?rev=sgx_1.1.3)", @@ -3983,7 +4049,7 @@ dependencies = [ "itp-sgx-externalities", "itp-storage", 
"its-primitives", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sp-core", @@ -4002,7 +4068,7 @@ dependencies = [ "itp-types", "its-primitives", "its-test", - "log 0.4.22", + "log 0.4.28", "mockall", "parity-scale-codec", "parking_lot 0.12.1", @@ -4033,7 +4099,7 @@ dependencies = [ "itp-test", "itp-types", "its-primitives", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "sp-core", "sp-runtime", @@ -4067,7 +4133,7 @@ dependencies = [ "futures 0.3.28", "futures-executor 0.3.28", "futures-util 0.3.28", - "log 0.4.22", + "log 0.4.28", "serde 1.0.193", "serde_derive 1.0.193", "serde_json 1.0.103", @@ -4112,7 +4178,7 @@ dependencies = [ "hyper-rustls", "jsonrpsee-types", "jsonrpsee-utils", - "log 0.4.22", + "log 0.4.28", "serde 1.0.193", "serde_json 1.0.103", "thiserror 1.0.44", @@ -4132,7 +4198,7 @@ dependencies = [ "jsonrpsee-types", "jsonrpsee-utils", "lazy_static", - "log 0.4.22", + "log 0.4.28", "serde 1.0.193", "serde_json 1.0.103", "socket2", @@ -4165,7 +4231,7 @@ dependencies = [ "futures-channel 0.3.28", "futures-util 0.3.28", "hyper", - "log 0.4.22", + "log 0.4.28", "serde 1.0.193", "serde_json 1.0.103", "soketto", @@ -4182,7 +4248,7 @@ dependencies = [ "futures-util 0.3.28", "hyper", "jsonrpsee-types", - "log 0.4.22", + "log 0.4.28", "parking_lot 0.11.2", "rand 0.8.5", "rustc-hash", @@ -4201,7 +4267,7 @@ dependencies = [ "fnv 1.0.7", "futures 0.3.28", "jsonrpsee-types", - "log 0.4.22", + "log 0.4.28", "pin-project", "rustls 0.19.1", "rustls-native-certs", @@ -4225,7 +4291,7 @@ dependencies = [ "futures-util 0.3.28", "jsonrpsee-types", "jsonrpsee-utils", - "log 0.4.22", + "log 0.4.28", "rustc-hash", "serde 1.0.193", "serde_json 1.0.103", @@ -4451,9 +4517,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.22" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" [[package]] name = "lz4-sys" @@ -4636,7 +4702,7 @@ dependencies = [ "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys", "libc", - "log 0.4.22", + "log 0.4.28", "miow", "net2 0.2.39", "slab 0.4.8", @@ -4661,7 +4727,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52403fe290012ce777c4626790c8951324a2b9e3316b3143779c72b029742f19" dependencies = [ "lazycell", - "log 0.4.22", + "log 0.4.28", "mio 0.6.23", "slab 0.4.8", ] @@ -4672,7 +4738,7 @@ version = "2.0.6" source = "git+https://github.com/integritee-network/mio-extras-sgx?rev=963234b#963234bf55e44f9efff921938255126c48deef3a" dependencies = [ "lazycell", - "log 0.4.22", + "log 0.4.28", "mio 0.6.21", "mio 0.6.23", "sgx_tstd", @@ -4730,13 +4796,37 @@ dependencies = [ "futures-util 0.3.28", "http 0.2.9", "httparse 1.8.0", - "log 0.4.22", + "log 0.4.28", "memchr 2.6.3", "mime", "spin 0.9.8", "version_check", ] +[[package]] +name = "multibase" +version = "0.8.0" +source = "git+https://github.com/whalelephant/rust-multibase?branch=nstd#df67fb30e86998f7c10d4eea16a1cd480d2448c0" +dependencies = [ + "base-x", + "data-encoding", + "lazy_static", +] + +[[package]] +name = "multihash" +version = "0.11.4" +source = "git+https://github.com/whalelephant/rust-multihash?branch=nstd#2c8aca8fa1fcbcba26951d925de40fa81696020a" +dependencies = [ + "blake2b_simd 0.5.11", + "blake2s_simd", + "digest 0.9.0", + "sha-1 0.9.8", + "sha2 0.9.9", + "sha3 0.9.1", + "unsigned-varint 0.5.1", +] + [[package]] name = 
"multihash" version = "0.13.2" @@ -4797,7 +4887,7 @@ checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" dependencies = [ "lazy_static", "libc", - "log 0.4.22", + "log 0.4.28", "openssl", "openssl-probe", "openssl-sys", @@ -5208,7 +5298,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "scale-info", "sp-runtime", @@ -5223,7 +5313,7 @@ dependencies = [ "enclave-bridge-primitives", "frame-support", "frame-system", - "log 0.4.22", + "log 0.4.28", "pallet-teerex", "pallet-timestamp", "parity-scale-codec", @@ -5250,7 +5340,7 @@ dependencies = [ "frame-system", "hex", "impl-trait-for-tuples", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "rlp", "scale-info", @@ -5268,7 +5358,7 @@ dependencies = [ "frame-support", "frame-system", "itp-randomness", - "log 0.4.22", + "log 0.4.28", "pallet-balances", "pallet-timestamp", "parity-scale-codec", @@ -5290,7 +5380,7 @@ dependencies = [ "frame-system", "ita-stf", "itp-randomness", - "log 0.4.22", + "log 0.4.28", "pallet-balances", "pallet-timestamp", "parity-scale-codec", @@ -5310,7 +5400,7 @@ dependencies = [ "env_logger 0.9.3", "frame-support", "frame-system", - "log 0.4.22", + "log 0.4.28", "pallet-balances", "parity-scale-codec", "scale-info", @@ -5331,7 +5421,7 @@ dependencies = [ "frame-system", "ita-stf", "itp-randomness", - "log 0.4.22", + "log 0.4.28", "pallet-balances", "pallet-timestamp", "parity-scale-codec", @@ -5351,7 +5441,7 @@ dependencies = [ "enclave-bridge-primitives", "frame-support", "frame-system", - "log 0.4.22", + "log 0.4.28", "pallet-balances", "parity-scale-codec", "scale-info", @@ -5369,7 +5459,7 @@ dependencies = [ "enclave-bridge-primitives", "frame-support", "frame-system", - "log 0.4.22", + "log 0.4.28", "pallet-enclave-bridge", "pallet-teerex", "pallet-timestamp", @@ -5405,7 +5495,7 @@ source = "git+https://github.com/integritee-network/pallets.git?branch=sdk-v0.13 dependencies = [ "frame-support", "frame-system", - "log 0.4.22", + "log 0.4.28", "pallet-teerex", "parity-scale-codec", "scale-info", @@ -5426,7 +5516,7 @@ dependencies = [ "frame-support", "frame-system", "hex", - "log 0.4.22", + "log 0.4.28", "pallet-timestamp", "parity-scale-codec", "rustls-webpki", @@ -5448,7 +5538,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "scale-info", "sp-inherents", @@ -5484,7 +5574,7 @@ dependencies = [ "bs58", "byteorder 1.4.3", "data-encoding", - "multihash", + "multihash 0.13.2", "percent-encoding 2.3.1", "serde 1.0.193", "static_assertions", @@ -5933,6 +6023,15 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "quick-protobuf" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e489d4a83c17ea69b0291630229b5d4c92a94a3bf0165f7f72f506e94cda8b4b" +dependencies = [ + "byteorder 1.4.3", +] + [[package]] name = "quote" version = "1.0.40" @@ -6245,7 +6344,7 @@ dependencies = [ "hyper-tls", "ipnet", "js-sys", - "log 0.4.22", + "log 0.4.28", "mime", "native-tls", "once_cell 1.18.0", @@ -6292,7 +6391,7 @@ source = "git+https://github.com/betrusted-io/ring-xous?branch=0.16.20-cleanup#4 dependencies = [ "cc", "libc", - "log 0.4.22", + "log 0.4.28", "once_cell 1.18.0", "rkyv", "spin 0.5.2", @@ -6489,7 +6588,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ "base64 0.13.1", - "log 
0.4.22", + "log 0.4.28", "ring 0.16.20", "sct 0.6.1", "webpki 0.21.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -6921,7 +7020,7 @@ dependencies = [ "frame-support", "hex", "hex-literal 0.3.4", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "ring 0.16.20", "rustls-webpki", @@ -7160,6 +7259,18 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "sha3" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" +dependencies = [ + "block-buffer 0.9.0", + "digest 0.9.0", + "keccak", + "opaque-debug 0.3.0", +] + [[package]] name = "sha3" version = "0.10.8" @@ -7288,7 +7399,7 @@ dependencies = [ "bytes 1.4.0", "futures 0.3.28", "httparse 1.8.0", - "log 0.4.22", + "log 0.4.28", "rand 0.8.5", "sha-1 0.9.8", ] @@ -7299,7 +7410,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42#ff24c60ac7d9f87727ecdd0ded9a80c56e4f4b65" dependencies = [ "hash-db 0.16.0", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "scale-info", "sp-api-proc-macro", @@ -7361,7 +7472,7 @@ source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.4 dependencies = [ "async-trait", "futures 0.3.28", - "log 0.4.22", + "log 0.4.28", "sp-core", "sp-inherents", "sp-runtime", @@ -7393,7 +7504,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42#ff24c60ac7d9f87727ecdd0ded9a80c56e4f4b65" dependencies = [ "finality-grandpa", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "scale-info", "serde 1.0.193", @@ -7435,7 +7546,7 @@ dependencies = [ "impl-serde", "lazy_static", "libsecp256k1", - "log 0.4.22", + "log 0.4.28", "merlin", "parity-scale-codec", "parking_lot 0.12.1", @@ -7466,11 +7577,11 @@ name = "sp-core-hashing" version = "5.0.0" source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42#ff24c60ac7d9f87727ecdd0ded9a80c56e4f4b65" dependencies = [ - "blake2b_simd", + "blake2b_simd 1.0.1", "byteorder 1.4.3", "digest 0.10.7", "sha2 0.10.7", - "sha3", + "sha3 0.10.8", "sp-std", "twox-hash", ] @@ -7528,7 +7639,7 @@ version = "7.0.0" dependencies = [ "itp-sgx-externalities", "libsecp256k1", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sp-core", @@ -7544,7 +7655,7 @@ dependencies = [ "ed25519-dalek", "futures 0.3.28", "libsecp256k1", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "rustversion", "secp256k1", @@ -7614,7 +7725,7 @@ dependencies = [ "either", "hash256-std-hasher", "impl-trait-for-tuples", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "paste", "rand 0.8.5", @@ -7677,7 +7788,7 @@ version = "0.13.0" source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42#ff24c60ac7d9f87727ecdd0ded9a80c56e4f4b65" dependencies = [ "hash-db 0.16.0", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "parking_lot 0.12.1", "rand 0.8.5", @@ -7716,7 +7827,7 @@ source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.4 dependencies = [ "async-trait", "futures-timer", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "sp-inherents", "sp-runtime", @@ -7794,7 +7905,7 @@ source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.4 dependencies = [ "anyhow", "impl-trait-for-tuples", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "sp-std", "wasmi", @@ -7922,7 +8033,7 @@ dependencies = [ "frame-metadata", "frame-support", "hex", - "log 0.4.22", + 
"log 0.4.28", "maybe-async", "parity-scale-codec", "serde 1.0.193", @@ -8043,7 +8154,7 @@ source = "git+https://github.com/integritee-network/pallets.git?branch=sdk-v0.13 dependencies = [ "common-primitives", "derive_more", - "log 0.4.22", + "log 0.4.28", "parity-scale-codec", "scale-info", "serde 1.0.193", @@ -8274,7 +8385,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c83b561d025642014097b66e6c1bb422783339e0909e4429cde4749d1990bc38" dependencies = [ "futures-util 0.3.28", - "log 0.4.22", + "log 0.4.28", "tokio", "tungstenite 0.21.0", ] @@ -8289,7 +8400,7 @@ dependencies = [ "futures-core 0.3.28", "futures-io 0.3.28", "futures-sink 0.3.28", - "log 0.4.22", + "log 0.4.28", "pin-project-lite", "tokio", ] @@ -8338,7 +8449,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if 1.0.0", - "log 0.4.22", + "log 0.4.28", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -8372,7 +8483,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" dependencies = [ "lazy_static", - "log 0.4.22", + "log 0.4.28", "tracing-core", ] @@ -8416,7 +8527,7 @@ checksum = "767abe6ffed88a1889671a102c2861ae742726f52e0a5a425b92c9fbfa7e9c85" dependencies = [ "hash-db 0.16.0", "hashbrown 0.13.2", - "log 0.4.22", + "log 0.4.28", "rustc-hex", "smallvec 1.11.0", ] @@ -8485,7 +8596,7 @@ dependencies = [ "bytes 1.4.0", "http 0.2.9", "httparse 1.8.0", - "log 0.4.22", + "log 0.4.28", "rand 0.8.5", "rustls 0.19.1", "sha-1 0.9.8", @@ -8507,7 +8618,7 @@ dependencies = [ "bytes 1.4.0", "http 0.2.9", "httparse 1.8.0", - "log 0.4.22", + "log 0.4.28", "native-tls", "rand 0.8.5", "sha1 0.10.5", @@ -8527,7 +8638,7 @@ dependencies = [ "data-encoding", "http 1.2.0", "httparse 1.8.0", - "log 0.4.22", + "log 0.4.28", "rand 0.8.5", "sha1 0.10.5", "thiserror 1.0.44", @@ -8541,7 +8652,7 @@ version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "digest 0.10.7", "rand 0.8.5", "static_assertions", @@ -8774,7 +8885,7 @@ dependencies = [ "headers", "http 0.2.9", "hyper", - "log 0.4.22", + "log 0.4.28", "mime", "mime_guess", "multer", @@ -8826,7 +8937,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", - "log 0.4.22", + "log 0.4.28", "once_cell 1.18.0", "proc-macro2", "quote", @@ -8929,7 +9040,7 @@ dependencies = [ "cfg-if 1.0.0", "indexmap 1.9.3", "libc", - "log 0.4.22", + "log 0.4.28", "object 0.29.0", "once_cell 1.18.0", "paste", @@ -8962,7 +9073,7 @@ dependencies = [ "cranelift-entity", "gimli 0.26.2", "indexmap 1.9.3", - "log 0.4.22", + "log 0.4.28", "object 0.29.0", "serde 1.0.193", "target-lexicon", @@ -8983,7 +9094,7 @@ dependencies = [ "cfg-if 1.0.0", "cpp_demangle", "gimli 0.26.2", - "log 0.4.22", + "log 0.4.28", "object 0.29.0", "rustc-demangle", "serde 1.0.193", @@ -9025,7 +9136,7 @@ dependencies = [ "cfg-if 1.0.0", "indexmap 1.9.3", "libc", - "log 0.4.22", + "log 0.4.28", "mach", "memfd", "memoffset 0.6.5", @@ -9344,7 +9455,7 @@ dependencies = [ "byteorder 1.4.3", "bytes 0.4.12", "httparse 1.8.0", - "log 0.4.22", + "log 0.4.28", "mio 0.6.23", "mio-extras 2.0.6 
(registry+https://github.com/rust-lang/crates.io-index)", "openssl", @@ -9400,7 +9511,7 @@ version = "0.1.50" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03e07c190c743d6d9e076f715333e94c48de41b99078343d174c707803df28c7" dependencies = [ - "log 0.4.22", + "log 0.4.28", "num-derive", "num-traits 0.2.16", "xous", @@ -9413,7 +9524,7 @@ version = "0.9.52" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32d8361077e67966d25922056284d17d042cbb1c96a7ebc2584eb8181427cbb0" dependencies = [ - "log 0.4.22", + "log 0.4.28", "num-derive", "num-traits 0.2.16", "rkyv", diff --git a/app-libs/stf/src/relayed_note.rs b/app-libs/stf/src/relayed_note.rs index c99a9e6fd..bdbfe4bed 100644 --- a/app-libs/stf/src/relayed_note.rs +++ b/app-libs/stf/src/relayed_note.rs @@ -1,12 +1,17 @@ use codec::{Decode, Encode}; -use itp_types::IpfsHash; +use itp_utils::IpfsCid; use sp_std::vec::Vec; pub type ConversationId = u32; #[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)] -pub struct RelayedNote { - pub conversation_id: ConversationId, - pub retreival_info: RelayedNoteRetreivalInfo, +pub enum NoteRelayType { + /// the note will be stored in chain state entirely + Here, + /// the note is stored on ipfs, encrypted with a symmetric key + Ipfs, + /// the note is relayed through an undeclared channel which is assumed to be + /// known by the recipient + Undeclared, } /// Necessary information for recipient to retrieve and potentially decrypt a relayed note @@ -15,7 +20,7 @@ pub enum RelayedNoteRetreivalInfo { /// the message is included within and not actually relayed Here { msg: Vec }, /// the message is stored on ipfs, encrypted with the provided key - Ipfs { cid: IpfsHash, encryption_key: [u8; 32] }, + Ipfs { cid: IpfsCid, encryption_key: [u8; 32] }, /// the message is relayed through an undeclared channel which is assumed to be /// known by the recipient, but the encryption key is provided Undeclared { encryption_key: [u8; 32] }, @@ -24,6 +29,20 @@ pub enum RelayedNoteRetreivalInfo { /// A user request to relay a note to a specific conversation. 
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)] pub struct RelayedNoteRequest { - pub conversation_id: ConversationId, + pub allow_onchain_fallback: bool, + pub relay_type: NoteRelayType, pub msg: Vec, + /// in the case of `Undeclared` relaying, this can be used to securely share the encryption key with the recipient + pub maybe_encryption_key: Option<[u8; 32]>, +} + +impl Default for RelayedNoteRequest { + fn default() -> Self { + RelayedNoteRequest { + allow_onchain_fallback: true, + relay_type: NoteRelayType::Here, + msg: Vec::new(), + maybe_encryption_key: None, + } + } } diff --git a/app-libs/stf/src/trusted_call.rs b/app-libs/stf/src/trusted_call.rs index 45a621404..71961f425 100644 --- a/app-libs/stf/src/trusted_call.rs +++ b/app-libs/stf/src/trusted_call.rs @@ -30,7 +30,7 @@ use crate::{ enclave_signer_account, ensure_enclave_signer_account, ensure_maintainer_account, get_mortality, shard_vault, shielding_target_genesis_hash, store_note, wrap_bytes, }, - relayed_note::{ConversationId, RelayedNoteRequest, RelayedNoteRetreivalInfo}, + relayed_note::{ConversationId, NoteRelayType, RelayedNoteRequest, RelayedNoteRetreivalInfo}, Getter, STF_BYTE_FEE_UNIT_DIVIDER, STF_SESSION_PROXY_DEPOSIT_DIVIDER, STF_SHIELDING_FEE_AMOUNT_DIVIDER, STF_TX_FEE_UNIT_DIVIDER, }; @@ -67,7 +67,7 @@ use itp_types::{ parentchain::{GenericMortality, ParentchainCall, ParentchainId, ProxyType}, Address, Moment, OpaqueCall, TrustedCallSideEffect, }; -use itp_utils::stringify::account_id_to_string; +use itp_utils::{stringify::account_id_to_string, IpfsCid}; use log::*; use pallet_notes::{TimestampedTrustedNote, TrustedNote}; use sp_core::{ @@ -628,10 +628,38 @@ where store_note(&from, self.call, vec![from.clone(), to])?; Ok(()) }, - TrustedCall::send_relayed_note(from, to, conversation_id, _blob) => { + TrustedCall::send_relayed_note(from, to, conversation_id, request) => { std::println!("⣿STF⣿ 🔄 send_relayed_note from ⣿⣿⣿ to ⣿⣿⣿ with note ⣿⣿⣿"); - let retreival_info = - RelayedNoteRetreivalInfo::Undeclared { encryption_key: [0u8; 32] }; + let retreival_info = if (self.call.encoded_size() <= MaxNoteSize::get() as usize) + && (request.allow_onchain_fallback) + { + Ok(RelayedNoteRetreivalInfo::Here { msg: request.msg.clone() }) + } else if (request.relay_type == NoteRelayType::Undeclared) + && request.maybe_encryption_key.is_some() + { + Ok(RelayedNoteRetreivalInfo::Undeclared { + encryption_key: request + .maybe_encryption_key + .expect("is_some has been tested previously"), + }) + } else if request.relay_type == NoteRelayType::Here + && request.msg.len() <= MaxNoteSize::get() as usize + { + Ok(RelayedNoteRetreivalInfo::Here { msg: request.msg.clone() }) + } else if request.relay_type == NoteRelayType::Ipfs { + //todo: proxy re-encryption for IPFS content. 
now plaintext + let cid = IpfsCid::from_content_bytes(&request.msg) + .map_err(|e| StfError::Dispatch(format!("IPFS error: {:?}", e)))?; + info!("storing relayed note to IPFS with CID {:?}", cid); + side_effects.push(TrustedCallSideEffect::IpfsAdd(request.msg)); + Ok(RelayedNoteRetreivalInfo::Ipfs { + cid, + encryption_key: request.maybe_encryption_key.unwrap_or([0u8; 32]), + }) + } else { + Err(StfError::Dispatch("Invalid relayed note request".into())) + }?; + let stripped_call = TrustedCall::send_relayed_note_stripped( from.clone(), to.clone(), diff --git a/cli/src/trusted_base_cli/commands/send_note.rs b/cli/src/trusted_base_cli/commands/send_note.rs index 594849d8d..a28317db6 100644 --- a/cli/src/trusted_base_cli/commands/send_note.rs +++ b/cli/src/trusted_base_cli/commands/send_note.rs @@ -22,7 +22,10 @@ use crate::{ trusted_operation::{perform_trusted_operation, send_direct_request}, Cli, CliResult, CliResultOk, }; -use ita_stf::{Getter, TrustedCall, TrustedCallSigned}; +use ita_stf::{ + relayed_note::{ConversationId, NoteRelayType, RelayedNoteRequest}, + Getter, TrustedCall, TrustedCallSigned, +}; use itp_stf_primitives::{ traits::TrustedCallSigning, types::{KeyPair, TrustedOperation}, @@ -43,6 +46,14 @@ pub struct SendNoteCommand { /// session proxy who can sign on behalf of the account #[clap(long)] session_proxy: Option, + + /// Instruct the worker enclave to encrypt and relay the message via IPFS instead of onchain + #[clap(long)] + ipfs_proxy: bool, + + /// specify conversation ID + #[clap(long)] + conversation_id: Option, } impl SendNoteCommand { @@ -56,10 +67,22 @@ impl SendNoteCommand { let nonce = get_trusted_account_info(cli, trusted_args, &sender, &signer) .map(|info| info.nonce) .unwrap_or_default(); - let top: TrustedOperation = + let top: TrustedOperation = if self.ipfs_proxy { + let request = RelayedNoteRequest { + allow_onchain_fallback: false, + relay_type: NoteRelayType::Ipfs, + msg: self.message.as_bytes().to_vec(), + maybe_encryption_key: None, + }; + let conversation_id = self.conversation_id.unwrap_or_default(); + TrustedCall::send_relayed_note(sender, to, conversation_id, request) + .sign(&KeyPair::Sr25519(Box::new(signer)), nonce, &mrenclave, &shard) + .into_trusted_operation(trusted_args.direct) + } else { TrustedCall::send_note(sender, to, self.message.as_bytes().to_vec()) .sign(&KeyPair::Sr25519(Box::new(signer)), nonce, &mrenclave, &shard) - .into_trusted_operation(trusted_args.direct); + .into_trusted_operation(trusted_args.direct) + }; if trusted_args.direct { Ok(send_direct_request(cli, trusted_args, &top).map(|_| CliResultOk::None)?) 
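
Usage sketch (reviewer commentary, not part of the patch): the snippet below shows how a sender could pre-compute the CID the enclave will derive for an IPFS-relayed note, and how a recipient could later verify bytes fetched from IPFS against the CID recorded in RelayedNoteRetreivalInfo::Ipfs. It assumes the RelayedNoteRequest type and the IpfsCid/IpfsContent helpers introduced elsewhere in this patch (core-primitives/utils/src/ipfs.rs); actually fetching the bytes from IPFS is left to whatever client the caller uses.

    // Sketch only: not part of the patch.
    use ita_stf::relayed_note::{NoteRelayType, RelayedNoteRequest};
    use itp_utils::ipfs::{IpfsCid, IpfsContent, IpfsError};

    /// Build an IPFS-relayed note request and the CID the enclave is expected
    /// to derive from the plaintext message bytes.
    fn build_ipfs_note_request(
        msg: Vec<u8>,
    ) -> Result<(RelayedNoteRequest, IpfsCid), IpfsError> {
        // The enclave derives the CID from the message bytes, so the sender can
        // compute the same CID locally before submitting the trusted call.
        let expected_cid = IpfsCid::from_content_bytes(&msg)?;
        let request = RelayedNoteRequest {
            allow_onchain_fallback: false,
            relay_type: NoteRelayType::Ipfs,
            msg,
            // plaintext until proxy re-encryption lands (see the TODO in trusted_call.rs)
            maybe_encryption_key: None,
        };
        Ok((request, expected_cid))
    }

    /// Verify bytes fetched from IPFS against the CID recorded in the note.
    fn verify_fetched_note(cid: IpfsCid, fetched: Vec<u8>) -> Result<Vec<u8>, IpfsError> {
        // Recompute the CID over the fetched bytes and compare it with the
        // recorded one; IpfsContent::verify returns an error on mismatch.
        let mut content = IpfsContent::new_with_cid_unverified(cid, fetched);
        content.verify()?;
        Ok(content.file_content)
    }

Because the payload is currently stored in plaintext, maybe_encryption_key is left as None here; once proxy re-encryption is added, the key would be shared via the retrieval info instead.
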
diff --git a/core-primitives/node-api/api-client-extensions/src/pallet_teerex.rs b/core-primitives/node-api/api-client-extensions/src/pallet_teerex.rs index e8007729b..9d3c73aea 100644 --- a/core-primitives/node-api/api-client-extensions/src/pallet_teerex.rs +++ b/core-primitives/node-api/api-client-extensions/src/pallet_teerex.rs @@ -18,7 +18,7 @@ use crate::ApiResult; use itp_api_client_types::{traits::GetStorage, Api, Config, Request}; use itp_types::{ - AccountId, IpfsHash, MultiEnclave, ShardIdentifier, ShardSignerStatus, ShardStatus, + AccountId, IpfsCid, MultiEnclave, ShardIdentifier, ShardSignerStatus, ShardStatus, }; use log::error; @@ -51,7 +51,7 @@ pub trait PalletTeerexApi { &self, shard: &ShardIdentifier, at_block: Option, - ) -> ApiResult>; + ) -> ApiResult>; } impl PalletTeerexApi for Api @@ -119,7 +119,7 @@ where &self, shard: &ShardIdentifier, at_block: Option, - ) -> ApiResult> { + ) -> ApiResult> { self.get_storage_map(TEEREX, "LatestIPFSHash", shard, at_block) } } diff --git a/core-primitives/ocall-api/src/lib.rs b/core-primitives/ocall-api/src/lib.rs index 1efbe35e7..74a406e76 100644 --- a/core-primitives/ocall-api/src/lib.rs +++ b/core-primitives/ocall-api/src/lib.rs @@ -25,7 +25,7 @@ use core::result::Result as StdResult; use derive_more::{Display, From}; use itp_storage::Error as StorageError; use itp_types::{ - parentchain::ParentchainId, storage::StorageEntryVerified, BlockHash, ShardIdentifier, + parentchain::ParentchainId, storage::StorageEntryVerified, BlockHash, IpfsCid, ShardIdentifier, TrustedOperationStatus, WorkerRequest, WorkerResponse, }; use sgx_types::*; @@ -141,10 +141,6 @@ pub trait EnclaveSidechainOCallApi: Clone + Send + Sync { ) -> SgxResult>; } -/// Newtype for IPFS CID -#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] -pub struct IpfsCid(pub [u8; 46]); - /// trait for o-call related to IPFS pub trait EnclaveIpfsOCallApi: Clone + Send + Sync { fn write_ipfs(&self, encoded_state: &[u8]) -> SgxResult; diff --git a/core-primitives/stf-executor/src/lib.rs b/core-primitives/stf-executor/src/lib.rs index 9da6fc993..d6203ca8c 100644 --- a/core-primitives/stf-executor/src/lib.rs +++ b/core-primitives/stf-executor/src/lib.rs @@ -173,7 +173,10 @@ mod tests { use super::*; use itp_sgx_externalities::SgxExternalities; use itp_test::mock::stf_mock::{GetterMock, TrustedCallSignedMock}; - use itp_types::{parentchain::GenericMortality, OpaqueCall}; + use itp_types::{ + parentchain::{GenericMortality, ParentchainCall}, + OpaqueCall, + }; use sp_runtime::generic::Era; #[test] diff --git a/core-primitives/types/src/lib.rs b/core-primitives/types/src/lib.rs index d4b20b0a6..dcd0364ed 100644 --- a/core-primitives/types/src/lib.rs +++ b/core-primitives/types/src/lib.rs @@ -41,7 +41,7 @@ pub type Nonce = u32; pub use itp_sgx_runtime_primitives::types::*; -pub type IpfsHash = [u8; 46]; +pub use itp_utils::IpfsCid; pub type MrEnclave = [u8; 32]; pub type ConfirmCallFn = ([u8; 2], ShardIdentifier, H256, Vec); diff --git a/core-primitives/utils/Cargo.toml b/core-primitives/utils/Cargo.toml index ad02e01c6..76e8d1464 100644 --- a/core-primitives/utils/Cargo.toml +++ b/core-primitives/utils/Cargo.toml @@ -10,10 +10,17 @@ edition = "2021" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } hex = { version = "0.4.3", default-features = false, features = ["alloc"] } +log = "0.4.28" +cid = { default-features = false, git = "https://github.com/whalelephant/rust-cid", branch = "nstd" } +multibase = { 
default-features = false, git = "https://github.com/whalelephant/rust-multibase", branch = "nstd" } +ipfs-unixfs = { default-features = false, git = "https://github.com/whalelephant/rust-ipfs", branch = "w-nstd" } [features] default = ["std"] std = [ + "cid/std", + "multibase/std", + "ipfs-unixfs/std", "codec/std", "hex/std", ] diff --git a/core-primitives/utils/src/ipfs.rs b/core-primitives/utils/src/ipfs.rs new file mode 100644 index 000000000..fa9cf9fa2 --- /dev/null +++ b/core-primitives/utils/src/ipfs.rs @@ -0,0 +1,181 @@ +use alloc::vec::Vec; +use cid::Cid; +use codec::{Decode, Encode}; +use core::{convert::TryFrom, fmt::Debug}; +use ipfs_unixfs::file::adder::FileAdder; +use multibase::Base; + +#[derive(Clone, PartialEq, Eq)] +pub struct IpfsCid(pub Cid); + +impl From for IpfsCid { + fn from(value: Cid) -> Self { + IpfsCid(value) + } +} +impl TryFrom<&str> for IpfsCid { + type Error = cid::Error; + + fn try_from(value: &str) -> Result { + let cid = Cid::try_from(value)?; + Ok(IpfsCid(cid)) + } +} + +impl IpfsCid { + pub fn from_content_bytes(content: &Vec) -> Result { + let mut adder: FileAdder = FileAdder::default(); + let mut total: usize = 0; + let mut stats = Stats::default(); + while total < content.len() { + let (blocks, consumed) = adder.push(&content[total..]); + total += consumed; + stats.process(blocks); + } + let blocks = adder.finish(); + stats.process(blocks); + stats.last.map(|cid| IpfsCid(cid)).ok_or(IpfsError::FinalCidMissing) + } +} +impl Encode for IpfsCid { + fn encode(&self) -> Vec { + self.0.to_bytes().encode() + } +} + +impl Decode for IpfsCid { + fn decode(input: &mut I) -> Result { + let bytes: Vec = Decode::decode(input)?; + let cid = Cid::try_from(bytes) + .map_err(|_| codec::Error::from("Failed to decode IpfsCid from bytes"))?; + Ok(IpfsCid(cid)) + } +} + +impl Debug for IpfsCid { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let cid_str = Base::Base58Btc.encode(self.0.hash().as_bytes()); + write!(f, "{}", cid_str) + } +} + +pub struct IpfsContent { + pub cid: IpfsCid, + pub file_content: Vec, +} +#[derive(Debug, PartialEq)] +pub enum IpfsError { + InputCidInvalid, + FinalCidMissing, + Verification, +} + +impl IpfsContent { + pub fn new_with_cid_unverified(cid: IpfsCid, content: Vec) -> IpfsContent { + IpfsContent { cid, file_content: content } + } + + pub fn verify(&mut self) -> Result<(), IpfsError> { + let derived_cid = Self::derive_cid_from_file_content(&self.file_content)?; + if derived_cid.0.hash().eq(&self.cid.0.hash()) { + Ok(()) + } else { + Err(IpfsError::Verification) + } + } + + pub fn derive_cid_from_file_content(file_content: &Vec) -> Result { + let mut adder: FileAdder = FileAdder::default(); + let mut total: usize = 0; + let mut stats = Stats::default(); + while total < file_content.len() { + let (blocks, consumed) = adder.push(&file_content[total..]); + total += consumed; + stats.process(blocks); + } + let blocks = adder.finish(); + stats.process(blocks); + stats.last.map(|cid| IpfsCid(cid)).ok_or(IpfsError::FinalCidMissing) + } +} + +impl TryFrom> for IpfsContent { + type Error = IpfsError; + + fn try_from(value: Vec) -> Result { + let cid = Self::derive_cid_from_file_content(&value)?; + Ok(IpfsContent { cid, file_content: value }) + } +} + +impl Debug for IpfsContent { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let cid_str = Base::Base58Btc.encode(self.cid.0.hash().as_bytes()); + f.debug_struct("IpfsContent") + .field("cid", &cid_str) + .field("file_content_length", 
&self.file_content.len()) + .finish() + } +} + +#[derive(Default)] +pub struct Stats { + pub blocks: usize, + pub block_bytes: u64, + pub last: Option, +} + +impl Stats { + fn process)>>(&mut self, new_blocks: I) { + for (cid, block) in new_blocks { + self.last = Some(cid); + self.blocks += 1; + self.block_bytes += block.len() as u64; + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloc::vec; + #[test] + pub fn test_try_from_multichunk_content_works() { + let expected_cid_str = "QmSaFjwJ2QtS3rZDKzC98XEzv2bqT4TfpWLCpphPPwyQTr"; + let expected_cid = IpfsCid::try_from(expected_cid_str).unwrap(); + let content: Vec = vec![20; 512 * 1024]; // bigger than one chunk of 256kB + let ipfs_content = IpfsContent::try_from(content.clone()).unwrap(); + assert_eq!(ipfs_content.cid, expected_cid); + assert_eq!(ipfs_content.file_content, content); + } + + #[test] + pub fn test_verification_ok_for_correct_multichunk_content() { + let expected_cid_str = "QmSaFjwJ2QtS3rZDKzC98XEzv2bqT4TfpWLCpphPPwyQTr"; + let expected_cid = IpfsCid::try_from(expected_cid_str).unwrap(); + let content: Vec = vec![20; 512 * 1024]; // bigger than one chunk of 256kB + let mut ipfs_content = IpfsContent::new_with_cid_unverified(expected_cid, content); + let verification = ipfs_content.verify(); + assert!(verification.is_ok()); + } + + #[test] + pub fn test_verification_fails_for_incorrect_multichunk_content() { + let expected_cid_str = "QmSaFjwJ2QtS3rZDKzC98XEzv2bqT4TfpWLCpphPPwyQTr"; + let expected_cid = IpfsCid::try_from(expected_cid_str).unwrap(); + let content: Vec = vec![99; 512 * 1024]; // bigger than one chunk of 256kB + let mut ipfs_content = IpfsContent::new_with_cid_unverified(expected_cid, content); + let verification = ipfs_content.verify(); + assert!(verification.is_err()); + } + + #[test] + pub fn test_encode_decode_ipfscid_works() { + let expected_cid_str = "QmSaFjwJ2QtS3rZDKzC98XEzv2bqT4TfpWLCpphPPwyQTr"; + let expected_cid = IpfsCid::try_from(expected_cid_str).unwrap(); + let encoded = expected_cid.encode(); + assert_eq!(encoded.len(), 34 + 1); + let decoded = IpfsCid::decode(&mut &encoded[..]).unwrap(); + assert_eq!(decoded, expected_cid); + } +} diff --git a/core-primitives/utils/src/lib.rs b/core-primitives/utils/src/lib.rs index 297ff5090..2bc5c3bc6 100644 --- a/core-primitives/utils/src/lib.rs +++ b/core-primitives/utils/src/lib.rs @@ -25,10 +25,12 @@ pub mod buffer; pub mod error; pub mod hex; pub mod hex_display; +pub mod ipfs; pub mod stringify; // Public re-exports. 
pub use self::{ buffer::write_slice_and_whitespace_pad, hex::{FromHexPrefixed, ToHexPrefixed}, + ipfs::IpfsCid, }; diff --git a/enclave-runtime/Cargo.lock b/enclave-runtime/Cargo.lock index 48a27470f..a53381a45 100644 --- a/enclave-runtime/Cargo.lock +++ b/enclave-runtime/Cargo.lock @@ -18,7 +18,7 @@ version = "0.4.2" source = "git+https://github.com/encointer/substrate-api-client.git?branch=v0.9.42-tag-v0.14.0-integritee-patch#946f3ae82c5d48023107c1890728582561e94725" dependencies = [ "ac-primitives", - "log", + "log 0.4.17", "maybe-async", ] @@ -33,7 +33,7 @@ dependencies = [ "either", "frame-metadata", "hex", - "log", + "log 0.4.17", "parity-scale-codec", "scale-bits", "scale-decode", @@ -381,7 +381,7 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb5b05133427c07c4776906f673ccf36c21b102c9829c641a5b56bd151d44fd6" dependencies = [ - "log", + "log 0.4.17", "parity-scale-codec", "scale-info", ] @@ -760,7 +760,7 @@ version = "0.1.0" source = "git+https://github.com/integritee-network/pallets.git?branch=sdk-v0.13.0-polkadot-v0.9.42#abf29acd41a0fca9cd7025b297b6a9fa272a122f" dependencies = [ "common-primitives", - "log", + "log 0.4.17", "parity-scale-codec", "scale-info", "serde 1.0.192", @@ -829,7 +829,7 @@ dependencies = [ "its-sidechain", "jsonrpc-core", "lazy_static", - "log", + "log 0.4.17", "multibase", "once_cell 1.4.0", "parity-scale-codec", @@ -860,7 +860,7 @@ version = "0.9.0" source = "git+https://github.com/integritee-network/env_logger-sgx#55745829b2ae8a77f0915af3671ec8a9a00cace9" dependencies = [ "humantime", - "log", + "log 0.4.17", "regex 1.3.1", "sgx_tstd", "termcolor", @@ -942,7 +942,7 @@ dependencies = [ "evm-core", "evm-gasometer", "evm-runtime", - "log", + "log 0.4.17", "parity-scale-codec", "primitive-types", "rlp", @@ -1066,7 +1066,7 @@ source = "git+https://github.com/integritee-network/frontier.git?branch=bar/polk dependencies = [ "hex", "libsecp256k1", - "log", + "log 0.4.17", "parity-scale-codec", "scale-info", "sp-core", @@ -1128,7 +1128,7 @@ dependencies = [ "frame-support-procedural", "impl-trait-for-tuples", "k256", - "log", + "log 0.4.17", "parity-scale-codec", "paste", "scale-info", @@ -1191,7 +1191,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42#ff24c60ac7d9f87727ecdd0ded9a80c56e4f4b65" dependencies = [ "frame-support", - "log", + "log 0.4.17", "parity-scale-codec", "scale-info", "sp-core", @@ -1524,7 +1524,7 @@ name = "http_req" version = "0.8.1" source = "git+https://github.com/integritee-network/http_req#3723e88235f2b29bc1a31835853b072ffd0455fd" dependencies = [ - "log", + "log 0.4.17", "rustls 0.19.0 (git+https://github.com/mesalock-linux/rustls?branch=mesalock_sgx)", "sgx_tstd", "unicase", @@ -1674,7 +1674,7 @@ dependencies = [ "itp-enclave-metrics", "itp-ocall-api", "lazy_static", - "log", + "log 0.4.17", "parity-scale-codec", "serde 1.0.192", "sgx_tstd", @@ -1699,7 +1699,7 @@ dependencies = [ "itp-stf-primitives", "itp-types", "itp-utils", - "log", + "log 0.4.17", "parity-scale-codec", "sgx_tstd", "sp-core", @@ -1714,7 +1714,7 @@ version = "0.1.0" dependencies = [ "hex-literal", "itp-types", - "log", + "log 0.4.17", ] [[package]] @@ -1768,7 +1768,7 @@ dependencies = [ "itp-storage", "itp-types", "itp-utils", - "log", + "log 0.4.17", "pallet-assets", "pallet-balances", "pallet-notes", @@ -1795,7 +1795,7 @@ dependencies = [ "itp-types", "itp-utils", "jsonrpc-core", - "log", + "log 0.4.17", "parity-scale-codec", "serde_json 1.0.108", "sgx_tstd", 
@@ -1815,7 +1815,7 @@ dependencies = [ "itp-stf-state-handler", "itp-top-pool-author", "itp-types", - "log", + "log 0.4.17", "parity-scale-codec", "sgx_tstd", "sp-runtime", @@ -1841,7 +1841,7 @@ version = "0.9.0" dependencies = [ "itc-parentchain-block-importer", "itp-import-queue", - "log", + "log 0.4.17", "sgx_tstd", "sgx_types", "thiserror", @@ -1858,7 +1858,7 @@ dependencies = [ "itp-stf-executor", "itp-stf-interface", "itp-types", - "log", + "log 0.4.17", "parity-scale-codec", "sgx_tstd", "sgx_types", @@ -1882,7 +1882,7 @@ dependencies = [ "itp-test", "itp-top-pool-author", "itp-types", - "log", + "log 0.4.17", "parity-scale-codec", "sgx_tstd", "sgx_types", @@ -1903,7 +1903,7 @@ dependencies = [ "itp-storage", "itp-test", "itp-types", - "log", + "log 0.4.17", "parity-scale-codec", "sgx_tstd", "sgx_types", @@ -1927,7 +1927,7 @@ dependencies = [ "base64 0.13.1", "http", "http_req", - "log", + "log 0.4.17", "serde 1.0.192", "serde_json 1.0.108", "sgx_tstd", @@ -1941,7 +1941,7 @@ version = "0.9.0" dependencies = [ "bit-vec", "chrono 0.4.26", - "log", + "log 0.4.17", "mio", "mio-extras", "rcgen", @@ -2011,7 +2011,7 @@ dependencies = [ "itp-sgx-crypto", "itp-sgx-io", "itp-time-utils", - "log", + "log 0.4.17", "num-bigint", "parity-scale-codec", "rustls 0.19.0 (git+https://github.com/mesalock-linux/rustls?rev=sgx_1.1.3)", @@ -2053,7 +2053,7 @@ dependencies = [ "itp-node-api", "itp-nonce-cache", "itp-types", - "log", + "log 0.4.17", "parity-scale-codec", "sgx_tstd", "sgx_types", @@ -2181,7 +2181,7 @@ dependencies = [ "derive_more", "itp-sgx-io", "itp-sgx-temp-dir", - "log", + "log 0.4.17", "ofb", "parity-scale-codec", "serde_json 1.0.60 (git+https://github.com/mesalock-linux/serde-json-sgx?tag=sgx_1.1.3)", @@ -2199,7 +2199,7 @@ dependencies = [ "derive_more", "environmental 1.1.3", "itp-hashing", - "log", + "log 0.4.17", "parity-scale-codec", "postcard", "serde 1.0.192", @@ -2253,7 +2253,7 @@ dependencies = [ "itp-time-utils", "itp-top-pool-author", "itp-types", - "log", + "log 0.4.17", "parity-scale-codec", "sgx_tstd", "sgx_types", @@ -2299,7 +2299,7 @@ dependencies = [ "itp-stf-state-observer", "itp-time-utils", "itp-types", - "log", + "log 0.4.17", "parity-scale-codec", "rust-base58", "sgx_tstd", @@ -2313,7 +2313,7 @@ name = "itp-stf-state-observer" version = "0.9.0" dependencies = [ "itp-types", - "log", + "log 0.4.17", "sgx_tstd", "thiserror", ] @@ -2353,7 +2353,7 @@ dependencies = [ "itp-time-utils", "itp-types", "jsonrpc-core", - "log", + "log 0.4.17", "parity-scale-codec", "sgx_crypto_helper", "sgx_tstd", @@ -2383,7 +2383,7 @@ dependencies = [ "its-primitives", "jsonrpc-core", "linked-hash-map", - "log", + "log 0.4.17", "parity-scale-codec", "serde 1.0.192", "sgx_tstd", @@ -2406,7 +2406,7 @@ dependencies = [ "itp-top-pool", "itp-types", "jsonrpc-core", - "log", + "log 0.4.17", "parity-scale-codec", "sgx_tstd", "sp-core", @@ -2440,7 +2440,11 @@ dependencies = [ name = "itp-utils" version = "0.9.0" dependencies = [ + "cid", "hex", + "ipfs-unixfs", + "log 0.4.28", + "multibase", "parity-scale-codec", ] @@ -2459,7 +2463,7 @@ dependencies = [ "itp-types", "its-primitives", "its-state", - "log", + "log 0.4.17", "parity-scale-codec", "sgx_tstd", "sgx_types", @@ -2485,7 +2489,7 @@ dependencies = [ "itp-types", "itp-utils", "its-primitives", - "log", + "log 0.4.17", "sgx_tstd", "sp-consensus-slots", "sp-core", @@ -2520,7 +2524,7 @@ dependencies = [ "its-primitives", "its-state", "its-validateer-fetch", - "log", + "log 0.4.17", "parity-scale-codec", "sgx_tstd", "sp-core", @@ -2547,7 +2551,7 @@ 
dependencies = [ "its-block-verification", "its-primitives", "its-state", - "log", + "log 0.4.17", "parity-scale-codec", "sgx_tstd", "sgx_types", @@ -2567,7 +2571,7 @@ dependencies = [ "its-consensus-common", "its-primitives", "lazy_static", - "log", + "log 0.4.17", "parity-scale-codec", "sgx_tstd", "sp-consensus-slots", @@ -2601,7 +2605,7 @@ dependencies = [ "itp-utils", "its-primitives", "jsonrpc-core", - "log", + "log 0.4.17", "parity-scale-codec", "rust-base58", "sgx_tstd", @@ -2630,7 +2634,7 @@ dependencies = [ "itp-sgx-externalities", "itp-storage", "its-primitives", - "log", + "log 0.4.17", "parity-scale-codec", "sgx_tstd", "sp-core", @@ -2647,7 +2651,7 @@ dependencies = [ "itp-pallet-storage", "itp-types", "its-primitives", - "log", + "log 0.4.17", "parity-scale-codec", "sp-core", "sp-runtime", @@ -2660,7 +2664,7 @@ version = "18.0.0" source = "git+https://github.com/scs/jsonrpc?branch=no_std_v18#0faf53c491c3222b96242a973d902dd06e9b6674" dependencies = [ "futures 0.3.8", - "log", + "log 0.4.17", "serde 1.0.118", "serde_derive 1.0.118", "serde_json 1.0.60 (git+https://github.com/mesalock-linux/serde-json-sgx)", @@ -2770,6 +2774,12 @@ dependencies = [ "sgx_tstd", ] +[[package]] +name = "log" +version = "0.4.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" + [[package]] name = "matches" version = "0.1.8" @@ -2829,7 +2839,7 @@ version = "0.6.21" source = "git+https://github.com/mesalock-linux/mio-sgx?tag=sgx_1.1.3#5b0e56a3066231c7a8d1876c7be3a19b08ffdfd5" dependencies = [ "iovec", - "log", + "log 0.4.17", "net2", "sgx_libc", "sgx_trts", @@ -2843,7 +2853,7 @@ version = "2.0.6" source = "git+https://github.com/integritee-network/mio-extras-sgx?rev=963234b#963234bf55e44f9efff921938255126c48deef3a" dependencies = [ "lazycell", - "log", + "log 0.4.17", "mio", "sgx_tstd", "sgx_types", @@ -3024,7 +3034,7 @@ source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.4 dependencies = [ "frame-support", "frame-system", - "log", + "log 0.4.17", "parity-scale-codec", "scale-info", "sp-runtime", @@ -3043,7 +3053,7 @@ dependencies = [ "frame-system", "hex", "impl-trait-for-tuples", - "log", + "log 0.4.17", "parity-scale-codec", "rlp", "scale-info", @@ -3060,7 +3070,7 @@ dependencies = [ "frame-support", "frame-system", "itp-randomness", - "log", + "log 0.4.17", "pallet-balances", "pallet-timestamp", "parity-scale-codec", @@ -3078,7 +3088,7 @@ dependencies = [ "frame-support", "frame-system", "itp-randomness", - "log", + "log 0.4.17", "pallet-balances", "pallet-timestamp", "parity-scale-codec", @@ -3095,7 +3105,7 @@ version = "0.11.0" dependencies = [ "frame-support", "frame-system", - "log", + "log 0.4.17", "pallet-balances", "parity-scale-codec", "scale-info", @@ -3112,7 +3122,7 @@ dependencies = [ "frame-support", "frame-system", "itp-randomness", - "log", + "log 0.4.17", "pallet-balances", "pallet-timestamp", "parity-scale-codec", @@ -3130,7 +3140,7 @@ dependencies = [ "enclave-bridge-primitives", "frame-support", "frame-system", - "log", + "log 0.4.17", "parity-scale-codec", "scale-info", "sp-core", @@ -3158,7 +3168,7 @@ source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.4 dependencies = [ "frame-support", "frame-system", - "log", + "log 0.4.17", "parity-scale-codec", "scale-info", "sp-inherents", @@ -3584,7 +3594,7 @@ version = "0.19.0" source = "git+https://github.com/mesalock-linux/rustls?tag=sgx_1.1.3#95b5e79dc24b02f3ce424437eb9698509d0baf58" 
dependencies = [ "base64 0.13.0 (git+https://github.com/mesalock-linux/rust-base64-sgx)", - "log", + "log 0.4.17", "ring", "sct", "sgx_tstd", @@ -3597,7 +3607,7 @@ version = "0.19.0" source = "git+https://github.com/mesalock-linux/rustls?branch=mesalock_sgx#95b5e79dc24b02f3ce424437eb9698509d0baf58" dependencies = [ "base64 0.13.0 (git+https://github.com/mesalock-linux/rust-base64-sgx)", - "log", + "log 0.4.17", "ring", "sct", "sgx_tstd", @@ -3610,7 +3620,7 @@ version = "0.19.0" source = "git+https://github.com/mesalock-linux/rustls?rev=sgx_1.1.3#95b5e79dc24b02f3ce424437eb9698509d0baf58" dependencies = [ "base64 0.13.0 (git+https://github.com/mesalock-linux/rust-base64-sgx)", - "log", + "log 0.4.17", "ring", "sct", "sgx_tstd", @@ -4180,7 +4190,7 @@ name = "sp-api" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42#ff24c60ac7d9f87727ecdd0ded9a80c56e4f4b65" dependencies = [ - "log", + "log 0.4.17", "parity-scale-codec", "scale-info", "sp-api-proc-macro", @@ -4236,7 +4246,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42#ff24c60ac7d9f87727ecdd0ded9a80c56e4f4b65" dependencies = [ "finality-grandpa", - "log", + "log 0.4.17", "parity-scale-codec", "scale-info", "sp-api", @@ -4270,7 +4280,7 @@ dependencies = [ "hash-db 0.16.0", "hash256-std-hasher", "libsecp256k1", - "log", + "log 0.4.17", "merlin", "parity-scale-codec", "paste", @@ -4352,7 +4362,7 @@ version = "7.0.0" dependencies = [ "itp-sgx-externalities", "libsecp256k1", - "log", + "log 0.4.17", "parity-scale-codec", "sgx_tstd", "sp-core", @@ -4377,7 +4387,7 @@ dependencies = [ "either", "hash256-std-hasher", "impl-trait-for-tuples", - "log", + "log 0.4.17", "parity-scale-codec", "paste", "scale-info", @@ -4576,7 +4586,7 @@ dependencies = [ "derive_more", "frame-metadata", "hex", - "log", + "log 0.4.17", "maybe-async", "parity-scale-codec", "serde 1.0.192", @@ -4667,7 +4677,7 @@ source = "git+https://github.com/integritee-network/pallets.git?branch=sdk-v0.13 dependencies = [ "common-primitives", "derive_more", - "log", + "log 0.4.17", "parity-scale-codec", "scale-info", "serde 1.0.192", @@ -4763,7 +4773,7 @@ checksum = "767abe6ffed88a1889671a102c2861ae742726f52e0a5a425b92c9fbfa7e9c85" dependencies = [ "hash-db 0.16.0", "hashbrown 0.13.2", - "log", + "log 0.4.17", "smallvec 1.11.0", ] @@ -4802,7 +4812,7 @@ dependencies = [ "bytes 1.0.1", "http", "httparse", - "log", + "log 0.4.17", "rand 0.7.3", "rustls 0.19.0 (git+https://github.com/mesalock-linux/rustls?tag=sgx_1.1.3)", "sgx_tstd", @@ -4820,7 +4830,7 @@ version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "digest 0.10.7", "static_assertions", ] diff --git a/enclave-runtime/src/ipfs.rs b/enclave-runtime/src/ipfs.rs deleted file mode 100644 index 7b90f062c..000000000 --- a/enclave-runtime/src/ipfs.rs +++ /dev/null @@ -1,99 +0,0 @@ -use cid::{Cid, Result as CidResult}; -use ipfs_unixfs::file::adder::FileAdder; -use log::*; -use multibase::Base; -use std::{convert::TryFrom, vec::Vec}; - -pub struct IpfsContent { - pub cid: CidResult, - pub file_content: Vec, - pub stats: Stats, -} -#[derive(Debug, PartialEq)] -pub enum IpfsError { - InputCidInvalid, - FinalCidMissing, - Verification, -} - -impl IpfsContent { - pub fn new(_cid: &str, _content: Vec) -> IpfsContent { - IpfsContent { cid: Cid::try_from(_cid), file_content: 
_content, stats: Stats::default() } - } - - pub fn verify(&mut self) -> Result<(), IpfsError> { - let mut adder: FileAdder = FileAdder::default(); - let mut total: usize = 0; - while total < self.file_content.len() { - let (blocks, consumed) = adder.push(&self.file_content[total..]); - total += consumed; - self.stats.process(blocks); - } - let blocks = adder.finish(); - self.stats.process(blocks); - - if let Some(last_cid) = self.stats.last.as_ref() { - let cid_str = Base::Base58Btc.encode(last_cid.hash().as_bytes()); - info!( - "new cid: {} generated from {} blocks, total of {} bytes", - cid_str, self.stats.blocks, self.stats.block_bytes - ); - match self.cid.as_ref() { - Ok(initial_cid) => - if last_cid.hash().eq(&initial_cid.hash()) { - Ok(()) - } else { - Err(IpfsError::Verification) - }, - Err(_) => Err(IpfsError::InputCidInvalid), - } - } else { - Err(IpfsError::FinalCidMissing) - } - } -} -#[derive(Default)] -pub struct Stats { - pub blocks: usize, - pub block_bytes: u64, - pub last: Option, -} - -impl Stats { - fn process)>>(&mut self, new_blocks: I) { - for (cid, block) in new_blocks { - self.last = Some(cid); - self.blocks += 1; - self.block_bytes += block.len() as u64; - } - } -} - -#[allow(unused)] -pub fn test_creates_ipfs_content_struct_works() { - let cid = "QmSaFjwJ2QtS3rZDKzC98XEzv2bqT4TfpWLCpphPPwyQTr"; - let content: Vec = vec![20; 512 * 1024]; - let ipfs_content = IpfsContent::new(cid, content.clone()); - - let cid_str = Base::Base58Btc.encode(ipfs_content.cid.as_ref().unwrap().hash().as_bytes()); - assert_eq!(cid_str, cid); - assert_eq!(ipfs_content.file_content, content); -} - -#[allow(unused)] -pub fn test_verification_ok_for_correct_content() { - let cid = "QmSaFjwJ2QtS3rZDKzC98XEzv2bqT4TfpWLCpphPPwyQTr"; - let content: Vec = vec![20; 512 * 1024]; - let mut ipfs_content = IpfsContent::new(cid, content); - let verification = ipfs_content.verify(); - assert!(verification.is_ok()); -} - -#[allow(unused)] -pub fn test_verification_fails_for_incorrect_content() { - let cid = "QmSaFjwJ2QtS3rZDKzC98XEzv2bqT4TfpWLCpphPPwyQTr"; - let content: Vec = vec![10; 512 * 1024]; - let mut ipfs_content = IpfsContent::new(cid, content); - let verification = ipfs_content.verify(); - assert_eq!(verification.unwrap_err(), IpfsError::Verification); -} diff --git a/enclave-runtime/src/lib.rs b/enclave-runtime/src/lib.rs index ae65a4ae4..4a4a29a6e 100644 --- a/enclave-runtime/src/lib.rs +++ b/enclave-runtime/src/lib.rs @@ -73,7 +73,6 @@ use std::{ mod attestation; mod empty_impls; mod initialization; -mod ipfs; mod ocall; mod shard_config; mod shard_creation_info; diff --git a/enclave-runtime/src/ocall/ipfs_ocall.rs b/enclave-runtime/src/ocall/ipfs_ocall.rs index 690d25e66..9393e011e 100644 --- a/enclave-runtime/src/ocall/ipfs_ocall.rs +++ b/enclave-runtime/src/ocall/ipfs_ocall.rs @@ -17,37 +17,44 @@ */ use crate::ocall::{ffi, OcallApi}; use alloc::vec::Vec; +use codec::{Decode, Encode}; use frame_support::ensure; -use itp_ocall_api::{EnclaveIpfsOCallApi, IpfsCid}; +use itp_ocall_api::EnclaveIpfsOCallApi; +use itp_types::IpfsCid; use log::warn; use sgx_types::{sgx_status_t, SgxResult}; impl EnclaveIpfsOCallApi for OcallApi { fn write_ipfs(&self, encoded_state: &[u8]) -> SgxResult { let mut rt: sgx_status_t = sgx_status_t::SGX_ERROR_UNEXPECTED; - let mut cid_buf = IpfsCid([0u8; 46]); + let mut cid_buf = [0u8; 46].to_vec(); let res = unsafe { ffi::ocall_write_ipfs( &mut rt as *mut sgx_status_t, encoded_state.as_ptr(), encoded_state.len() as u32, - cid_buf.0.as_mut_ptr(), - cid_buf.0.len() as 
u32, + cid_buf.as_mut_ptr(), + cid_buf.len() as u32, ) }; ensure!(rt == sgx_status_t::SGX_SUCCESS, rt); ensure!(res == sgx_status_t::SGX_SUCCESS, res); - - Ok(cid_buf) + let cid = IpfsCid::decode(&mut cid_buf.as_slice()) + .map_err(|_| sgx_status_t::SGX_ERROR_UNEXPECTED)?; + Ok(cid) } fn read_ipfs(&self, cid: &IpfsCid) -> SgxResult> { let mut rt: sgx_status_t = sgx_status_t::SGX_ERROR_UNEXPECTED; - + let cid_buf = cid.encode(); let res = unsafe { - ffi::ocall_read_ipfs(&mut rt as *mut sgx_status_t, cid.0.as_ptr(), cid.0.len() as u32) + ffi::ocall_read_ipfs( + &mut rt as *mut sgx_status_t, + cid_buf.as_ptr(), + cid_buf.len() as u32, + ) }; ensure!(rt == sgx_status_t::SGX_SUCCESS, rt); diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index f1f94d369..6b28b79c3 100644 --- a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -16,27 +16,30 @@ */ -use crate::{ipfs::IpfsContent, ocall::OcallApi}; +use crate::ocall::OcallApi; use itp_ocall_api::EnclaveIpfsOCallApi; +use itp_utils::IpfsCid; use log::*; use std::{fs::File, io::Read, vec::Vec}; #[allow(unused)] -fn test_ocall_read_write_ipfs() { +pub fn test_ocall_read_write_ipfs() { info!("testing IPFS read/write. Hopefully ipfs daemon is running..."); let enc_state: Vec = vec![20; 4 * 512 * 1024]; - let cid = OcallApi.write_ipfs(enc_state.as_slice()).unwrap(); + let expected_cid = IpfsCid::from_content_bytes(&enc_state).unwrap(); - OcallApi.read_ipfs(&cid).unwrap(); + let returned_cid = OcallApi.write_ipfs(enc_state.as_slice()).unwrap(); + assert_eq!(expected_cid, returned_cid); - let cid_str = std::str::from_utf8(&cid.0).unwrap(); + OcallApi.read_ipfs(&returned_cid).unwrap(); + + let cid_str = format!("{:?}", returned_cid); let mut f = File::open(cid_str).unwrap(); let mut content_buf = Vec::new(); f.read_to_end(&mut content_buf).unwrap(); info!("reading file {:?} of size {} bytes", f, &content_buf.len()); - let mut ipfs_content = IpfsContent::new(cid_str, content_buf); - let verification = ipfs_content.verify(); - assert!(verification.is_ok()); + let file_cid = IpfsCid::from_content_bytes(&content_buf).unwrap(); + assert_eq!(expected_cid, file_cid); } diff --git a/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs b/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs index cd9c9b533..3a6d03595 100644 --- a/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs +++ b/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs @@ -20,11 +20,11 @@ use crate::test::mocks::types::TestBlockImporter; use codec::{Decode, Encode}; use itc_parentchain::primitives::ParentchainId; use itp_ocall_api::{ - EnclaveIpfsOCallApi, EnclaveOnChainOCallApi, EnclaveSidechainOCallApi, IpfsCid, Result, + EnclaveIpfsOCallApi, EnclaveOnChainOCallApi, EnclaveSidechainOCallApi, Result, }; use itp_types::{ - storage::StorageEntryVerified, BlockHash, Header as ParentchainHeader, ShardIdentifier, - WorkerRequest, WorkerResponse, H256, + storage::StorageEntryVerified, BlockHash, Header as ParentchainHeader, IpfsCid, + ShardIdentifier, WorkerRequest, WorkerResponse, H256, }; use its_primitives::types::block::SignedBlock as SignedSidechainBlockType; use its_sidechain::consensus_common::BlockImport; @@ -127,7 +127,7 @@ impl EnclaveSidechainOCallApi for ProposeToImportOCallApi { impl EnclaveIpfsOCallApi for ProposeToImportOCallApi { fn write_ipfs(&self, _encoded_state: &[u8]) -> SgxResult { - Ok(IpfsCid([0u8; 46])) + 
Ok(IpfsCid::try_from("QmSaFjwJ2QtS3rZDKzC98XEzv2bqT4TfpWLCpphPPwyQTr").unwrap()) } fn read_ipfs(&self, _cid: &IpfsCid) -> SgxResult> { Ok(vec![]) diff --git a/enclave-runtime/src/test/tests_main.rs b/enclave-runtime/src/test/tests_main.rs index 8f8881176..8d46f3716 100644 --- a/enclave-runtime/src/test/tests_main.rs +++ b/enclave-runtime/src/test/tests_main.rs @@ -162,11 +162,8 @@ pub extern "C" fn test_main_entrance() -> size_t { itc_parentchain::light_client::io::sgx_tests::init_parachain_light_client_works, itc_parentchain::light_client::io::sgx_tests::sealing_creates_backup, - // these unit test (?) need an ipfs node running.. - // ipfs::test_creates_ipfs_content_struct_works, - // ipfs::test_verification_ok_for_correct_content, - // ipfs::test_verification_fails_for_incorrect_content, - // test_ocall_read_write_ipfs, + // this test needs an ipfs node running.. + crate::test::ipfs_tests::test_ocall_read_write_ipfs, // Teeracle tests run_teeracle_tests, @@ -429,7 +426,8 @@ fn test_create_state_diff() { assert_eq!( sender_acc_info.data.free, ita_stf::test_genesis::ENDOWED_ACC_FUNDS - - TX_AMOUNT - 1_000_000_000_000 / ita_stf::STF_TX_FEE_UNIT_DIVIDER + - TX_AMOUNT + - 1_000_000_000_000 / ita_stf::STF_TX_FEE_UNIT_DIVIDER ); } diff --git a/service/src/ocall_bridge/bridge_api.rs b/service/src/ocall_bridge/bridge_api.rs index 6e8f67c66..274320c8d 100644 --- a/service/src/ocall_bridge/bridge_api.rs +++ b/service/src/ocall_bridge/bridge_api.rs @@ -17,6 +17,7 @@ */ use itp_enclave_api::remote_attestation::QveReport; +use itp_types::IpfsCid; use lazy_static::lazy_static; use log::*; use parking_lot::RwLock; @@ -240,15 +241,12 @@ pub trait SidechainBridge { ) -> OCallBridgeResult>; } -/// type for IPFS -pub type Cid = [u8; 46]; - /// Trait for all the OCalls related to IPFS #[cfg_attr(test, automock)] pub trait IpfsBridge { - fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult; + fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult; - fn read_from_ipfs(&self, cid: Cid) -> OCallBridgeResult<()>; + fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()>; } /// Trait for the direct invocation OCalls diff --git a/service/src/ocall_bridge/ffi/ipfs.rs b/service/src/ocall_bridge/ffi/ipfs.rs index e264b49db..37be75cfb 100644 --- a/service/src/ocall_bridge/ffi/ipfs.rs +++ b/service/src/ocall_bridge/ffi/ipfs.rs @@ -16,7 +16,9 @@ */ -use crate::ocall_bridge::bridge_api::{Bridge, Cid, IpfsBridge}; +use crate::ocall_bridge::bridge_api::{Bridge, IpfsBridge}; +use codec::{Decode, Encode}; +use itp_utils::IpfsCid; use log::*; use sgx_types::sgx_status_t; use std::{slice, sync::Arc}; @@ -50,7 +52,10 @@ fn write_ipfs( return match ipfs_api.write_to_ipfs(state) { Ok(r) => { - cid.clone_from_slice(&r); + cid.fill(0); + let encoded = r.encode(); + let len = encoded.len().min(cid.len()); + cid[..len].copy_from_slice(&encoded[..len]); sgx_status_t::SGX_SUCCESS }, Err(e) => { @@ -61,16 +66,18 @@ fn write_ipfs( } fn read_ipfs(cid: *const u8, cid_size: u32, ipfs_api: Arc) -> sgx_status_t { - let _cid = unsafe { slice::from_raw_parts(cid, cid_size as usize) }; + let mut cid_raw = unsafe { slice::from_raw_parts(cid, cid_size as usize) }; - let mut cid: Cid = [0; 46]; - cid.clone_from_slice(_cid); - - match ipfs_api.read_from_ipfs(cid) { - Ok(_) => sgx_status_t::SGX_SUCCESS, - Err(e) => { - error!("OCall to read_ipfs failed: {:?}", e); - sgx_status_t::SGX_ERROR_UNEXPECTED - }, + if let Ok(cid) = IpfsCid::decode(&mut cid_raw) { + match ipfs_api.read_from_ipfs(cid) { + Ok(_) => 
sgx_status_t::SGX_SUCCESS, + Err(e) => { + error!("OCall to read_ipfs failed: {:?}", e); + sgx_status_t::SGX_ERROR_UNEXPECTED + }, + } + } else { + error!("Decoding CID failed"); + sgx_status_t::SGX_ERROR_UNEXPECTED } } diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index 8cc25f85d..225f672ce 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -16,9 +16,10 @@ */ -use crate::ocall_bridge::bridge_api::{Cid, IpfsBridge, OCallBridgeError, OCallBridgeResult}; +use crate::ocall_bridge::bridge_api::{IpfsBridge, OCallBridgeError, OCallBridgeResult}; use futures::TryStreamExt; use ipfs_api::IpfsClient; +use itp_utils::IpfsCid; use log::*; use std::{ fs::File, @@ -30,19 +31,19 @@ use std::{ pub struct IpfsOCall; impl IpfsBridge for IpfsOCall { - fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { + fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { debug!(" Entering ocall_write_ipfs"); - Ok(write_to_ipfs(data)) + write_to_ipfs(data) } - fn read_from_ipfs(&self, cid: Cid) -> OCallBridgeResult<()> { + fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()> { debug!("Entering ocall_read_ipfs"); - let result = read_from_ipfs(cid); + let result = read_from_ipfs(&cid); match result { Ok(res) => { - let filename = str::from_utf8(&cid).unwrap(); - create_file(filename, &res).map_err(OCallBridgeError::IpfsError) + let filename = format!("{:?}", cid); + create_file(&filename, &res).map_err(OCallBridgeError::IpfsError) }, Err(_) => Err(OCallBridgeError::IpfsError("failed to read from IPFS".to_string())), } @@ -59,14 +60,16 @@ fn create_file(filename: &str, result: &[u8]) -> Result<(), String> { } #[tokio::main] -async fn write_to_ipfs(data: &'static [u8]) -> Cid { +async fn write_to_ipfs(data: &'static [u8]) -> OCallBridgeResult { // Creates an `IpfsClient` connected to the endpoint specified in ~/.ipfs/api. // If not found, tries to connect to `localhost:5001`. let client = IpfsClient::default(); - match client.version().await { Ok(version) => info!("version: {:?}", version.version), - Err(e) => eprintln!("error getting version: {}", e), + Err(e) => { + error!("error getting version: {}", e); + return Err(OCallBridgeError::IpfsError(format!("error getting version: {}", e))); + }, } let datac = Cursor::new(data); @@ -74,27 +77,38 @@ async fn write_to_ipfs(data: &'static [u8]) -> Cid { match client.add(datac).await { Ok(res) => { - info!("Result Hash {}", res.hash); + info!("Result IpfsCid {}", res.hash); tx.send(res.hash.into_bytes()).unwrap(); }, - Err(e) => eprintln!("error adding file: {}", e), + Err(e) => { + error!("error adding file: {}", e); + return Err(OCallBridgeError::IpfsError(format!("error adding file: {}", e))); + }, } - let mut cid: Cid = [0; 46]; - cid.clone_from_slice(&rx.recv().unwrap()); - cid + rx.recv() + .map_err(|e| OCallBridgeError::IpfsError(format!("error receiving cid: {}", e))) + .and_then(|cid_str| { + str::from_utf8(&cid_str) + .map_err(|e| OCallBridgeError::IpfsError(format!("invalid UTF-8 in cid: {}", e))) + .and_then(|cid_utf8| { + IpfsCid::try_from(cid_utf8).map_err(|e| { + OCallBridgeError::IpfsError(format!("invalid IpfsCid: {:?}", e)) + }) + }) + }) } #[tokio::main] -pub async fn read_from_ipfs(cid: Cid) -> Result, String> { +pub async fn read_from_ipfs(cid: &IpfsCid) -> Result, String> { // Creates an `IpfsClient` connected to the endpoint specified in ~/.ipfs/api. // If not found, tries to connect to `localhost:5001`. 
let client = IpfsClient::default(); - let h = str::from_utf8(&cid).unwrap(); + let h = format!("{:?}", cid); info!("Fetching content from: {}", h); client - .cat(h) + .cat(&h) .map_ok(|chunk| chunk.to_vec()) .map_err(|e| e.to_string()) .try_concat() diff --git a/service/src/tests/mock.rs b/service/src/tests/mock.rs index 7c7528fa6..f06a2bf74 100644 --- a/service/src/tests/mock.rs +++ b/service/src/tests/mock.rs @@ -22,6 +22,7 @@ use itp_types::{ parentchain::BlockNumber, AccountId, MultiEnclave, SgxBuildMode, SgxEnclave, SgxReportData, SgxStatus, ShardIdentifier, H256 as Hash, }; +use itp_utils::IpfsCid; pub struct TestNodeApi; @@ -93,7 +94,7 @@ impl PalletTeerexApi for TestNodeApi { &self, _: &ShardIdentifier, _at_block: Option, - ) -> ApiResult> { + ) -> ApiResult> { unreachable!() } } From 2afc85042cc3567d0b1f50de28c5a319b42a25a9 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Thu, 18 Sep 2025 15:17:59 +0200 Subject: [PATCH 07/91] cli cleanup --- Cargo.lock | 1 + app-libs/stf/Cargo.toml | 1 + app-libs/stf/src/lib.rs | 1 + app-libs/stf/src/relayed_note.rs | 23 ++++++++++++- .../trusted_base_cli/commands/get_notes.rs | 34 ++++++++++++++++--- .../trusted_base_cli/commands/send_note.rs | 11 ++++++ enclave-runtime/Cargo.lock | 1 + 7 files changed, 66 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e3b9b6be3..979c61ac4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2951,6 +2951,7 @@ dependencies = [ "base58", "frame-support", "frame-system", + "hex", "ita-assets-map", "ita-parentchain-specs", "ita-sgx-runtime", diff --git a/app-libs/stf/Cargo.toml b/app-libs/stf/Cargo.toml index a48763e04..e3278dcb2 100644 --- a/app-libs/stf/Cargo.toml +++ b/app-libs/stf/Cargo.toml @@ -10,6 +10,7 @@ codec = { version = "3.0.0", default-features = false, features = ["derive"], pa log = { version = "0.4", default-features = false } rlp = { version = "0.5", default-features = false } sha3 = { version = "0.10", default-features = false } +hex = { version = "0.4.3", default-features = false, features = ["alloc"] } # sgx deps sgx_tstd = { branch = "master", features = ["untrusted_fs", "net", "backtrace"], git = "https://github.com/apache/teaclave-sgx-sdk.git", optional = true } diff --git a/app-libs/stf/src/lib.rs b/app-libs/stf/src/lib.rs index ad589439d..3601ac3a6 100644 --- a/app-libs/stf/src/lib.rs +++ b/app-libs/stf/src/lib.rs @@ -25,6 +25,7 @@ #![cfg_attr(target_env = "sgx", feature(rustc_private))] extern crate alloc; +extern crate core; #[cfg(all(not(feature = "std"), feature = "sgx"))] extern crate sgx_tstd as std; diff --git a/app-libs/stf/src/relayed_note.rs b/app-libs/stf/src/relayed_note.rs index bdbfe4bed..7b6672149 100644 --- a/app-libs/stf/src/relayed_note.rs +++ b/app-libs/stf/src/relayed_note.rs @@ -1,4 +1,5 @@ use codec::{Decode, Encode}; +use core::fmt::Debug; use itp_utils::IpfsCid; use sp_std::vec::Vec; pub type ConversationId = u32; @@ -15,7 +16,7 @@ pub enum NoteRelayType { } /// Necessary information for recipient to retrieve and potentially decrypt a relayed note -#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)] +#[derive(Encode, Decode, Clone, PartialEq, Eq)] pub enum RelayedNoteRetreivalInfo { /// the message is included within and not actually relayed Here { msg: Vec }, @@ -26,6 +27,26 @@ pub enum RelayedNoteRetreivalInfo { Undeclared { encryption_key: [u8; 32] }, } +impl Debug for RelayedNoteRetreivalInfo { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + RelayedNoteRetreivalInfo::Here { msg } => write!( + f, 
+ "Here {{ msg: {} }}", + core::str::from_utf8(msg).unwrap_or("") + ), + RelayedNoteRetreivalInfo::Ipfs { cid, encryption_key } => write!( + f, + "Ipfs {{ cid: {:?}, encryption_key: 0x{} }}", + cid, + hex::encode(encryption_key) + ), + RelayedNoteRetreivalInfo::Undeclared { encryption_key } => + write!(f, "Undeclared {{ encryption_key: 0x{} }}", hex::encode(encryption_key)), + } + } +} + /// A user request to relay a note to a specific conversation. #[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)] pub struct RelayedNoteRequest { diff --git a/cli/src/trusted_base_cli/commands/get_notes.rs b/cli/src/trusted_base_cli/commands/get_notes.rs index ef259e390..a415cd651 100644 --- a/cli/src/trusted_base_cli/commands/get_notes.rs +++ b/cli/src/trusted_base_cli/commands/get_notes.rs @@ -29,7 +29,7 @@ use itp_stf_primitives::types::{KeyPair, TrustedOperation}; use itp_types::{AccountId, Moment}; use log::error; use pallet_notes::{BucketIndex, TimestampedTrustedNote, TrustedNote}; -use sp_core::Pair; +use sp_core::{crypto::Ss58Codec, Pair}; #[derive(Parser)] pub struct GetNotesCommand { @@ -108,19 +108,43 @@ impl GetNotesCommand { TrustedCall::send_note(from, to, note) => if from == who_accountid { println!( - "[{}] Message to: {:?}: {}", + "[{}] Message to: {}: {}", datetime_str, - to, + to.to_ss58check(), String::from_utf8_lossy(note.as_ref()) ); } else { println!( - "[{}] Message from: {:?}: {}", + "[{}] Message from: {}: {}", datetime_str, - from, + from.to_ss58check(), String::from_utf8_lossy(note.as_ref()) ); }, + TrustedCall::send_relayed_note_stripped( + from, + to, + conversation_id, + retreival, + ) => + if from == who_accountid { + println!( + "[{}] Message in conversation {} to: {}: {:?}", + datetime_str, + conversation_id, + to.to_ss58check(), + retreival + ); + } else { + println!( + "[{}] Message in conversation {} from: {}: {:?}", + datetime_str, + conversation_id, + from.to_ss58check(), + retreival + ); + }, + _ => println!("[{}] {:?}", datetime_str, call), } } else { diff --git a/cli/src/trusted_base_cli/commands/send_note.rs b/cli/src/trusted_base_cli/commands/send_note.rs index a28317db6..cf963a2d6 100644 --- a/cli/src/trusted_base_cli/commands/send_note.rs +++ b/cli/src/trusted_base_cli/commands/send_note.rs @@ -78,6 +78,17 @@ impl SendNoteCommand { TrustedCall::send_relayed_note(sender, to, conversation_id, request) .sign(&KeyPair::Sr25519(Box::new(signer)), nonce, &mrenclave, &shard) .into_trusted_operation(trusted_args.direct) + } else if self.conversation_id.is_some() { + let request = RelayedNoteRequest { + allow_onchain_fallback: false, + relay_type: NoteRelayType::Here, + msg: self.message.as_bytes().to_vec(), + maybe_encryption_key: None, + }; + let conversation_id = self.conversation_id.unwrap_or_default(); + TrustedCall::send_relayed_note(sender, to, conversation_id, request) + .sign(&KeyPair::Sr25519(Box::new(signer)), nonce, &mrenclave, &shard) + .into_trusted_operation(trusted_args.direct) } else { TrustedCall::send_note(sender, to, self.message.as_bytes().to_vec()) .sign(&KeyPair::Sr25519(Box::new(signer)), nonce, &mrenclave, &shard) diff --git a/enclave-runtime/Cargo.lock b/enclave-runtime/Cargo.lock index a53381a45..86583804e 100644 --- a/enclave-runtime/Cargo.lock +++ b/enclave-runtime/Cargo.lock @@ -1753,6 +1753,7 @@ version = "0.9.0" dependencies = [ "frame-support", "frame-system", + "hex", "ita-assets-map", "ita-parentchain-specs", "ita-sgx-runtime", From 80f887630f68dbe5328ec3b5249cc1779ba3b6d0 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Fri, 
19 Sep 2025 11:40:30 +0200 Subject: [PATCH 08/91] add cli args for ipfs endpoint for worker --- Cargo.lock | 227 ++++++++++-------- enclave-runtime/src/test/ipfs_tests.rs | 1 + enclave-runtime/src/test/tests_main.rs | 2 +- service/Cargo.toml | 2 +- service/src/cli.yml | 12 + service/src/config.rs | 20 ++ service/src/main_impl.rs | 66 +++-- service/src/ocall_bridge/component_factory.rs | 9 +- service/src/ocall_bridge/ipfs_ocall.rs | 45 ++-- 9 files changed, 234 insertions(+), 150 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 979c61ac4..fed3e45df 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -297,6 +297,12 @@ name = "base-x" version = "0.2.6" source = "git+https://github.com/whalelephant/base-x-rs?branch=no_std#906c9ac59282ff5a2eec86efd25d50ad9927b147" +[[package]] +name = "base-x" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" + [[package]] name = "base16ct" version = "0.2.0" @@ -687,7 +693,7 @@ name = "cid" version = "0.5.1" source = "git+https://github.com/whalelephant/rust-cid?branch=nstd#cca87467c46106c801ca3727500477258b0f13b0" dependencies = [ - "multibase", + "multibase 0.8.0", "multihash 0.11.4", "unsigned-varint 0.5.1", ] @@ -769,12 +775,13 @@ dependencies = [ [[package]] name = "common-multipart-rfc7578" -version = "0.3.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d0a7a42b9c13f2b2a1a7e64b949a19bcb56a49b190076e60261001ceaa5304" +checksum = "5baee326bc603965b0f26583e1ecd7c111c41b49bd92a344897476a352798869" dependencies = [ "bytes 1.4.0", - "futures 0.3.28", + "futures-core 0.3.28", + "futures-util 0.3.28", "http 0.2.9", "mime", "mime_guess", @@ -835,6 +842,15 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +[[package]] +name = "core2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b49ba7ef1ad6107f8824dbe97de947cbaac53c44e7f9756a1fba0d37c1eec505" +dependencies = [ + "memchr 2.6.3", +] + [[package]] name = "cpp_demangle" version = "0.3.5" @@ -1038,6 +1054,26 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" +[[package]] +name = "data-encoding-macro" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c904b33cc60130e1aeea4956ab803d08a3f4a0ca82d64ed757afac3891f2bb99" +dependencies = [ + "data-encoding", + "data-encoding-macro-internal", +] + +[[package]] +name = "data-encoding-macro-internal" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fdf3fce3ce863539ec1d7fd1b6dcc3c645663376b43ed376bbf887733e4f772" +dependencies = [ + "data-encoding", + "syn 1.0.109", +] + [[package]] name = "der" version = "0.6.1" @@ -1140,6 +1176,15 @@ dependencies = [ "dirs-sys", ] +[[package]] +name = "dirs" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059" +dependencies = [ + "dirs-sys", +] + [[package]] name = "dirs-sys" version = "0.3.7" @@ -1481,28 +1526,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "failure" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86" -dependencies = [ - "backtrace", - "failure_derive", -] - -[[package]] -name = "failure_derive" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", - "synstructure", -] - [[package]] name = "fake-simd" version = "0.1.2" @@ -2451,13 +2474,13 @@ dependencies = [ [[package]] name = "hyper-multipart-rfc7578" -version = "0.5.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3538ce6aeb81f7cd0d547a42435944d2283714a3f696630318bc47bd839fcfc9" +checksum = "f0eb2cf73e96e9925f4bed948e763aa2901c2f1a3a5f713ee41917433ced6671" dependencies = [ "bytes 1.4.0", "common-multipart-rfc7578", - "futures 0.3.28", + "futures-core 0.3.28", "http 0.2.9", "hyper", ] @@ -2697,14 +2720,14 @@ dependencies = [ "base58", "chrono 0.4.26", "clap 2.34.0", - "dirs", + "dirs 3.0.2", "enclave-bridge-primitives", "env_logger 0.9.3", "frame-support", "futures 0.3.28", "hex", "humantime", - "ipfs-api", + "ipfs-api-backend-hyper", "ita-parentchain-interface", "itc-parentchain", "itc-parentchain-test", @@ -2788,27 +2811,45 @@ dependencies = [ ] [[package]] -name = "ipfs-api" -version = "0.11.0" +name = "ipfs-api-backend-hyper" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3824538e42e84c792988098df4ad5a35b47be98b19e31454e09f4e322f00fc" +checksum = "8a9d131b408b4caafe1e7c00d410a09ad3eb7e3ab68690cf668e86904b2176b4" dependencies = [ + "async-trait", + "base64 0.13.1", "bytes 1.4.0", - "dirs", - "failure", "futures 0.3.28", "http 0.2.9", "hyper", "hyper-multipart-rfc7578", "hyper-tls", - "parity-multiaddr", + "ipfs-api-prelude", + "thiserror 1.0.44", +] + +[[package]] +name = "ipfs-api-prelude" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b74065805db266ba2c6edbd670b23c4714824a955628472b2e46cc9f3a869cb" +dependencies = [ + "async-trait", + "bytes 1.4.0", + "cfg-if 1.0.0", + "common-multipart-rfc7578", + "dirs 4.0.0", + "futures 0.3.28", + "http 0.2.9", + "multiaddr", + "multibase 0.9.1", "serde 1.0.193", "serde_json 1.0.103", "serde_urlencoded", + "thiserror 1.0.44", "tokio", - "tokio-util 0.6.10", + "tokio-util 0.7.8", "tracing", - "typed-builder", "walkdir", ] @@ -3814,7 +3855,7 @@ dependencies = [ "hex", "ipfs-unixfs", "log 0.4.28", - "multibase", + "multibase 0.8.0", "parity-scale-codec", ] @@ -4804,16 +4845,46 @@ dependencies = [ "version_check", ] +[[package]] +name = "multiaddr" +version = "0.17.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b36f567c7099511fa8612bbbb52dda2419ce0bdbacf31714e3a5ffdb766d3bd" +dependencies = [ + "arrayref", + "byteorder 1.4.3", + "data-encoding", + "log 0.4.28", + "multibase 0.9.1", + "multihash 0.17.0", + "percent-encoding 2.3.1", + "serde 1.0.193", + "static_assertions", + "unsigned-varint 0.7.1", + "url 2.5.0", +] + [[package]] name = "multibase" version = "0.8.0" source = "git+https://github.com/whalelephant/rust-multibase?branch=nstd#df67fb30e86998f7c10d4eea16a1cd480d2448c0" dependencies = [ - "base-x", + "base-x 0.2.6", "data-encoding", "lazy_static", ] +[[package]] +name = "multibase" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b3539ec3c1f04ac9748a260728e855f261b4977f5c3406612c884564f329404" +dependencies = [ + 
"base-x 0.2.11", + "data-encoding", + "data-encoding-macro", +] + [[package]] name = "multihash" version = "0.11.4" @@ -4830,20 +4901,20 @@ dependencies = [ [[package]] name = "multihash" -version = "0.13.2" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dac63698b887d2d929306ea48b63760431ff8a24fac40ddb22f9c7f49fb7cab" +checksum = "835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40" dependencies = [ - "generic-array 0.14.7", + "core2", "multihash-derive", - "unsigned-varint 0.5.1", + "unsigned-varint 0.7.1", ] [[package]] name = "multihash-derive" -version = "0.7.2" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "424f6e86263cd5294cbd7f1e95746b95aca0e0d66bff31e5a40d6baa87b4aa99" +checksum = "1d6d4752e6230d8ef7adf7bd5d8c4b1f6561c1014c5ba9a37445ccefe18aa1db" dependencies = [ "proc-macro-crate", "proc-macro-error", @@ -5565,24 +5636,6 @@ dependencies = [ "sp-std", ] -[[package]] -name = "parity-multiaddr" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58341485071825827b7f03cf7efd1cb21e6a709bea778fb50227fd45d2f361b4" -dependencies = [ - "arrayref", - "bs58", - "byteorder 1.4.3", - "data-encoding", - "multihash 0.13.2", - "percent-encoding 2.3.1", - "serde 1.0.193", - "static_assertions", - "unsigned-varint 0.7.1", - "url 2.5.0", -] - [[package]] name = "parity-scale-codec" version = "3.6.4" @@ -5881,12 +5934,12 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.3.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" +checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" dependencies = [ - "once_cell 1.18.0", - "toml_edit", + "thiserror 1.0.44", + "toml", ] [[package]] @@ -8421,20 +8474,12 @@ dependencies = [ ] [[package]] -name = "toml_datetime" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" - -[[package]] -name = "toml_edit" -version = "0.19.15" +name = "toml" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ - "indexmap 2.0.0", - "toml_datetime", - "winnow", + "serde 1.0.193", ] [[package]] @@ -8659,17 +8704,6 @@ dependencies = [ "static_assertions", ] -[[package]] -name = "typed-builder" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a46ee5bd706ff79131be9c94e7edcb82b703c487766a114434e5790361cf08c5" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "typenum" version = "1.16.0" @@ -9428,15 +9462,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" -[[package]] -name = "winnow" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25b5872fa2e10bd067ae946f927e726d7d603eaeb6e02fa6a350e0722d2b8c11" -dependencies = [ - "memchr 2.6.3", -] - [[package]] name = "winreg" version = "0.50.0" diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index 6b28b79c3..b1333e750 100644 --- 
a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -23,6 +23,7 @@ use log::*; use std::{fs::File, io::Read, vec::Vec}; #[allow(unused)] +/// this test needs an ipfs node running and configured with cli args. here for reference but may never be called pub fn test_ocall_read_write_ipfs() { info!("testing IPFS read/write. Hopefully ipfs daemon is running..."); let enc_state: Vec = vec![20; 4 * 512 * 1024]; diff --git a/enclave-runtime/src/test/tests_main.rs b/enclave-runtime/src/test/tests_main.rs index 8d46f3716..23529c62a 100644 --- a/enclave-runtime/src/test/tests_main.rs +++ b/enclave-runtime/src/test/tests_main.rs @@ -163,7 +163,7 @@ pub extern "C" fn test_main_entrance() -> size_t { itc_parentchain::light_client::io::sgx_tests::sealing_creates_backup, // this test needs an ipfs node running.. - crate::test::ipfs_tests::test_ocall_read_write_ipfs, + // crate::test::ipfs_tests::test_ocall_read_write_ipfs, // Teeracle tests run_teeracle_tests, diff --git a/service/Cargo.toml b/service/Cargo.toml index 4e6ef0d52..a5efdde9e 100644 --- a/service/Cargo.toml +++ b/service/Cargo.toml @@ -31,7 +31,7 @@ url = "2.5.0" warp = "0.3" # ipfs -ipfs-api = "0.11.0" +ipfs-api-backend-hyper = { version = "0.6.0", features = ["with-hyper-tls"] } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } primitive-types = { version = "0.12.1", default-features = false, features = ["codec"] } diff --git a/service/src/cli.yml b/service/src/cli.yml index b97f6907a..d14b972e6 100644 --- a/service/src/cli.yml +++ b/service/src/cli.yml @@ -107,6 +107,18 @@ args: long: clean-reset short: c help: Cleans and purges any previous state and key files and generates them anew before starting. + - ipfs-api-url: + long: ipfs-api-url + takes_value: true + required: false + help: + Set the IPFS API endpoint to connect to a local or remote IPFS node. Use http(s) without api path. + - ipfs-api-auth: + long: ipfs-api-auth + takes_value: true + required: false + help: + Set the IPFS API basic auth credentials in the form username:password. subcommands: - run: diff --git a/service/src/config.rs b/service/src/config.rs index 65bf4c042..0be35003b 100644 --- a/service/src/config.rs +++ b/service/src/config.rs @@ -63,6 +63,10 @@ pub struct Config { metrics_server_port: String, /// Port for the untrusted HTTP server (e.g. for `is_initialized`) untrusted_http_port: String, + /// IPFS API endpoint + ipfs_api_url: Option, + /// IPFS API authentication + ipfs_api_auth: Option, /// Data directory used by all the services.
data_dir: PathBuf, /// Config of the 'run' subcommand @@ -88,6 +92,8 @@ impl Config { enable_metrics_server: bool, metrics_server_port: String, untrusted_http_port: String, + ipfs_api_url: Option, + ipfs_api_auth: Option, data_dir: PathBuf, run_config: Option, ) -> Self { @@ -108,6 +114,8 @@ impl Config { enable_metrics_server, metrics_server_port, untrusted_http_port, + ipfs_api_url, + ipfs_api_auth, data_dir, run_config, } @@ -204,6 +212,14 @@ impl Config { self.untrusted_http_port.parse::().ok() } + pub fn ipfs_api_url(&self) -> Option { + self.ipfs_api_url.clone() + } + + pub fn ipfs_api_auth(&self) -> Option { + self.ipfs_api_auth.clone() + } + pub fn with_test_data_dir(&self) -> Self { let mut new = self.clone(); new.data_dir.push("test"); @@ -220,6 +236,8 @@ impl From<&ArgMatches<'_>> for Config { let metrics_server_port = m.value_of("metrics-port").unwrap_or(DEFAULT_METRICS_PORT); let untrusted_http_port = m.value_of("untrusted-http-port").unwrap_or(DEFAULT_UNTRUSTED_HTTP_PORT); + let ipfs_api_url = m.value_of("ipfs-api-url"); + let ipfs_api_auth = m.value_of("ipfs-api-auth"); let data_dir = match m.value_of("data-dir") { Some(d) => { @@ -261,6 +279,8 @@ impl From<&ArgMatches<'_>> for Config { is_metrics_server_enabled, metrics_server_port.to_string(), untrusted_http_port.to_string(), + ipfs_api_url.map(str::to_string), + ipfs_api_auth.map(str::to_string), data_dir, run_config, ) diff --git a/service/src/main_impl.rs b/service/src/main_impl.rs index e8242873d..e8e4d3fad 100644 --- a/service/src/main_impl.rs +++ b/service/src/main_impl.rs @@ -31,6 +31,7 @@ use crate::{ use base58::ToBase58; use clap::{load_yaml, App, ArgMatches}; use codec::{Decode, Encode}; +use ipfs_api_backend_hyper::{IpfsApi, TryFromUri}; use ita_parentchain_interface::integritee::{Hash, Header}; use itp_enclave_api::{ enclave_base::EnclaveBase, @@ -222,6 +223,27 @@ pub(crate) fn main() { )) }); + let maybe_ipfs_client = config.ipfs_api_url().map(|url| { + let client = ipfs_api_backend_hyper::IpfsClient::from_str(&url).unwrap(); + + let client = if let Some((user, pwd)) = config + .ipfs_api_auth() + .and_then(|s| s.split_once(':').map(|(u, p)| (u.to_string(), p.to_string()))) + { + info!("Using IPFS node at {} with credentials ******", url); + client.with_credentials(user, pwd) + } else { + info!("Using IPFS node at {}", url); + client + }; + let version = tokio::runtime::Runtime::new().unwrap().block_on(client.version()); + match version { + Ok(v) => info!("Connected to IPFS node version: {}", v.version), + Err(e) => error!("Error getting IPFS node version: {}", e), + } + Arc::new(client) + }); + // initialize o-call bridge with a concrete factory implementation OCallBridge::initialize(Arc::new(OCallBridgeComponentFactory::new( node_api_factory.clone(), @@ -234,6 +256,7 @@ pub(crate) fn main() { peer_sidechain_block_fetcher, tokio_handle.clone(), enclave_metrics_receiver, + maybe_ipfs_client, config.data_dir().into(), ))); @@ -563,9 +586,12 @@ fn start_worker( .expect("our enclave should be registered at this point"); trace!("verified that our enclave is registered: {:?}", my_enclave); - let (we_are_primary_validateer, re_init_parentchain_needed) = - match integritee_rpc_api.primary_worker_for_shard(shard, None).unwrap() { - Some(primary_enclave) => match primary_enclave.instance_signer() { + let (we_are_primary_validateer, re_init_parentchain_needed) = match integritee_rpc_api + .primary_worker_for_shard(shard, None) + .unwrap() + { + Some(primary_enclave) => + match primary_enclave.instance_signer() { 
AnySigner::Known(MultiSigner::Ed25519(primary)) => if primary.encode() == tee_accountid.encode() { println!("We are primary worker on this shard and we have been previously running."); @@ -600,23 +626,23 @@ fn start_worker( ); }, }, - None => - if WorkerModeProvider::worker_mode() != WorkerMode::Teeracle { - println!("We are the primary worker on this shard and the shard is untouched. Will initialize it"); - enclave.init_shard(shard.encode()).unwrap(); - enclave - .init_shard_creation_parentchain_header( - shard, - &ParentchainId::Integritee, - ®ister_enclave_xt_header, - ) - .unwrap(); - debug!("shard config should be initialized on integritee network now"); - (true, true) - } else { - (true, false) - }, - }; + None => + if WorkerModeProvider::worker_mode() != WorkerMode::Teeracle { + println!("We are the primary worker on this shard and the shard is untouched. Will initialize it"); + enclave.init_shard(shard.encode()).unwrap(); + enclave + .init_shard_creation_parentchain_header( + shard, + &ParentchainId::Integritee, + ®ister_enclave_xt_header, + ) + .unwrap(); + debug!("shard config should be initialized on integritee network now"); + (true, true) + } else { + (true, false) + }, + }; debug!("getting shard creation: {:?}", enclave.get_shard_creation_info(shard)); initialization_handler.registered_on_parentchain(); diff --git a/service/src/ocall_bridge/component_factory.rs b/service/src/ocall_bridge/component_factory.rs index 6d36702e0..607fae27a 100644 --- a/service/src/ocall_bridge/component_factory.rs +++ b/service/src/ocall_bridge/component_factory.rs @@ -32,6 +32,7 @@ use crate::{ sync_block_broadcaster::BroadcastBlocks, worker_peers_updater::UpdateWorkerPeers, }; +use ipfs_api_backend_hyper::IpfsClient; use itp_api_client_types::{Config, Request}; use itp_enclave_api::remote_attestation::RemoteAttestationCallBacks; use itp_node_api::node_api_factory::{CreateNodeApi, NodeApiFactory}; @@ -69,6 +70,7 @@ pub struct OCallBridgeComponentFactory< peer_block_fetcher: Arc, tokio_handle: Arc, metrics_receiver: Arc, + maybe_ipfs_client: Option>, log_dir: Arc, } @@ -115,6 +117,7 @@ impl< peer_block_fetcher: Arc, tokio_handle: Arc, metrics_receiver: Arc, + maybe_ipfs_client: Option>, log_dir: Arc, ) -> Self { OCallBridgeComponentFactory { @@ -128,6 +131,7 @@ impl< peer_block_fetcher, tokio_handle, metrics_receiver, + maybe_ipfs_client, log_dir, } } @@ -156,7 +160,8 @@ impl< PeerBlockFetcher, TokioHandle, MetricsReceiver, - > where + > +where IntegriteeRuntimeConfig: Config + 'static, TargetARuntimeConfig: Config + 'static, @@ -196,7 +201,7 @@ impl< } fn get_ipfs_api(&self) -> Arc { - Arc::new(IpfsOCall {}) + Arc::new(IpfsOCall::new(self.maybe_ipfs_client.clone())) } fn get_metrics_api(&self) -> Arc { diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index 225f672ce..d69092f2c 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -18,28 +18,39 @@ use crate::ocall_bridge::bridge_api::{IpfsBridge, OCallBridgeError, OCallBridgeResult}; use futures::TryStreamExt; -use ipfs_api::IpfsClient; +use ipfs_api_backend_hyper::{IpfsApi, IpfsClient, TryFromUri}; use itp_utils::IpfsCid; use log::*; use std::{ fs::File, io::{Cursor, Write}, str, - sync::mpsc::channel, + sync::{mpsc::channel, Arc}, }; -pub struct IpfsOCall; +pub struct IpfsOCall { + client: Arc, +} + +impl IpfsOCall { + pub fn new(client: Option>) -> Self { + // Fallback if None: + // Creates an `IpfsClient` connected to the endpoint specified in 
~/.ipfs/api. + // If not found, tries to connect to `localhost:5001`. + Self { client: client.unwrap_or_default() } + } +} impl IpfsBridge for IpfsOCall { fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { debug!(" Entering ocall_write_ipfs"); - write_to_ipfs(data) + write_to_ipfs(&self.client, data) } fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()> { debug!("Entering ocall_read_ipfs"); - let result = read_from_ipfs(&cid); + let result = read_from_ipfs(&self.client, &cid); match result { Ok(res) => { let filename = format!("{:?}", cid); @@ -60,24 +71,13 @@ fn create_file(filename: &str, result: &[u8]) -> Result<(), String> { } #[tokio::main] -async fn write_to_ipfs(data: &'static [u8]) -> OCallBridgeResult { - // Creates an `IpfsClient` connected to the endpoint specified in ~/.ipfs/api. - // If not found, tries to connect to `localhost:5001`. - let client = IpfsClient::default(); - match client.version().await { - Ok(version) => info!("version: {:?}", version.version), - Err(e) => { - error!("error getting version: {}", e); - return Err(OCallBridgeError::IpfsError(format!("error getting version: {}", e))); - }, - } - +async fn write_to_ipfs(client: &IpfsClient, data: &'static [u8]) -> OCallBridgeResult { let datac = Cursor::new(data); let (tx, rx) = channel(); match client.add(datac).await { Ok(res) => { - info!("Result IpfsCid {}", res.hash); + debug!("Result IpfsCid {}", res.hash); tx.send(res.hash.into_bytes()).unwrap(); }, Err(e) => { @@ -99,14 +99,9 @@ async fn write_to_ipfs(data: &'static [u8]) -> OCallBridgeResult { } #[tokio::main] -pub async fn read_from_ipfs(cid: &IpfsCid) -> Result, String> { - // Creates an `IpfsClient` connected to the endpoint specified in ~/.ipfs/api. - // If not found, tries to connect to `localhost:5001`. 
- let client = IpfsClient::default(); +pub async fn read_from_ipfs(client: &IpfsClient, cid: &IpfsCid) -> Result, String> { let h = format!("{:?}", cid); - - info!("Fetching content from: {}", h); - + debug!("Fetching content with cid {}", h); client .cat(&h) .map_ok(|chunk| chunk.to_vec()) From ffdddd7be02c615c823df0756492f915c9b264a6 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Fri, 19 Sep 2025 14:18:41 +0200 Subject: [PATCH 09/91] proxy-encrypt the message payload and decrypt in cli when fetching notes --- Cargo.lock | 1 + app-libs/stf/Cargo.toml | 1 + app-libs/stf/src/trusted_call.rs | 20 ++++--- cli/src/lib.rs | 4 ++ .../trusted_base_cli/commands/get_notes.rs | 55 ++++++++++++++++--- core-primitives/randomness/src/lib.rs | 17 +++++- core-primitives/utils/src/ipfs.rs | 12 +++- enclave-runtime/Cargo.lock | 1 + 8 files changed, 93 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fed3e45df..4601350a2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3001,6 +3001,7 @@ dependencies = [ "itp-node-api-metadata", "itp-pallet-storage", "itp-randomness", + "itp-sgx-crypto", "itp-sgx-externalities", "itp-sgx-runtime-primitives", "itp-stf-interface", diff --git a/app-libs/stf/Cargo.toml b/app-libs/stf/Cargo.toml index e3278dcb2..f0fdbbf6b 100644 --- a/app-libs/stf/Cargo.toml +++ b/app-libs/stf/Cargo.toml @@ -24,6 +24,7 @@ itp-node-api = { default-features = false, path = "../../core-primitives/node-ap itp-node-api-metadata = { default-features = false, path = "../../core-primitives/node-api/metadata" } itp-pallet-storage = { path = "../../core-primitives/pallet-storage", default-features = false } itp-randomness = { path = "../../core-primitives/randomness", default-features = false } +itp-sgx-crypto = { default-features = false, path = "../../core-primitives/sgx/crypto" } itp-sgx-externalities = { default-features = false, path = "../../core-primitives/substrate-sgx/externalities" } itp-sgx-runtime-primitives = { default-features = false, path = "../../core-primitives/sgx-runtime-primitives" } itp-stf-interface = { default-features = false, path = "../../core-primitives/stf-interface" } diff --git a/app-libs/stf/src/trusted_call.rs b/app-libs/stf/src/trusted_call.rs index 71961f425..d5387786e 100644 --- a/app-libs/stf/src/trusted_call.rs +++ b/app-libs/stf/src/trusted_call.rs @@ -57,6 +57,8 @@ use itp_node_api_metadata::{ pallet_enclave_bridge::EnclaveBridgeCallIndexes, pallet_proxy::ProxyCallIndexes, }; +use itp_randomness::{Randomness, SgxRandomness}; +use itp_sgx_crypto::{aes::Aes, StateCrypto}; use itp_stf_interface::ExecuteCall; use itp_stf_primitives::{ error::StfError, @@ -647,15 +649,19 @@ where { Ok(RelayedNoteRetreivalInfo::Here { msg: request.msg.clone() }) } else if request.relay_type == NoteRelayType::Ipfs { - //todo: proxy re-encryption for IPFS content. now plaintext - let cid = IpfsCid::from_content_bytes(&request.msg) + let key = SgxRandomness::random_128bits(); + let iv = SgxRandomness::random_128bits(); + let encryption_key: [u8; 32] = + [key.as_ref(), iv.as_ref()].concat().try_into().expect("2x16=32. 
q.e.d."); + let aes = Aes::new(key, iv); + let mut ciphertext = request.msg.clone(); + aes.encrypt(&mut ciphertext) + .map_err(|e| StfError::Dispatch(format!("AES encrypt error: {:?}", e)))?; + let cid = IpfsCid::from_content_bytes(&ciphertext) .map_err(|e| StfError::Dispatch(format!("IPFS error: {:?}", e)))?; info!("storing relayed note to IPFS with CID {:?}", cid); - side_effects.push(TrustedCallSideEffect::IpfsAdd(request.msg)); - Ok(RelayedNoteRetreivalInfo::Ipfs { - cid, - encryption_key: request.maybe_encryption_key.unwrap_or([0u8; 32]), - }) + side_effects.push(TrustedCallSideEffect::IpfsAdd(ciphertext)); + Ok(RelayedNoteRetreivalInfo::Ipfs { cid, encryption_key }) } else { Err(StfError::Dispatch("Invalid relayed note request".into())) }?; diff --git a/cli/src/lib.rs b/cli/src/lib.rs index 60ef8a2a2..dcd5a03eb 100644 --- a/cli/src/lib.rs +++ b/cli/src/lib.rs @@ -94,6 +94,10 @@ pub struct Cli { #[clap(short = 'P', long, default_value_t = String::from("2000"))] trusted_worker_port: String, + /// IPFS gateway url including query path, e.g. "https://ipfs.integritee.network/ipfs" + #[clap(short = 'i', long, default_value_t = String::from("https://ipfs.integritee.network/ipfs"))] + ipfs_gateway_url: String, + #[clap(subcommand)] command: Commands, } diff --git a/cli/src/trusted_base_cli/commands/get_notes.rs b/cli/src/trusted_base_cli/commands/get_notes.rs index a415cd651..bb2390866 100644 --- a/cli/src/trusted_base_cli/commands/get_notes.rs +++ b/cli/src/trusted_base_cli/commands/get_notes.rs @@ -19,16 +19,17 @@ use crate::{ trusted_command_utils::get_pair_from_str, trusted_operation::perform_trusted_operation, Cli, CliResult, CliResultOk, }; - use codec::Decode; use ita_stf::{ - guess_the_number::GuessTheNumberTrustedCall, Getter, TrustedCall, TrustedCallSigned, - TrustedGetter, + guess_the_number::GuessTheNumberTrustedCall, relayed_note::RelayedNoteRetreivalInfo, Getter, + TrustedCall, TrustedCallSigned, TrustedGetter, }; +use itp_sgx_crypto::{aes::Aes, StateCrypto}; use itp_stf_primitives::types::{KeyPair, TrustedOperation}; use itp_types::{AccountId, Moment}; -use log::error; +use log::{debug, error}; use pallet_notes::{BucketIndex, TimestampedTrustedNote, TrustedNote}; +use reqwest::blocking::get; use sp_core::{crypto::Ss58Codec, Pair}; #[derive(Parser)] @@ -126,14 +127,32 @@ impl GetNotesCommand { to, conversation_id, retreival, - ) => + ) => { + let msg = match retreival { + RelayedNoteRetreivalInfo::Ipfs { cid, encryption_key } => { + debug!("fetching ipfs data for cid: {:?}", cid); + let ciphertext = fetch_ipfs_data( + &cli.ipfs_gateway_url, + &cid.to_string(), + ) + .unwrap(); + let plaintext = decrypt(&ciphertext, &encryption_key); + String::from_utf8_lossy(&plaintext).to_string() + }, + RelayedNoteRetreivalInfo::Here { msg } => + String::from_utf8_lossy(msg.as_ref()).to_string(), + RelayedNoteRetreivalInfo::Undeclared { .. 
} => { + "[encryption key provided: *****, but message relay is undeclared]".into() + }, + }; + if from == who_accountid { println!( - "[{}] Message in conversation {} to: {}: {:?}", + "[{}] Message in conversation {} to: {}: {:?}", datetime_str, conversation_id, to.to_ss58check(), - retreival + msg ); } else { println!( @@ -141,10 +160,10 @@ impl GetNotesCommand { datetime_str, conversation_id, from.to_ss58check(), - retreival + msg ); - }, - + } + }, _ => println!("[{}] {:?}", datetime_str, call), } } else { @@ -157,3 +176,21 @@ impl GetNotesCommand { Ok(CliResultOk::Notes { notes }) } } + +fn fetch_ipfs_data(gateway_url: &str, ipfs_hash: &str) -> Result, reqwest::Error> { + let url = format!("{}/ipfs/{}", gateway_url.trim_end_matches('/'), ipfs_hash); + debug!("Fetching ipfs data from url: {}", url); + let response = get(&url)?; + let bytes = response.bytes()?.to_vec(); + Ok(bytes) +} + +fn decrypt(data: &Vec, encryption_key: &[u8; 32]) -> Vec { + let key: [u8; 16] = encryption_key[0..16].try_into().unwrap(); + let iv: [u8; 16] = encryption_key[16..32].try_into().unwrap(); + debug!("decrypting with \n key 0x{} \n iv 0x{}", hex::encode(key), hex::encode(iv)); + let aes = Aes::new(key, iv); + let mut decrypted_data = data.clone(); + aes.decrypt(&mut decrypted_data).unwrap(); + decrypted_data +} diff --git a/core-primitives/randomness/src/lib.rs b/core-primitives/randomness/src/lib.rs index 5a722180e..4de7f2c4d 100644 --- a/core-primitives/randomness/src/lib.rs +++ b/core-primitives/randomness/src/lib.rs @@ -6,6 +6,7 @@ use sgx_rand::{thread_rng, Rng}; pub trait Randomness { fn shuffle(values: &mut [T]); fn random_u32(min: u32, max: u32) -> u32; + fn random_128bits() -> [u8; 16]; } pub struct SgxRandomness; @@ -22,6 +23,13 @@ impl Randomness for SgxRandomness { let mut rng = thread_rng(); // Use thread-local random number generator rng.gen_range(min, max) } + + fn random_128bits() -> [u8; 16] { + let mut rng = thread_rng(); // Use thread-local random number generator + let mut buf = [0u8; 16]; + rng.fill_bytes(&mut buf); + buf + } } #[cfg(not(feature = "sgx"))] @@ -29,10 +37,12 @@ impl Randomness for SgxRandomness { fn shuffle(_values: &mut [T]) { unimplemented!() } - fn random_u32(_min: u32, _max: u32) -> u32 { unimplemented!() } + fn random_128bits() -> [u8; 16] { + unimplemented!() + } } pub struct MockRandomness; @@ -49,4 +59,9 @@ impl Randomness for MockRandomness { fn random_u32(min: u32, max: u32) -> u32 { min + max / 2 } + + /// return a deterministic 128-bit value + fn random_128bits() -> [u8; 16] { + [0u8; 16] + } } diff --git a/core-primitives/utils/src/ipfs.rs b/core-primitives/utils/src/ipfs.rs index fa9cf9fa2..b18488434 100644 --- a/core-primitives/utils/src/ipfs.rs +++ b/core-primitives/utils/src/ipfs.rs @@ -1,7 +1,10 @@ use alloc::vec::Vec; use cid::Cid; use codec::{Decode, Encode}; -use core::{convert::TryFrom, fmt::Debug}; +use core::{ + convert::TryFrom, + fmt::{Debug, Display}, +}; use ipfs_unixfs::file::adder::FileAdder; use multibase::Base; @@ -59,6 +62,13 @@ impl Debug for IpfsCid { } } +impl Display for IpfsCid { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let cid_str = Base::Base58Btc.encode(self.0.hash().as_bytes()); + write!(f, "{}", cid_str) + } +} + pub struct IpfsContent { pub cid: IpfsCid, pub file_content: Vec, diff --git a/enclave-runtime/Cargo.lock b/enclave-runtime/Cargo.lock index 86583804e..a0627ce64 100644 --- a/enclave-runtime/Cargo.lock +++ b/enclave-runtime/Cargo.lock @@ -1762,6 +1762,7 @@ dependencies = [ "itp-node-api-metadata", "itp-pallet-storage", "itp-randomness",
+ "itp-sgx-crypto", "itp-sgx-externalities", "itp-sgx-runtime-primitives", "itp-stf-interface", From 5ccaae60346ec176997160163a8b9901f32b33ed Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Fri, 19 Sep 2025 14:24:06 +0200 Subject: [PATCH 10/91] cleanup & fmt --- app-libs/stf/Cargo.toml | 2 +- app-libs/stf/src/lib.rs | 1 - app-libs/stf/src/trusted_call.rs | 3 +- core-primitives/stf-executor/src/executor.rs | 6 +-- core-primitives/utils/Cargo.toml | 4 +- core/offchain-worker-executor/src/executor.rs | 3 +- enclave-runtime/src/test/tests_main.rs | 3 +- service/src/main_impl.rs | 43 +++++++++---------- service/src/ocall_bridge/component_factory.rs | 3 +- service/src/ocall_bridge/ipfs_ocall.rs | 2 +- 10 files changed, 30 insertions(+), 40 deletions(-) diff --git a/app-libs/stf/Cargo.toml b/app-libs/stf/Cargo.toml index f0fdbbf6b..ef89c677a 100644 --- a/app-libs/stf/Cargo.toml +++ b/app-libs/stf/Cargo.toml @@ -7,10 +7,10 @@ edition = "2021" [dependencies] # crates.io codec = { version = "3.0.0", default-features = false, features = ["derive"], package = "parity-scale-codec" } +hex = { version = "0.4.3", default-features = false, features = ["alloc"] } log = { version = "0.4", default-features = false } rlp = { version = "0.5", default-features = false } sha3 = { version = "0.10", default-features = false } -hex = { version = "0.4.3", default-features = false, features = ["alloc"] } # sgx deps sgx_tstd = { branch = "master", features = ["untrusted_fs", "net", "backtrace"], git = "https://github.com/apache/teaclave-sgx-sdk.git", optional = true } diff --git a/app-libs/stf/src/lib.rs b/app-libs/stf/src/lib.rs index 3601ac3a6..ad589439d 100644 --- a/app-libs/stf/src/lib.rs +++ b/app-libs/stf/src/lib.rs @@ -25,7 +25,6 @@ #![cfg_attr(target_env = "sgx", feature(rustc_private))] extern crate alloc; -extern crate core; #[cfg(all(not(feature = "std"), feature = "sgx"))] extern crate sgx_tstd as std; diff --git a/app-libs/stf/src/trusted_call.rs b/app-libs/stf/src/trusted_call.rs index d5387786e..a67769a14 100644 --- a/app-libs/stf/src/trusted_call.rs +++ b/app-libs/stf/src/trusted_call.rs @@ -972,8 +972,7 @@ where let unshield_amount = balance.saturating_sub( MinimalChainSpec::one_unit( shielding_target_genesis_hash().unwrap_or_default(), - ) / STF_TX_FEE_UNIT_DIVIDER - * 3, + ) / STF_TX_FEE_UNIT_DIVIDER * 3, ); let parentchain_call = parentchain_vault_proxy_call( unshield_native_from_vault_parentchain_call( diff --git a/core-primitives/stf-executor/src/executor.rs b/core-primitives/stf-executor/src/executor.rs index 4087d53c8..73b11cbda 100644 --- a/core-primitives/stf-executor/src/executor.rs +++ b/core-primitives/stf-executor/src/executor.rs @@ -159,10 +159,8 @@ where hex::encode(call.encode()), mortality ), }, - TrustedCallSideEffect::IpfsAdd(blob) => trace!( - "trusted_call wants to add blob of size {} to ipfs", - blob.len() - ), + TrustedCallSideEffect::IpfsAdd(blob) => + trace!("trusted_call wants to add blob of size {} to ipfs", blob.len()), } } Ok(ExecutedOperation::success(operation_hash, top_or_hash, trusted_call_side_effects)) diff --git a/core-primitives/utils/Cargo.toml b/core-primitives/utils/Cargo.toml index 76e8d1464..3592ca899 100644 --- a/core-primitives/utils/Cargo.toml +++ b/core-primitives/utils/Cargo.toml @@ -8,12 +8,12 @@ license = "Apache-2.0" edition = "2021" [dependencies] +cid = { default-features = false, git = "https://github.com/whalelephant/rust-cid", branch = "nstd" } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = 
["derive"] } hex = { version = "0.4.3", default-features = false, features = ["alloc"] } +ipfs-unixfs = { default-features = false, git = "https://github.com/whalelephant/rust-ipfs", branch = "w-nstd" } log = "0.4.28" -cid = { default-features = false, git = "https://github.com/whalelephant/rust-cid", branch = "nstd" } multibase = { default-features = false, git = "https://github.com/whalelephant/rust-multibase", branch = "nstd" } -ipfs-unixfs = { default-features = false, git = "https://github.com/whalelephant/rust-ipfs", branch = "w-nstd" } [features] default = ["std"] diff --git a/core/offchain-worker-executor/src/executor.rs b/core/offchain-worker-executor/src/executor.rs index ab5532091..c9b6a9a76 100644 --- a/core/offchain-worker-executor/src/executor.rs +++ b/core/offchain-worker-executor/src/executor.rs @@ -84,8 +84,7 @@ impl< Stf, TCS, G, - > -where + > where ParentchainBlock: Block, ParentchainBlock::Header: Header, StfExecutor: StateUpdateProposer, diff --git a/enclave-runtime/src/test/tests_main.rs b/enclave-runtime/src/test/tests_main.rs index 23529c62a..59032cc46 100644 --- a/enclave-runtime/src/test/tests_main.rs +++ b/enclave-runtime/src/test/tests_main.rs @@ -426,8 +426,7 @@ fn test_create_state_diff() { assert_eq!( sender_acc_info.data.free, ita_stf::test_genesis::ENDOWED_ACC_FUNDS - - TX_AMOUNT - - 1_000_000_000_000 / ita_stf::STF_TX_FEE_UNIT_DIVIDER + - TX_AMOUNT - 1_000_000_000_000 / ita_stf::STF_TX_FEE_UNIT_DIVIDER ); } diff --git a/service/src/main_impl.rs b/service/src/main_impl.rs index e8e4d3fad..5fd8edbdf 100644 --- a/service/src/main_impl.rs +++ b/service/src/main_impl.rs @@ -586,12 +586,9 @@ fn start_worker( .expect("our enclave should be registered at this point"); trace!("verified that our enclave is registered: {:?}", my_enclave); - let (we_are_primary_validateer, re_init_parentchain_needed) = match integritee_rpc_api - .primary_worker_for_shard(shard, None) - .unwrap() - { - Some(primary_enclave) => - match primary_enclave.instance_signer() { + let (we_are_primary_validateer, re_init_parentchain_needed) = + match integritee_rpc_api.primary_worker_for_shard(shard, None).unwrap() { + Some(primary_enclave) => match primary_enclave.instance_signer() { AnySigner::Known(MultiSigner::Ed25519(primary)) => if primary.encode() == tee_accountid.encode() { println!("We are primary worker on this shard and we have been previously running."); @@ -626,23 +623,23 @@ fn start_worker( ); }, }, - None => - if WorkerModeProvider::worker_mode() != WorkerMode::Teeracle { - println!("We are the primary worker on this shard and the shard is untouched. Will initialize it"); - enclave.init_shard(shard.encode()).unwrap(); - enclave - .init_shard_creation_parentchain_header( - shard, - &ParentchainId::Integritee, - ®ister_enclave_xt_header, - ) - .unwrap(); - debug!("shard config should be initialized on integritee network now"); - (true, true) - } else { - (true, false) - }, - }; + None => + if WorkerModeProvider::worker_mode() != WorkerMode::Teeracle { + println!("We are the primary worker on this shard and the shard is untouched. 
Will initialize it"); + enclave.init_shard(shard.encode()).unwrap(); + enclave + .init_shard_creation_parentchain_header( + shard, + &ParentchainId::Integritee, + ®ister_enclave_xt_header, + ) + .unwrap(); + debug!("shard config should be initialized on integritee network now"); + (true, true) + } else { + (true, false) + }, + }; debug!("getting shard creation: {:?}", enclave.get_shard_creation_info(shard)); initialization_handler.registered_on_parentchain(); diff --git a/service/src/ocall_bridge/component_factory.rs b/service/src/ocall_bridge/component_factory.rs index 607fae27a..6738f66a6 100644 --- a/service/src/ocall_bridge/component_factory.rs +++ b/service/src/ocall_bridge/component_factory.rs @@ -160,8 +160,7 @@ impl< PeerBlockFetcher, TokioHandle, MetricsReceiver, - > -where + > where IntegriteeRuntimeConfig: Config + 'static, TargetARuntimeConfig: Config + 'static, diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index d69092f2c..f066d14e3 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -82,7 +82,7 @@ async fn write_to_ipfs(client: &IpfsClient, data: &'static [u8]) -> OCallBridgeR }, Err(e) => { error!("error adding file: {}", e); - return Err(OCallBridgeError::IpfsError(format!("error adding file: {}", e))); + return Err(OCallBridgeError::IpfsError(format!("error adding file: {}", e))) }, } rx.recv() From 836073c7a11e958c768e0356c76dbf28d58ac3b6 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Fri, 19 Sep 2025 14:36:17 +0200 Subject: [PATCH 11/91] clippy --- app-libs/stf/src/trusted_call.rs | 6 +++--- cli/src/trusted_base_cli/commands/get_notes.rs | 4 ++-- core-primitives/utils/src/ipfs.rs | 4 ++-- sidechain/rpc-handler/src/direct_top_pool_api.rs | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/app-libs/stf/src/trusted_call.rs b/app-libs/stf/src/trusted_call.rs index a67769a14..b4273649a 100644 --- a/app-libs/stf/src/trusted_call.rs +++ b/app-libs/stf/src/trusted_call.rs @@ -635,7 +635,7 @@ where let retreival_info = if (self.call.encoded_size() <= MaxNoteSize::get() as usize) && (request.allow_onchain_fallback) { - Ok(RelayedNoteRetreivalInfo::Here { msg: request.msg.clone() }) + Ok(RelayedNoteRetreivalInfo::Here { msg: request.msg }) } else if (request.relay_type == NoteRelayType::Undeclared) && request.maybe_encryption_key.is_some() { @@ -647,14 +647,14 @@ where } else if request.relay_type == NoteRelayType::Here && request.msg.len() <= MaxNoteSize::get() as usize { - Ok(RelayedNoteRetreivalInfo::Here { msg: request.msg.clone() }) + Ok(RelayedNoteRetreivalInfo::Here { msg: request.msg }) } else if request.relay_type == NoteRelayType::Ipfs { let key = SgxRandomness::random_128bits(); let iv = SgxRandomness::random_128bits(); let encryption_key: [u8; 32] = [key.as_ref(), iv.as_ref()].concat().try_into().expect("2x16=32. 
q.e.d."); let aes = Aes::new(key, iv); - let mut ciphertext = request.msg.clone(); + let mut ciphertext = request.msg; aes.encrypt(&mut ciphertext) .map_err(|e| StfError::Dispatch(format!("AES encrypt error: {:?}", e)))?; let cid = IpfsCid::from_content_bytes(&ciphertext) diff --git a/cli/src/trusted_base_cli/commands/get_notes.rs b/cli/src/trusted_base_cli/commands/get_notes.rs index bb2390866..c6ff0adc7 100644 --- a/cli/src/trusted_base_cli/commands/get_notes.rs +++ b/cli/src/trusted_base_cli/commands/get_notes.rs @@ -185,12 +185,12 @@ fn fetch_ipfs_data(gateway_url: &str, ipfs_hash: &str) -> Result, reqwes Ok(bytes) } -fn decrypt(data: &Vec, encryption_key: &[u8; 32]) -> Vec { +fn decrypt(data: &[u8], encryption_key: &[u8; 32]) -> Vec { let key: [u8; 16] = encryption_key[0..16].try_into().unwrap(); let iv: [u8; 16] = encryption_key[16..32].try_into().unwrap(); debug!("decrypting with \n key 0x{} \n iv 0x{}", hex::encode(key), hex::encode(iv)); let aes = Aes::new(key, iv); - let mut decrypted_data = data.clone(); + let mut decrypted_data = data.to_vec(); aes.decrypt(&mut decrypted_data).unwrap(); decrypted_data } diff --git a/core-primitives/utils/src/ipfs.rs b/core-primitives/utils/src/ipfs.rs index b18488434..47308316d 100644 --- a/core-primitives/utils/src/ipfs.rs +++ b/core-primitives/utils/src/ipfs.rs @@ -37,7 +37,7 @@ impl IpfsCid { } let blocks = adder.finish(); stats.process(blocks); - stats.last.map(|cid| IpfsCid(cid)).ok_or(IpfsError::FinalCidMissing) + stats.last.map(IpfsCid).ok_or(IpfsError::FinalCidMissing) } } impl Encode for IpfsCid { @@ -105,7 +105,7 @@ impl IpfsContent { } let blocks = adder.finish(); stats.process(blocks); - stats.last.map(|cid| IpfsCid(cid)).ok_or(IpfsError::FinalCidMissing) + stats.last.map(IpfsCid).ok_or(IpfsError::FinalCidMissing) } } diff --git a/sidechain/rpc-handler/src/direct_top_pool_api.rs b/sidechain/rpc-handler/src/direct_top_pool_api.rs index fc8b62f3e..7e2dba33f 100644 --- a/sidechain/rpc-handler/src/direct_top_pool_api.rs +++ b/sidechain/rpc-handler/src/direct_top_pool_api.rs @@ -164,9 +164,9 @@ where let encrypted_trusted_call: Vec = request.cyphertext; if encrypted_trusted_call.len() > MAX_TOP_SIZE_TO_ENTER_POOL { - let error_msg = format!("Trusted operation too large"); + let error_msg = "Trusted operation too large"; error!("{}", error_msg); - return Err(error_msg) + return Err(error_msg.into()) } let result = async { author.watch_top(encrypted_trusted_call, shard).await }; From 4f4ae8c96d6794ae02015b48beba05c7acc3f870 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Fri, 19 Sep 2025 17:23:36 +0200 Subject: [PATCH 12/91] fix tests --- app-libs/stf/src/relayed_note.rs | 17 ++++ cli/src/lib.rs | 4 +- .../trusted_base_cli/commands/send_note.rs | 2 +- core-primitives/utils/src/ipfs.rs | 96 +++++-------------- service/src/tests/commons.rs | 2 + .../aura/src/test/mocks/proposer_mock.rs | 2 +- 6 files changed, 49 insertions(+), 74 deletions(-) diff --git a/app-libs/stf/src/relayed_note.rs b/app-libs/stf/src/relayed_note.rs index 7b6672149..89ada31db 100644 --- a/app-libs/stf/src/relayed_note.rs +++ b/app-libs/stf/src/relayed_note.rs @@ -1,3 +1,20 @@ +/* + Copyright 2021 Integritee AG + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ + use codec::{Decode, Encode}; use core::fmt::Debug; use itp_utils::IpfsCid; diff --git a/cli/src/lib.rs b/cli/src/lib.rs index dcd5a03eb..95cc79256 100644 --- a/cli/src/lib.rs +++ b/cli/src/lib.rs @@ -94,8 +94,8 @@ pub struct Cli { #[clap(short = 'P', long, default_value_t = String::from("2000"))] trusted_worker_port: String, - /// IPFS gateway url including query path, e.g. "https://ipfs.integritee.network/ipfs" - #[clap(short = 'i', long, default_value_t = String::from("https://ipfs.integritee.network/ipfs"))] + /// IPFS gateway url, e.g. "https://ipfs.integritee.network" + #[clap(short = 'i', long, default_value_t = String::from("https://ipfs.integritee.network"))] ipfs_gateway_url: String, #[clap(subcommand)] diff --git a/cli/src/trusted_base_cli/commands/send_note.rs b/cli/src/trusted_base_cli/commands/send_note.rs index cf963a2d6..8cf727199 100644 --- a/cli/src/trusted_base_cli/commands/send_note.rs +++ b/cli/src/trusted_base_cli/commands/send_note.rs @@ -1,5 +1,5 @@ /* - Copyright 2021 Integritee AG and Supercomputing Systems AG + Copyright 2021 Integritee AG Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/core-primitives/utils/src/ipfs.rs b/core-primitives/utils/src/ipfs.rs index 47308316d..00e48203a 100644 --- a/core-primitives/utils/src/ipfs.rs +++ b/core-primitives/utils/src/ipfs.rs @@ -1,3 +1,20 @@ +/* + Copyright 2021 Integritee AG + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ + use alloc::vec::Vec; use cid::Cid; use codec::{Decode, Encode}; @@ -8,6 +25,7 @@ use core::{ use ipfs_unixfs::file::adder::FileAdder; use multibase::Base; +/// IPFS content identifier helper: https://docs.ipfs.tech/concepts/content-addressing/ #[derive(Clone, PartialEq, Eq)] pub struct IpfsCid(pub Cid); @@ -69,10 +87,6 @@ impl Display for IpfsCid { } } -pub struct IpfsContent { - pub cid: IpfsCid, - pub file_content: Vec, -} #[derive(Debug, PartialEq)] pub enum IpfsError { InputCidInvalid, @@ -80,54 +94,8 @@ pub enum IpfsError { Verification, } -impl IpfsContent { - pub fn new_with_cid_unverified(cid: IpfsCid, content: Vec) -> IpfsContent { - IpfsContent { cid, file_content: content } - } - - pub fn verify(&mut self) -> Result<(), IpfsError> { - let derived_cid = Self::derive_cid_from_file_content(&self.file_content)?; - if derived_cid.0.hash().eq(&self.cid.0.hash()) { - Ok(()) - } else { - Err(IpfsError::Verification) - } - } - - pub fn derive_cid_from_file_content(file_content: &Vec) -> Result { - let mut adder: FileAdder = FileAdder::default(); - let mut total: usize = 0; - let mut stats = Stats::default(); - while total < file_content.len() { - let (blocks, consumed) = adder.push(&file_content[total..]); - total += consumed; - stats.process(blocks); - } - let blocks = adder.finish(); - stats.process(blocks); - stats.last.map(IpfsCid).ok_or(IpfsError::FinalCidMissing) - } -} - -impl TryFrom> for IpfsContent { - type Error = IpfsError; - - fn try_from(value: Vec) -> Result { - let cid = Self::derive_cid_from_file_content(&value)?; - Ok(IpfsContent { cid, file_content: value }) - } -} - -impl Debug for IpfsContent { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let cid_str = Base::Base58Btc.encode(self.cid.0.hash().as_bytes()); - f.debug_struct("IpfsContent") - .field("cid", &cid_str) - .field("file_content_length", &self.file_content.len()) - .finish() - } -} - +/// IPFS chunk blocks helper +/// See https://ipfs-search.readthedocs.io/en/latest/ipfs_datatypes.html#files #[derive(Default)] pub struct Stats { pub blocks: usize, @@ -150,33 +118,21 @@ mod tests { use super::*; use alloc::vec; #[test] - pub fn test_try_from_multichunk_content_works() { - let expected_cid_str = "QmSaFjwJ2QtS3rZDKzC98XEzv2bqT4TfpWLCpphPPwyQTr"; - let expected_cid = IpfsCid::try_from(expected_cid_str).unwrap(); - let content: Vec = vec![20; 512 * 1024]; // bigger than one chunk of 256kB - let ipfs_content = IpfsContent::try_from(content.clone()).unwrap(); - assert_eq!(ipfs_content.cid, expected_cid); - assert_eq!(ipfs_content.file_content, content); - } - - #[test] - pub fn test_verification_ok_for_correct_multichunk_content() { + pub fn test_from_multichunk_content_works() { let expected_cid_str = "QmSaFjwJ2QtS3rZDKzC98XEzv2bqT4TfpWLCpphPPwyQTr"; let expected_cid = IpfsCid::try_from(expected_cid_str).unwrap(); let content: Vec = vec![20; 512 * 1024]; // bigger than one chunk of 256kB - let mut ipfs_content = IpfsContent::new_with_cid_unverified(expected_cid, content); - let verification = ipfs_content.verify(); - assert!(verification.is_ok()); + let derived_cid = IpfsCid::from_content_bytes(&content).unwrap(); + assert_eq!(derived_cid, expected_cid); } #[test] - pub fn test_verification_fails_for_incorrect_multichunk_content() { + pub fn test_cid_verification_fails_for_incorrect_multichunk_content() { let expected_cid_str = "QmSaFjwJ2QtS3rZDKzC98XEzv2bqT4TfpWLCpphPPwyQTr"; let expected_cid = IpfsCid::try_from(expected_cid_str).unwrap(); let content: Vec = vec![99; 512 * 
1024]; // bigger than one chunk of 256kB - let mut ipfs_content = IpfsContent::new_with_cid_unverified(expected_cid, content); - let verification = ipfs_content.verify(); - assert!(verification.is_err()); + let wrong_cid = IpfsCid::from_content_bytes(&content).unwrap(); + assert!(wrong_cid != expected_cid); } #[test] diff --git a/service/src/tests/commons.rs b/service/src/tests/commons.rs index f87020168..702818246 100644 --- a/service/src/tests/commons.rs +++ b/service/src/tests/commons.rs @@ -54,6 +54,8 @@ pub fn local_worker_config( false, "8787".to_string(), "4545".to_string(), + None, + None, crate::config::pwd(), None, ) diff --git a/sidechain/consensus/aura/src/test/mocks/proposer_mock.rs b/sidechain/consensus/aura/src/test/mocks/proposer_mock.rs index 574083aaf..03d99a9d0 100644 --- a/sidechain/consensus/aura/src/test/mocks/proposer_mock.rs +++ b/sidechain/consensus/aura/src/test/mocks/proposer_mock.rs @@ -42,7 +42,7 @@ impl Proposer for ProposerMock { SidechainBlockBuilder::random().with_block_data(block_data).build_signed() }, - parentchain_effects: Default::default(), + side_effects: Default::default(), }) } } From 68a42228af6d2efb85f5796898ea26f3a1dba92d Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Fri, 19 Sep 2025 17:45:03 +0200 Subject: [PATCH 13/91] fix evm build --- enclave-runtime/src/test/evm_pallet_tests.rs | 28 ++++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/enclave-runtime/src/test/evm_pallet_tests.rs b/enclave-runtime/src/test/evm_pallet_tests.rs index a42376b11..c1fffdee8 100644 --- a/enclave-runtime/src/test/evm_pallet_tests.rs +++ b/enclave-runtime/src/test/evm_pallet_tests.rs @@ -29,7 +29,7 @@ use itp_node_api::metadata::{metadata_mocks::NodeMetadataMock, provider::NodeMet use itp_sgx_externalities::SgxExternalitiesTrait; use itp_stf_interface::StateCallInterface; use itp_stf_primitives::{traits::TrustedCallSigning, types::KeyPair}; -use itp_types::{parentchain::ParentchainCall, AccountId, ShardIdentifier}; +use itp_types::{AccountId, ShardIdentifier, TrustedCallSideEffect}; use primitive_types::H256; use sp_core::{crypto::Pair, H160, U256}; use std::{sync::Arc, vec::Vec}; @@ -37,7 +37,7 @@ use std::{sync::Arc, vec::Vec}; pub fn test_evm_call() { // given let (_, mut state, shard, mrenclave, ..) = test_setup(); - let mut parentchain_calls = Vec::new(); + let mut side_effects = Vec::new(); // Create the sender account. let sender = funded_pair(); @@ -82,7 +82,7 @@ pub fn test_evm_call() { &mut state, &ShardIdentifier::default(), trusted_call, - &mut parentchain_calls, + &mut side_effects, repo, ) .unwrap(); @@ -97,7 +97,7 @@ pub fn test_evm_call() { pub fn test_evm_counter() { // given let (_, mut state, shard, mrenclave, ..) = test_setup(); - let mut parentchain_calls = Vec::new(); + let mut side_effects = Vec::new(); // Create the sender account. 
let sender = funded_pair(); @@ -134,7 +134,7 @@ pub fn test_evm_counter() { &mut state, &ShardIdentifier::default(), trusted_call, - &mut parentchain_calls, + &mut side_effects, repo, ) .unwrap(); @@ -173,7 +173,7 @@ pub fn test_evm_counter() { &mrenclave, &shard, &mut state, - &mut parentchain_calls, + &mut side_effects, 2, ); @@ -189,7 +189,7 @@ pub fn test_evm_counter() { &mrenclave, &shard, &mut state, - &mut parentchain_calls, + &mut side_effects, 5, ); @@ -206,7 +206,7 @@ pub fn test_evm_counter() { &mrenclave, &shard, &mut state, - &mut parentchain_calls, + &mut side_effects, 6, ); @@ -229,7 +229,7 @@ pub fn test_evm_counter() { &mrenclave, &shard, &mut state, - &mut parentchain_calls, + &mut side_effects, 8, ); } @@ -246,7 +246,7 @@ fn execute_and_verify_evm_call( mrenclave: &[u8; 32], shard: &ShardIdentifier, state: &mut State, - calls: &mut Vec, + calls: &mut Vec, counter_expected: u64, ) { let inc_call = TrustedCall::evm_call( @@ -274,7 +274,7 @@ fn execute_and_verify_evm_call( pub fn test_evm_create() { // given let (_, mut state, shard, mrenclave, ..) = test_setup(); - let mut parentchain_calls = Vec::new(); + let mut side_effects = Vec::new(); // Create the sender account. let sender = funded_pair(); @@ -313,7 +313,7 @@ pub fn test_evm_create() { &mut state, &ShardIdentifier::default(), trusted_call, - &mut parentchain_calls, + &mut side_effects, repo, ) .unwrap(); @@ -335,7 +335,7 @@ pub fn test_evm_create() { pub fn test_evm_create2() { // given let (_, mut state, shard, mrenclave, ..) = test_setup(); - let mut parentchain_calls = Vec::new(); + let mut side_effects = Vec::new(); // Create the sender account. let sender = funded_pair(); @@ -375,7 +375,7 @@ pub fn test_evm_create2() { &mut state, &ShardIdentifier::default(), trusted_call, - &mut parentchain_calls, + &mut side_effects, repo, ) .unwrap(); From afd5e71058672241780f4c44a9bf6b24cb64415a Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Sat, 20 Sep 2025 14:42:00 +0200 Subject: [PATCH 14/91] fix retreival > retrieval --- app-libs/stf/src/relayed_note.rs | 10 +++---- app-libs/stf/src/trusted_call.rs | 27 ++++++++++--------- .../trusted_base_cli/commands/get_notes.rs | 12 ++++----- 3 files changed, 25 insertions(+), 24 deletions(-) diff --git a/app-libs/stf/src/relayed_note.rs b/app-libs/stf/src/relayed_note.rs index 89ada31db..1d15d89b1 100644 --- a/app-libs/stf/src/relayed_note.rs +++ b/app-libs/stf/src/relayed_note.rs @@ -34,7 +34,7 @@ pub enum NoteRelayType { /// Necessary information for recipient to retrieve and potentially decrypt a relayed note #[derive(Encode, Decode, Clone, PartialEq, Eq)] -pub enum RelayedNoteRetreivalInfo { +pub enum RelayedNoteRetrievalInfo { /// the message is included within and not actually relayed Here { msg: Vec }, /// the message is stored on ipfs, encrypted with the provided key @@ -44,21 +44,21 @@ pub enum RelayedNoteRetreivalInfo { Undeclared { encryption_key: [u8; 32] }, } -impl Debug for RelayedNoteRetreivalInfo { +impl Debug for RelayedNoteRetrievalInfo { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { - RelayedNoteRetreivalInfo::Here { msg } => write!( + RelayedNoteRetrievalInfo::Here { msg } => write!( f, "Here {{ msg: {} }}", core::str::from_utf8(msg).unwrap_or("") ), - RelayedNoteRetreivalInfo::Ipfs { cid, encryption_key } => write!( + RelayedNoteRetrievalInfo::Ipfs { cid, encryption_key } => write!( f, "Ipfs {{ cid: {:?}, encryption_key: 0x{} }}", cid, hex::encode(encryption_key) ), - RelayedNoteRetreivalInfo::Undeclared { 
encryption_key } => + RelayedNoteRetrievalInfo::Undeclared { encryption_key } => write!(f, "Undeclared {{ encryption_key: 0x{} }}", hex::encode(encryption_key)), } } diff --git a/app-libs/stf/src/trusted_call.rs b/app-libs/stf/src/trusted_call.rs index b4273649a..7453fd1a0 100644 --- a/app-libs/stf/src/trusted_call.rs +++ b/app-libs/stf/src/trusted_call.rs @@ -30,7 +30,7 @@ use crate::{ enclave_signer_account, ensure_enclave_signer_account, ensure_maintainer_account, get_mortality, shard_vault, shielding_target_genesis_hash, store_note, wrap_bytes, }, - relayed_note::{ConversationId, NoteRelayType, RelayedNoteRequest, RelayedNoteRetreivalInfo}, + relayed_note::{ConversationId, NoteRelayType, RelayedNoteRequest, RelayedNoteRetrievalInfo}, Getter, STF_BYTE_FEE_UNIT_DIVIDER, STF_SESSION_PROXY_DEPOSIT_DIVIDER, STF_SHIELDING_FEE_AMOUNT_DIVIDER, STF_TX_FEE_UNIT_DIVIDER, }; @@ -99,7 +99,7 @@ pub enum TrustedCall { spam_extrinsics(AccountId, u32, ParentchainId) = 12, send_note(AccountId, AccountId, Vec) = 20, send_relayed_note(AccountId, AccountId, ConversationId, RelayedNoteRequest) = 21, - send_relayed_note_stripped(AccountId, AccountId, ConversationId, RelayedNoteRetreivalInfo) = 22, // without payload + send_relayed_note_stripped(AccountId, AccountId, ConversationId, RelayedNoteRetrievalInfo) = 22, // without payload add_session_proxy(AccountId, AccountId, SessionProxyCredentials) = 30, assets_transfer(AccountId, AccountId, AssetId, Balance) = 42, assets_unshield(AccountId, AccountId, AssetId, Balance, ShardIdentifier) = 43, @@ -632,14 +632,14 @@ where }, TrustedCall::send_relayed_note(from, to, conversation_id, request) => { std::println!("⣿STF⣿ 🔄 send_relayed_note from ⣿⣿⣿ to ⣿⣿⣿ with note ⣿⣿⣿"); - let retreival_info = if (self.call.encoded_size() <= MaxNoteSize::get() as usize) + let retrieval_info = if (self.call.encoded_size() <= MaxNoteSize::get() as usize) && (request.allow_onchain_fallback) { - Ok(RelayedNoteRetreivalInfo::Here { msg: request.msg }) + Ok(RelayedNoteRetrievalInfo::Here { msg: request.msg }) } else if (request.relay_type == NoteRelayType::Undeclared) && request.maybe_encryption_key.is_some() { - Ok(RelayedNoteRetreivalInfo::Undeclared { + Ok(RelayedNoteRetrievalInfo::Undeclared { encryption_key: request .maybe_encryption_key .expect("is_some has been tested previously"), @@ -647,7 +647,7 @@ where } else if request.relay_type == NoteRelayType::Here && request.msg.len() <= MaxNoteSize::get() as usize { - Ok(RelayedNoteRetreivalInfo::Here { msg: request.msg }) + Ok(RelayedNoteRetrievalInfo::Here { msg: request.msg }) } else if request.relay_type == NoteRelayType::Ipfs { let key = SgxRandomness::random_128bits(); let iv = SgxRandomness::random_128bits(); @@ -661,7 +661,7 @@ where .map_err(|e| StfError::Dispatch(format!("IPFS error: {:?}", e)))?; info!("storing relayed note to IPFS with CID {:?}", cid); side_effects.push(TrustedCallSideEffect::IpfsAdd(ciphertext)); - Ok(RelayedNoteRetreivalInfo::Ipfs { cid, encryption_key }) + Ok(RelayedNoteRetrievalInfo::Ipfs { cid, encryption_key }) } else { Err(StfError::Dispatch("Invalid relayed note request".into())) }?; @@ -670,12 +670,12 @@ where from.clone(), to.clone(), conversation_id, - retreival_info, + retrieval_info, ); store_note(&from, stripped_call, vec![from.clone(), to])?; Ok(()) }, - TrustedCall::send_relayed_note_stripped(from, to, _conversation_id, _retreival) => { + TrustedCall::send_relayed_note_stripped(from, to, _conversation_id, _retrieval) => { std::println!("⣿STF⣿ 🔄 send_relayed_note_stripped from ⣿⣿⣿ to ⣿⣿⣿ with 
note ⣿⣿⣿"); store_note(&from, self.call, vec![from.clone(), to])?; Ok(()) @@ -972,7 +972,8 @@ where let unshield_amount = balance.saturating_sub( MinimalChainSpec::one_unit( shielding_target_genesis_hash().unwrap_or_default(), - ) / STF_TX_FEE_UNIT_DIVIDER * 3, + ) / STF_TX_FEE_UNIT_DIVIDER + * 3, ); let parentchain_call = parentchain_vault_proxy_call( unshield_native_from_vault_parentchain_call( @@ -1053,10 +1054,10 @@ fn get_fee_for(tc: &TrustedCallSigned, fee_asset: Option) -> Fee { ))) / STF_BYTE_FEE_UNIT_DIVIDER, TrustedCall::send_relayed_note_stripped(_, _, _, retrieval_info) => { let byte_fee = match retrieval_info { - RelayedNoteRetreivalInfo::Undeclared { .. } => 32 * one / STF_BYTE_FEE_UNIT_DIVIDER, // flat fee for undeclared - RelayedNoteRetreivalInfo::Ipfs { .. } => + RelayedNoteRetrievalInfo::Undeclared { .. } => 32 * one / STF_BYTE_FEE_UNIT_DIVIDER, // flat fee for undeclared + RelayedNoteRetrievalInfo::Ipfs { .. } => (46 + 32) * one / STF_BYTE_FEE_UNIT_DIVIDER, // flat fee for ipfs - RelayedNoteRetreivalInfo::Here { msg } => + RelayedNoteRetrievalInfo::Here { msg } => (one.saturating_mul(Balance::from(msg.len() as u32))) / STF_BYTE_FEE_UNIT_DIVIDER, }; diff --git a/cli/src/trusted_base_cli/commands/get_notes.rs b/cli/src/trusted_base_cli/commands/get_notes.rs index c6ff0adc7..9940126b5 100644 --- a/cli/src/trusted_base_cli/commands/get_notes.rs +++ b/cli/src/trusted_base_cli/commands/get_notes.rs @@ -21,7 +21,7 @@ use crate::{ }; use codec::Decode; use ita_stf::{ - guess_the_number::GuessTheNumberTrustedCall, relayed_note::RelayedNoteRetreivalInfo, Getter, + guess_the_number::GuessTheNumberTrustedCall, relayed_note::RelayedNoteRetrievalInfo, Getter, TrustedCall, TrustedCallSigned, TrustedGetter, }; use itp_sgx_crypto::{aes::Aes, StateCrypto}; @@ -126,10 +126,10 @@ impl GetNotesCommand { from, to, conversation_id, - retreival, + retrieval, ) => { - let msg = match retreival { - RelayedNoteRetreivalInfo::Ipfs { cid, encryption_key } => { + let msg = match retrieval { + RelayedNoteRetrievalInfo::Ipfs { cid, encryption_key } => { debug!("fetching ipfs data for cid: {:?}", cid); let ciphertext = fetch_ipfs_data( &cli.ipfs_gateway_url, @@ -139,9 +139,9 @@ impl GetNotesCommand { let plaintext = decrypt(&ciphertext, &encryption_key); String::from_utf8_lossy(&plaintext).to_string() }, - RelayedNoteRetreivalInfo::Here { msg } => + RelayedNoteRetrievalInfo::Here { msg } => String::from_utf8_lossy(msg.as_ref()).to_string(), - RelayedNoteRetreivalInfo::Undeclared { .. } => { + RelayedNoteRetrievalInfo::Undeclared { .. 
} => { "[encryption key provided: *****, but message relay is undeclared]".into() }, }; From 29afd7ac7183a646b5191e8ebae6acf11078b693 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Sat, 20 Sep 2025 15:14:17 +0200 Subject: [PATCH 15/91] move IPFS encryption logic into its own helper fn --- app-libs/stf/src/helpers.rs | 17 +++++++++++++++++ app-libs/stf/src/trusted_call.rs | 19 +++++-------------- 2 files changed, 22 insertions(+), 14 deletions(-) diff --git a/app-libs/stf/src/helpers.rs b/app-libs/stf/src/helpers.rs index 9e51ff116..062c9e150 100644 --- a/app-libs/stf/src/helpers.rs +++ b/app-libs/stf/src/helpers.rs @@ -18,6 +18,8 @@ use crate::{TrustedCall, ENCLAVE_ACCOUNT_KEY}; use codec::{Decode, Encode}; use frame_support::dispatch::UnfilteredDispatchable; use ita_sgx_runtime::{ParentchainIntegritee, ParentchainTargetA, ParentchainTargetB, Runtime}; +use itp_randomness::{Randomness, SgxRandomness}; +use itp_sgx_crypto::{aes::Aes, StateCrypto}; use itp_stf_interface::{BlockMetadata, ShardCreationInfo}; use itp_stf_primitives::{ error::{StfError, StfResult}, @@ -256,3 +258,18 @@ pub fn store_note( .map_err(|e| StfError::Dispatch(format!("Store note error: {:?}", e.error)))?; Ok(()) } + +/// Encrypt data with AES-128-OFB with a fresh key and IV. +/// Encrypts data in-place and returns the ciphertext and the full encryption key (key + iv). +/// The full encryption key is 32 bytes: first 16 bytes are the AES key, +/// the last 16 bytes are the IV. +pub fn encrypt_with_fresh_key(mut data: Vec) -> StfResult<(Vec, [u8; 32])> { + let key = SgxRandomness::random_128bits(); + let iv = SgxRandomness::random_128bits(); + let aes = Aes::new(key, iv); + aes.encrypt(&mut data) + .map_err(|e| StfError::Dispatch(format!("AES encrypt error: {:?}", e)))?; + let full_encryption_key: [u8; 32] = + [key.as_ref(), iv.as_ref()].concat().try_into().expect("2x16=32. q.e.d."); + Ok((data, full_encryption_key)) +} diff --git a/app-libs/stf/src/trusted_call.rs b/app-libs/stf/src/trusted_call.rs index 7453fd1a0..9986a30e3 100644 --- a/app-libs/stf/src/trusted_call.rs +++ b/app-libs/stf/src/trusted_call.rs @@ -27,8 +27,9 @@ use crate::{ guess_the_number, guess_the_number::GuessTheNumberTrustedCall, helpers::{ - enclave_signer_account, ensure_enclave_signer_account, ensure_maintainer_account, - get_mortality, shard_vault, shielding_target_genesis_hash, store_note, wrap_bytes, + enclave_signer_account, encrypt_with_fresh_key, ensure_enclave_signer_account, + ensure_maintainer_account, get_mortality, shard_vault, shielding_target_genesis_hash, + store_note, wrap_bytes, }, relayed_note::{ConversationId, NoteRelayType, RelayedNoteRequest, RelayedNoteRetrievalInfo}, Getter, STF_BYTE_FEE_UNIT_DIVIDER, STF_SESSION_PROXY_DEPOSIT_DIVIDER, @@ -57,8 +58,6 @@ use itp_node_api_metadata::{ pallet_enclave_bridge::EnclaveBridgeCallIndexes, pallet_proxy::ProxyCallIndexes, }; -use itp_randomness::{Randomness, SgxRandomness}; -use itp_sgx_crypto::{aes::Aes, StateCrypto}; use itp_stf_interface::ExecuteCall; use itp_stf_primitives::{ error::StfError, @@ -649,14 +648,7 @@ where { Ok(RelayedNoteRetrievalInfo::Here { msg: request.msg }) } else if request.relay_type == NoteRelayType::Ipfs { - let key = SgxRandomness::random_128bits(); - let iv = SgxRandomness::random_128bits(); - let encryption_key: [u8; 32] = - [key.as_ref(), iv.as_ref()].concat().try_into().expect("2x16=32. 
q.e.d."); - let aes = Aes::new(key, iv); - let mut ciphertext = request.msg; - aes.encrypt(&mut ciphertext) - .map_err(|e| StfError::Dispatch(format!("AES encrypt error: {:?}", e)))?; + let (ciphertext, encryption_key) = encrypt_with_fresh_key(request.msg)?; let cid = IpfsCid::from_content_bytes(&ciphertext) .map_err(|e| StfError::Dispatch(format!("IPFS error: {:?}", e)))?; info!("storing relayed note to IPFS with CID {:?}", cid); @@ -972,8 +964,7 @@ where let unshield_amount = balance.saturating_sub( MinimalChainSpec::one_unit( shielding_target_genesis_hash().unwrap_or_default(), - ) / STF_TX_FEE_UNIT_DIVIDER - * 3, + ) / STF_TX_FEE_UNIT_DIVIDER * 3, ); let parentchain_call = parentchain_vault_proxy_call( unshield_native_from_vault_parentchain_call( From dfd866524bbd8fc5c57cb2893330dbd8e35a11af Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Sat, 20 Sep 2025 15:58:02 +0200 Subject: [PATCH 16/91] refactor ocal ipfs client handling --- service/src/main_impl.rs | 2275 ++++++++--------- service/src/ocall_bridge/component_factory.rs | 14 +- service/src/ocall_bridge/ipfs_ocall.rs | 148 +- 3 files changed, 1221 insertions(+), 1216 deletions(-) diff --git a/service/src/main_impl.rs b/service/src/main_impl.rs index 5fd8edbdf..621b91ee3 100644 --- a/service/src/main_impl.rs +++ b/service/src/main_impl.rs @@ -4,29 +4,29 @@ use crate::teeracle::{schedule_periodic_reregistration_thread, start_periodic_ma #[cfg(not(feature = "dcap"))] use crate::utils::check_files; use crate::{ - account_funding::{setup_reasonable_account_funding, ParentchainAccountInfoProvider}, - config::Config, - enclave::{ - api::enclave_init, - tls_ra::{enclave_request_state_provisioning, enclave_run_state_provisioning_server}, - }, - error::Error, - globals::tokio_handle::{GetTokioHandle, GlobalTokioHandle}, - initialized_service::{ - start_is_initialized_server, InitializationHandler, IsInitialized, TrackInitialization, - }, - ocall_bridge::{ - bridge_api::Bridge as OCallBridge, component_factory::OCallBridgeComponentFactory, - }, - parentchain_handler::{HandleParentchain, ParentchainHandler}, - prometheus_metrics::{start_metrics_server, EnclaveMetricsReceiver, MetricsHandler}, - setup, - sidechain_setup::{sidechain_init_block_production, sidechain_start_untrusted_rpc_server}, - sync_block_broadcaster::SyncBlockBroadcaster, - sync_state, tests, - utils::extract_shard, - worker::Worker, - worker_peers_updater::WorkerPeersUpdater, + account_funding::{setup_reasonable_account_funding, ParentchainAccountInfoProvider}, + config::Config, + enclave::{ + api::enclave_init, + tls_ra::{enclave_request_state_provisioning, enclave_run_state_provisioning_server}, + }, + error::Error, + globals::tokio_handle::{GetTokioHandle, GlobalTokioHandle}, + initialized_service::{ + start_is_initialized_server, InitializationHandler, IsInitialized, TrackInitialization, + }, + ocall_bridge::{ + bridge_api::Bridge as OCallBridge, component_factory::OCallBridgeComponentFactory, + }, + parentchain_handler::{HandleParentchain, ParentchainHandler}, + prometheus_metrics::{start_metrics_server, EnclaveMetricsReceiver, MetricsHandler}, + setup, + sidechain_setup::{sidechain_init_block_production, sidechain_start_untrusted_rpc_server}, + sync_block_broadcaster::SyncBlockBroadcaster, + sync_state, tests, + utils::extract_shard, + worker::Worker, + worker_peers_updater::WorkerPeersUpdater, }; use base58::ToBase58; use clap::{load_yaml, App, ArgMatches}; @@ -34,19 +34,19 @@ use codec::{Decode, Encode}; use ipfs_api_backend_hyper::{IpfsApi, TryFromUri}; use 
ita_parentchain_interface::integritee::{Hash, Header}; use itp_enclave_api::{ - enclave_base::EnclaveBase, - remote_attestation::{RemoteAttestation, TlsRemoteAttestation}, - sidechain::Sidechain, - teeracle_api::TeeracleApi, + enclave_base::EnclaveBase, + remote_attestation::{RemoteAttestation, TlsRemoteAttestation}, + sidechain::Sidechain, + teeracle_api::TeeracleApi, }; use itp_node_api::{ - api_client::{AccountApi, PalletTeerexApi}, - metadata::NodeMetadata, - node_api_factory::{CreateNodeApi, NodeApiFactory}, + api_client::{AccountApi, PalletTeerexApi}, + metadata::NodeMetadata, + node_api_factory::{CreateNodeApi, NodeApiFactory}, }; use itp_settings::worker_mode::{ProvideWorkerMode, WorkerMode, WorkerModeProvider}; use its_peer_fetch::{ - block_fetch_client::BlockFetcher, untrusted_peer_fetch::UntrustedPeerFetcher, + block_fetch_client::BlockFetcher, untrusted_peer_fetch::UntrustedPeerFetcher, }; use its_primitives::types::block::SignedBlock as SignedSidechainBlock; use its_storage::{interface::FetchBlocks, BlockPruner, SidechainStorageLock}; @@ -55,10 +55,10 @@ use regex::Regex; use sgx_types::*; use sp_runtime::traits::{Header as HeaderT, IdentifyAccount}; use substrate_api_client::{ - api::XtStatus, - rpc::{HandleSubscription, Request, Subscribe}, - Api, GetAccountInformation, GetBalance, GetChainInfo, GetStorage, SubmitAndWatch, - SubscribeChain, SubscribeEvents, + api::XtStatus, + rpc::{HandleSubscription, Request, Subscribe}, + Api, GetAccountInformation, GetBalance, GetChainInfo, GetStorage, SubmitAndWatch, + SubscribeChain, SubscribeEvents, }; use teerex_primitives::{AnySigner, MultiEnclave}; @@ -69,20 +69,20 @@ use sgx_verify::extract_tcb_info_from_raw_dcap_quote; use itp_enclave_api::Enclave; use crate::{ - account_funding::{shard_vault_initial_funds, AccountAndRole}, - error::ServiceResult, - prometheus_metrics::{set_static_metrics, start_prometheus_metrics_server, HandleMetrics}, - sidechain_setup::ParentchainIntegriteeSidechainInfoProvider, + account_funding::{shard_vault_initial_funds, AccountAndRole}, + error::ServiceResult, + prometheus_metrics::{set_static_metrics, start_prometheus_metrics_server, HandleMetrics}, + sidechain_setup::ParentchainIntegriteeSidechainInfoProvider, }; use enclave_bridge_primitives::ShardIdentifier; use ita_parentchain_interface::{ - integritee::{ - api_client_types::{IntegriteeApi, IntegriteeTip}, - api_factory::IntegriteeNodeApiFactory, - }, - target_a::api_client_types::{TargetAApi, TargetARuntimeConfig}, - target_b::api_client_types::{TargetBApi, TargetBRuntimeConfig}, - ParentchainRuntimeConfig, + integritee::{ + api_client_types::{IntegriteeApi, IntegriteeTip}, + api_factory::IntegriteeNodeApiFactory, + }, + target_a::api_client_types::{TargetAApi, TargetARuntimeConfig}, + target_b::api_client_types::{TargetBApi, TargetBRuntimeConfig}, + ParentchainRuntimeConfig, }; use itc_parentchain::primitives::ParentchainId; use itp_node_api::api_client::ChainApi; @@ -92,20 +92,20 @@ use sp_core::crypto::{AccountId32, Ss58Codec}; use sp_keyring::AccountKeyring; use sp_runtime::MultiSigner; use std::{ - fmt::Debug, - path::PathBuf, - str, - str::Utf8Error, - sync::{ - atomic::{AtomicBool, Ordering}, - mpsc, Arc, - }, - thread, - time::Duration, + fmt::Debug, + path::PathBuf, + str, + str::Utf8Error, + sync::{ + atomic::{AtomicBool, Ordering}, + mpsc, Arc, + }, + thread, + time::Duration, }; use substrate_api_client::{ - ac_node_api::{EventRecord, Phase::ApplyExtrinsic}, - rpc::TungsteniteRpcClient, + ac_node_api::{EventRecord, 
Phase::ApplyExtrinsic}, + rpc::TungsteniteRpcClient, }; use tokio::{runtime::Handle, task::JoinHandle, time::Instant}; @@ -123,920 +123,901 @@ const SGX_MODE_INFO: &str = " (debug enclave)"; #[cfg(feature = "link-binary")] pub type EnclaveWorker = Worker< - Config, - ParentchainRuntimeConfig, - Enclave, - InitializationHandler, + Config, + ParentchainRuntimeConfig, + Enclave, + InitializationHandler, >; pub(crate) fn main() { - // Setup logging - env_logger::builder() - .format_timestamp(Some(env_logger::TimestampPrecision::Millis)) - .init(); - - let yml = load_yaml!("cli.yml"); - let matches = App::from_yaml(yml) - .version(VERSION) - .about( - format!( - "Integritee {:?} worker{}{}", - WorkerModeProvider::worker_mode(), - EVM_INFO, - SGX_MODE_INFO - ) - .as_str(), - ) - .get_matches(); - - let config = Config::from(&matches); - - GlobalTokioHandle::initialize(); - - // log this information, don't println because some python scripts for GA rely on the - // stdout from the service - #[cfg(feature = "production")] - info!("*** Starting service in SGX production mode"); - #[cfg(not(feature = "production"))] - info!("*** Starting service in SGX debug mode"); - - info!("*** Running worker in mode: {:?} \n", WorkerModeProvider::worker_mode()); - - let mut lockfile = PathBuf::from(config.data_dir()); - lockfile.push("worker.lock"); - while std::fs::metadata(lockfile.clone()).is_ok() { - println!("lockfile is present, will wait for it to disappear {:?}", lockfile); - thread::sleep(std::time::Duration::from_secs(5)); - } - - let clean_reset = matches.is_present("clean-reset"); - if clean_reset { - println!("[+] Performing a clean reset of the worker"); - setup::purge_integritee_lcdb_unless_protected(config.data_dir()).unwrap(); - setup::purge_target_a_lcdb_unless_protected(config.data_dir()).unwrap(); - setup::purge_target_b_lcdb_unless_protected(config.data_dir()).unwrap(); - setup::purge_shards_unless_protected(config.data_dir()).unwrap(); - } - - // build the entire dependency tree - let tokio_handle = Arc::new(GlobalTokioHandle {}); - let sidechain_blockstorage = Arc::new( - SidechainStorageLock::::from_base_path( - config.data_dir().to_path_buf(), - ) - .unwrap(), - ); - let node_api_factory = Arc::new(NodeApiFactory::new( - config.integritee_rpc_endpoint(), - AccountKeyring::Alice.pair(), - )); - let enclave = Arc::new(enclave_init(&config).unwrap()); - let initialization_handler = Arc::new(InitializationHandler::default()); - let worker = Arc::new(EnclaveWorker::new( - config.clone(), - enclave.clone(), - node_api_factory.clone(), - initialization_handler.clone(), - Vec::new(), - )); - let sync_block_broadcaster = - Arc::new(SyncBlockBroadcaster::new(tokio_handle.clone(), worker.clone())); - let peer_updater = Arc::new(WorkerPeersUpdater::new(worker)); - let untrusted_peer_fetcher = UntrustedPeerFetcher::new(node_api_factory.clone()); - let peer_sidechain_block_fetcher = - Arc::new(BlockFetcher::::new(untrusted_peer_fetcher)); - let enclave_metrics_receiver = Arc::new(EnclaveMetricsReceiver {}); - - let maybe_target_a_parentchain_api_factory = - config.target_a_parentchain_rpc_endpoint().map(|url| { - Arc::new(NodeApiFactory::::new( - url, - AccountKeyring::Alice.pair(), - )) - }); - - let maybe_target_b_parentchain_api_factory = - config.target_b_parentchain_rpc_endpoint().map(|url| { - Arc::new(NodeApiFactory::::new( - url, - AccountKeyring::Alice.pair(), - )) - }); - - let maybe_ipfs_client = config.ipfs_api_url().map(|url| { - let client = 
ipfs_api_backend_hyper::IpfsClient::from_str(&url).unwrap(); - - let client = if let Some((user, pwd)) = config - .ipfs_api_auth() - .and_then(|s| s.split_once(':').map(|(u, p)| (u.to_string(), p.to_string()))) - { - info!("Using IPFS node at {} with credentials ******", url); - client.with_credentials(user, pwd) - } else { - info!("Using IPFS node at {}", url); - client - }; - let version = tokio::runtime::Runtime::new().unwrap().block_on(client.version()); - match version { - Ok(v) => info!("Connected to IPFS node version: {}", v.version), - Err(e) => error!("Error getting IPFS node version: {}", e), - } - Arc::new(client) - }); - - // initialize o-call bridge with a concrete factory implementation - OCallBridge::initialize(Arc::new(OCallBridgeComponentFactory::new( - node_api_factory.clone(), - maybe_target_a_parentchain_api_factory, - maybe_target_b_parentchain_api_factory, - sync_block_broadcaster, - enclave.clone(), - sidechain_blockstorage.clone(), - peer_updater, - peer_sidechain_block_fetcher, - tokio_handle.clone(), - enclave_metrics_receiver, - maybe_ipfs_client, - config.data_dir().into(), - ))); - - let quoting_enclave_target_info = match enclave.qe_get_target_info() { - Ok(target_info) => Some(target_info), - Err(e) => { - warn!("Setting up DCAP - qe_get_target_info failed with error: {:?}, continuing.", e); - None - }, - }; - let quote_size = match enclave.qe_get_quote_size() { - Ok(size) => Some(size), - Err(e) => { - warn!("Setting up DCAP - qe_get_quote_size failed with error: {:?}, continuing.", e); - None - }, - }; - - if let Some(run_config) = config.run_config() { - println!("Worker Config: {:?}", config); - - let shard = extract_shard(run_config.shard(), enclave.as_ref()); - - let mut shard_path = PathBuf::from(config.data_dir()); - shard_path.push(SHARDS_PATH); - shard_path.push(shard.encode().to_base58()); - println!("Worker Shard Path: {:?}", shard_path); - if clean_reset || std::fs::metadata(shard_path).is_err() { - // we default to purge here because we don't want to leave behind blocks - // for deprectated shards in the sidechain_db - setup::purge_shards_unless_protected(config.data_dir()).unwrap(); - // will auto-create folders for new shard - setup::initialize_shard_and_keys(enclave.as_ref(), &shard).unwrap(); - } - - let node_api = - node_api_factory.create_api().expect("Failed to create parentchain node API"); - - start_worker::<_, _, _, _, WorkerModeProvider>( - config, - &shard, - enclave, - sidechain_blockstorage, - node_api, - tokio_handle, - initialization_handler, - quoting_enclave_target_info, - quote_size, - ); - } else if let Some(smatches) = matches.subcommand_matches("request-state") { - println!("*** Requesting state from a registered worker \n"); - let node_api = - node_api_factory.create_api().expect("Failed to create parentchain node API"); - sync_state::sync_state::<_, _, WorkerModeProvider>( - &node_api, - &extract_shard(smatches.value_of("shard"), enclave.as_ref()), - enclave.as_ref(), - smatches.is_present("skip-ra"), - ); - } else if matches.is_present("shielding-key") { - setup::generate_shielding_key_file(enclave.as_ref()); - } else if matches.is_present("signing-key") { - setup::generate_signing_key_file(enclave.as_ref()); - } else if matches.is_present("dump-ra") { - info!("*** Perform RA and dump cert to disk"); - #[cfg(not(feature = "dcap"))] - enclave.dump_ias_ra_cert_to_disk().unwrap(); - #[cfg(feature = "dcap")] - { - let skip_ra = false; - let dcap_quote = enclave.generate_dcap_ra_quote(skip_ra).unwrap(); - let (fmspc, 
_tcb_info) = extract_tcb_info_from_raw_dcap_quote(&dcap_quote).unwrap(); - enclave.dump_dcap_collateral_to_disk(fmspc).unwrap(); - enclave.dump_dcap_ra_cert_to_disk().unwrap(); - } - } else if matches.is_present("mrenclave") { - println!("{}", enclave.get_fingerprint().unwrap().encode().to_base58()); - } else if let Some(sub_matches) = matches.subcommand_matches("init-shard") { - setup::init_shard( - enclave.as_ref(), - &extract_shard(sub_matches.value_of("shard"), enclave.as_ref()), - ); - } else if let Some(sub_matches) = matches.subcommand_matches("test") { - if sub_matches.is_present("provisioning-server") { - println!("*** Running Enclave MU-RA TLS server\n"); - enclave_run_state_provisioning_server( - enclave.as_ref(), - sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, - quoting_enclave_target_info.as_ref(), - quote_size.as_ref(), - &config.mu_ra_url(), - sub_matches.is_present("skip-ra"), - ); - println!("[+] Done!"); - } else if sub_matches.is_present("provisioning-client") { - println!("*** Running Enclave MU-RA TLS client\n"); - let shard = extract_shard(sub_matches.value_of("shard"), enclave.as_ref()); - enclave_request_state_provisioning( - enclave.as_ref(), - sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, - &config.mu_ra_url_external(), - &shard, - sub_matches.is_present("skip-ra"), - ) - .unwrap(); - println!("[+] Done!"); - } else { - tests::run_enclave_tests(sub_matches); - } - } else { - println!("For options: use --help"); - } + // Setup logging + env_logger::builder() + .format_timestamp(Some(env_logger::TimestampPrecision::Millis)) + .init(); + + let yml = load_yaml!("cli.yml"); + let matches = App::from_yaml(yml) + .version(VERSION) + .about( + format!( + "Integritee {:?} worker{}{}", + WorkerModeProvider::worker_mode(), + EVM_INFO, + SGX_MODE_INFO + ) + .as_str(), + ) + .get_matches(); + + let config = Config::from(&matches); + + GlobalTokioHandle::initialize(); + + // log this information, don't println because some python scripts for GA rely on the + // stdout from the service + #[cfg(feature = "production")] + info!("*** Starting service in SGX production mode"); + #[cfg(not(feature = "production"))] + info!("*** Starting service in SGX debug mode"); + + info!("*** Running worker in mode: {:?} \n", WorkerModeProvider::worker_mode()); + + let mut lockfile = PathBuf::from(config.data_dir()); + lockfile.push("worker.lock"); + while std::fs::metadata(lockfile.clone()).is_ok() { + println!("lockfile is present, will wait for it to disappear {:?}", lockfile); + thread::sleep(std::time::Duration::from_secs(5)); + } + + let clean_reset = matches.is_present("clean-reset"); + if clean_reset { + println!("[+] Performing a clean reset of the worker"); + setup::purge_integritee_lcdb_unless_protected(config.data_dir()).unwrap(); + setup::purge_target_a_lcdb_unless_protected(config.data_dir()).unwrap(); + setup::purge_target_b_lcdb_unless_protected(config.data_dir()).unwrap(); + setup::purge_shards_unless_protected(config.data_dir()).unwrap(); + } + + // build the entire dependency tree + let tokio_handle = Arc::new(GlobalTokioHandle {}); + let sidechain_blockstorage = Arc::new( + SidechainStorageLock::::from_base_path( + config.data_dir().to_path_buf(), + ) + .unwrap(), + ); + let node_api_factory = Arc::new(NodeApiFactory::new( + config.integritee_rpc_endpoint(), + AccountKeyring::Alice.pair(), + )); + let enclave = Arc::new(enclave_init(&config).unwrap()); + let initialization_handler = Arc::new(InitializationHandler::default()); + let worker = Arc::new(EnclaveWorker::new( + 
config.clone(), + enclave.clone(), + node_api_factory.clone(), + initialization_handler.clone(), + Vec::new(), + )); + let sync_block_broadcaster = + Arc::new(SyncBlockBroadcaster::new(tokio_handle.clone(), worker.clone())); + let peer_updater = Arc::new(WorkerPeersUpdater::new(worker)); + let untrusted_peer_fetcher = UntrustedPeerFetcher::new(node_api_factory.clone()); + let peer_sidechain_block_fetcher = + Arc::new(BlockFetcher::::new(untrusted_peer_fetcher)); + let enclave_metrics_receiver = Arc::new(EnclaveMetricsReceiver {}); + + let maybe_target_a_parentchain_api_factory = + config.target_a_parentchain_rpc_endpoint().map(|url| { + Arc::new(NodeApiFactory::::new( + url, + AccountKeyring::Alice.pair(), + )) + }); + + let maybe_target_b_parentchain_api_factory = + config.target_b_parentchain_rpc_endpoint().map(|url| { + Arc::new(NodeApiFactory::::new( + url, + AccountKeyring::Alice.pair(), + )) + }); + + let maybe_ipfs_url_and_auth = (config.ipfs_api_url(), config.ipfs_api_auth()); + + // initialize o-call bridge with a concrete factory implementation + OCallBridge::initialize(Arc::new(OCallBridgeComponentFactory::new( + node_api_factory.clone(), + maybe_target_a_parentchain_api_factory, + maybe_target_b_parentchain_api_factory, + sync_block_broadcaster, + enclave.clone(), + sidechain_blockstorage.clone(), + peer_updater, + peer_sidechain_block_fetcher, + tokio_handle.clone(), + enclave_metrics_receiver, + maybe_ipfs_url_and_auth, + config.data_dir().into(), + ))); + + let quoting_enclave_target_info = match enclave.qe_get_target_info() { + Ok(target_info) => Some(target_info), + Err(e) => { + warn!("Setting up DCAP - qe_get_target_info failed with error: {:?}, continuing.", e); + None + } + }; + let quote_size = match enclave.qe_get_quote_size() { + Ok(size) => Some(size), + Err(e) => { + warn!("Setting up DCAP - qe_get_quote_size failed with error: {:?}, continuing.", e); + None + } + }; + + if let Some(run_config) = config.run_config() { + println!("Worker Config: {:?}", config); + + let shard = extract_shard(run_config.shard(), enclave.as_ref()); + + let mut shard_path = PathBuf::from(config.data_dir()); + shard_path.push(SHARDS_PATH); + shard_path.push(shard.encode().to_base58()); + println!("Worker Shard Path: {:?}", shard_path); + if clean_reset || std::fs::metadata(shard_path).is_err() { + // we default to purge here because we don't want to leave behind blocks + // for deprectated shards in the sidechain_db + setup::purge_shards_unless_protected(config.data_dir()).unwrap(); + // will auto-create folders for new shard + setup::initialize_shard_and_keys(enclave.as_ref(), &shard).unwrap(); + } + + let node_api = + node_api_factory.create_api().expect("Failed to create parentchain node API"); + + start_worker::<_, _, _, _, WorkerModeProvider>( + config, + &shard, + enclave, + sidechain_blockstorage, + node_api, + tokio_handle, + initialization_handler, + quoting_enclave_target_info, + quote_size, + ); + } else if let Some(smatches) = matches.subcommand_matches("request-state") { + println!("*** Requesting state from a registered worker \n"); + let node_api = + node_api_factory.create_api().expect("Failed to create parentchain node API"); + sync_state::sync_state::<_, _, WorkerModeProvider>( + &node_api, + &extract_shard(smatches.value_of("shard"), enclave.as_ref()), + enclave.as_ref(), + smatches.is_present("skip-ra"), + ); + } else if matches.is_present("shielding-key") { + setup::generate_shielding_key_file(enclave.as_ref()); + } else if matches.is_present("signing-key") { + 
setup::generate_signing_key_file(enclave.as_ref()); + } else if matches.is_present("dump-ra") { + info!("*** Perform RA and dump cert to disk"); + #[cfg(not(feature = "dcap"))] + enclave.dump_ias_ra_cert_to_disk().unwrap(); + #[cfg(feature = "dcap")] + { + let skip_ra = false; + let dcap_quote = enclave.generate_dcap_ra_quote(skip_ra).unwrap(); + let (fmspc, _tcb_info) = extract_tcb_info_from_raw_dcap_quote(&dcap_quote).unwrap(); + enclave.dump_dcap_collateral_to_disk(fmspc).unwrap(); + enclave.dump_dcap_ra_cert_to_disk().unwrap(); + } + } else if matches.is_present("mrenclave") { + println!("{}", enclave.get_fingerprint().unwrap().encode().to_base58()); + } else if let Some(sub_matches) = matches.subcommand_matches("init-shard") { + setup::init_shard( + enclave.as_ref(), + &extract_shard(sub_matches.value_of("shard"), enclave.as_ref()), + ); + } else if let Some(sub_matches) = matches.subcommand_matches("test") { + if sub_matches.is_present("provisioning-server") { + println!("*** Running Enclave MU-RA TLS server\n"); + enclave_run_state_provisioning_server( + enclave.as_ref(), + sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, + quoting_enclave_target_info.as_ref(), + quote_size.as_ref(), + &config.mu_ra_url(), + sub_matches.is_present("skip-ra"), + ); + println!("[+] Done!"); + } else if sub_matches.is_present("provisioning-client") { + println!("*** Running Enclave MU-RA TLS client\n"); + let shard = extract_shard(sub_matches.value_of("shard"), enclave.as_ref()); + enclave_request_state_provisioning( + enclave.as_ref(), + sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, + &config.mu_ra_url_external(), + &shard, + sub_matches.is_present("skip-ra"), + ) + .unwrap(); + println!("[+] Done!"); + } else { + tests::run_enclave_tests(sub_matches); + } + } else { + println!("For options: use --help"); + } } /// FIXME: needs some discussion (restructuring?) 
#[allow(clippy::too_many_arguments)] fn start_worker( - config: Config, - shard: &ShardIdentifier, - enclave: Arc, - sidechain_storage: Arc, - integritee_rpc_api: IntegriteeApi, - tokio_handle_getter: Arc, - initialization_handler: Arc, - quoting_enclave_target_info: Option, - quote_size: Option, + config: Config, + shard: &ShardIdentifier, + enclave: Arc, + sidechain_storage: Arc, + integritee_rpc_api: IntegriteeApi, + tokio_handle_getter: Arc, + initialization_handler: Arc, + quoting_enclave_target_info: Option, + quote_size: Option, ) where - T: GetTokioHandle, - E: EnclaveBase + Sidechain + RemoteAttestation + TlsRemoteAttestation + TeeracleApi + Clone, - D: BlockPruner + FetchBlocks + Sync + Send + 'static, - InitializationHandler: TrackInitialization + IsInitialized + Sync + Send + 'static, - WorkerModeProvider: ProvideWorkerMode, + T: GetTokioHandle, + E: EnclaveBase + Sidechain + RemoteAttestation + TlsRemoteAttestation + TeeracleApi + Clone, + D: BlockPruner + FetchBlocks + Sync + Send + 'static, + InitializationHandler: TrackInitialization + IsInitialized + Sync + Send + 'static, + WorkerModeProvider: ProvideWorkerMode, { - let run_config = config.run_config().clone().expect("Run config missing"); - let skip_ra = run_config.skip_ra(); - - #[cfg(feature = "teeracle")] - let flavor_str = "teeracle"; - #[cfg(feature = "sidechain")] - let flavor_str = "sidechain"; - #[cfg(feature = "offchain-worker")] - let flavor_str = "offchain-worker"; - #[cfg(not(any(feature = "offchain-worker", feature = "sidechain", feature = "teeracle")))] - let flavor_str = "offchain-worker"; - - println!("Integritee Worker for {} v{}", flavor_str, VERSION); - - #[cfg(feature = "dcap")] - println!(" DCAP is enabled"); - #[cfg(not(feature = "dcap"))] - println!(" DCAP is disabled"); - #[cfg(feature = "production")] - println!(" Production Mode is enabled"); - #[cfg(not(feature = "production"))] - println!(" Production Mode is disabled"); - #[cfg(feature = "evm")] - println!(" EVM is enabled"); - #[cfg(not(feature = "evm"))] - println!(" EVM is disabled"); - - info!("starting worker on shard {}", shard.encode().to_base58()); - // ------------------------------------------------------------------------ - // check for required files - if !skip_ra { - #[cfg(not(feature = "dcap"))] - check_files(); - } - // ------------------------------------------------------------------------ - // initialize the enclave - let mrenclave = enclave.get_fingerprint().unwrap(); - println!("MRENCLAVE={}", mrenclave.0.to_base58()); - println!("MRENCLAVE in hex {:?}", hex::encode(mrenclave)); - set_static_metrics(VERSION, mrenclave.0.to_base58().as_str()); - // ------------------------------------------------------------------------ - // let new workers call us for key provisioning - println!("MU-RA server listening on {}", config.mu_ra_url()); - let is_development_mode = run_config.dev(); - let ra_url = config.mu_ra_url(); - let enclave_api_key_prov = enclave.clone(); - thread::spawn(move || { - enclave_run_state_provisioning_server( - enclave_api_key_prov.as_ref(), - sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, - quoting_enclave_target_info.as_ref(), - quote_size.as_ref(), - &ra_url, - skip_ra, - ); - info!("State provisioning server stopped."); - }); - - let tokio_handle = tokio_handle_getter.get_handle(); - - // ------------------------------------------------------------------------ - // Get the public key of our TEE. 
- let tee_accountid = enclave_account(enclave.as_ref()); - println!("Enclave account {:} ", &tee_accountid.to_ss58check()); - - // ------------------------------------------------------------------------ - // Start `is_initialized` server. - let untrusted_http_server_port = config - .try_parse_untrusted_http_server_port() - .expect("untrusted http server port to be a valid port number"); - let initialization_handler_clone = initialization_handler.clone(); - tokio_handle.spawn(async move { - if let Err(e) = - start_is_initialized_server(initialization_handler_clone, untrusted_http_server_port) - .await - { - error!("Unexpected error in `is_initialized` server: {:?}", e); - } - }); - - // ------------------------------------------------------------------------ - // Start trusted worker rpc server - if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain - || WorkerModeProvider::worker_mode() == WorkerMode::OffChainWorker - { - let direct_invocation_server_addr = config.trusted_worker_url_internal(); - let enclave_for_direct_invocation = enclave.clone(); - thread::spawn(move || { - println!( - "[+] Trusted RPC direct invocation server listening on {}", - direct_invocation_server_addr - ); - enclave_for_direct_invocation - .init_direct_invocation_server(direct_invocation_server_addr) - .unwrap(); - println!("[+] RPC direct invocation server shut down"); - }); - } - - // ------------------------------------------------------------------------ - // Start untrusted worker rpc server. - // i.e move sidechain block importing to trusted worker. - if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { - sidechain_start_untrusted_rpc_server(&config, sidechain_storage.clone(), &tokio_handle); - } - - // ------------------------------------------------------------------------ - // Init parentchain specific stuff. Needed early for parentchain communication. - let (integritee_parentchain_handler, integritee_last_synced_header_at_last_run) = - init_parentchain( - &enclave, - &integritee_rpc_api, - &tee_accountid, - ParentchainId::Integritee, - shard, - ); - - #[cfg(feature = "dcap")] - register_collateral( - &integritee_rpc_api, - &*enclave, - &tee_accountid, - is_development_mode, - skip_ra, - ); - - let trusted_url = config.trusted_worker_url_external(); - - #[cfg(feature = "attesteer")] - fetch_marblerun_events_every_hour( - integritee_rpc_api.clone(), - enclave.clone(), - tee_accountid.clone(), - is_development_mode, - trusted_url.clone(), - run_config.marblerun_base_url().to_string(), - ); - - // ------------------------------------------------------------------------ - // Perform a remote attestation and get an unchecked extrinsic back. - - if skip_ra { - println!( - "[!] skipping remote attestation. Registering enclave without attestation report." - ); - } else { - println!("[!] 
creating remote attestation report and create enclave register extrinsic."); - }; - - #[cfg(feature = "dcap")] - enclave.set_sgx_qpl_logging().expect("QPL logging setup failed"); - - let enclave2 = enclave.clone(); - #[cfg(not(feature = "dcap"))] - let register_xt = move || enclave2.generate_ias_ra_extrinsic(&trusted_url, skip_ra).unwrap(); - #[cfg(feature = "dcap")] - let register_xt = move || enclave2.generate_dcap_ra_extrinsic(&trusted_url, skip_ra).unwrap(); - - // clones because of the move - let node_api2 = integritee_rpc_api.clone(); - let tee_accountid_clone = tee_accountid.clone(); - let send_register_xt = move || { - println!("[+] Send register enclave extrinsic"); - send_integritee_extrinsic( - register_xt(), - &node_api2, - &tee_accountid_clone, - is_development_mode, - ) - }; - - let register_enclave_block_hash = - send_register_xt().expect("enclave RA registration must be successful to continue"); - - let api_register_enclave_xt_header = integritee_rpc_api - .get_header(Some(register_enclave_block_hash)) - .unwrap() - .unwrap(); - - // TODO: #1451: Fix api-client type hacks - let register_enclave_xt_header = - Header::decode(&mut api_register_enclave_xt_header.encode().as_slice()) - .expect("Can decode previously encoded header; qed"); - - println!( - "[+] Enclave registered at block number: {:?}, hash: {:?}", - register_enclave_xt_header.number(), - register_enclave_xt_header.hash() - ); - // double-check - let my_enclave = integritee_rpc_api - .enclave(&tee_accountid, None) - .unwrap() - .expect("our enclave should be registered at this point"); - trace!("verified that our enclave is registered: {:?}", my_enclave); - - let (we_are_primary_validateer, re_init_parentchain_needed) = - match integritee_rpc_api.primary_worker_for_shard(shard, None).unwrap() { - Some(primary_enclave) => match primary_enclave.instance_signer() { - AnySigner::Known(MultiSigner::Ed25519(primary)) => - if primary.encode() == tee_accountid.encode() { - println!("We are primary worker on this shard and we have been previously running."); - (true, false) - } else { - println!( - "We are NOT primary worker. The primary worker is {}.", - primary.to_ss58check(), - ); - info!("The primary worker enclave is {:?}", primary_enclave); - if enclave - .get_shard_creation_info(shard) - .unwrap() - .for_parentchain(ParentchainId::Integritee) - .is_none() - { - //obtain provisioning from last active worker as this hasn't been done before - info!("my state doesn't know the creation header of the shard. will request provisioning"); - sync_state::sync_state::<_, _, WorkerModeProvider>( - &integritee_rpc_api, - &shard, - enclave.as_ref(), - skip_ra, - ); - } - (false, true) - }, - _ => { - panic!( - "the primary worker for shard {:?} has unknown signer type: {:?}", - shard, primary_enclave - ); - }, - }, - None => - if WorkerModeProvider::worker_mode() != WorkerMode::Teeracle { - println!("We are the primary worker on this shard and the shard is untouched. 
Will initialize it"); - enclave.init_shard(shard.encode()).unwrap(); - enclave - .init_shard_creation_parentchain_header( - shard, - &ParentchainId::Integritee, - ®ister_enclave_xt_header, - ) - .unwrap(); - debug!("shard config should be initialized on integritee network now"); - (true, true) - } else { - (true, false) - }, - }; - debug!("getting shard creation: {:?}", enclave.get_shard_creation_info(shard)); - initialization_handler.registered_on_parentchain(); - - let (integritee_parentchain_handler, integritee_last_synced_header_at_last_run) = - if re_init_parentchain_needed { - // re-initialize integritee parentchain to make sure to use creation_header for fast-sync or the provisioned light client state - init_parentchain( - &enclave, - &integritee_rpc_api, - &tee_accountid, - ParentchainId::Integritee, - shard, - ) - } else { - (integritee_parentchain_handler, integritee_last_synced_header_at_last_run) - }; - - // some of the following threads need to be shut down gracefully. - let shutdown_flag = Arc::new(AtomicBool::new(false)); - let mut sensitive_threads: Vec> = Vec::new(); - - match WorkerModeProvider::worker_mode() { - WorkerMode::Teeracle => { - // ------------------------------------------------------------------------ - // initialize teeracle interval - #[cfg(feature = "teeracle")] - schedule_periodic_reregistration_thread( - send_register_xt, - run_config.reregister_teeracle_interval(), - ); - - #[cfg(feature = "teeracle")] - start_periodic_market_update( - &integritee_rpc_api, - run_config.teeracle_update_interval(), - enclave.clone(), - &tokio_handle, - ); - }, - WorkerMode::OffChainWorker => { - println!("[Integritee:OCW] Finished initializing light client, syncing parentchain..."); - - // Syncing all parentchain blocks, this might take a while.. 
- let last_synced_header = integritee_parentchain_handler - .sync_parentchain_until_latest_finalized( - integritee_last_synced_header_at_last_run, - *shard, - true, - ) - .unwrap(); - - let handle = start_parentchain_header_subscription_thread( - shutdown_flag.clone(), - integritee_parentchain_handler, - last_synced_header, - *shard, - ); - sensitive_threads.push(handle); - - info!("skipping shard vault check because not yet supported for offchain worker"); - }, - WorkerMode::Sidechain => { - println!("[Integritee:SCV] Finished initializing light client, syncing integritee parentchain..."); - - let last_synced_header = if we_are_primary_validateer { - info!("We're the first validateer to be registered, syncing parentchain blocks until the one we have registered ourselves on."); - integritee_parentchain_handler - .await_sync_and_import_parentchain_until_at_least( - &integritee_last_synced_header_at_last_run, - ®ister_enclave_xt_header, - *shard, - ) - .unwrap() - } else { - integritee_last_synced_header_at_last_run - }; - - let handle = start_parentchain_header_subscription_thread( - shutdown_flag.clone(), - integritee_parentchain_handler, - last_synced_header, - *shard, - ); - sensitive_threads.push(handle); - - spawn_worker_for_shard_polling( - shard, - integritee_rpc_api.clone(), - initialization_handler, - ); - }, - } - - let maybe_target_a_rpc_api = if let Some(url) = config.target_a_parentchain_rpc_endpoint() { - println!("Initializing parentchain TargetA with url: {}", url); - let api = ita_parentchain_interface::target_a::api_factory::TargetANodeApiFactory::new( - url, - AccountKeyring::Alice.pair(), - ) - .create_api() - .unwrap_or_else(|_| panic!("[TargetA] Failed to create parentchain node API")); - let mut handles = init_target_parentchain( - &enclave, - &tee_accountid, - api.clone(), - shard, - ParentchainId::TargetA, - is_development_mode, - shutdown_flag.clone(), - ); - sensitive_threads.append(&mut handles); - Some(api) - } else { - None - }; - - let maybe_target_b_rpc_api = if let Some(url) = config.target_b_parentchain_rpc_endpoint() { - println!("Initializing parentchain TargetB with url: {}", url); - let api = ita_parentchain_interface::target_b::api_factory::TargetBNodeApiFactory::new( - url, - AccountKeyring::Alice.pair(), - ) - .create_api() - .unwrap_or_else(|_| panic!("[TargetB] Failed to create parentchain node API")); - let mut handles = init_target_parentchain( - &enclave, - &tee_accountid, - api.clone(), - shard, - ParentchainId::TargetB, - is_development_mode, - shutdown_flag.clone(), - ); - sensitive_threads.append(&mut handles); - Some(api) - } else { - None - }; - - if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { - init_provided_shard_vault( - shard, - &enclave, - integritee_rpc_api.clone(), - maybe_target_a_rpc_api.clone(), - maybe_target_b_rpc_api.clone(), - run_config.shielding_target, - we_are_primary_validateer, - ); - } - - // ------------------------------------------------------------------------ - // Start prometheus metrics server. 
- if config.enable_metrics_server() { - let metrics_server_port = config - .try_parse_metrics_server_port() - .expect("metrics server port to be a valid port number"); - start_prometheus_metrics_server( - &enclave, - &tee_accountid, - shard, - integritee_rpc_api.clone(), - maybe_target_a_rpc_api.clone(), - maybe_target_b_rpc_api.clone(), - run_config.shielding_target, - &tokio_handle, - metrics_server_port, - ); - } - - if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { - println!("[Integritee:SCV] starting block production"); - let mut handles = sidechain_init_block_production( - enclave.clone(), - sidechain_storage, - shutdown_flag.clone(), - ) - .unwrap(); - sensitive_threads.append(&mut handles); - } - - ita_parentchain_interface::event_subscriber::subscribe_to_parentchain_events( - &integritee_rpc_api, - ParentchainId::Integritee, - shutdown_flag.clone(), - ); - println!( - "[!] waiting for {} sensitive threads to shut down gracefully", - sensitive_threads.len() - ); - // Join each thread to ensure they have completed - for handle in sensitive_threads { - handle.join().expect("Thread panicked"); - } - println!("[!] All threads stopped gracefully."); + let run_config = config.run_config().clone().expect("Run config missing"); + let skip_ra = run_config.skip_ra(); + + #[cfg(feature = "teeracle")] + let flavor_str = "teeracle"; + #[cfg(feature = "sidechain")] + let flavor_str = "sidechain"; + #[cfg(feature = "offchain-worker")] + let flavor_str = "offchain-worker"; + #[cfg(not(any(feature = "offchain-worker", feature = "sidechain", feature = "teeracle")))] + let flavor_str = "offchain-worker"; + + println!("Integritee Worker for {} v{}", flavor_str, VERSION); + + #[cfg(feature = "dcap")] + println!(" DCAP is enabled"); + #[cfg(not(feature = "dcap"))] + println!(" DCAP is disabled"); + #[cfg(feature = "production")] + println!(" Production Mode is enabled"); + #[cfg(not(feature = "production"))] + println!(" Production Mode is disabled"); + #[cfg(feature = "evm")] + println!(" EVM is enabled"); + #[cfg(not(feature = "evm"))] + println!(" EVM is disabled"); + + info!("starting worker on shard {}", shard.encode().to_base58()); + // ------------------------------------------------------------------------ + // check for required files + if !skip_ra { + #[cfg(not(feature = "dcap"))] + check_files(); + } + // ------------------------------------------------------------------------ + // initialize the enclave + let mrenclave = enclave.get_fingerprint().unwrap(); + println!("MRENCLAVE={}", mrenclave.0.to_base58()); + println!("MRENCLAVE in hex {:?}", hex::encode(mrenclave)); + set_static_metrics(VERSION, mrenclave.0.to_base58().as_str()); + // ------------------------------------------------------------------------ + // let new workers call us for key provisioning + println!("MU-RA server listening on {}", config.mu_ra_url()); + let is_development_mode = run_config.dev(); + let ra_url = config.mu_ra_url(); + let enclave_api_key_prov = enclave.clone(); + thread::spawn(move || { + enclave_run_state_provisioning_server( + enclave_api_key_prov.as_ref(), + sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, + quoting_enclave_target_info.as_ref(), + quote_size.as_ref(), + &ra_url, + skip_ra, + ); + info!("State provisioning server stopped."); + }); + + let tokio_handle = tokio_handle_getter.get_handle(); + + // ------------------------------------------------------------------------ + // Get the public key of our TEE. 
+ let tee_accountid = enclave_account(enclave.as_ref()); + println!("Enclave account {:} ", &tee_accountid.to_ss58check()); + + // ------------------------------------------------------------------------ + // Start `is_initialized` server. + let untrusted_http_server_port = config + .try_parse_untrusted_http_server_port() + .expect("untrusted http server port to be a valid port number"); + let initialization_handler_clone = initialization_handler.clone(); + tokio_handle.spawn(async move { + if let Err(e) = + start_is_initialized_server(initialization_handler_clone, untrusted_http_server_port) + .await + { + error!("Unexpected error in `is_initialized` server: {:?}", e); + } + }); + + // ------------------------------------------------------------------------ + // Start trusted worker rpc server + if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain + || WorkerModeProvider::worker_mode() == WorkerMode::OffChainWorker + { + let direct_invocation_server_addr = config.trusted_worker_url_internal(); + let enclave_for_direct_invocation = enclave.clone(); + thread::spawn(move || { + println!( + "[+] Trusted RPC direct invocation server listening on {}", + direct_invocation_server_addr + ); + enclave_for_direct_invocation + .init_direct_invocation_server(direct_invocation_server_addr) + .unwrap(); + println!("[+] RPC direct invocation server shut down"); + }); + } + + // ------------------------------------------------------------------------ + // Start untrusted worker rpc server. + // i.e move sidechain block importing to trusted worker. + if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { + sidechain_start_untrusted_rpc_server(&config, sidechain_storage.clone(), &tokio_handle); + } + + // ------------------------------------------------------------------------ + // Init parentchain specific stuff. Needed early for parentchain communication. + let (integritee_parentchain_handler, integritee_last_synced_header_at_last_run) = + init_parentchain( + &enclave, + &integritee_rpc_api, + &tee_accountid, + ParentchainId::Integritee, + shard, + ); + + #[cfg(feature = "dcap")] + register_collateral( + &integritee_rpc_api, + &*enclave, + &tee_accountid, + is_development_mode, + skip_ra, + ); + + let trusted_url = config.trusted_worker_url_external(); + + #[cfg(feature = "attesteer")] + fetch_marblerun_events_every_hour( + integritee_rpc_api.clone(), + enclave.clone(), + tee_accountid.clone(), + is_development_mode, + trusted_url.clone(), + run_config.marblerun_base_url().to_string(), + ); + + // ------------------------------------------------------------------------ + // Perform a remote attestation and get an unchecked extrinsic back. + + if skip_ra { + println!( + "[!] skipping remote attestation. Registering enclave without attestation report." + ); + } else { + println!("[!] 
creating remote attestation report and create enclave register extrinsic."); + }; + + #[cfg(feature = "dcap")] + enclave.set_sgx_qpl_logging().expect("QPL logging setup failed"); + + let enclave2 = enclave.clone(); + #[cfg(not(feature = "dcap"))] + let register_xt = move || enclave2.generate_ias_ra_extrinsic(&trusted_url, skip_ra).unwrap(); + #[cfg(feature = "dcap")] + let register_xt = move || enclave2.generate_dcap_ra_extrinsic(&trusted_url, skip_ra).unwrap(); + + // clones because of the move + let node_api2 = integritee_rpc_api.clone(); + let tee_accountid_clone = tee_accountid.clone(); + let send_register_xt = move || { + println!("[+] Send register enclave extrinsic"); + send_integritee_extrinsic( + register_xt(), + &node_api2, + &tee_accountid_clone, + is_development_mode, + ) + }; + + let register_enclave_block_hash = + send_register_xt().expect("enclave RA registration must be successful to continue"); + + let api_register_enclave_xt_header = integritee_rpc_api + .get_header(Some(register_enclave_block_hash)) + .unwrap() + .unwrap(); + + // TODO: #1451: Fix api-client type hacks + let register_enclave_xt_header = + Header::decode(&mut api_register_enclave_xt_header.encode().as_slice()) + .expect("Can decode previously encoded header; qed"); + + println!( + "[+] Enclave registered at block number: {:?}, hash: {:?}", + register_enclave_xt_header.number(), + register_enclave_xt_header.hash() + ); + // double-check + let my_enclave = integritee_rpc_api + .enclave(&tee_accountid, None) + .unwrap() + .expect("our enclave should be registered at this point"); + trace!("verified that our enclave is registered: {:?}", my_enclave); + + let (we_are_primary_validateer, re_init_parentchain_needed) = + match integritee_rpc_api.primary_worker_for_shard(shard, None).unwrap() { + Some(primary_enclave) => match primary_enclave.instance_signer() { + AnySigner::Known(MultiSigner::Ed25519(primary)) => + if primary.encode() == tee_accountid.encode() { + println!("We are primary worker on this shard and we have been previously running."); + (true, false) + } else { + println!( + "We are NOT primary worker. The primary worker is {}.", + primary.to_ss58check(), + ); + info!("The primary worker enclave is {:?}", primary_enclave); + if enclave + .get_shard_creation_info(shard) + .unwrap() + .for_parentchain(ParentchainId::Integritee) + .is_none() + { + //obtain provisioning from last active worker as this hasn't been done before + info!("my state doesn't know the creation header of the shard. will request provisioning"); + sync_state::sync_state::<_, _, WorkerModeProvider>( + &integritee_rpc_api, + &shard, + enclave.as_ref(), + skip_ra, + ); + } + (false, true) + }, + _ => { + panic!( + "the primary worker for shard {:?} has unknown signer type: {:?}", + shard, primary_enclave + ); + } + }, + None => + if WorkerModeProvider::worker_mode() != WorkerMode::Teeracle { + println!("We are the primary worker on this shard and the shard is untouched. 
Will initialize it"); + enclave.init_shard(shard.encode()).unwrap(); + enclave + .init_shard_creation_parentchain_header( + shard, + &ParentchainId::Integritee, + ®ister_enclave_xt_header, + ) + .unwrap(); + debug!("shard config should be initialized on integritee network now"); + (true, true) + } else { + (true, false) + }, + }; + debug!("getting shard creation: {:?}", enclave.get_shard_creation_info(shard)); + initialization_handler.registered_on_parentchain(); + + let (integritee_parentchain_handler, integritee_last_synced_header_at_last_run) = + if re_init_parentchain_needed { + // re-initialize integritee parentchain to make sure to use creation_header for fast-sync or the provisioned light client state + init_parentchain( + &enclave, + &integritee_rpc_api, + &tee_accountid, + ParentchainId::Integritee, + shard, + ) + } else { + (integritee_parentchain_handler, integritee_last_synced_header_at_last_run) + }; + + // some of the following threads need to be shut down gracefully. + let shutdown_flag = Arc::new(AtomicBool::new(false)); + let mut sensitive_threads: Vec> = Vec::new(); + + match WorkerModeProvider::worker_mode() { + WorkerMode::Teeracle => { + // ------------------------------------------------------------------------ + // initialize teeracle interval + #[cfg(feature = "teeracle")] + schedule_periodic_reregistration_thread( + send_register_xt, + run_config.reregister_teeracle_interval(), + ); + + #[cfg(feature = "teeracle")] + start_periodic_market_update( + &integritee_rpc_api, + run_config.teeracle_update_interval(), + enclave.clone(), + &tokio_handle, + ); + } + WorkerMode::OffChainWorker => { + println!("[Integritee:OCW] Finished initializing light client, syncing parentchain..."); + + // Syncing all parentchain blocks, this might take a while.. 
+ let last_synced_header = integritee_parentchain_handler + .sync_parentchain_until_latest_finalized( + integritee_last_synced_header_at_last_run, + *shard, + true, + ) + .unwrap(); + + let handle = start_parentchain_header_subscription_thread( + shutdown_flag.clone(), + integritee_parentchain_handler, + last_synced_header, + *shard, + ); + sensitive_threads.push(handle); + + info!("skipping shard vault check because not yet supported for offchain worker"); + } + WorkerMode::Sidechain => { + println!("[Integritee:SCV] Finished initializing light client, syncing integritee parentchain..."); + + let last_synced_header = if we_are_primary_validateer { + info!("We're the first validateer to be registered, syncing parentchain blocks until the one we have registered ourselves on."); + integritee_parentchain_handler + .await_sync_and_import_parentchain_until_at_least( + &integritee_last_synced_header_at_last_run, + ®ister_enclave_xt_header, + *shard, + ) + .unwrap() + } else { + integritee_last_synced_header_at_last_run + }; + + let handle = start_parentchain_header_subscription_thread( + shutdown_flag.clone(), + integritee_parentchain_handler, + last_synced_header, + *shard, + ); + sensitive_threads.push(handle); + + spawn_worker_for_shard_polling( + shard, + integritee_rpc_api.clone(), + initialization_handler, + ); + } + } + + let maybe_target_a_rpc_api = if let Some(url) = config.target_a_parentchain_rpc_endpoint() { + println!("Initializing parentchain TargetA with url: {}", url); + let api = ita_parentchain_interface::target_a::api_factory::TargetANodeApiFactory::new( + url, + AccountKeyring::Alice.pair(), + ) + .create_api() + .unwrap_or_else(|_| panic!("[TargetA] Failed to create parentchain node API")); + let mut handles = init_target_parentchain( + &enclave, + &tee_accountid, + api.clone(), + shard, + ParentchainId::TargetA, + is_development_mode, + shutdown_flag.clone(), + ); + sensitive_threads.append(&mut handles); + Some(api) + } else { + None + }; + + let maybe_target_b_rpc_api = if let Some(url) = config.target_b_parentchain_rpc_endpoint() { + println!("Initializing parentchain TargetB with url: {}", url); + let api = ita_parentchain_interface::target_b::api_factory::TargetBNodeApiFactory::new( + url, + AccountKeyring::Alice.pair(), + ) + .create_api() + .unwrap_or_else(|_| panic!("[TargetB] Failed to create parentchain node API")); + let mut handles = init_target_parentchain( + &enclave, + &tee_accountid, + api.clone(), + shard, + ParentchainId::TargetB, + is_development_mode, + shutdown_flag.clone(), + ); + sensitive_threads.append(&mut handles); + Some(api) + } else { + None + }; + + if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { + init_provided_shard_vault( + shard, + &enclave, + integritee_rpc_api.clone(), + maybe_target_a_rpc_api.clone(), + maybe_target_b_rpc_api.clone(), + run_config.shielding_target, + we_are_primary_validateer, + ); + } + + // ------------------------------------------------------------------------ + // Start prometheus metrics server. 
+ if config.enable_metrics_server() { + let metrics_server_port = config + .try_parse_metrics_server_port() + .expect("metrics server port to be a valid port number"); + start_prometheus_metrics_server( + &enclave, + &tee_accountid, + shard, + integritee_rpc_api.clone(), + maybe_target_a_rpc_api.clone(), + maybe_target_b_rpc_api.clone(), + run_config.shielding_target, + &tokio_handle, + metrics_server_port, + ); + } + + if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { + println!("[Integritee:SCV] starting block production"); + let mut handles = sidechain_init_block_production( + enclave.clone(), + sidechain_storage, + shutdown_flag.clone(), + ) + .unwrap(); + sensitive_threads.append(&mut handles); + } + + ita_parentchain_interface::event_subscriber::subscribe_to_parentchain_events( + &integritee_rpc_api, + ParentchainId::Integritee, + shutdown_flag.clone(), + ); + println!( + "[!] waiting for {} sensitive threads to shut down gracefully", + sensitive_threads.len() + ); + // Join each thread to ensure they have completed + for handle in sensitive_threads { + handle.join().expect("Thread panicked"); + } + println!("[!] All threads stopped gracefully."); } fn init_provided_shard_vault( - shard: &ShardIdentifier, - enclave: &Arc, - integritee_rpc_api: IntegriteeApi, - maybe_target_a_rpc_api: Option, - maybe_target_b_rpc_api: Option, - shielding_target: Option, - we_are_primary_validateer: bool, + shard: &ShardIdentifier, + enclave: &Arc, + integritee_rpc_api: IntegriteeApi, + maybe_target_a_rpc_api: Option, + maybe_target_b_rpc_api: Option, + shielding_target: Option, + we_are_primary_validateer: bool, ) { - let shielding_target = shielding_target.unwrap_or_default(); - match shielding_target { - ParentchainId::Integritee => init_vault( - shard, - enclave, - &integritee_rpc_api, - shielding_target, - we_are_primary_validateer, - ), - ParentchainId::TargetA => init_vault( - shard, - enclave, - &maybe_target_a_rpc_api - .expect("target A must be initialized to be used as shielding target"), - shielding_target, - we_are_primary_validateer, - ), - ParentchainId::TargetB => init_vault( - shard, - enclave, - &maybe_target_b_rpc_api - .expect("target B must be initialized to be used as shielding target"), - shielding_target, - we_are_primary_validateer, - ), - }; + let shielding_target = shielding_target.unwrap_or_default(); + match shielding_target { + ParentchainId::Integritee => init_vault( + shard, + enclave, + &integritee_rpc_api, + shielding_target, + we_are_primary_validateer, + ), + ParentchainId::TargetA => init_vault( + shard, + enclave, + &maybe_target_a_rpc_api + .expect("target A must be initialized to be used as shielding target"), + shielding_target, + we_are_primary_validateer, + ), + ParentchainId::TargetB => init_vault( + shard, + enclave, + &maybe_target_b_rpc_api + .expect("target B must be initialized to be used as shielding target"), + shielding_target, + we_are_primary_validateer, + ), + }; } fn init_vault( - shard: &ShardIdentifier, - enclave: &Arc, - node_api: &Api, Client>, - shielding_target: ParentchainId, - we_are_primary_validateer: bool, + shard: &ShardIdentifier, + enclave: &Arc, + node_api: &Api, Client>, + shielding_target: ParentchainId, + we_are_primary_validateer: bool, ) where - E: EnclaveBase, - u128: From, - Tip: Copy + Default + Encode + Debug, - Client: Request, + E: EnclaveBase, + u128: From, + Tip: Copy + Default + Encode + Debug, + Client: Request, { - let funding_balance = shard_vault_initial_funds(&node_api, shielding_target).unwrap(); - 
if let Ok(shard_vault) = enclave.get_ecc_vault_pubkey(shard) { - // verify if proxy is set up on chain - let nonce = node_api.get_account_nonce(&AccountId::from(shard_vault)).unwrap(); - println!( - "[{:?}] shard vault account is already initialized in state: {} with nonce {}", - shielding_target, - shard_vault.to_ss58check(), - nonce - ); - if nonce == 0 && we_are_primary_validateer { - println!( - "[{:?}] nonce = 0 means shard vault not properly set up on chain. will retry", - shielding_target - ); - enclave.init_proxied_shard_vault(shard, &shielding_target, 0u128).unwrap(); - } - } else if we_are_primary_validateer { - println!("[{:?}] initializing proxied shard vault account now", shielding_target); - enclave - .init_proxied_shard_vault(shard, &shielding_target, funding_balance) - .unwrap(); - println!( - "[{:?}] initialized shard vault account: : {}", - shielding_target, - enclave.get_ecc_vault_pubkey(shard).unwrap().to_ss58check() - ); - } else { - panic!("no vault account has been initialized and we are not the primary worker"); - } + let funding_balance = shard_vault_initial_funds(&node_api, shielding_target).unwrap(); + if let Ok(shard_vault) = enclave.get_ecc_vault_pubkey(shard) { + // verify if proxy is set up on chain + let nonce = node_api.get_account_nonce(&AccountId::from(shard_vault)).unwrap(); + println!( + "[{:?}] shard vault account is already initialized in state: {} with nonce {}", + shielding_target, + shard_vault.to_ss58check(), + nonce + ); + if nonce == 0 && we_are_primary_validateer { + println!( + "[{:?}] nonce = 0 means shard vault not properly set up on chain. will retry", + shielding_target + ); + enclave.init_proxied_shard_vault(shard, &shielding_target, 0u128).unwrap(); + } + } else if we_are_primary_validateer { + println!("[{:?}] initializing proxied shard vault account now", shielding_target); + enclave + .init_proxied_shard_vault(shard, &shielding_target, funding_balance) + .unwrap(); + println!( + "[{:?}] initialized shard vault account: : {}", + shielding_target, + enclave.get_ecc_vault_pubkey(shard).unwrap().to_ss58check() + ); + } else { + panic!("no vault account has been initialized and we are not the primary worker"); + } } fn init_target_parentchain( - enclave: &Arc, - tee_account_id: &AccountId32, - node_api: Api, Client>, - shard: &ShardIdentifier, - parentchain_id: ParentchainId, - is_development_mode: bool, - shutdown_flag: Arc, + enclave: &Arc, + tee_account_id: &AccountId32, + node_api: Api, Client>, + shard: &ShardIdentifier, + parentchain_id: ParentchainId, + is_development_mode: bool, + shutdown_flag: Arc, ) -> Vec> where - E: EnclaveBase + Sidechain, - u128: From, - Tip: Copy + Default + Encode + Debug + Send + Sync + 'static, - Client: Request + Subscribe + Clone + Send + Sync + 'static, + E: EnclaveBase + Sidechain, + u128: From, + Tip: Copy + Default + Encode + Debug + Send + Sync + 'static, + Client: Request + Subscribe + Clone + Send + Sync + 'static, { - setup_reasonable_account_funding( - node_api.clone(), - tee_account_id, - parentchain_id, - is_development_mode, - ) - .unwrap_or_else(|e| { - panic!("[{:?}] Could not fund parentchain enclave account: {:?}", parentchain_id, e) - }); - - // we attempt to set shard creation for this parentchain in case it hasn't been done before - let api_head = node_api.get_header(node_api.get_finalized_head().unwrap()).unwrap().unwrap(); - // TODO: #1451: Fix api-client type hacks - let head = Header::decode(&mut api_head.encode().as_slice()) - .expect("Can decode previously encoded header; 
qed"); - - let (parentchain_handler, last_synched_header) = - init_parentchain(enclave, &node_api, tee_account_id, parentchain_id, shard); - - // we ignore failure - let _ = enclave.init_shard_creation_parentchain_header(shard, &parentchain_id, &head); - - let mut handles = Vec::new(); - - if WorkerModeProvider::worker_mode() != WorkerMode::Teeracle { - println!( - "[{:?}] Finished initializing light client, syncing parentchain...", - parentchain_id - ); - - // Syncing all parentchain blocks, this might take a while.. - let last_synched_header = parentchain_handler - .sync_parentchain_until_latest_finalized(last_synched_header, *shard, true) - .unwrap(); - - let handle = start_parentchain_header_subscription_thread( - shutdown_flag.clone(), - parentchain_handler.clone(), - last_synched_header, - *shard, - ); - handles.push(handle); - } - - let parentchain_init_params = parentchain_handler.parentchain_init_params.clone(); - - let node_api_clone = node_api.clone(); - thread::Builder::new() - .name(format!("{:?}_parentchain_event_subscription", parentchain_id)) - .spawn(move || { - ita_parentchain_interface::event_subscriber::subscribe_to_parentchain_events( - &node_api_clone, - parentchain_id, - shutdown_flag, - ) - }) - .unwrap(); - handles + setup_reasonable_account_funding( + node_api.clone(), + tee_account_id, + parentchain_id, + is_development_mode, + ) + .unwrap_or_else(|e| { + panic!("[{:?}] Could not fund parentchain enclave account: {:?}", parentchain_id, e) + }); + + // we attempt to set shard creation for this parentchain in case it hasn't been done before + let api_head = node_api.get_header(node_api.get_finalized_head().unwrap()).unwrap().unwrap(); + // TODO: #1451: Fix api-client type hacks + let head = Header::decode(&mut api_head.encode().as_slice()) + .expect("Can decode previously encoded header; qed"); + + let (parentchain_handler, last_synched_header) = + init_parentchain(enclave, &node_api, tee_account_id, parentchain_id, shard); + + // we ignore failure + let _ = enclave.init_shard_creation_parentchain_header(shard, &parentchain_id, &head); + + let mut handles = Vec::new(); + + if WorkerModeProvider::worker_mode() != WorkerMode::Teeracle { + println!( + "[{:?}] Finished initializing light client, syncing parentchain...", + parentchain_id + ); + + // Syncing all parentchain blocks, this might take a while.. 
+ let last_synched_header = parentchain_handler + .sync_parentchain_until_latest_finalized(last_synched_header, *shard, true) + .unwrap(); + + let handle = start_parentchain_header_subscription_thread( + shutdown_flag.clone(), + parentchain_handler.clone(), + last_synched_header, + *shard, + ); + handles.push(handle); + } + + let parentchain_init_params = parentchain_handler.parentchain_init_params.clone(); + + let node_api_clone = node_api.clone(); + thread::Builder::new() + .name(format!("{:?}_parentchain_event_subscription", parentchain_id)) + .spawn(move || { + ita_parentchain_interface::event_subscriber::subscribe_to_parentchain_events( + &node_api_clone, + parentchain_id, + shutdown_flag, + ) + }) + .unwrap(); + handles } fn init_parentchain( - enclave: &Arc, - node_api: &Api, Client>, - tee_account_id: &AccountId32, - parentchain_id: ParentchainId, - shard: &ShardIdentifier, + enclave: &Arc, + node_api: &Api, Client>, + tee_account_id: &AccountId32, + parentchain_id: ParentchainId, + shard: &ShardIdentifier, ) -> (Arc>, Header) where - E: EnclaveBase + Sidechain, - u128: From, - Tip: Copy + Default + Encode + Debug, - Client: Request + Subscribe + Clone, + E: EnclaveBase + Sidechain, + u128: From, + Tip: Copy + Default + Encode + Debug, + Client: Request + Subscribe + Clone, { - let parentchain_handler = Arc::new( - ParentchainHandler::new_with_automatic_light_client_allocation( - node_api.clone(), - enclave.clone(), - parentchain_id, - *shard, - ) - .unwrap(), - ); - let last_synced_header = parentchain_handler.init_parentchain_components().unwrap(); - println!("[{:?}] last synced parentchain block: {}", parentchain_id, last_synced_header.number); - - let nonce = node_api.get_system_account_next_index(tee_account_id.clone()).unwrap(); - info!("[{:?}] Enclave nonce = {:?}", parentchain_id, nonce); - enclave.set_nonce(nonce, parentchain_id).unwrap_or_else(|_| { - panic!("[{:?}] Could not set nonce of enclave. Returning here...", parentchain_id) - }); - - let metadata = node_api.metadata().clone(); - let runtime_spec_version = node_api.runtime_version().spec_version; - let runtime_transaction_version = node_api.runtime_version().transaction_version; - enclave - .set_node_metadata( - NodeMetadata::new(metadata, runtime_spec_version, runtime_transaction_version).encode(), - parentchain_id, - ) - .unwrap_or_else(|_| { - panic!("[{:?}] Could not set the node metadata in the enclave", parentchain_id) - }); - - (parentchain_handler, last_synced_header) + let parentchain_handler = Arc::new( + ParentchainHandler::new_with_automatic_light_client_allocation( + node_api.clone(), + enclave.clone(), + parentchain_id, + *shard, + ) + .unwrap(), + ); + let last_synced_header = parentchain_handler.init_parentchain_components().unwrap(); + println!("[{:?}] last synced parentchain block: {}", parentchain_id, last_synced_header.number); + + let nonce = node_api.get_system_account_next_index(tee_account_id.clone()).unwrap(); + info!("[{:?}] Enclave nonce = {:?}", parentchain_id, nonce); + enclave.set_nonce(nonce, parentchain_id).unwrap_or_else(|_| { + panic!("[{:?}] Could not set nonce of enclave. 
Returning here...", parentchain_id) + }); + + let metadata = node_api.metadata().clone(); + let runtime_spec_version = node_api.runtime_version().spec_version; + let runtime_transaction_version = node_api.runtime_version().transaction_version; + enclave + .set_node_metadata( + NodeMetadata::new(metadata, runtime_spec_version, runtime_transaction_version).encode(), + parentchain_id, + ) + .unwrap_or_else(|_| { + panic!("[{:?}] Could not set the node metadata in the enclave", parentchain_id) + }); + + (parentchain_handler, last_synced_header) } /// Start polling loop to wait until we have a worker for a shard registered on @@ -1044,265 +1025,265 @@ where /// considered initialized and ready for the next worker to start (in sidechain mode only). /// considered initialized and ready for the next worker to start. fn spawn_worker_for_shard_polling( - shard: &ShardIdentifier, - node_api: IntegriteeApi, - initialization_handler: Arc, + shard: &ShardIdentifier, + node_api: IntegriteeApi, + initialization_handler: Arc, ) where - InitializationHandler: TrackInitialization + Sync + Send + 'static, + InitializationHandler: TrackInitialization + Sync + Send + 'static, { - let shard_for_initialized = *shard; - thread::spawn(move || { - const POLL_INTERVAL_SECS: u64 = 2; - - loop { - info!("Polling for worker for shard ({} seconds interval)", POLL_INTERVAL_SECS); - if let Ok(Some(enclave)) = - node_api.primary_worker_for_shard(&shard_for_initialized, None) - { - // Set that the service is initialized. - initialization_handler.worker_for_shard_registered(); - println!( - "[+] Found `WorkerForShard` on parentchain state: {:?}", - enclave.instance_signer() - ); - break - } - thread::sleep(Duration::from_secs(POLL_INTERVAL_SECS)); - } - }); + let shard_for_initialized = *shard; + thread::spawn(move || { + const POLL_INTERVAL_SECS: u64 = 2; + + loop { + info!("Polling for worker for shard ({} seconds interval)", POLL_INTERVAL_SECS); + if let Ok(Some(enclave)) = + node_api.primary_worker_for_shard(&shard_for_initialized, None) + { + // Set that the service is initialized. 
+ initialization_handler.worker_for_shard_registered(); + println!( + "[+] Found `WorkerForShard` on parentchain state: {:?}", + enclave.instance_signer() + ); + break; + } + thread::sleep(Duration::from_secs(POLL_INTERVAL_SECS)); + } + }); } #[cfg(feature = "attesteer")] fn fetch_marblerun_events_every_hour( - api: IntegriteeApi, - enclave: Arc, - accountid: AccountId32, - is_development_mode: bool, - url: String, - marblerun_base_url: String, + api: IntegriteeApi, + enclave: Arc, + accountid: AccountId32, + is_development_mode: bool, + url: String, + marblerun_base_url: String, ) where - E: RemoteAttestation + Clone + Sync + Send + 'static, + E: RemoteAttestation + Clone + Sync + Send + 'static, { - let enclave = enclave.clone(); - let handle = thread::spawn(move || { - const POLL_INTERVAL_5_MINUTES_IN_SECS: u64 = 5 * 60; - loop { - info!("Polling marblerun events for quotes to register"); - register_quotes_from_marblerun( - &api, - enclave.clone(), - &accountid, - is_development_mode, - url.clone(), - &marblerun_base_url, - ); - - thread::sleep(Duration::from_secs(POLL_INTERVAL_5_MINUTES_IN_SECS)); - } - }); - - handle.join().unwrap() + let enclave = enclave.clone(); + let handle = thread::spawn(move || { + const POLL_INTERVAL_5_MINUTES_IN_SECS: u64 = 5 * 60; + loop { + info!("Polling marblerun events for quotes to register"); + register_quotes_from_marblerun( + &api, + enclave.clone(), + &accountid, + is_development_mode, + url.clone(), + &marblerun_base_url, + ); + + thread::sleep(Duration::from_secs(POLL_INTERVAL_5_MINUTES_IN_SECS)); + } + }); + + handle.join().unwrap() } #[cfg(feature = "attesteer")] fn register_quotes_from_marblerun( - api: &IntegriteeApi, - enclave: Arc, - accountid: &AccountId32, - is_development_mode: bool, - url: String, - marblerun_base_url: &str, + api: &IntegriteeApi, + enclave: Arc, + accountid: &AccountId32, + is_development_mode: bool, + url: String, + marblerun_base_url: &str, ) { - let enclave = enclave.as_ref(); - let events = crate::prometheus_metrics::fetch_marblerun_events(marblerun_base_url) - .map_err(|e| { - info!("Fetching events from Marblerun failed with: {:?}, continuing with 0 events.", e); - }) - .unwrap_or_default(); - let quotes: Vec<&[u8]> = - events.iter().map(|event| event.get_quote_without_prepended_bytes()).collect(); - - for quote in quotes { - match enclave.generate_dcap_ra_extrinsic_from_quote(url.clone(), "e) { - Ok(xt) => { - send_integritee_extrinsic(xt, api, accountid, is_development_mode); - }, - Err(e) => { - error!("Extracting information from quote failed: {}", e) - }, - } - } + let enclave = enclave.as_ref(); + let events = crate::prometheus_metrics::fetch_marblerun_events(marblerun_base_url) + .map_err(|e| { + info!("Fetching events from Marblerun failed with: {:?}, continuing with 0 events.", e); + }) + .unwrap_or_default(); + let quotes: Vec<&[u8]> = + events.iter().map(|event| event.get_quote_without_prepended_bytes()).collect(); + + for quote in quotes { + match enclave.generate_dcap_ra_extrinsic_from_quote(url.clone(), "e) { + Ok(xt) => { + send_integritee_extrinsic(xt, api, accountid, is_development_mode); + } + Err(e) => { + error!("Extracting information from quote failed: {}", e) + } + } + } } #[cfg(feature = "dcap")] fn register_collateral( - api: &IntegriteeApi, - enclave: &dyn RemoteAttestation, - accountid: &AccountId32, - is_development_mode: bool, - skip_ra: bool, + api: &IntegriteeApi, + enclave: &dyn RemoteAttestation, + accountid: &AccountId32, + is_development_mode: bool, + skip_ra: bool, ) { - //TODO 
generate_dcap_ra_quote() does not really need skip_ra, rethink how many layers skip_ra should be passed along - if !skip_ra { - let dcap_quote = enclave.generate_dcap_ra_quote(skip_ra).unwrap(); - let (fmspc, _tcb_info) = extract_tcb_info_from_raw_dcap_quote(&dcap_quote).unwrap(); - println!("[>] DCAP setup: register QE collateral"); - let uxt = enclave.generate_register_quoting_enclave_extrinsic(fmspc).unwrap(); - send_integritee_extrinsic(uxt, api, accountid, is_development_mode); - - println!("[>] DCAP setup: register TCB info"); - let uxt = enclave.generate_register_tcb_info_extrinsic(fmspc).unwrap(); - send_integritee_extrinsic(uxt, api, accountid, is_development_mode); - } + //TODO generate_dcap_ra_quote() does not really need skip_ra, rethink how many layers skip_ra should be passed along + if !skip_ra { + let dcap_quote = enclave.generate_dcap_ra_quote(skip_ra).unwrap(); + let (fmspc, _tcb_info) = extract_tcb_info_from_raw_dcap_quote(&dcap_quote).unwrap(); + println!("[>] DCAP setup: register QE collateral"); + let uxt = enclave.generate_register_quoting_enclave_extrinsic(fmspc).unwrap(); + send_integritee_extrinsic(uxt, api, accountid, is_development_mode); + + println!("[>] DCAP setup: register TCB info"); + let uxt = enclave.generate_register_tcb_info_extrinsic(fmspc).unwrap(); + send_integritee_extrinsic(uxt, api, accountid, is_development_mode); + } } fn send_integritee_extrinsic( - extrinsic: Vec, - api: &Api, Client>, - fee_payer: &AccountId32, - is_development_mode: bool, + extrinsic: Vec, + api: &Api, Client>, + fee_payer: &AccountId32, + is_development_mode: bool, ) -> ServiceResult where - u128: From, - Tip: Copy + Default + Encode + Debug + Send + Sync + 'static, - Client: Request + Subscribe + Clone + Send + Sync + 'static, + u128: From, + Tip: Copy + Default + Encode + Debug + Send + Sync + 'static, + Client: Request + Subscribe + Clone + Send + Sync + 'static, { - let timeout = Duration::from_secs(5 * 60); - let (sender, receiver) = mpsc::channel(); - let local_fee_payer = fee_payer.clone(); - let local_api = api.clone(); - // start thread which can time out - let handle = thread::spawn(move || { - let fee = crate::account_funding::estimate_fee(&local_api, extrinsic.clone()).unwrap(); - let ed = local_api.get_existential_deposit().unwrap(); - let free = local_api.get_free_balance(&local_fee_payer).unwrap(); - let missing_funds = fee.saturating_add(ed).saturating_sub(free); - info!("[Integritee] send extrinsic"); - debug!("fee: {:?}, ed: {:?}, free: {:?} => missing: {:?}", fee, ed, free, missing_funds); - trace!( + let timeout = Duration::from_secs(5 * 60); + let (sender, receiver) = mpsc::channel(); + let local_fee_payer = fee_payer.clone(); + let local_api = api.clone(); + // start thread which can time out + let handle = thread::spawn(move || { + let fee = crate::account_funding::estimate_fee(&local_api, extrinsic.clone()).unwrap(); + let ed = local_api.get_existential_deposit().unwrap(); + let free = local_api.get_free_balance(&local_fee_payer).unwrap(); + let missing_funds = fee.saturating_add(ed).saturating_sub(free); + info!("[Integritee] send extrinsic"); + debug!("fee: {:?}, ed: {:?}, free: {:?} => missing: {:?}", fee, ed, free, missing_funds); + trace!( " encoded extrinsic len: {}, payload: 0x{:}", extrinsic.len(), hex::encode(extrinsic.clone()) ); - if missing_funds > 0 { - setup_reasonable_account_funding( - local_api.clone(), - &local_fee_payer, - ParentchainId::Integritee, - is_development_mode, - ) - .unwrap() - } - - match local_api - 
.submit_and_watch_opaque_extrinsic_until(&extrinsic.into(), XtStatus::Finalized) - { - Ok(xt_report) => { - info!( + if missing_funds > 0 { + setup_reasonable_account_funding( + local_api.clone(), + &local_fee_payer, + ParentchainId::Integritee, + is_development_mode, + ) + .unwrap() + } + + match local_api + .submit_and_watch_opaque_extrinsic_until(&extrinsic.into(), XtStatus::Finalized) + { + Ok(xt_report) => { + info!( "[+] L1 extrinsic success. extrinsic hash: {:?} / status: {:?}", xt_report.extrinsic_hash, xt_report.status ); - xt_report.block_hash.ok_or(Error::Custom("no extrinsic hash returned".into())); - sender.send(xt_report.block_hash.unwrap()); - }, - Err(e) => { - panic!( - "Extrinsic failed {:?} parentchain genesis: {:?}", - e, - local_api.genesis_hash() - ); - }, - } - }); - // Wait for the result with a timeout - match receiver.recv_timeout(timeout) { - Ok(result) => { - println!("Task finished within timeout: {:?}", result); - Ok(result) - }, - Err(_) => { - println!("Task timed out after {:?}", timeout); - panic!("Extrinsic sending timed out. shutting down."); - }, - } + xt_report.block_hash.ok_or(Error::Custom("no extrinsic hash returned".into())); + sender.send(xt_report.block_hash.unwrap()); + } + Err(e) => { + panic!( + "Extrinsic failed {:?} parentchain genesis: {:?}", + e, + local_api.genesis_hash() + ); + } + } + }); + // Wait for the result with a timeout + match receiver.recv_timeout(timeout) { + Ok(result) => { + println!("Task finished within timeout: {:?}", result); + Ok(result) + } + Err(_) => { + println!("Task timed out after {:?}", timeout); + panic!("Extrinsic sending timed out. shutting down."); + } + } } fn start_parentchain_header_subscription_thread( - shutdown_flag: Arc, - parentchain_handler: Arc>, - last_synced_header: Header, - shard: ShardIdentifier, + shutdown_flag: Arc, + parentchain_handler: Arc>, + last_synced_header: Header, + shard: ShardIdentifier, ) -> thread::JoinHandle<()> where - EnclaveApi: EnclaveBase + Sidechain, - u128: From, - Tip: Copy + Default + Encode + Debug + Send + Sync + 'static, - Client: Request + Subscribe + Send + Sync + 'static, + EnclaveApi: EnclaveBase + Sidechain, + u128: From, + Tip: Copy + Default + Encode + Debug + Send + Sync + 'static, + Client: Request + Subscribe + Send + Sync + 'static, { - let parentchain_id = *parentchain_handler.parentchain_id(); - thread::Builder::new() - .name(format!("{:?}_parentchain_sync_loop", parentchain_id)) - .spawn(move || { - if let Err(e) = subscribe_to_parentchain_new_headers( - shutdown_flag, - parentchain_handler, - last_synced_header, - shard, - ) { - error!( + let parentchain_id = *parentchain_handler.parentchain_id(); + thread::Builder::new() + .name(format!("{:?}_parentchain_sync_loop", parentchain_id)) + .spawn(move || { + if let Err(e) = subscribe_to_parentchain_new_headers( + shutdown_flag, + parentchain_handler, + last_synced_header, + shard, + ) { + error!( "[{:?}] parentchain block syncing terminated with a failure: {:?}", parentchain_id, e ); - } - println!("[!] [{:?}] parentchain block syncing has terminated", parentchain_id); - }) - .unwrap() + } + println!("[!] [{:?}] parentchain block syncing has terminated", parentchain_id); + }) + .unwrap() } /// Subscribe to the node API finalized heads stream and trigger a parent chain sync /// upon receiving a new header. 
fn subscribe_to_parentchain_new_headers( - shutdown_flag: Arc, - parentchain_handler: Arc>, - mut last_synced_header: Header, - shard: ShardIdentifier, + shutdown_flag: Arc, + parentchain_handler: Arc>, + mut last_synced_header: Header, + shard: ShardIdentifier, ) -> Result<(), Error> where - EnclaveApi: EnclaveBase + Sidechain, - u128: From, - Tip: Copy + Default + Encode + Debug, - Client: Request + Subscribe, + EnclaveApi: EnclaveBase + Sidechain, + u128: From, + Tip: Copy + Default + Encode + Debug, + Client: Request + Subscribe, { - // TODO: this should be implemented by parentchain_handler directly, and not via - // exposed parentchain_api - let mut subscription = parentchain_handler - .parentchain_api() - .subscribe_finalized_heads() - .map_err(Error::ApiClient)?; - let parentchain_id = parentchain_handler.parentchain_id(); - while !shutdown_flag.load(Ordering::Relaxed) { - let new_header = subscription - .next() - .ok_or(Error::ApiSubscriptionDisconnected)? - .map_err(|e| Error::ApiClient(e.into()))?; - - info!( + // TODO: this should be implemented by parentchain_handler directly, and not via + // exposed parentchain_api + let mut subscription = parentchain_handler + .parentchain_api() + .subscribe_finalized_heads() + .map_err(Error::ApiClient)?; + let parentchain_id = parentchain_handler.parentchain_id(); + while !shutdown_flag.load(Ordering::Relaxed) { + let new_header = subscription + .next() + .ok_or(Error::ApiSubscriptionDisconnected)? + .map_err(|e| Error::ApiClient(e.into()))?; + + info!( "[{:?}] Received finalized header update ({}), syncing parent chain...", parentchain_id, new_header.number ); - last_synced_header = parentchain_handler.sync_parentchain_until_latest_finalized( - last_synced_header, - shard, - false, - )?; - } - warn!("[{:?}] parent chain block syncing has terminated", parentchain_id); - Ok(()) + last_synced_header = parentchain_handler.sync_parentchain_until_latest_finalized( + last_synced_header, + shard, + false, + )?; + } + warn!("[{:?}] parent chain block syncing has terminated", parentchain_id); + Ok(()) } /// Get the public signing key of the TEE. 
fn enclave_account(enclave_api: &E) -> AccountId32 { - let tee_public = enclave_api.get_ecc_signing_pubkey().unwrap(); - trace!("[+] Got ed25519 account of TEE = {}", tee_public.to_ss58check()); - AccountId32::from(*tee_public.as_array_ref()) + let tee_public = enclave_api.get_ecc_signing_pubkey().unwrap(); + trace!("[+] Got ed25519 account of TEE = {}", tee_public.to_ss58check()); + AccountId32::from(*tee_public.as_array_ref()) } diff --git a/service/src/ocall_bridge/component_factory.rs b/service/src/ocall_bridge/component_factory.rs index 6738f66a6..81502c728 100644 --- a/service/src/ocall_bridge/component_factory.rs +++ b/service/src/ocall_bridge/component_factory.rs @@ -70,7 +70,7 @@ pub struct OCallBridgeComponentFactory< peer_block_fetcher: Arc, tokio_handle: Arc, metrics_receiver: Arc, - maybe_ipfs_client: Option>, + maybe_ipfs_url_and_auth: (Option, Option), log_dir: Arc, } @@ -117,7 +117,7 @@ impl< peer_block_fetcher: Arc, tokio_handle: Arc, metrics_receiver: Arc, - maybe_ipfs_client: Option>, + maybe_ipfs_url_and_auth: (Option, Option), log_dir: Arc, ) -> Self { OCallBridgeComponentFactory { @@ -131,7 +131,7 @@ impl< peer_block_fetcher, tokio_handle, metrics_receiver, - maybe_ipfs_client, + maybe_ipfs_url_and_auth, log_dir, } } @@ -160,7 +160,8 @@ impl< PeerBlockFetcher, TokioHandle, MetricsReceiver, - > where + > +where IntegriteeRuntimeConfig: Config + 'static, TargetARuntimeConfig: Config + 'static, @@ -200,7 +201,10 @@ impl< } fn get_ipfs_api(&self) -> Arc { - Arc::new(IpfsOCall::new(self.maybe_ipfs_client.clone())) + Arc::new(IpfsOCall::new( + self.maybe_ipfs_url_and_auth.0.clone(), + self.maybe_ipfs_url_and_auth.1.clone(), + )) } fn get_metrics_api(&self) -> Arc { diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index f066d14e3..a41f413df 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -22,90 +22,110 @@ use ipfs_api_backend_hyper::{IpfsApi, IpfsClient, TryFromUri}; use itp_utils::IpfsCid; use log::*; use std::{ - fs::File, - io::{Cursor, Write}, - str, - sync::{mpsc::channel, Arc}, + fs::File, + io::{Cursor, Write}, + str, + sync::{mpsc::channel, Arc}, }; pub struct IpfsOCall { - client: Arc, + client: Option>, } impl IpfsOCall { - pub fn new(client: Option>) -> Self { - // Fallback if None: - // Creates an `IpfsClient` connected to the endpoint specified in ~/.ipfs/api. - // If not found, tries to connect to `localhost:5001`. 
- Self { client: client.unwrap_or_default() } - } + pub fn new(maybe_url: Option, maybe_auth: Option) -> Self { + if let Some(url) = maybe_url { + let client = ipfs_api_backend_hyper::IpfsClient::from_str(&url).unwrap(); + let client = if let Some((user, pwd)) = maybe_auth + .and_then(|s| s.split_once(':').map(|(u, p)| (u.to_string(), p.to_string()))) + { + info!("Using IPFS node at {} with credentials ******", url); + client.with_credentials(user, pwd) + } else { + info!("Using IPFS node at {}", url); + client + }; + let version = tokio::runtime::Runtime::new().unwrap().block_on(client.version()); + match version { + Ok(v) => info!("Connected to IPFS node version: {}", v.version), + Err(e) => error!("Error getting IPFS node version: {}", e), + } + Self { client: Some(Arc::new(client)) } + } else { + info!("No IPFS URL provided, disabling IPFS."); + Self { client: None } + } + } } impl IpfsBridge for IpfsOCall { - fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { - debug!(" Entering ocall_write_ipfs"); - write_to_ipfs(&self.client, data) - } + fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { + debug!(" Entering ocall_write_ipfs"); + write_to_ipfs( + self.client.as_ref().ok_or_else(|| + OCallBridgeError::IpfsError("No IPFS client configured, cannot write to IPFS".to_string()) + )?, + data, + ) + } - fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()> { - debug!("Entering ocall_read_ipfs"); - - let result = read_from_ipfs(&self.client, &cid); - match result { - Ok(res) => { - let filename = format!("{:?}", cid); - create_file(&filename, &res).map_err(OCallBridgeError::IpfsError) - }, - Err(_) => Err(OCallBridgeError::IpfsError("failed to read from IPFS".to_string())), - } - } + fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()> { + debug!("Entering ocall_read_ipfs"); + let client = self.client.as_ref().ok_or_else(|| + OCallBridgeError::IpfsError("No IPFS client configured, cannot read from IPFS".to_string()) + )?; + let res = read_from_ipfs(client, &cid) + .map_err(|_| OCallBridgeError::IpfsError("failed to read from IPFS".to_string()))?; + let filename = format!("{:?}", cid); + create_file(&filename, &res).map_err(OCallBridgeError::IpfsError) + } } fn create_file(filename: &str, result: &[u8]) -> Result<(), String> { - match File::create(filename) { - Ok(mut f) => f - .write_all(result) - .map_or_else(|e| Err(format!("failed writing to file: {}", e)), |_| Ok(())), - Err(e) => Err(format!("failed to create file: {}", e)), - } + match File::create(filename) { + Ok(mut f) => f + .write_all(result) + .map_or_else(|e| Err(format!("failed writing to file: {}", e)), |_| Ok(())), + Err(e) => Err(format!("failed to create file: {}", e)), + } } #[tokio::main] async fn write_to_ipfs(client: &IpfsClient, data: &'static [u8]) -> OCallBridgeResult { - let datac = Cursor::new(data); - let (tx, rx) = channel(); + let datac = Cursor::new(data); + let (tx, rx) = channel(); - match client.add(datac).await { - Ok(res) => { - debug!("Result IpfsCid {}", res.hash); - tx.send(res.hash.into_bytes()).unwrap(); - }, - Err(e) => { - error!("error adding file: {}", e); - return Err(OCallBridgeError::IpfsError(format!("error adding file: {}", e))) - }, - } - rx.recv() - .map_err(|e| OCallBridgeError::IpfsError(format!("error receiving cid: {}", e))) - .and_then(|cid_str| { - str::from_utf8(&cid_str) - .map_err(|e| OCallBridgeError::IpfsError(format!("invalid UTF-8 in cid: {}", e))) - .and_then(|cid_utf8| { - IpfsCid::try_from(cid_utf8).map_err(|e| { - 
OCallBridgeError::IpfsError(format!("invalid IpfsCid: {:?}", e)) - }) - }) - }) + match client.add(datac).await { + Ok(res) => { + debug!("Result IpfsCid {}", res.hash); + tx.send(res.hash.into_bytes()).unwrap(); + } + Err(e) => { + error!("error adding file: {}", e); + return Err(OCallBridgeError::IpfsError(format!("error adding file: {}", e))); + } + } + rx.recv() + .map_err(|e| OCallBridgeError::IpfsError(format!("error receiving cid: {}", e))) + .and_then(|cid_str| { + str::from_utf8(&cid_str) + .map_err(|e| OCallBridgeError::IpfsError(format!("invalid UTF-8 in cid: {}", e))) + .and_then(|cid_utf8| { + IpfsCid::try_from(cid_utf8).map_err(|e| { + OCallBridgeError::IpfsError(format!("invalid IpfsCid: {:?}", e)) + }) + }) + }) } #[tokio::main] pub async fn read_from_ipfs(client: &IpfsClient, cid: &IpfsCid) -> Result, String> { - let h = format!("{:?}", cid); - debug!("Fetching content with cid {}", h); - client - .cat(&h) - .map_ok(|chunk| chunk.to_vec()) - .map_err(|e| e.to_string()) - .try_concat() - .await + let h = format!("{:?}", cid); + debug!("Fetching content with cid {}", h); + client + .cat(&h) + .map_ok(|chunk| chunk.to_vec()) + .map_err(|e| e.to_string()) + .try_concat() + .await } From 096c8f3311233d10ef35d42b31ae4910c39dde89 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Sat, 20 Sep 2025 16:33:13 +0200 Subject: [PATCH 17/91] add simple integration test with ipfs relayed note --- .github/workflows/build_and_test.yml | 5 ++ cli/demo_send_relayed_note.sh | 91 ++++++++++++++++++++++++++++ docker/demo-send_relayed_note.yml | 35 +++++++++++ docker/docker-compose.yml | 2 +- 4 files changed, 132 insertions(+), 1 deletion(-) create mode 100755 cli/demo_send_relayed_note.sh create mode 100644 docker/demo-send_relayed_note.yml diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 2c8106b6f..ca94fd873 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -271,6 +271,11 @@ jobs: demo_name: demo-direct-call host: test-runner-sgx sgx_mode: HW + - test: relayed-notes + flavor_id: sidechain + demo_name: demo-send_relayed_note + host: test-runner-sgx + sgx_mode: HW - test: Sidechain flavor_id: sidechain demo_name: demo-sidechain diff --git a/cli/demo_send_relayed_note.sh b/cli/demo_send_relayed_note.sh new file mode 100755 index 000000000..176f5b4c5 --- /dev/null +++ b/cli/demo_send_relayed_note.sh @@ -0,0 +1,91 @@ +#!/bin/bash + +# Executes a direct call on a worker and checks the balance afterwards. +# +# setup: +# run all on localhost: +# integritee-node purge-chain --dev +# integritee-node --tmp --dev -lruntime=debug +# rm light_client_db.bin +# export RUST_LOG=integritee_service=info,ita_stf=debug +# integritee-service init_shard +# integritee-service shielding-key +# integritee-service signing-key +# integritee-service run +# +# then run this script + +# usage: +# demo_direct_call.sh -p -P -t +# +# TEST_BALANCE_RUN is either "first" or "second" + + +while getopts ":p:P:t:u:V:C:" opt; do + case $opt in + t) + TEST=$OPTARG + ;; + p) + INTEGRITEE_RPC_PORT=$OPTARG + ;; + P) + WORKER_1_PORT=$OPTARG + ;; + u) + INTEGRITEE_RPC_URL=$OPTARG + ;; + V) + WORKER_1_URL=$OPTARG + ;; + i) + IPFS_GATEWAY=$OPTARG + ;; + C) + CLIENT_BIN=$OPTARG + ;; + *) + echo "invalid arg ${OPTARG}" + exit 1 + esac +done + +# Using default port if none given as arguments. 
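+#
+# Example invocation (values are illustrative and assume a local node, worker and IPFS API;
+# adjust them to your deployment). Omitted flags fall back to the defaults set just below:
+#   ./demo_send_relayed_note.sh -p 9944 -P 2000 -u ws://127.0.0.1 -V wss://127.0.0.1 \
+#     -i http://127.0.0.1:5001 -C ./../bin/integritee-cli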
+INTEGRITEE_RPC_PORT=${INTEGRITEE_RPC_PORT:-9944}
+INTEGRITEE_RPC_URL=${INTEGRITEE_RPC_URL:-"ws://127.0.0.1"}
+
+WORKER_1_PORT=${WORKER_1_PORT:-2000}
+WORKER_1_URL=${WORKER_1_URL:-"wss://127.0.0.1"}
+
+CLIENT_BIN=${CLIENT_BIN:-"./../bin/integritee-cli"}
+
+echo "Using client binary ${CLIENT_BIN}"
+${CLIENT_BIN} --version
+echo "Using node uri ${INTEGRITEE_RPC_URL}:${INTEGRITEE_RPC_PORT}"
+echo "Using trusted-worker uri ${WORKER_1_URL}:${WORKER_1_PORT}"
+echo "Using IPFS gateway ${IPFS_GATEWAY}"
+echo ""
+
+CLIENT="${CLIENT_BIN} -p ${INTEGRITEE_RPC_PORT} -P ${WORKER_1_PORT} -u ${INTEGRITEE_RPC_URL} -U ${WORKER_1_URL}${IPFS_GATEWAY:+ -i ${IPFS_GATEWAY}}"
+# we simply believe the enclave here without verifying the teerex RA
+MRENCLAVE="$($CLIENT trusted get-fingerprint)"
+echo "Using MRENCLAVE: ${MRENCLAVE}"
+TCLIENT="${CLIENT} trusted --mrenclave ${MRENCLAVE} --direct"
+
+NOTE="Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
+${TCLIENT} send-note --ipfs-proxy //Alice //Bob "${NOTE}"
+echo "Alice sent note to Bob:"
+echo "$NOTE"
+
+RECEIVED_NOTE=$(${TCLIENT} get-notes //Bob 0 | grep "${NOTE}")
+
+echo "Bob received:"
+echo "$RECEIVED_NOTE"
+
+if echo "$RECEIVED_NOTE" | grep -qF "$NOTE"; then
+    echo "NOTE found in RECEIVED_NOTE"
+    exit 0
+else
+    echo "NOTE not found in RECEIVED_NOTE"
+    exit 1
+fi
diff --git a/docker/demo-send_relayed_note.yml b/docker/demo-send_relayed_note.yml
new file mode 100644
index 000000000..f5180f580
--- /dev/null
+++ b/docker/demo-send_relayed_note.yml
@@ -0,0 +1,35 @@
+services:
+  demo-send_relayed_note:
+    image: integritee-cli:${VERSION:-dev}
+    devices:
+      - "${SGX_PROVISION:-/dev/null}:/dev/sgx/provision"
+      - "${SGX_ENCLAVE:-/dev/null}:/dev/sgx/enclave"
+    volumes:
+      - "${AESMD:-/dev/null}:/var/run/aesmd"
+      - "${SGX_QCNL:-/dev/null}:/etc/sgx_default_qcnl.conf"
+    build:
+      context: ${PWD}/..
+ dockerfile: build.Dockerfile + target: deployed-client + depends_on: + integritee-node-${VERSION}: + condition: service_healthy + integritee-worker-1-${VERSION}: + condition: service_healthy + networks: + - integritee-test-network + entrypoint: + "/usr/local/worker-cli/demo_send_relayed_note.sh -p 9912 -u ws://integritee-node + -V wss://integritee-worker-1 -P 2011 -C /usr/local/bin/integritee-cli 2>&1" + restart: "no" + ipfs-node: + image: ipfs/kubo:latest + ports: + - "4001:4001" # Swarm + - "5001:5001" # API + - "8080:8080" # Gateway + networks: + - integritee-test-network +networks: + integritee-test-network: + driver: bridge \ No newline at end of file diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 23c0338d4..e711ca400 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -43,7 +43,7 @@ services: interval: 10s timeout: 10s retries: 25 - command: "--clean-reset --data-dir /tmp/worker1 --ws-external -M integritee-worker-1 -T wss://integritee-worker-1 -u ws://integritee-node -U ws://integritee-worker-1 -P 2011 -w 2101 -p 9912 -h 4645 run --dev ${ADDITIONAL_RUNTIME_FLAGS}" + command: "--clean-reset --data-dir /tmp/worker1 --ws-external -M integritee-worker-1 -T wss://integritee-worker-1 -u ws://integritee-node -U ws://integritee-worker-1 -P 2011 -w 2101 -p 9912 -h 4645 --ipfs-api-url http://ipfs-node:5001 run --dev ${ADDITIONAL_RUNTIME_FLAGS}" restart: "no" "integritee-worker-2-${VERSION}": image: integritee-worker:${VERSION:-dev} From 0fd2971f7884ace4a5731571b35e4c13b3f8fe89 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Sat, 20 Sep 2025 16:34:01 +0200 Subject: [PATCH 18/91] fmt --- service/src/main_impl.rs | 2256 ++++++++--------- service/src/ocall_bridge/component_factory.rs | 3 +- service/src/ocall_bridge/ipfs_ocall.rs | 172 +- 3 files changed, 1217 insertions(+), 1214 deletions(-) diff --git a/service/src/main_impl.rs b/service/src/main_impl.rs index 621b91ee3..fc6a7d03e 100644 --- a/service/src/main_impl.rs +++ b/service/src/main_impl.rs @@ -4,29 +4,29 @@ use crate::teeracle::{schedule_periodic_reregistration_thread, start_periodic_ma #[cfg(not(feature = "dcap"))] use crate::utils::check_files; use crate::{ - account_funding::{setup_reasonable_account_funding, ParentchainAccountInfoProvider}, - config::Config, - enclave::{ - api::enclave_init, - tls_ra::{enclave_request_state_provisioning, enclave_run_state_provisioning_server}, - }, - error::Error, - globals::tokio_handle::{GetTokioHandle, GlobalTokioHandle}, - initialized_service::{ - start_is_initialized_server, InitializationHandler, IsInitialized, TrackInitialization, - }, - ocall_bridge::{ - bridge_api::Bridge as OCallBridge, component_factory::OCallBridgeComponentFactory, - }, - parentchain_handler::{HandleParentchain, ParentchainHandler}, - prometheus_metrics::{start_metrics_server, EnclaveMetricsReceiver, MetricsHandler}, - setup, - sidechain_setup::{sidechain_init_block_production, sidechain_start_untrusted_rpc_server}, - sync_block_broadcaster::SyncBlockBroadcaster, - sync_state, tests, - utils::extract_shard, - worker::Worker, - worker_peers_updater::WorkerPeersUpdater, + account_funding::{setup_reasonable_account_funding, ParentchainAccountInfoProvider}, + config::Config, + enclave::{ + api::enclave_init, + tls_ra::{enclave_request_state_provisioning, enclave_run_state_provisioning_server}, + }, + error::Error, + globals::tokio_handle::{GetTokioHandle, GlobalTokioHandle}, + initialized_service::{ + start_is_initialized_server, InitializationHandler, IsInitialized, 
TrackInitialization, + }, + ocall_bridge::{ + bridge_api::Bridge as OCallBridge, component_factory::OCallBridgeComponentFactory, + }, + parentchain_handler::{HandleParentchain, ParentchainHandler}, + prometheus_metrics::{start_metrics_server, EnclaveMetricsReceiver, MetricsHandler}, + setup, + sidechain_setup::{sidechain_init_block_production, sidechain_start_untrusted_rpc_server}, + sync_block_broadcaster::SyncBlockBroadcaster, + sync_state, tests, + utils::extract_shard, + worker::Worker, + worker_peers_updater::WorkerPeersUpdater, }; use base58::ToBase58; use clap::{load_yaml, App, ArgMatches}; @@ -34,19 +34,19 @@ use codec::{Decode, Encode}; use ipfs_api_backend_hyper::{IpfsApi, TryFromUri}; use ita_parentchain_interface::integritee::{Hash, Header}; use itp_enclave_api::{ - enclave_base::EnclaveBase, - remote_attestation::{RemoteAttestation, TlsRemoteAttestation}, - sidechain::Sidechain, - teeracle_api::TeeracleApi, + enclave_base::EnclaveBase, + remote_attestation::{RemoteAttestation, TlsRemoteAttestation}, + sidechain::Sidechain, + teeracle_api::TeeracleApi, }; use itp_node_api::{ - api_client::{AccountApi, PalletTeerexApi}, - metadata::NodeMetadata, - node_api_factory::{CreateNodeApi, NodeApiFactory}, + api_client::{AccountApi, PalletTeerexApi}, + metadata::NodeMetadata, + node_api_factory::{CreateNodeApi, NodeApiFactory}, }; use itp_settings::worker_mode::{ProvideWorkerMode, WorkerMode, WorkerModeProvider}; use its_peer_fetch::{ - block_fetch_client::BlockFetcher, untrusted_peer_fetch::UntrustedPeerFetcher, + block_fetch_client::BlockFetcher, untrusted_peer_fetch::UntrustedPeerFetcher, }; use its_primitives::types::block::SignedBlock as SignedSidechainBlock; use its_storage::{interface::FetchBlocks, BlockPruner, SidechainStorageLock}; @@ -55,10 +55,10 @@ use regex::Regex; use sgx_types::*; use sp_runtime::traits::{Header as HeaderT, IdentifyAccount}; use substrate_api_client::{ - api::XtStatus, - rpc::{HandleSubscription, Request, Subscribe}, - Api, GetAccountInformation, GetBalance, GetChainInfo, GetStorage, SubmitAndWatch, - SubscribeChain, SubscribeEvents, + api::XtStatus, + rpc::{HandleSubscription, Request, Subscribe}, + Api, GetAccountInformation, GetBalance, GetChainInfo, GetStorage, SubmitAndWatch, + SubscribeChain, SubscribeEvents, }; use teerex_primitives::{AnySigner, MultiEnclave}; @@ -69,20 +69,20 @@ use sgx_verify::extract_tcb_info_from_raw_dcap_quote; use itp_enclave_api::Enclave; use crate::{ - account_funding::{shard_vault_initial_funds, AccountAndRole}, - error::ServiceResult, - prometheus_metrics::{set_static_metrics, start_prometheus_metrics_server, HandleMetrics}, - sidechain_setup::ParentchainIntegriteeSidechainInfoProvider, + account_funding::{shard_vault_initial_funds, AccountAndRole}, + error::ServiceResult, + prometheus_metrics::{set_static_metrics, start_prometheus_metrics_server, HandleMetrics}, + sidechain_setup::ParentchainIntegriteeSidechainInfoProvider, }; use enclave_bridge_primitives::ShardIdentifier; use ita_parentchain_interface::{ - integritee::{ - api_client_types::{IntegriteeApi, IntegriteeTip}, - api_factory::IntegriteeNodeApiFactory, - }, - target_a::api_client_types::{TargetAApi, TargetARuntimeConfig}, - target_b::api_client_types::{TargetBApi, TargetBRuntimeConfig}, - ParentchainRuntimeConfig, + integritee::{ + api_client_types::{IntegriteeApi, IntegriteeTip}, + api_factory::IntegriteeNodeApiFactory, + }, + target_a::api_client_types::{TargetAApi, TargetARuntimeConfig}, + target_b::api_client_types::{TargetBApi, TargetBRuntimeConfig}, + 
ParentchainRuntimeConfig, }; use itc_parentchain::primitives::ParentchainId; use itp_node_api::api_client::ChainApi; @@ -92,20 +92,20 @@ use sp_core::crypto::{AccountId32, Ss58Codec}; use sp_keyring::AccountKeyring; use sp_runtime::MultiSigner; use std::{ - fmt::Debug, - path::PathBuf, - str, - str::Utf8Error, - sync::{ - atomic::{AtomicBool, Ordering}, - mpsc, Arc, - }, - thread, - time::Duration, + fmt::Debug, + path::PathBuf, + str, + str::Utf8Error, + sync::{ + atomic::{AtomicBool, Ordering}, + mpsc, Arc, + }, + thread, + time::Duration, }; use substrate_api_client::{ - ac_node_api::{EventRecord, Phase::ApplyExtrinsic}, - rpc::TungsteniteRpcClient, + ac_node_api::{EventRecord, Phase::ApplyExtrinsic}, + rpc::TungsteniteRpcClient, }; use tokio::{runtime::Handle, task::JoinHandle, time::Instant}; @@ -123,901 +123,901 @@ const SGX_MODE_INFO: &str = " (debug enclave)"; #[cfg(feature = "link-binary")] pub type EnclaveWorker = Worker< - Config, - ParentchainRuntimeConfig, - Enclave, - InitializationHandler, + Config, + ParentchainRuntimeConfig, + Enclave, + InitializationHandler, >; pub(crate) fn main() { - // Setup logging - env_logger::builder() - .format_timestamp(Some(env_logger::TimestampPrecision::Millis)) - .init(); - - let yml = load_yaml!("cli.yml"); - let matches = App::from_yaml(yml) - .version(VERSION) - .about( - format!( - "Integritee {:?} worker{}{}", - WorkerModeProvider::worker_mode(), - EVM_INFO, - SGX_MODE_INFO - ) - .as_str(), - ) - .get_matches(); - - let config = Config::from(&matches); - - GlobalTokioHandle::initialize(); - - // log this information, don't println because some python scripts for GA rely on the - // stdout from the service - #[cfg(feature = "production")] - info!("*** Starting service in SGX production mode"); - #[cfg(not(feature = "production"))] - info!("*** Starting service in SGX debug mode"); - - info!("*** Running worker in mode: {:?} \n", WorkerModeProvider::worker_mode()); - - let mut lockfile = PathBuf::from(config.data_dir()); - lockfile.push("worker.lock"); - while std::fs::metadata(lockfile.clone()).is_ok() { - println!("lockfile is present, will wait for it to disappear {:?}", lockfile); - thread::sleep(std::time::Duration::from_secs(5)); - } - - let clean_reset = matches.is_present("clean-reset"); - if clean_reset { - println!("[+] Performing a clean reset of the worker"); - setup::purge_integritee_lcdb_unless_protected(config.data_dir()).unwrap(); - setup::purge_target_a_lcdb_unless_protected(config.data_dir()).unwrap(); - setup::purge_target_b_lcdb_unless_protected(config.data_dir()).unwrap(); - setup::purge_shards_unless_protected(config.data_dir()).unwrap(); - } - - // build the entire dependency tree - let tokio_handle = Arc::new(GlobalTokioHandle {}); - let sidechain_blockstorage = Arc::new( - SidechainStorageLock::::from_base_path( - config.data_dir().to_path_buf(), - ) - .unwrap(), - ); - let node_api_factory = Arc::new(NodeApiFactory::new( - config.integritee_rpc_endpoint(), - AccountKeyring::Alice.pair(), - )); - let enclave = Arc::new(enclave_init(&config).unwrap()); - let initialization_handler = Arc::new(InitializationHandler::default()); - let worker = Arc::new(EnclaveWorker::new( - config.clone(), - enclave.clone(), - node_api_factory.clone(), - initialization_handler.clone(), - Vec::new(), - )); - let sync_block_broadcaster = - Arc::new(SyncBlockBroadcaster::new(tokio_handle.clone(), worker.clone())); - let peer_updater = Arc::new(WorkerPeersUpdater::new(worker)); - let untrusted_peer_fetcher = 
UntrustedPeerFetcher::new(node_api_factory.clone()); - let peer_sidechain_block_fetcher = - Arc::new(BlockFetcher::::new(untrusted_peer_fetcher)); - let enclave_metrics_receiver = Arc::new(EnclaveMetricsReceiver {}); - - let maybe_target_a_parentchain_api_factory = - config.target_a_parentchain_rpc_endpoint().map(|url| { - Arc::new(NodeApiFactory::::new( - url, - AccountKeyring::Alice.pair(), - )) - }); - - let maybe_target_b_parentchain_api_factory = - config.target_b_parentchain_rpc_endpoint().map(|url| { - Arc::new(NodeApiFactory::::new( - url, - AccountKeyring::Alice.pair(), - )) - }); - - let maybe_ipfs_url_and_auth = (config.ipfs_api_url(), config.ipfs_api_auth()); - - // initialize o-call bridge with a concrete factory implementation - OCallBridge::initialize(Arc::new(OCallBridgeComponentFactory::new( - node_api_factory.clone(), - maybe_target_a_parentchain_api_factory, - maybe_target_b_parentchain_api_factory, - sync_block_broadcaster, - enclave.clone(), - sidechain_blockstorage.clone(), - peer_updater, - peer_sidechain_block_fetcher, - tokio_handle.clone(), - enclave_metrics_receiver, - maybe_ipfs_url_and_auth, - config.data_dir().into(), - ))); - - let quoting_enclave_target_info = match enclave.qe_get_target_info() { - Ok(target_info) => Some(target_info), - Err(e) => { - warn!("Setting up DCAP - qe_get_target_info failed with error: {:?}, continuing.", e); - None - } - }; - let quote_size = match enclave.qe_get_quote_size() { - Ok(size) => Some(size), - Err(e) => { - warn!("Setting up DCAP - qe_get_quote_size failed with error: {:?}, continuing.", e); - None - } - }; - - if let Some(run_config) = config.run_config() { - println!("Worker Config: {:?}", config); - - let shard = extract_shard(run_config.shard(), enclave.as_ref()); - - let mut shard_path = PathBuf::from(config.data_dir()); - shard_path.push(SHARDS_PATH); - shard_path.push(shard.encode().to_base58()); - println!("Worker Shard Path: {:?}", shard_path); - if clean_reset || std::fs::metadata(shard_path).is_err() { - // we default to purge here because we don't want to leave behind blocks - // for deprectated shards in the sidechain_db - setup::purge_shards_unless_protected(config.data_dir()).unwrap(); - // will auto-create folders for new shard - setup::initialize_shard_and_keys(enclave.as_ref(), &shard).unwrap(); - } - - let node_api = - node_api_factory.create_api().expect("Failed to create parentchain node API"); - - start_worker::<_, _, _, _, WorkerModeProvider>( - config, - &shard, - enclave, - sidechain_blockstorage, - node_api, - tokio_handle, - initialization_handler, - quoting_enclave_target_info, - quote_size, - ); - } else if let Some(smatches) = matches.subcommand_matches("request-state") { - println!("*** Requesting state from a registered worker \n"); - let node_api = - node_api_factory.create_api().expect("Failed to create parentchain node API"); - sync_state::sync_state::<_, _, WorkerModeProvider>( - &node_api, - &extract_shard(smatches.value_of("shard"), enclave.as_ref()), - enclave.as_ref(), - smatches.is_present("skip-ra"), - ); - } else if matches.is_present("shielding-key") { - setup::generate_shielding_key_file(enclave.as_ref()); - } else if matches.is_present("signing-key") { - setup::generate_signing_key_file(enclave.as_ref()); - } else if matches.is_present("dump-ra") { - info!("*** Perform RA and dump cert to disk"); - #[cfg(not(feature = "dcap"))] - enclave.dump_ias_ra_cert_to_disk().unwrap(); - #[cfg(feature = "dcap")] - { - let skip_ra = false; - let dcap_quote = 
enclave.generate_dcap_ra_quote(skip_ra).unwrap(); - let (fmspc, _tcb_info) = extract_tcb_info_from_raw_dcap_quote(&dcap_quote).unwrap(); - enclave.dump_dcap_collateral_to_disk(fmspc).unwrap(); - enclave.dump_dcap_ra_cert_to_disk().unwrap(); - } - } else if matches.is_present("mrenclave") { - println!("{}", enclave.get_fingerprint().unwrap().encode().to_base58()); - } else if let Some(sub_matches) = matches.subcommand_matches("init-shard") { - setup::init_shard( - enclave.as_ref(), - &extract_shard(sub_matches.value_of("shard"), enclave.as_ref()), - ); - } else if let Some(sub_matches) = matches.subcommand_matches("test") { - if sub_matches.is_present("provisioning-server") { - println!("*** Running Enclave MU-RA TLS server\n"); - enclave_run_state_provisioning_server( - enclave.as_ref(), - sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, - quoting_enclave_target_info.as_ref(), - quote_size.as_ref(), - &config.mu_ra_url(), - sub_matches.is_present("skip-ra"), - ); - println!("[+] Done!"); - } else if sub_matches.is_present("provisioning-client") { - println!("*** Running Enclave MU-RA TLS client\n"); - let shard = extract_shard(sub_matches.value_of("shard"), enclave.as_ref()); - enclave_request_state_provisioning( - enclave.as_ref(), - sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, - &config.mu_ra_url_external(), - &shard, - sub_matches.is_present("skip-ra"), - ) - .unwrap(); - println!("[+] Done!"); - } else { - tests::run_enclave_tests(sub_matches); - } - } else { - println!("For options: use --help"); - } + // Setup logging + env_logger::builder() + .format_timestamp(Some(env_logger::TimestampPrecision::Millis)) + .init(); + + let yml = load_yaml!("cli.yml"); + let matches = App::from_yaml(yml) + .version(VERSION) + .about( + format!( + "Integritee {:?} worker{}{}", + WorkerModeProvider::worker_mode(), + EVM_INFO, + SGX_MODE_INFO + ) + .as_str(), + ) + .get_matches(); + + let config = Config::from(&matches); + + GlobalTokioHandle::initialize(); + + // log this information, don't println because some python scripts for GA rely on the + // stdout from the service + #[cfg(feature = "production")] + info!("*** Starting service in SGX production mode"); + #[cfg(not(feature = "production"))] + info!("*** Starting service in SGX debug mode"); + + info!("*** Running worker in mode: {:?} \n", WorkerModeProvider::worker_mode()); + + let mut lockfile = PathBuf::from(config.data_dir()); + lockfile.push("worker.lock"); + while std::fs::metadata(lockfile.clone()).is_ok() { + println!("lockfile is present, will wait for it to disappear {:?}", lockfile); + thread::sleep(std::time::Duration::from_secs(5)); + } + + let clean_reset = matches.is_present("clean-reset"); + if clean_reset { + println!("[+] Performing a clean reset of the worker"); + setup::purge_integritee_lcdb_unless_protected(config.data_dir()).unwrap(); + setup::purge_target_a_lcdb_unless_protected(config.data_dir()).unwrap(); + setup::purge_target_b_lcdb_unless_protected(config.data_dir()).unwrap(); + setup::purge_shards_unless_protected(config.data_dir()).unwrap(); + } + + // build the entire dependency tree + let tokio_handle = Arc::new(GlobalTokioHandle {}); + let sidechain_blockstorage = Arc::new( + SidechainStorageLock::::from_base_path( + config.data_dir().to_path_buf(), + ) + .unwrap(), + ); + let node_api_factory = Arc::new(NodeApiFactory::new( + config.integritee_rpc_endpoint(), + AccountKeyring::Alice.pair(), + )); + let enclave = Arc::new(enclave_init(&config).unwrap()); + let initialization_handler = 
Arc::new(InitializationHandler::default()); + let worker = Arc::new(EnclaveWorker::new( + config.clone(), + enclave.clone(), + node_api_factory.clone(), + initialization_handler.clone(), + Vec::new(), + )); + let sync_block_broadcaster = + Arc::new(SyncBlockBroadcaster::new(tokio_handle.clone(), worker.clone())); + let peer_updater = Arc::new(WorkerPeersUpdater::new(worker)); + let untrusted_peer_fetcher = UntrustedPeerFetcher::new(node_api_factory.clone()); + let peer_sidechain_block_fetcher = + Arc::new(BlockFetcher::::new(untrusted_peer_fetcher)); + let enclave_metrics_receiver = Arc::new(EnclaveMetricsReceiver {}); + + let maybe_target_a_parentchain_api_factory = + config.target_a_parentchain_rpc_endpoint().map(|url| { + Arc::new(NodeApiFactory::::new( + url, + AccountKeyring::Alice.pair(), + )) + }); + + let maybe_target_b_parentchain_api_factory = + config.target_b_parentchain_rpc_endpoint().map(|url| { + Arc::new(NodeApiFactory::::new( + url, + AccountKeyring::Alice.pair(), + )) + }); + + let maybe_ipfs_url_and_auth = (config.ipfs_api_url(), config.ipfs_api_auth()); + + // initialize o-call bridge with a concrete factory implementation + OCallBridge::initialize(Arc::new(OCallBridgeComponentFactory::new( + node_api_factory.clone(), + maybe_target_a_parentchain_api_factory, + maybe_target_b_parentchain_api_factory, + sync_block_broadcaster, + enclave.clone(), + sidechain_blockstorage.clone(), + peer_updater, + peer_sidechain_block_fetcher, + tokio_handle.clone(), + enclave_metrics_receiver, + maybe_ipfs_url_and_auth, + config.data_dir().into(), + ))); + + let quoting_enclave_target_info = match enclave.qe_get_target_info() { + Ok(target_info) => Some(target_info), + Err(e) => { + warn!("Setting up DCAP - qe_get_target_info failed with error: {:?}, continuing.", e); + None + }, + }; + let quote_size = match enclave.qe_get_quote_size() { + Ok(size) => Some(size), + Err(e) => { + warn!("Setting up DCAP - qe_get_quote_size failed with error: {:?}, continuing.", e); + None + }, + }; + + if let Some(run_config) = config.run_config() { + println!("Worker Config: {:?}", config); + + let shard = extract_shard(run_config.shard(), enclave.as_ref()); + + let mut shard_path = PathBuf::from(config.data_dir()); + shard_path.push(SHARDS_PATH); + shard_path.push(shard.encode().to_base58()); + println!("Worker Shard Path: {:?}", shard_path); + if clean_reset || std::fs::metadata(shard_path).is_err() { + // we default to purge here because we don't want to leave behind blocks + // for deprectated shards in the sidechain_db + setup::purge_shards_unless_protected(config.data_dir()).unwrap(); + // will auto-create folders for new shard + setup::initialize_shard_and_keys(enclave.as_ref(), &shard).unwrap(); + } + + let node_api = + node_api_factory.create_api().expect("Failed to create parentchain node API"); + + start_worker::<_, _, _, _, WorkerModeProvider>( + config, + &shard, + enclave, + sidechain_blockstorage, + node_api, + tokio_handle, + initialization_handler, + quoting_enclave_target_info, + quote_size, + ); + } else if let Some(smatches) = matches.subcommand_matches("request-state") { + println!("*** Requesting state from a registered worker \n"); + let node_api = + node_api_factory.create_api().expect("Failed to create parentchain node API"); + sync_state::sync_state::<_, _, WorkerModeProvider>( + &node_api, + &extract_shard(smatches.value_of("shard"), enclave.as_ref()), + enclave.as_ref(), + smatches.is_present("skip-ra"), + ); + } else if matches.is_present("shielding-key") { + 
setup::generate_shielding_key_file(enclave.as_ref()); + } else if matches.is_present("signing-key") { + setup::generate_signing_key_file(enclave.as_ref()); + } else if matches.is_present("dump-ra") { + info!("*** Perform RA and dump cert to disk"); + #[cfg(not(feature = "dcap"))] + enclave.dump_ias_ra_cert_to_disk().unwrap(); + #[cfg(feature = "dcap")] + { + let skip_ra = false; + let dcap_quote = enclave.generate_dcap_ra_quote(skip_ra).unwrap(); + let (fmspc, _tcb_info) = extract_tcb_info_from_raw_dcap_quote(&dcap_quote).unwrap(); + enclave.dump_dcap_collateral_to_disk(fmspc).unwrap(); + enclave.dump_dcap_ra_cert_to_disk().unwrap(); + } + } else if matches.is_present("mrenclave") { + println!("{}", enclave.get_fingerprint().unwrap().encode().to_base58()); + } else if let Some(sub_matches) = matches.subcommand_matches("init-shard") { + setup::init_shard( + enclave.as_ref(), + &extract_shard(sub_matches.value_of("shard"), enclave.as_ref()), + ); + } else if let Some(sub_matches) = matches.subcommand_matches("test") { + if sub_matches.is_present("provisioning-server") { + println!("*** Running Enclave MU-RA TLS server\n"); + enclave_run_state_provisioning_server( + enclave.as_ref(), + sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, + quoting_enclave_target_info.as_ref(), + quote_size.as_ref(), + &config.mu_ra_url(), + sub_matches.is_present("skip-ra"), + ); + println!("[+] Done!"); + } else if sub_matches.is_present("provisioning-client") { + println!("*** Running Enclave MU-RA TLS client\n"); + let shard = extract_shard(sub_matches.value_of("shard"), enclave.as_ref()); + enclave_request_state_provisioning( + enclave.as_ref(), + sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, + &config.mu_ra_url_external(), + &shard, + sub_matches.is_present("skip-ra"), + ) + .unwrap(); + println!("[+] Done!"); + } else { + tests::run_enclave_tests(sub_matches); + } + } else { + println!("For options: use --help"); + } } /// FIXME: needs some discussion (restructuring?) 
#[allow(clippy::too_many_arguments)] fn start_worker( - config: Config, - shard: &ShardIdentifier, - enclave: Arc, - sidechain_storage: Arc, - integritee_rpc_api: IntegriteeApi, - tokio_handle_getter: Arc, - initialization_handler: Arc, - quoting_enclave_target_info: Option, - quote_size: Option, + config: Config, + shard: &ShardIdentifier, + enclave: Arc, + sidechain_storage: Arc, + integritee_rpc_api: IntegriteeApi, + tokio_handle_getter: Arc, + initialization_handler: Arc, + quoting_enclave_target_info: Option, + quote_size: Option, ) where - T: GetTokioHandle, - E: EnclaveBase + Sidechain + RemoteAttestation + TlsRemoteAttestation + TeeracleApi + Clone, - D: BlockPruner + FetchBlocks + Sync + Send + 'static, - InitializationHandler: TrackInitialization + IsInitialized + Sync + Send + 'static, - WorkerModeProvider: ProvideWorkerMode, + T: GetTokioHandle, + E: EnclaveBase + Sidechain + RemoteAttestation + TlsRemoteAttestation + TeeracleApi + Clone, + D: BlockPruner + FetchBlocks + Sync + Send + 'static, + InitializationHandler: TrackInitialization + IsInitialized + Sync + Send + 'static, + WorkerModeProvider: ProvideWorkerMode, { - let run_config = config.run_config().clone().expect("Run config missing"); - let skip_ra = run_config.skip_ra(); - - #[cfg(feature = "teeracle")] - let flavor_str = "teeracle"; - #[cfg(feature = "sidechain")] - let flavor_str = "sidechain"; - #[cfg(feature = "offchain-worker")] - let flavor_str = "offchain-worker"; - #[cfg(not(any(feature = "offchain-worker", feature = "sidechain", feature = "teeracle")))] - let flavor_str = "offchain-worker"; - - println!("Integritee Worker for {} v{}", flavor_str, VERSION); - - #[cfg(feature = "dcap")] - println!(" DCAP is enabled"); - #[cfg(not(feature = "dcap"))] - println!(" DCAP is disabled"); - #[cfg(feature = "production")] - println!(" Production Mode is enabled"); - #[cfg(not(feature = "production"))] - println!(" Production Mode is disabled"); - #[cfg(feature = "evm")] - println!(" EVM is enabled"); - #[cfg(not(feature = "evm"))] - println!(" EVM is disabled"); - - info!("starting worker on shard {}", shard.encode().to_base58()); - // ------------------------------------------------------------------------ - // check for required files - if !skip_ra { - #[cfg(not(feature = "dcap"))] - check_files(); - } - // ------------------------------------------------------------------------ - // initialize the enclave - let mrenclave = enclave.get_fingerprint().unwrap(); - println!("MRENCLAVE={}", mrenclave.0.to_base58()); - println!("MRENCLAVE in hex {:?}", hex::encode(mrenclave)); - set_static_metrics(VERSION, mrenclave.0.to_base58().as_str()); - // ------------------------------------------------------------------------ - // let new workers call us for key provisioning - println!("MU-RA server listening on {}", config.mu_ra_url()); - let is_development_mode = run_config.dev(); - let ra_url = config.mu_ra_url(); - let enclave_api_key_prov = enclave.clone(); - thread::spawn(move || { - enclave_run_state_provisioning_server( - enclave_api_key_prov.as_ref(), - sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, - quoting_enclave_target_info.as_ref(), - quote_size.as_ref(), - &ra_url, - skip_ra, - ); - info!("State provisioning server stopped."); - }); - - let tokio_handle = tokio_handle_getter.get_handle(); - - // ------------------------------------------------------------------------ - // Get the public key of our TEE. 
- let tee_accountid = enclave_account(enclave.as_ref()); - println!("Enclave account {:} ", &tee_accountid.to_ss58check()); - - // ------------------------------------------------------------------------ - // Start `is_initialized` server. - let untrusted_http_server_port = config - .try_parse_untrusted_http_server_port() - .expect("untrusted http server port to be a valid port number"); - let initialization_handler_clone = initialization_handler.clone(); - tokio_handle.spawn(async move { - if let Err(e) = - start_is_initialized_server(initialization_handler_clone, untrusted_http_server_port) - .await - { - error!("Unexpected error in `is_initialized` server: {:?}", e); - } - }); - - // ------------------------------------------------------------------------ - // Start trusted worker rpc server - if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain - || WorkerModeProvider::worker_mode() == WorkerMode::OffChainWorker - { - let direct_invocation_server_addr = config.trusted_worker_url_internal(); - let enclave_for_direct_invocation = enclave.clone(); - thread::spawn(move || { - println!( - "[+] Trusted RPC direct invocation server listening on {}", - direct_invocation_server_addr - ); - enclave_for_direct_invocation - .init_direct_invocation_server(direct_invocation_server_addr) - .unwrap(); - println!("[+] RPC direct invocation server shut down"); - }); - } - - // ------------------------------------------------------------------------ - // Start untrusted worker rpc server. - // i.e move sidechain block importing to trusted worker. - if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { - sidechain_start_untrusted_rpc_server(&config, sidechain_storage.clone(), &tokio_handle); - } - - // ------------------------------------------------------------------------ - // Init parentchain specific stuff. Needed early for parentchain communication. - let (integritee_parentchain_handler, integritee_last_synced_header_at_last_run) = - init_parentchain( - &enclave, - &integritee_rpc_api, - &tee_accountid, - ParentchainId::Integritee, - shard, - ); - - #[cfg(feature = "dcap")] - register_collateral( - &integritee_rpc_api, - &*enclave, - &tee_accountid, - is_development_mode, - skip_ra, - ); - - let trusted_url = config.trusted_worker_url_external(); - - #[cfg(feature = "attesteer")] - fetch_marblerun_events_every_hour( - integritee_rpc_api.clone(), - enclave.clone(), - tee_accountid.clone(), - is_development_mode, - trusted_url.clone(), - run_config.marblerun_base_url().to_string(), - ); - - // ------------------------------------------------------------------------ - // Perform a remote attestation and get an unchecked extrinsic back. - - if skip_ra { - println!( - "[!] skipping remote attestation. Registering enclave without attestation report." - ); - } else { - println!("[!] 
creating remote attestation report and create enclave register extrinsic."); - }; - - #[cfg(feature = "dcap")] - enclave.set_sgx_qpl_logging().expect("QPL logging setup failed"); - - let enclave2 = enclave.clone(); - #[cfg(not(feature = "dcap"))] - let register_xt = move || enclave2.generate_ias_ra_extrinsic(&trusted_url, skip_ra).unwrap(); - #[cfg(feature = "dcap")] - let register_xt = move || enclave2.generate_dcap_ra_extrinsic(&trusted_url, skip_ra).unwrap(); - - // clones because of the move - let node_api2 = integritee_rpc_api.clone(); - let tee_accountid_clone = tee_accountid.clone(); - let send_register_xt = move || { - println!("[+] Send register enclave extrinsic"); - send_integritee_extrinsic( - register_xt(), - &node_api2, - &tee_accountid_clone, - is_development_mode, - ) - }; - - let register_enclave_block_hash = - send_register_xt().expect("enclave RA registration must be successful to continue"); - - let api_register_enclave_xt_header = integritee_rpc_api - .get_header(Some(register_enclave_block_hash)) - .unwrap() - .unwrap(); - - // TODO: #1451: Fix api-client type hacks - let register_enclave_xt_header = - Header::decode(&mut api_register_enclave_xt_header.encode().as_slice()) - .expect("Can decode previously encoded header; qed"); - - println!( - "[+] Enclave registered at block number: {:?}, hash: {:?}", - register_enclave_xt_header.number(), - register_enclave_xt_header.hash() - ); - // double-check - let my_enclave = integritee_rpc_api - .enclave(&tee_accountid, None) - .unwrap() - .expect("our enclave should be registered at this point"); - trace!("verified that our enclave is registered: {:?}", my_enclave); - - let (we_are_primary_validateer, re_init_parentchain_needed) = - match integritee_rpc_api.primary_worker_for_shard(shard, None).unwrap() { - Some(primary_enclave) => match primary_enclave.instance_signer() { - AnySigner::Known(MultiSigner::Ed25519(primary)) => - if primary.encode() == tee_accountid.encode() { - println!("We are primary worker on this shard and we have been previously running."); - (true, false) - } else { - println!( - "We are NOT primary worker. The primary worker is {}.", - primary.to_ss58check(), - ); - info!("The primary worker enclave is {:?}", primary_enclave); - if enclave - .get_shard_creation_info(shard) - .unwrap() - .for_parentchain(ParentchainId::Integritee) - .is_none() - { - //obtain provisioning from last active worker as this hasn't been done before - info!("my state doesn't know the creation header of the shard. will request provisioning"); - sync_state::sync_state::<_, _, WorkerModeProvider>( - &integritee_rpc_api, - &shard, - enclave.as_ref(), - skip_ra, - ); - } - (false, true) - }, - _ => { - panic!( - "the primary worker for shard {:?} has unknown signer type: {:?}", - shard, primary_enclave - ); - } - }, - None => - if WorkerModeProvider::worker_mode() != WorkerMode::Teeracle { - println!("We are the primary worker on this shard and the shard is untouched. 
Will initialize it"); - enclave.init_shard(shard.encode()).unwrap(); - enclave - .init_shard_creation_parentchain_header( - shard, - &ParentchainId::Integritee, - ®ister_enclave_xt_header, - ) - .unwrap(); - debug!("shard config should be initialized on integritee network now"); - (true, true) - } else { - (true, false) - }, - }; - debug!("getting shard creation: {:?}", enclave.get_shard_creation_info(shard)); - initialization_handler.registered_on_parentchain(); - - let (integritee_parentchain_handler, integritee_last_synced_header_at_last_run) = - if re_init_parentchain_needed { - // re-initialize integritee parentchain to make sure to use creation_header for fast-sync or the provisioned light client state - init_parentchain( - &enclave, - &integritee_rpc_api, - &tee_accountid, - ParentchainId::Integritee, - shard, - ) - } else { - (integritee_parentchain_handler, integritee_last_synced_header_at_last_run) - }; - - // some of the following threads need to be shut down gracefully. - let shutdown_flag = Arc::new(AtomicBool::new(false)); - let mut sensitive_threads: Vec> = Vec::new(); - - match WorkerModeProvider::worker_mode() { - WorkerMode::Teeracle => { - // ------------------------------------------------------------------------ - // initialize teeracle interval - #[cfg(feature = "teeracle")] - schedule_periodic_reregistration_thread( - send_register_xt, - run_config.reregister_teeracle_interval(), - ); - - #[cfg(feature = "teeracle")] - start_periodic_market_update( - &integritee_rpc_api, - run_config.teeracle_update_interval(), - enclave.clone(), - &tokio_handle, - ); - } - WorkerMode::OffChainWorker => { - println!("[Integritee:OCW] Finished initializing light client, syncing parentchain..."); - - // Syncing all parentchain blocks, this might take a while.. 
- let last_synced_header = integritee_parentchain_handler - .sync_parentchain_until_latest_finalized( - integritee_last_synced_header_at_last_run, - *shard, - true, - ) - .unwrap(); - - let handle = start_parentchain_header_subscription_thread( - shutdown_flag.clone(), - integritee_parentchain_handler, - last_synced_header, - *shard, - ); - sensitive_threads.push(handle); - - info!("skipping shard vault check because not yet supported for offchain worker"); - } - WorkerMode::Sidechain => { - println!("[Integritee:SCV] Finished initializing light client, syncing integritee parentchain..."); - - let last_synced_header = if we_are_primary_validateer { - info!("We're the first validateer to be registered, syncing parentchain blocks until the one we have registered ourselves on."); - integritee_parentchain_handler - .await_sync_and_import_parentchain_until_at_least( - &integritee_last_synced_header_at_last_run, - ®ister_enclave_xt_header, - *shard, - ) - .unwrap() - } else { - integritee_last_synced_header_at_last_run - }; - - let handle = start_parentchain_header_subscription_thread( - shutdown_flag.clone(), - integritee_parentchain_handler, - last_synced_header, - *shard, - ); - sensitive_threads.push(handle); - - spawn_worker_for_shard_polling( - shard, - integritee_rpc_api.clone(), - initialization_handler, - ); - } - } - - let maybe_target_a_rpc_api = if let Some(url) = config.target_a_parentchain_rpc_endpoint() { - println!("Initializing parentchain TargetA with url: {}", url); - let api = ita_parentchain_interface::target_a::api_factory::TargetANodeApiFactory::new( - url, - AccountKeyring::Alice.pair(), - ) - .create_api() - .unwrap_or_else(|_| panic!("[TargetA] Failed to create parentchain node API")); - let mut handles = init_target_parentchain( - &enclave, - &tee_accountid, - api.clone(), - shard, - ParentchainId::TargetA, - is_development_mode, - shutdown_flag.clone(), - ); - sensitive_threads.append(&mut handles); - Some(api) - } else { - None - }; - - let maybe_target_b_rpc_api = if let Some(url) = config.target_b_parentchain_rpc_endpoint() { - println!("Initializing parentchain TargetB with url: {}", url); - let api = ita_parentchain_interface::target_b::api_factory::TargetBNodeApiFactory::new( - url, - AccountKeyring::Alice.pair(), - ) - .create_api() - .unwrap_or_else(|_| panic!("[TargetB] Failed to create parentchain node API")); - let mut handles = init_target_parentchain( - &enclave, - &tee_accountid, - api.clone(), - shard, - ParentchainId::TargetB, - is_development_mode, - shutdown_flag.clone(), - ); - sensitive_threads.append(&mut handles); - Some(api) - } else { - None - }; - - if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { - init_provided_shard_vault( - shard, - &enclave, - integritee_rpc_api.clone(), - maybe_target_a_rpc_api.clone(), - maybe_target_b_rpc_api.clone(), - run_config.shielding_target, - we_are_primary_validateer, - ); - } - - // ------------------------------------------------------------------------ - // Start prometheus metrics server. 
- if config.enable_metrics_server() { - let metrics_server_port = config - .try_parse_metrics_server_port() - .expect("metrics server port to be a valid port number"); - start_prometheus_metrics_server( - &enclave, - &tee_accountid, - shard, - integritee_rpc_api.clone(), - maybe_target_a_rpc_api.clone(), - maybe_target_b_rpc_api.clone(), - run_config.shielding_target, - &tokio_handle, - metrics_server_port, - ); - } - - if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { - println!("[Integritee:SCV] starting block production"); - let mut handles = sidechain_init_block_production( - enclave.clone(), - sidechain_storage, - shutdown_flag.clone(), - ) - .unwrap(); - sensitive_threads.append(&mut handles); - } - - ita_parentchain_interface::event_subscriber::subscribe_to_parentchain_events( - &integritee_rpc_api, - ParentchainId::Integritee, - shutdown_flag.clone(), - ); - println!( - "[!] waiting for {} sensitive threads to shut down gracefully", - sensitive_threads.len() - ); - // Join each thread to ensure they have completed - for handle in sensitive_threads { - handle.join().expect("Thread panicked"); - } - println!("[!] All threads stopped gracefully."); + let run_config = config.run_config().clone().expect("Run config missing"); + let skip_ra = run_config.skip_ra(); + + #[cfg(feature = "teeracle")] + let flavor_str = "teeracle"; + #[cfg(feature = "sidechain")] + let flavor_str = "sidechain"; + #[cfg(feature = "offchain-worker")] + let flavor_str = "offchain-worker"; + #[cfg(not(any(feature = "offchain-worker", feature = "sidechain", feature = "teeracle")))] + let flavor_str = "offchain-worker"; + + println!("Integritee Worker for {} v{}", flavor_str, VERSION); + + #[cfg(feature = "dcap")] + println!(" DCAP is enabled"); + #[cfg(not(feature = "dcap"))] + println!(" DCAP is disabled"); + #[cfg(feature = "production")] + println!(" Production Mode is enabled"); + #[cfg(not(feature = "production"))] + println!(" Production Mode is disabled"); + #[cfg(feature = "evm")] + println!(" EVM is enabled"); + #[cfg(not(feature = "evm"))] + println!(" EVM is disabled"); + + info!("starting worker on shard {}", shard.encode().to_base58()); + // ------------------------------------------------------------------------ + // check for required files + if !skip_ra { + #[cfg(not(feature = "dcap"))] + check_files(); + } + // ------------------------------------------------------------------------ + // initialize the enclave + let mrenclave = enclave.get_fingerprint().unwrap(); + println!("MRENCLAVE={}", mrenclave.0.to_base58()); + println!("MRENCLAVE in hex {:?}", hex::encode(mrenclave)); + set_static_metrics(VERSION, mrenclave.0.to_base58().as_str()); + // ------------------------------------------------------------------------ + // let new workers call us for key provisioning + println!("MU-RA server listening on {}", config.mu_ra_url()); + let is_development_mode = run_config.dev(); + let ra_url = config.mu_ra_url(); + let enclave_api_key_prov = enclave.clone(); + thread::spawn(move || { + enclave_run_state_provisioning_server( + enclave_api_key_prov.as_ref(), + sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, + quoting_enclave_target_info.as_ref(), + quote_size.as_ref(), + &ra_url, + skip_ra, + ); + info!("State provisioning server stopped."); + }); + + let tokio_handle = tokio_handle_getter.get_handle(); + + // ------------------------------------------------------------------------ + // Get the public key of our TEE. 
+ let tee_accountid = enclave_account(enclave.as_ref()); + println!("Enclave account {:} ", &tee_accountid.to_ss58check()); + + // ------------------------------------------------------------------------ + // Start `is_initialized` server. + let untrusted_http_server_port = config + .try_parse_untrusted_http_server_port() + .expect("untrusted http server port to be a valid port number"); + let initialization_handler_clone = initialization_handler.clone(); + tokio_handle.spawn(async move { + if let Err(e) = + start_is_initialized_server(initialization_handler_clone, untrusted_http_server_port) + .await + { + error!("Unexpected error in `is_initialized` server: {:?}", e); + } + }); + + // ------------------------------------------------------------------------ + // Start trusted worker rpc server + if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain + || WorkerModeProvider::worker_mode() == WorkerMode::OffChainWorker + { + let direct_invocation_server_addr = config.trusted_worker_url_internal(); + let enclave_for_direct_invocation = enclave.clone(); + thread::spawn(move || { + println!( + "[+] Trusted RPC direct invocation server listening on {}", + direct_invocation_server_addr + ); + enclave_for_direct_invocation + .init_direct_invocation_server(direct_invocation_server_addr) + .unwrap(); + println!("[+] RPC direct invocation server shut down"); + }); + } + + // ------------------------------------------------------------------------ + // Start untrusted worker rpc server. + // i.e move sidechain block importing to trusted worker. + if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { + sidechain_start_untrusted_rpc_server(&config, sidechain_storage.clone(), &tokio_handle); + } + + // ------------------------------------------------------------------------ + // Init parentchain specific stuff. Needed early for parentchain communication. + let (integritee_parentchain_handler, integritee_last_synced_header_at_last_run) = + init_parentchain( + &enclave, + &integritee_rpc_api, + &tee_accountid, + ParentchainId::Integritee, + shard, + ); + + #[cfg(feature = "dcap")] + register_collateral( + &integritee_rpc_api, + &*enclave, + &tee_accountid, + is_development_mode, + skip_ra, + ); + + let trusted_url = config.trusted_worker_url_external(); + + #[cfg(feature = "attesteer")] + fetch_marblerun_events_every_hour( + integritee_rpc_api.clone(), + enclave.clone(), + tee_accountid.clone(), + is_development_mode, + trusted_url.clone(), + run_config.marblerun_base_url().to_string(), + ); + + // ------------------------------------------------------------------------ + // Perform a remote attestation and get an unchecked extrinsic back. + + if skip_ra { + println!( + "[!] skipping remote attestation. Registering enclave without attestation report." + ); + } else { + println!("[!] 
creating remote attestation report and create enclave register extrinsic."); + }; + + #[cfg(feature = "dcap")] + enclave.set_sgx_qpl_logging().expect("QPL logging setup failed"); + + let enclave2 = enclave.clone(); + #[cfg(not(feature = "dcap"))] + let register_xt = move || enclave2.generate_ias_ra_extrinsic(&trusted_url, skip_ra).unwrap(); + #[cfg(feature = "dcap")] + let register_xt = move || enclave2.generate_dcap_ra_extrinsic(&trusted_url, skip_ra).unwrap(); + + // clones because of the move + let node_api2 = integritee_rpc_api.clone(); + let tee_accountid_clone = tee_accountid.clone(); + let send_register_xt = move || { + println!("[+] Send register enclave extrinsic"); + send_integritee_extrinsic( + register_xt(), + &node_api2, + &tee_accountid_clone, + is_development_mode, + ) + }; + + let register_enclave_block_hash = + send_register_xt().expect("enclave RA registration must be successful to continue"); + + let api_register_enclave_xt_header = integritee_rpc_api + .get_header(Some(register_enclave_block_hash)) + .unwrap() + .unwrap(); + + // TODO: #1451: Fix api-client type hacks + let register_enclave_xt_header = + Header::decode(&mut api_register_enclave_xt_header.encode().as_slice()) + .expect("Can decode previously encoded header; qed"); + + println!( + "[+] Enclave registered at block number: {:?}, hash: {:?}", + register_enclave_xt_header.number(), + register_enclave_xt_header.hash() + ); + // double-check + let my_enclave = integritee_rpc_api + .enclave(&tee_accountid, None) + .unwrap() + .expect("our enclave should be registered at this point"); + trace!("verified that our enclave is registered: {:?}", my_enclave); + + let (we_are_primary_validateer, re_init_parentchain_needed) = + match integritee_rpc_api.primary_worker_for_shard(shard, None).unwrap() { + Some(primary_enclave) => match primary_enclave.instance_signer() { + AnySigner::Known(MultiSigner::Ed25519(primary)) => + if primary.encode() == tee_accountid.encode() { + println!("We are primary worker on this shard and we have been previously running."); + (true, false) + } else { + println!( + "We are NOT primary worker. The primary worker is {}.", + primary.to_ss58check(), + ); + info!("The primary worker enclave is {:?}", primary_enclave); + if enclave + .get_shard_creation_info(shard) + .unwrap() + .for_parentchain(ParentchainId::Integritee) + .is_none() + { + //obtain provisioning from last active worker as this hasn't been done before + info!("my state doesn't know the creation header of the shard. will request provisioning"); + sync_state::sync_state::<_, _, WorkerModeProvider>( + &integritee_rpc_api, + &shard, + enclave.as_ref(), + skip_ra, + ); + } + (false, true) + }, + _ => { + panic!( + "the primary worker for shard {:?} has unknown signer type: {:?}", + shard, primary_enclave + ); + }, + }, + None => + if WorkerModeProvider::worker_mode() != WorkerMode::Teeracle { + println!("We are the primary worker on this shard and the shard is untouched. 
Will initialize it"); + enclave.init_shard(shard.encode()).unwrap(); + enclave + .init_shard_creation_parentchain_header( + shard, + &ParentchainId::Integritee, + ®ister_enclave_xt_header, + ) + .unwrap(); + debug!("shard config should be initialized on integritee network now"); + (true, true) + } else { + (true, false) + }, + }; + debug!("getting shard creation: {:?}", enclave.get_shard_creation_info(shard)); + initialization_handler.registered_on_parentchain(); + + let (integritee_parentchain_handler, integritee_last_synced_header_at_last_run) = + if re_init_parentchain_needed { + // re-initialize integritee parentchain to make sure to use creation_header for fast-sync or the provisioned light client state + init_parentchain( + &enclave, + &integritee_rpc_api, + &tee_accountid, + ParentchainId::Integritee, + shard, + ) + } else { + (integritee_parentchain_handler, integritee_last_synced_header_at_last_run) + }; + + // some of the following threads need to be shut down gracefully. + let shutdown_flag = Arc::new(AtomicBool::new(false)); + let mut sensitive_threads: Vec> = Vec::new(); + + match WorkerModeProvider::worker_mode() { + WorkerMode::Teeracle => { + // ------------------------------------------------------------------------ + // initialize teeracle interval + #[cfg(feature = "teeracle")] + schedule_periodic_reregistration_thread( + send_register_xt, + run_config.reregister_teeracle_interval(), + ); + + #[cfg(feature = "teeracle")] + start_periodic_market_update( + &integritee_rpc_api, + run_config.teeracle_update_interval(), + enclave.clone(), + &tokio_handle, + ); + }, + WorkerMode::OffChainWorker => { + println!("[Integritee:OCW] Finished initializing light client, syncing parentchain..."); + + // Syncing all parentchain blocks, this might take a while.. 
+ let last_synced_header = integritee_parentchain_handler + .sync_parentchain_until_latest_finalized( + integritee_last_synced_header_at_last_run, + *shard, + true, + ) + .unwrap(); + + let handle = start_parentchain_header_subscription_thread( + shutdown_flag.clone(), + integritee_parentchain_handler, + last_synced_header, + *shard, + ); + sensitive_threads.push(handle); + + info!("skipping shard vault check because not yet supported for offchain worker"); + }, + WorkerMode::Sidechain => { + println!("[Integritee:SCV] Finished initializing light client, syncing integritee parentchain..."); + + let last_synced_header = if we_are_primary_validateer { + info!("We're the first validateer to be registered, syncing parentchain blocks until the one we have registered ourselves on."); + integritee_parentchain_handler + .await_sync_and_import_parentchain_until_at_least( + &integritee_last_synced_header_at_last_run, + ®ister_enclave_xt_header, + *shard, + ) + .unwrap() + } else { + integritee_last_synced_header_at_last_run + }; + + let handle = start_parentchain_header_subscription_thread( + shutdown_flag.clone(), + integritee_parentchain_handler, + last_synced_header, + *shard, + ); + sensitive_threads.push(handle); + + spawn_worker_for_shard_polling( + shard, + integritee_rpc_api.clone(), + initialization_handler, + ); + }, + } + + let maybe_target_a_rpc_api = if let Some(url) = config.target_a_parentchain_rpc_endpoint() { + println!("Initializing parentchain TargetA with url: {}", url); + let api = ita_parentchain_interface::target_a::api_factory::TargetANodeApiFactory::new( + url, + AccountKeyring::Alice.pair(), + ) + .create_api() + .unwrap_or_else(|_| panic!("[TargetA] Failed to create parentchain node API")); + let mut handles = init_target_parentchain( + &enclave, + &tee_accountid, + api.clone(), + shard, + ParentchainId::TargetA, + is_development_mode, + shutdown_flag.clone(), + ); + sensitive_threads.append(&mut handles); + Some(api) + } else { + None + }; + + let maybe_target_b_rpc_api = if let Some(url) = config.target_b_parentchain_rpc_endpoint() { + println!("Initializing parentchain TargetB with url: {}", url); + let api = ita_parentchain_interface::target_b::api_factory::TargetBNodeApiFactory::new( + url, + AccountKeyring::Alice.pair(), + ) + .create_api() + .unwrap_or_else(|_| panic!("[TargetB] Failed to create parentchain node API")); + let mut handles = init_target_parentchain( + &enclave, + &tee_accountid, + api.clone(), + shard, + ParentchainId::TargetB, + is_development_mode, + shutdown_flag.clone(), + ); + sensitive_threads.append(&mut handles); + Some(api) + } else { + None + }; + + if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { + init_provided_shard_vault( + shard, + &enclave, + integritee_rpc_api.clone(), + maybe_target_a_rpc_api.clone(), + maybe_target_b_rpc_api.clone(), + run_config.shielding_target, + we_are_primary_validateer, + ); + } + + // ------------------------------------------------------------------------ + // Start prometheus metrics server. 
+ if config.enable_metrics_server() { + let metrics_server_port = config + .try_parse_metrics_server_port() + .expect("metrics server port to be a valid port number"); + start_prometheus_metrics_server( + &enclave, + &tee_accountid, + shard, + integritee_rpc_api.clone(), + maybe_target_a_rpc_api.clone(), + maybe_target_b_rpc_api.clone(), + run_config.shielding_target, + &tokio_handle, + metrics_server_port, + ); + } + + if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { + println!("[Integritee:SCV] starting block production"); + let mut handles = sidechain_init_block_production( + enclave.clone(), + sidechain_storage, + shutdown_flag.clone(), + ) + .unwrap(); + sensitive_threads.append(&mut handles); + } + + ita_parentchain_interface::event_subscriber::subscribe_to_parentchain_events( + &integritee_rpc_api, + ParentchainId::Integritee, + shutdown_flag.clone(), + ); + println!( + "[!] waiting for {} sensitive threads to shut down gracefully", + sensitive_threads.len() + ); + // Join each thread to ensure they have completed + for handle in sensitive_threads { + handle.join().expect("Thread panicked"); + } + println!("[!] All threads stopped gracefully."); } fn init_provided_shard_vault( - shard: &ShardIdentifier, - enclave: &Arc, - integritee_rpc_api: IntegriteeApi, - maybe_target_a_rpc_api: Option, - maybe_target_b_rpc_api: Option, - shielding_target: Option, - we_are_primary_validateer: bool, + shard: &ShardIdentifier, + enclave: &Arc, + integritee_rpc_api: IntegriteeApi, + maybe_target_a_rpc_api: Option, + maybe_target_b_rpc_api: Option, + shielding_target: Option, + we_are_primary_validateer: bool, ) { - let shielding_target = shielding_target.unwrap_or_default(); - match shielding_target { - ParentchainId::Integritee => init_vault( - shard, - enclave, - &integritee_rpc_api, - shielding_target, - we_are_primary_validateer, - ), - ParentchainId::TargetA => init_vault( - shard, - enclave, - &maybe_target_a_rpc_api - .expect("target A must be initialized to be used as shielding target"), - shielding_target, - we_are_primary_validateer, - ), - ParentchainId::TargetB => init_vault( - shard, - enclave, - &maybe_target_b_rpc_api - .expect("target B must be initialized to be used as shielding target"), - shielding_target, - we_are_primary_validateer, - ), - }; + let shielding_target = shielding_target.unwrap_or_default(); + match shielding_target { + ParentchainId::Integritee => init_vault( + shard, + enclave, + &integritee_rpc_api, + shielding_target, + we_are_primary_validateer, + ), + ParentchainId::TargetA => init_vault( + shard, + enclave, + &maybe_target_a_rpc_api + .expect("target A must be initialized to be used as shielding target"), + shielding_target, + we_are_primary_validateer, + ), + ParentchainId::TargetB => init_vault( + shard, + enclave, + &maybe_target_b_rpc_api + .expect("target B must be initialized to be used as shielding target"), + shielding_target, + we_are_primary_validateer, + ), + }; } fn init_vault( - shard: &ShardIdentifier, - enclave: &Arc, - node_api: &Api, Client>, - shielding_target: ParentchainId, - we_are_primary_validateer: bool, + shard: &ShardIdentifier, + enclave: &Arc, + node_api: &Api, Client>, + shielding_target: ParentchainId, + we_are_primary_validateer: bool, ) where - E: EnclaveBase, - u128: From, - Tip: Copy + Default + Encode + Debug, - Client: Request, + E: EnclaveBase, + u128: From, + Tip: Copy + Default + Encode + Debug, + Client: Request, { - let funding_balance = shard_vault_initial_funds(&node_api, shielding_target).unwrap(); - 
if let Ok(shard_vault) = enclave.get_ecc_vault_pubkey(shard) { - // verify if proxy is set up on chain - let nonce = node_api.get_account_nonce(&AccountId::from(shard_vault)).unwrap(); - println!( - "[{:?}] shard vault account is already initialized in state: {} with nonce {}", - shielding_target, - shard_vault.to_ss58check(), - nonce - ); - if nonce == 0 && we_are_primary_validateer { - println!( - "[{:?}] nonce = 0 means shard vault not properly set up on chain. will retry", - shielding_target - ); - enclave.init_proxied_shard_vault(shard, &shielding_target, 0u128).unwrap(); - } - } else if we_are_primary_validateer { - println!("[{:?}] initializing proxied shard vault account now", shielding_target); - enclave - .init_proxied_shard_vault(shard, &shielding_target, funding_balance) - .unwrap(); - println!( - "[{:?}] initialized shard vault account: : {}", - shielding_target, - enclave.get_ecc_vault_pubkey(shard).unwrap().to_ss58check() - ); - } else { - panic!("no vault account has been initialized and we are not the primary worker"); - } + let funding_balance = shard_vault_initial_funds(&node_api, shielding_target).unwrap(); + if let Ok(shard_vault) = enclave.get_ecc_vault_pubkey(shard) { + // verify if proxy is set up on chain + let nonce = node_api.get_account_nonce(&AccountId::from(shard_vault)).unwrap(); + println!( + "[{:?}] shard vault account is already initialized in state: {} with nonce {}", + shielding_target, + shard_vault.to_ss58check(), + nonce + ); + if nonce == 0 && we_are_primary_validateer { + println!( + "[{:?}] nonce = 0 means shard vault not properly set up on chain. will retry", + shielding_target + ); + enclave.init_proxied_shard_vault(shard, &shielding_target, 0u128).unwrap(); + } + } else if we_are_primary_validateer { + println!("[{:?}] initializing proxied shard vault account now", shielding_target); + enclave + .init_proxied_shard_vault(shard, &shielding_target, funding_balance) + .unwrap(); + println!( + "[{:?}] initialized shard vault account: : {}", + shielding_target, + enclave.get_ecc_vault_pubkey(shard).unwrap().to_ss58check() + ); + } else { + panic!("no vault account has been initialized and we are not the primary worker"); + } } fn init_target_parentchain( - enclave: &Arc, - tee_account_id: &AccountId32, - node_api: Api, Client>, - shard: &ShardIdentifier, - parentchain_id: ParentchainId, - is_development_mode: bool, - shutdown_flag: Arc, + enclave: &Arc, + tee_account_id: &AccountId32, + node_api: Api, Client>, + shard: &ShardIdentifier, + parentchain_id: ParentchainId, + is_development_mode: bool, + shutdown_flag: Arc, ) -> Vec> where - E: EnclaveBase + Sidechain, - u128: From, - Tip: Copy + Default + Encode + Debug + Send + Sync + 'static, - Client: Request + Subscribe + Clone + Send + Sync + 'static, + E: EnclaveBase + Sidechain, + u128: From, + Tip: Copy + Default + Encode + Debug + Send + Sync + 'static, + Client: Request + Subscribe + Clone + Send + Sync + 'static, { - setup_reasonable_account_funding( - node_api.clone(), - tee_account_id, - parentchain_id, - is_development_mode, - ) - .unwrap_or_else(|e| { - panic!("[{:?}] Could not fund parentchain enclave account: {:?}", parentchain_id, e) - }); - - // we attempt to set shard creation for this parentchain in case it hasn't been done before - let api_head = node_api.get_header(node_api.get_finalized_head().unwrap()).unwrap().unwrap(); - // TODO: #1451: Fix api-client type hacks - let head = Header::decode(&mut api_head.encode().as_slice()) - .expect("Can decode previously encoded header; 
qed"); - - let (parentchain_handler, last_synched_header) = - init_parentchain(enclave, &node_api, tee_account_id, parentchain_id, shard); - - // we ignore failure - let _ = enclave.init_shard_creation_parentchain_header(shard, &parentchain_id, &head); - - let mut handles = Vec::new(); - - if WorkerModeProvider::worker_mode() != WorkerMode::Teeracle { - println!( - "[{:?}] Finished initializing light client, syncing parentchain...", - parentchain_id - ); - - // Syncing all parentchain blocks, this might take a while.. - let last_synched_header = parentchain_handler - .sync_parentchain_until_latest_finalized(last_synched_header, *shard, true) - .unwrap(); - - let handle = start_parentchain_header_subscription_thread( - shutdown_flag.clone(), - parentchain_handler.clone(), - last_synched_header, - *shard, - ); - handles.push(handle); - } - - let parentchain_init_params = parentchain_handler.parentchain_init_params.clone(); - - let node_api_clone = node_api.clone(); - thread::Builder::new() - .name(format!("{:?}_parentchain_event_subscription", parentchain_id)) - .spawn(move || { - ita_parentchain_interface::event_subscriber::subscribe_to_parentchain_events( - &node_api_clone, - parentchain_id, - shutdown_flag, - ) - }) - .unwrap(); - handles + setup_reasonable_account_funding( + node_api.clone(), + tee_account_id, + parentchain_id, + is_development_mode, + ) + .unwrap_or_else(|e| { + panic!("[{:?}] Could not fund parentchain enclave account: {:?}", parentchain_id, e) + }); + + // we attempt to set shard creation for this parentchain in case it hasn't been done before + let api_head = node_api.get_header(node_api.get_finalized_head().unwrap()).unwrap().unwrap(); + // TODO: #1451: Fix api-client type hacks + let head = Header::decode(&mut api_head.encode().as_slice()) + .expect("Can decode previously encoded header; qed"); + + let (parentchain_handler, last_synched_header) = + init_parentchain(enclave, &node_api, tee_account_id, parentchain_id, shard); + + // we ignore failure + let _ = enclave.init_shard_creation_parentchain_header(shard, &parentchain_id, &head); + + let mut handles = Vec::new(); + + if WorkerModeProvider::worker_mode() != WorkerMode::Teeracle { + println!( + "[{:?}] Finished initializing light client, syncing parentchain...", + parentchain_id + ); + + // Syncing all parentchain blocks, this might take a while.. 
+ let last_synched_header = parentchain_handler + .sync_parentchain_until_latest_finalized(last_synched_header, *shard, true) + .unwrap(); + + let handle = start_parentchain_header_subscription_thread( + shutdown_flag.clone(), + parentchain_handler.clone(), + last_synched_header, + *shard, + ); + handles.push(handle); + } + + let parentchain_init_params = parentchain_handler.parentchain_init_params.clone(); + + let node_api_clone = node_api.clone(); + thread::Builder::new() + .name(format!("{:?}_parentchain_event_subscription", parentchain_id)) + .spawn(move || { + ita_parentchain_interface::event_subscriber::subscribe_to_parentchain_events( + &node_api_clone, + parentchain_id, + shutdown_flag, + ) + }) + .unwrap(); + handles } fn init_parentchain( - enclave: &Arc, - node_api: &Api, Client>, - tee_account_id: &AccountId32, - parentchain_id: ParentchainId, - shard: &ShardIdentifier, + enclave: &Arc, + node_api: &Api, Client>, + tee_account_id: &AccountId32, + parentchain_id: ParentchainId, + shard: &ShardIdentifier, ) -> (Arc>, Header) where - E: EnclaveBase + Sidechain, - u128: From, - Tip: Copy + Default + Encode + Debug, - Client: Request + Subscribe + Clone, + E: EnclaveBase + Sidechain, + u128: From, + Tip: Copy + Default + Encode + Debug, + Client: Request + Subscribe + Clone, { - let parentchain_handler = Arc::new( - ParentchainHandler::new_with_automatic_light_client_allocation( - node_api.clone(), - enclave.clone(), - parentchain_id, - *shard, - ) - .unwrap(), - ); - let last_synced_header = parentchain_handler.init_parentchain_components().unwrap(); - println!("[{:?}] last synced parentchain block: {}", parentchain_id, last_synced_header.number); - - let nonce = node_api.get_system_account_next_index(tee_account_id.clone()).unwrap(); - info!("[{:?}] Enclave nonce = {:?}", parentchain_id, nonce); - enclave.set_nonce(nonce, parentchain_id).unwrap_or_else(|_| { - panic!("[{:?}] Could not set nonce of enclave. Returning here...", parentchain_id) - }); - - let metadata = node_api.metadata().clone(); - let runtime_spec_version = node_api.runtime_version().spec_version; - let runtime_transaction_version = node_api.runtime_version().transaction_version; - enclave - .set_node_metadata( - NodeMetadata::new(metadata, runtime_spec_version, runtime_transaction_version).encode(), - parentchain_id, - ) - .unwrap_or_else(|_| { - panic!("[{:?}] Could not set the node metadata in the enclave", parentchain_id) - }); - - (parentchain_handler, last_synced_header) + let parentchain_handler = Arc::new( + ParentchainHandler::new_with_automatic_light_client_allocation( + node_api.clone(), + enclave.clone(), + parentchain_id, + *shard, + ) + .unwrap(), + ); + let last_synced_header = parentchain_handler.init_parentchain_components().unwrap(); + println!("[{:?}] last synced parentchain block: {}", parentchain_id, last_synced_header.number); + + let nonce = node_api.get_system_account_next_index(tee_account_id.clone()).unwrap(); + info!("[{:?}] Enclave nonce = {:?}", parentchain_id, nonce); + enclave.set_nonce(nonce, parentchain_id).unwrap_or_else(|_| { + panic!("[{:?}] Could not set nonce of enclave. 
Returning here...", parentchain_id) + }); + + let metadata = node_api.metadata().clone(); + let runtime_spec_version = node_api.runtime_version().spec_version; + let runtime_transaction_version = node_api.runtime_version().transaction_version; + enclave + .set_node_metadata( + NodeMetadata::new(metadata, runtime_spec_version, runtime_transaction_version).encode(), + parentchain_id, + ) + .unwrap_or_else(|_| { + panic!("[{:?}] Could not set the node metadata in the enclave", parentchain_id) + }); + + (parentchain_handler, last_synced_header) } /// Start polling loop to wait until we have a worker for a shard registered on @@ -1025,265 +1025,265 @@ where /// considered initialized and ready for the next worker to start (in sidechain mode only). /// considered initialized and ready for the next worker to start. fn spawn_worker_for_shard_polling( - shard: &ShardIdentifier, - node_api: IntegriteeApi, - initialization_handler: Arc, + shard: &ShardIdentifier, + node_api: IntegriteeApi, + initialization_handler: Arc, ) where - InitializationHandler: TrackInitialization + Sync + Send + 'static, + InitializationHandler: TrackInitialization + Sync + Send + 'static, { - let shard_for_initialized = *shard; - thread::spawn(move || { - const POLL_INTERVAL_SECS: u64 = 2; - - loop { - info!("Polling for worker for shard ({} seconds interval)", POLL_INTERVAL_SECS); - if let Ok(Some(enclave)) = - node_api.primary_worker_for_shard(&shard_for_initialized, None) - { - // Set that the service is initialized. - initialization_handler.worker_for_shard_registered(); - println!( - "[+] Found `WorkerForShard` on parentchain state: {:?}", - enclave.instance_signer() - ); - break; - } - thread::sleep(Duration::from_secs(POLL_INTERVAL_SECS)); - } - }); + let shard_for_initialized = *shard; + thread::spawn(move || { + const POLL_INTERVAL_SECS: u64 = 2; + + loop { + info!("Polling for worker for shard ({} seconds interval)", POLL_INTERVAL_SECS); + if let Ok(Some(enclave)) = + node_api.primary_worker_for_shard(&shard_for_initialized, None) + { + // Set that the service is initialized. 
+ initialization_handler.worker_for_shard_registered(); + println!( + "[+] Found `WorkerForShard` on parentchain state: {:?}", + enclave.instance_signer() + ); + break + } + thread::sleep(Duration::from_secs(POLL_INTERVAL_SECS)); + } + }); } #[cfg(feature = "attesteer")] fn fetch_marblerun_events_every_hour( - api: IntegriteeApi, - enclave: Arc, - accountid: AccountId32, - is_development_mode: bool, - url: String, - marblerun_base_url: String, + api: IntegriteeApi, + enclave: Arc, + accountid: AccountId32, + is_development_mode: bool, + url: String, + marblerun_base_url: String, ) where - E: RemoteAttestation + Clone + Sync + Send + 'static, + E: RemoteAttestation + Clone + Sync + Send + 'static, { - let enclave = enclave.clone(); - let handle = thread::spawn(move || { - const POLL_INTERVAL_5_MINUTES_IN_SECS: u64 = 5 * 60; - loop { - info!("Polling marblerun events for quotes to register"); - register_quotes_from_marblerun( - &api, - enclave.clone(), - &accountid, - is_development_mode, - url.clone(), - &marblerun_base_url, - ); - - thread::sleep(Duration::from_secs(POLL_INTERVAL_5_MINUTES_IN_SECS)); - } - }); - - handle.join().unwrap() + let enclave = enclave.clone(); + let handle = thread::spawn(move || { + const POLL_INTERVAL_5_MINUTES_IN_SECS: u64 = 5 * 60; + loop { + info!("Polling marblerun events for quotes to register"); + register_quotes_from_marblerun( + &api, + enclave.clone(), + &accountid, + is_development_mode, + url.clone(), + &marblerun_base_url, + ); + + thread::sleep(Duration::from_secs(POLL_INTERVAL_5_MINUTES_IN_SECS)); + } + }); + + handle.join().unwrap() } #[cfg(feature = "attesteer")] fn register_quotes_from_marblerun( - api: &IntegriteeApi, - enclave: Arc, - accountid: &AccountId32, - is_development_mode: bool, - url: String, - marblerun_base_url: &str, + api: &IntegriteeApi, + enclave: Arc, + accountid: &AccountId32, + is_development_mode: bool, + url: String, + marblerun_base_url: &str, ) { - let enclave = enclave.as_ref(); - let events = crate::prometheus_metrics::fetch_marblerun_events(marblerun_base_url) - .map_err(|e| { - info!("Fetching events from Marblerun failed with: {:?}, continuing with 0 events.", e); - }) - .unwrap_or_default(); - let quotes: Vec<&[u8]> = - events.iter().map(|event| event.get_quote_without_prepended_bytes()).collect(); - - for quote in quotes { - match enclave.generate_dcap_ra_extrinsic_from_quote(url.clone(), "e) { - Ok(xt) => { - send_integritee_extrinsic(xt, api, accountid, is_development_mode); - } - Err(e) => { - error!("Extracting information from quote failed: {}", e) - } - } - } + let enclave = enclave.as_ref(); + let events = crate::prometheus_metrics::fetch_marblerun_events(marblerun_base_url) + .map_err(|e| { + info!("Fetching events from Marblerun failed with: {:?}, continuing with 0 events.", e); + }) + .unwrap_or_default(); + let quotes: Vec<&[u8]> = + events.iter().map(|event| event.get_quote_without_prepended_bytes()).collect(); + + for quote in quotes { + match enclave.generate_dcap_ra_extrinsic_from_quote(url.clone(), "e) { + Ok(xt) => { + send_integritee_extrinsic(xt, api, accountid, is_development_mode); + }, + Err(e) => { + error!("Extracting information from quote failed: {}", e) + }, + } + } } #[cfg(feature = "dcap")] fn register_collateral( - api: &IntegriteeApi, - enclave: &dyn RemoteAttestation, - accountid: &AccountId32, - is_development_mode: bool, - skip_ra: bool, + api: &IntegriteeApi, + enclave: &dyn RemoteAttestation, + accountid: &AccountId32, + is_development_mode: bool, + skip_ra: bool, ) { - //TODO 
generate_dcap_ra_quote() does not really need skip_ra, rethink how many layers skip_ra should be passed along - if !skip_ra { - let dcap_quote = enclave.generate_dcap_ra_quote(skip_ra).unwrap(); - let (fmspc, _tcb_info) = extract_tcb_info_from_raw_dcap_quote(&dcap_quote).unwrap(); - println!("[>] DCAP setup: register QE collateral"); - let uxt = enclave.generate_register_quoting_enclave_extrinsic(fmspc).unwrap(); - send_integritee_extrinsic(uxt, api, accountid, is_development_mode); - - println!("[>] DCAP setup: register TCB info"); - let uxt = enclave.generate_register_tcb_info_extrinsic(fmspc).unwrap(); - send_integritee_extrinsic(uxt, api, accountid, is_development_mode); - } + //TODO generate_dcap_ra_quote() does not really need skip_ra, rethink how many layers skip_ra should be passed along + if !skip_ra { + let dcap_quote = enclave.generate_dcap_ra_quote(skip_ra).unwrap(); + let (fmspc, _tcb_info) = extract_tcb_info_from_raw_dcap_quote(&dcap_quote).unwrap(); + println!("[>] DCAP setup: register QE collateral"); + let uxt = enclave.generate_register_quoting_enclave_extrinsic(fmspc).unwrap(); + send_integritee_extrinsic(uxt, api, accountid, is_development_mode); + + println!("[>] DCAP setup: register TCB info"); + let uxt = enclave.generate_register_tcb_info_extrinsic(fmspc).unwrap(); + send_integritee_extrinsic(uxt, api, accountid, is_development_mode); + } } fn send_integritee_extrinsic( - extrinsic: Vec, - api: &Api, Client>, - fee_payer: &AccountId32, - is_development_mode: bool, + extrinsic: Vec, + api: &Api, Client>, + fee_payer: &AccountId32, + is_development_mode: bool, ) -> ServiceResult where - u128: From, - Tip: Copy + Default + Encode + Debug + Send + Sync + 'static, - Client: Request + Subscribe + Clone + Send + Sync + 'static, + u128: From, + Tip: Copy + Default + Encode + Debug + Send + Sync + 'static, + Client: Request + Subscribe + Clone + Send + Sync + 'static, { - let timeout = Duration::from_secs(5 * 60); - let (sender, receiver) = mpsc::channel(); - let local_fee_payer = fee_payer.clone(); - let local_api = api.clone(); - // start thread which can time out - let handle = thread::spawn(move || { - let fee = crate::account_funding::estimate_fee(&local_api, extrinsic.clone()).unwrap(); - let ed = local_api.get_existential_deposit().unwrap(); - let free = local_api.get_free_balance(&local_fee_payer).unwrap(); - let missing_funds = fee.saturating_add(ed).saturating_sub(free); - info!("[Integritee] send extrinsic"); - debug!("fee: {:?}, ed: {:?}, free: {:?} => missing: {:?}", fee, ed, free, missing_funds); - trace!( + let timeout = Duration::from_secs(5 * 60); + let (sender, receiver) = mpsc::channel(); + let local_fee_payer = fee_payer.clone(); + let local_api = api.clone(); + // start thread which can time out + let handle = thread::spawn(move || { + let fee = crate::account_funding::estimate_fee(&local_api, extrinsic.clone()).unwrap(); + let ed = local_api.get_existential_deposit().unwrap(); + let free = local_api.get_free_balance(&local_fee_payer).unwrap(); + let missing_funds = fee.saturating_add(ed).saturating_sub(free); + info!("[Integritee] send extrinsic"); + debug!("fee: {:?}, ed: {:?}, free: {:?} => missing: {:?}", fee, ed, free, missing_funds); + trace!( " encoded extrinsic len: {}, payload: 0x{:}", extrinsic.len(), hex::encode(extrinsic.clone()) ); - if missing_funds > 0 { - setup_reasonable_account_funding( - local_api.clone(), - &local_fee_payer, - ParentchainId::Integritee, - is_development_mode, - ) - .unwrap() - } - - match local_api - 
.submit_and_watch_opaque_extrinsic_until(&extrinsic.into(), XtStatus::Finalized) - { - Ok(xt_report) => { - info!( + if missing_funds > 0 { + setup_reasonable_account_funding( + local_api.clone(), + &local_fee_payer, + ParentchainId::Integritee, + is_development_mode, + ) + .unwrap() + } + + match local_api + .submit_and_watch_opaque_extrinsic_until(&extrinsic.into(), XtStatus::Finalized) + { + Ok(xt_report) => { + info!( "[+] L1 extrinsic success. extrinsic hash: {:?} / status: {:?}", xt_report.extrinsic_hash, xt_report.status ); - xt_report.block_hash.ok_or(Error::Custom("no extrinsic hash returned".into())); - sender.send(xt_report.block_hash.unwrap()); - } - Err(e) => { - panic!( - "Extrinsic failed {:?} parentchain genesis: {:?}", - e, - local_api.genesis_hash() - ); - } - } - }); - // Wait for the result with a timeout - match receiver.recv_timeout(timeout) { - Ok(result) => { - println!("Task finished within timeout: {:?}", result); - Ok(result) - } - Err(_) => { - println!("Task timed out after {:?}", timeout); - panic!("Extrinsic sending timed out. shutting down."); - } - } + xt_report.block_hash.ok_or(Error::Custom("no extrinsic hash returned".into())); + sender.send(xt_report.block_hash.unwrap()); + }, + Err(e) => { + panic!( + "Extrinsic failed {:?} parentchain genesis: {:?}", + e, + local_api.genesis_hash() + ); + }, + } + }); + // Wait for the result with a timeout + match receiver.recv_timeout(timeout) { + Ok(result) => { + println!("Task finished within timeout: {:?}", result); + Ok(result) + }, + Err(_) => { + println!("Task timed out after {:?}", timeout); + panic!("Extrinsic sending timed out. shutting down."); + }, + } } fn start_parentchain_header_subscription_thread( - shutdown_flag: Arc, - parentchain_handler: Arc>, - last_synced_header: Header, - shard: ShardIdentifier, + shutdown_flag: Arc, + parentchain_handler: Arc>, + last_synced_header: Header, + shard: ShardIdentifier, ) -> thread::JoinHandle<()> where - EnclaveApi: EnclaveBase + Sidechain, - u128: From, - Tip: Copy + Default + Encode + Debug + Send + Sync + 'static, - Client: Request + Subscribe + Send + Sync + 'static, + EnclaveApi: EnclaveBase + Sidechain, + u128: From, + Tip: Copy + Default + Encode + Debug + Send + Sync + 'static, + Client: Request + Subscribe + Send + Sync + 'static, { - let parentchain_id = *parentchain_handler.parentchain_id(); - thread::Builder::new() - .name(format!("{:?}_parentchain_sync_loop", parentchain_id)) - .spawn(move || { - if let Err(e) = subscribe_to_parentchain_new_headers( - shutdown_flag, - parentchain_handler, - last_synced_header, - shard, - ) { - error!( + let parentchain_id = *parentchain_handler.parentchain_id(); + thread::Builder::new() + .name(format!("{:?}_parentchain_sync_loop", parentchain_id)) + .spawn(move || { + if let Err(e) = subscribe_to_parentchain_new_headers( + shutdown_flag, + parentchain_handler, + last_synced_header, + shard, + ) { + error!( "[{:?}] parentchain block syncing terminated with a failure: {:?}", parentchain_id, e ); - } - println!("[!] [{:?}] parentchain block syncing has terminated", parentchain_id); - }) - .unwrap() + } + println!("[!] [{:?}] parentchain block syncing has terminated", parentchain_id); + }) + .unwrap() } /// Subscribe to the node API finalized heads stream and trigger a parent chain sync /// upon receiving a new header. 
fn subscribe_to_parentchain_new_headers( - shutdown_flag: Arc, - parentchain_handler: Arc>, - mut last_synced_header: Header, - shard: ShardIdentifier, + shutdown_flag: Arc, + parentchain_handler: Arc>, + mut last_synced_header: Header, + shard: ShardIdentifier, ) -> Result<(), Error> where - EnclaveApi: EnclaveBase + Sidechain, - u128: From, - Tip: Copy + Default + Encode + Debug, - Client: Request + Subscribe, + EnclaveApi: EnclaveBase + Sidechain, + u128: From, + Tip: Copy + Default + Encode + Debug, + Client: Request + Subscribe, { - // TODO: this should be implemented by parentchain_handler directly, and not via - // exposed parentchain_api - let mut subscription = parentchain_handler - .parentchain_api() - .subscribe_finalized_heads() - .map_err(Error::ApiClient)?; - let parentchain_id = parentchain_handler.parentchain_id(); - while !shutdown_flag.load(Ordering::Relaxed) { - let new_header = subscription - .next() - .ok_or(Error::ApiSubscriptionDisconnected)? - .map_err(|e| Error::ApiClient(e.into()))?; - - info!( + // TODO: this should be implemented by parentchain_handler directly, and not via + // exposed parentchain_api + let mut subscription = parentchain_handler + .parentchain_api() + .subscribe_finalized_heads() + .map_err(Error::ApiClient)?; + let parentchain_id = parentchain_handler.parentchain_id(); + while !shutdown_flag.load(Ordering::Relaxed) { + let new_header = subscription + .next() + .ok_or(Error::ApiSubscriptionDisconnected)? + .map_err(|e| Error::ApiClient(e.into()))?; + + info!( "[{:?}] Received finalized header update ({}), syncing parent chain...", parentchain_id, new_header.number ); - last_synced_header = parentchain_handler.sync_parentchain_until_latest_finalized( - last_synced_header, - shard, - false, - )?; - } - warn!("[{:?}] parent chain block syncing has terminated", parentchain_id); - Ok(()) + last_synced_header = parentchain_handler.sync_parentchain_until_latest_finalized( + last_synced_header, + shard, + false, + )?; + } + warn!("[{:?}] parent chain block syncing has terminated", parentchain_id); + Ok(()) } /// Get the public signing key of the TEE. 
fn enclave_account(enclave_api: &E) -> AccountId32 { - let tee_public = enclave_api.get_ecc_signing_pubkey().unwrap(); - trace!("[+] Got ed25519 account of TEE = {}", tee_public.to_ss58check()); - AccountId32::from(*tee_public.as_array_ref()) + let tee_public = enclave_api.get_ecc_signing_pubkey().unwrap(); + trace!("[+] Got ed25519 account of TEE = {}", tee_public.to_ss58check()); + AccountId32::from(*tee_public.as_array_ref()) } diff --git a/service/src/ocall_bridge/component_factory.rs b/service/src/ocall_bridge/component_factory.rs index 81502c728..b8a728c7e 100644 --- a/service/src/ocall_bridge/component_factory.rs +++ b/service/src/ocall_bridge/component_factory.rs @@ -160,8 +160,7 @@ impl< PeerBlockFetcher, TokioHandle, MetricsReceiver, - > -where + > where IntegriteeRuntimeConfig: Config + 'static, TargetARuntimeConfig: Config + 'static, diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index a41f413df..668b3a1f8 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -22,110 +22,114 @@ use ipfs_api_backend_hyper::{IpfsApi, IpfsClient, TryFromUri}; use itp_utils::IpfsCid; use log::*; use std::{ - fs::File, - io::{Cursor, Write}, - str, - sync::{mpsc::channel, Arc}, + fs::File, + io::{Cursor, Write}, + str, + sync::{mpsc::channel, Arc}, }; pub struct IpfsOCall { - client: Option>, + client: Option>, } impl IpfsOCall { - pub fn new(maybe_url: Option, maybe_auth: Option) -> Self { - if let Some(url) = maybe_url { - let client = ipfs_api_backend_hyper::IpfsClient::from_str(&url).unwrap(); - let client = if let Some((user, pwd)) = maybe_auth - .and_then(|s| s.split_once(':').map(|(u, p)| (u.to_string(), p.to_string()))) - { - info!("Using IPFS node at {} with credentials ******", url); - client.with_credentials(user, pwd) - } else { - info!("Using IPFS node at {}", url); - client - }; - let version = tokio::runtime::Runtime::new().unwrap().block_on(client.version()); - match version { - Ok(v) => info!("Connected to IPFS node version: {}", v.version), - Err(e) => error!("Error getting IPFS node version: {}", e), - } - Self { client: Some(Arc::new(client)) } - } else { - info!("No IPFS URL provided, disabling IPFS."); - Self { client: None } - } - } + pub fn new(maybe_url: Option, maybe_auth: Option) -> Self { + if let Some(url) = maybe_url { + let client = ipfs_api_backend_hyper::IpfsClient::from_str(&url).unwrap(); + let client = if let Some((user, pwd)) = maybe_auth + .and_then(|s| s.split_once(':').map(|(u, p)| (u.to_string(), p.to_string()))) + { + info!("Using IPFS node at {} with credentials ******", url); + client.with_credentials(user, pwd) + } else { + info!("Using IPFS node at {}", url); + client + }; + let version = tokio::runtime::Runtime::new().unwrap().block_on(client.version()); + match version { + Ok(v) => info!("Connected to IPFS node version: {}", v.version), + Err(e) => error!("Error getting IPFS node version: {}", e), + } + Self { client: Some(Arc::new(client)) } + } else { + info!("No IPFS URL provided, disabling IPFS."); + Self { client: None } + } + } } impl IpfsBridge for IpfsOCall { - fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { - debug!(" Entering ocall_write_ipfs"); - write_to_ipfs( - self.client.as_ref().ok_or_else(|| - OCallBridgeError::IpfsError("No IPFS client configured, cannot write to IPFS".to_string()) - )?, - data, - ) - } + fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { + debug!(" Entering ocall_write_ipfs"); + 
write_to_ipfs( + self.client.as_ref().ok_or_else(|| { + OCallBridgeError::IpfsError( + "No IPFS client configured, cannot write to IPFS".to_string(), + ) + })?, + data, + ) + } - fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()> { - debug!("Entering ocall_read_ipfs"); - let client = self.client.as_ref().ok_or_else(|| - OCallBridgeError::IpfsError("No IPFS client configured, cannot read from IPFS".to_string()) - )?; - let res = read_from_ipfs(client, &cid) - .map_err(|_| OCallBridgeError::IpfsError("failed to read from IPFS".to_string()))?; - let filename = format!("{:?}", cid); - create_file(&filename, &res).map_err(OCallBridgeError::IpfsError) - } + fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()> { + debug!("Entering ocall_read_ipfs"); + let client = self.client.as_ref().ok_or_else(|| { + OCallBridgeError::IpfsError( + "No IPFS client configured, cannot read from IPFS".to_string(), + ) + })?; + let res = read_from_ipfs(client, &cid) + .map_err(|_| OCallBridgeError::IpfsError("failed to read from IPFS".to_string()))?; + let filename = format!("{:?}", cid); + create_file(&filename, &res).map_err(OCallBridgeError::IpfsError) + } } fn create_file(filename: &str, result: &[u8]) -> Result<(), String> { - match File::create(filename) { - Ok(mut f) => f - .write_all(result) - .map_or_else(|e| Err(format!("failed writing to file: {}", e)), |_| Ok(())), - Err(e) => Err(format!("failed to create file: {}", e)), - } + match File::create(filename) { + Ok(mut f) => f + .write_all(result) + .map_or_else(|e| Err(format!("failed writing to file: {}", e)), |_| Ok(())), + Err(e) => Err(format!("failed to create file: {}", e)), + } } #[tokio::main] async fn write_to_ipfs(client: &IpfsClient, data: &'static [u8]) -> OCallBridgeResult { - let datac = Cursor::new(data); - let (tx, rx) = channel(); + let datac = Cursor::new(data); + let (tx, rx) = channel(); - match client.add(datac).await { - Ok(res) => { - debug!("Result IpfsCid {}", res.hash); - tx.send(res.hash.into_bytes()).unwrap(); - } - Err(e) => { - error!("error adding file: {}", e); - return Err(OCallBridgeError::IpfsError(format!("error adding file: {}", e))); - } - } - rx.recv() - .map_err(|e| OCallBridgeError::IpfsError(format!("error receiving cid: {}", e))) - .and_then(|cid_str| { - str::from_utf8(&cid_str) - .map_err(|e| OCallBridgeError::IpfsError(format!("invalid UTF-8 in cid: {}", e))) - .and_then(|cid_utf8| { - IpfsCid::try_from(cid_utf8).map_err(|e| { - OCallBridgeError::IpfsError(format!("invalid IpfsCid: {:?}", e)) - }) - }) - }) + match client.add(datac).await { + Ok(res) => { + debug!("Result IpfsCid {}", res.hash); + tx.send(res.hash.into_bytes()).unwrap(); + }, + Err(e) => { + error!("error adding file: {}", e); + return Err(OCallBridgeError::IpfsError(format!("error adding file: {}", e))) + }, + } + rx.recv() + .map_err(|e| OCallBridgeError::IpfsError(format!("error receiving cid: {}", e))) + .and_then(|cid_str| { + str::from_utf8(&cid_str) + .map_err(|e| OCallBridgeError::IpfsError(format!("invalid UTF-8 in cid: {}", e))) + .and_then(|cid_utf8| { + IpfsCid::try_from(cid_utf8).map_err(|e| { + OCallBridgeError::IpfsError(format!("invalid IpfsCid: {:?}", e)) + }) + }) + }) } #[tokio::main] pub async fn read_from_ipfs(client: &IpfsClient, cid: &IpfsCid) -> Result, String> { - let h = format!("{:?}", cid); - debug!("Fetching content with cid {}", h); - client - .cat(&h) - .map_ok(|chunk| chunk.to_vec()) - .map_err(|e| e.to_string()) - .try_concat() - .await + let h = format!("{:?}", cid); + debug!("Fetching 
content with cid {}", h); + client + .cat(&h) + .map_ok(|chunk| chunk.to_vec()) + .map_err(|e| e.to_string()) + .try_concat() + .await } From 3144b62b5a163543aca6cf9d707100fe7105f5d2 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Sat, 20 Sep 2025 18:03:08 +0200 Subject: [PATCH 19/91] fix CI --- .github/workflows/build_and_test.yml | 2 +- cli/demo_send_relayed_note.sh | 21 ++++--------------- ...ed_note.yml => demo-send-relayed-note.yml} | 4 ++-- 3 files changed, 7 insertions(+), 20 deletions(-) rename docker/{demo-send_relayed_note.yml => demo-send-relayed-note.yml} (87%) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index ca94fd873..f80f69128 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -273,7 +273,7 @@ jobs: sgx_mode: HW - test: relayed-notes flavor_id: sidechain - demo_name: demo-send_relayed_note + demo_name: demo-send-relayed-note host: test-runner-sgx sgx_mode: HW - test: Sidechain diff --git a/cli/demo_send_relayed_note.sh b/cli/demo_send_relayed_note.sh index 176f5b4c5..7e3a21b60 100755 --- a/cli/demo_send_relayed_note.sh +++ b/cli/demo_send_relayed_note.sh @@ -1,31 +1,18 @@ #!/bin/bash -# Executes a direct call on a worker and checks the balance afterwards. +# Sends a relayed note with a direct call to the worker which is expected +# to relay it via IPFS in encrypted form # -# setup: -# run all on localhost: -# integritee-node purge-chain --dev -# integritee-node --tmp --dev -lruntime=debug -# rm light_client_db.bin -# export RUST_LOG=integritee_service=info,ita_stf=debug -# integritee-service init_shard -# integritee-service shielding-key -# integritee-service signing-key -# integritee-service run -# -# then run this script + # usage: -# demo_direct_call.sh -p -P -t +# demo-send-relayed-note.sh -p -P -i # # TEST_BALANCE_RUN is either "first" or "second" while getopts ":p:P:t:u:V:C:" opt; do case $opt in - t) - TEST=$OPTARG - ;; p) INTEGRITEE_RPC_PORT=$OPTARG ;; diff --git a/docker/demo-send_relayed_note.yml b/docker/demo-send-relayed-note.yml similarity index 87% rename from docker/demo-send_relayed_note.yml rename to docker/demo-send-relayed-note.yml index f5180f580..a2159e00d 100644 --- a/docker/demo-send_relayed_note.yml +++ b/docker/demo-send-relayed-note.yml @@ -1,5 +1,5 @@ services: - demo-direct-call: + demo-send-relayed-note: image: integritee-cli:${VERSION:-dev} devices: - "${SGX_PROVISION:-/dev/null}:/dev/sgx/provision" @@ -20,7 +20,7 @@ services: - integritee-test-network entrypoint: "/usr/local/worker-cli/demo_send_relayed_note.sh -p 9912 -u ws://integritee-node - -V wss://integritee-worker-1 -P 2011 -C /usr/local/bin/integritee-cli 2>&1" + -V wss://integritee-worker-1 -P 2011 -i http://ipfs-node:8080 -C /usr/local/bin/integritee-cli 2>&1" restart: "no" ipfs-node: image: ipfs/kubo:latest From a11e531d59c9b8b9cfbd25b2d8c322336742b3a2 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Sat, 20 Sep 2025 20:47:02 +0200 Subject: [PATCH 20/91] fix CI --- cli/demo_send_relayed_note.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cli/demo_send_relayed_note.sh b/cli/demo_send_relayed_note.sh index 7e3a21b60..a18d83f30 100755 --- a/cli/demo_send_relayed_note.sh +++ b/cli/demo_send_relayed_note.sh @@ -11,7 +11,7 @@ # TEST_BALANCE_RUN is either "first" or "second" -while getopts ":p:P:t:u:V:C:" opt; do +while getopts ":p:P:t:u:V:C:i:" opt; do case $opt in p) INTEGRITEE_RPC_PORT=$OPTARG @@ -43,6 +43,7 @@ 
INTEGRITEE_RPC_URL=${INTEGRITEE_RPC_URL:-"ws://127.0.0.1"} WORKER_1_PORT=${WORKER_1_PORT:-2000} WORKER_1_URL=${WORKER_1_URL:-"wss://127.0.0.1"} +IPFS_GATEWAY=${IPFS_GATEWAY:-"http://127.0.0.1:8080"} CLIENT_BIN=${CLIENT_BIN:-"./../bin/integritee-cli"} From affbcb358a77c70e79211114099071ab28326bcc Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Sat, 20 Sep 2025 21:14:19 +0200 Subject: [PATCH 21/91] fix CI --- cli/demo_send_relayed_note.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cli/demo_send_relayed_note.sh b/cli/demo_send_relayed_note.sh index a18d83f30..3b3d292e6 100755 --- a/cli/demo_send_relayed_note.sh +++ b/cli/demo_send_relayed_note.sh @@ -61,7 +61,7 @@ echo "Using MRENCLAVE: ${MRENCLAVE}" TCLIENT="${CLIENT} trusted --mrenclave ${MRENCLAVE} --direct" NOTE="Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum." -${TCLIENT} send-note --ipfs-proxy //Alice //Bob ${NOTE} +${TCLIENT} send-note --ipfs-proxy //Alice //Bob "${NOTE}" echo "Alice sent note to Bob:" echo $NOTE From a2ef29dabde406101d44f5ed01c9d4f8ebb891a4 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Sun, 21 Sep 2025 12:41:23 +0200 Subject: [PATCH 22/91] CI try different --- docker/demo-send-relayed-note.yml | 10 ++-------- docker/docker-compose.yml | 15 ++++++++++++++- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/docker/demo-send-relayed-note.yml b/docker/demo-send-relayed-note.yml index a2159e00d..7c4af5be6 100644 --- a/docker/demo-send-relayed-note.yml +++ b/docker/demo-send-relayed-note.yml @@ -16,20 +16,14 @@ services: condition: service_healthy integritee-worker-1-${VERSION}: condition: service_healthy + ipfs-node: + condition: service_started networks: - integritee-test-network entrypoint: "/usr/local/worker-cli/demo_send_relayed_note.sh -p 9912 -u ws://integritee-node -V wss://integritee-worker-1 -P 2011 -i http://ipfs-node:8080 -C /usr/local/bin/integritee-cli 2>&1" restart: "no" - ipfs-node: - image: ipfs/kubo:latest - ports: - - "4001:4001" # Swarm - - "5001:5001" # API - - "8080:8080" # Gateway - networks: - - integritee-test-network networks: integritee-test-network: driver: bridge \ No newline at end of file diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index e711ca400..75a2a7923 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -28,6 +28,8 @@ services: depends_on: "integritee-node-${VERSION}": condition: service_healthy + ipfs-node: + condition: service_started devices: - "${SGX_PROVISION:-/dev/null}:/dev/sgx/provision" - "${SGX_ENCLAVE:-/dev/null}:/dev/sgx/enclave" @@ -57,6 +59,8 @@ services: condition: service_healthy "integritee-worker-1-${VERSION}": condition: service_healthy + ipfs-node: + condition: service_started devices: - "${SGX_PROVISION:-/dev/null}:/dev/sgx/provision" - "${SGX_ENCLAVE:-/dev/null}:/dev/sgx/enclave" @@ -72,8 +76,17 @@ services: interval: 10s timeout: 10s retries: 25 - command: "--clean-reset --data-dir /tmp/worker2 --ws-external -M integritee-worker-2 -T wss://integritee-worker-2 -u ws://integritee-node -U ws://integritee-worker-2 -P 2012 -w 2102 -p 9912 -h 4646 run --dev ${ADDITIONAL_RUNTIME_FLAGS}" 
+ command: "--clean-reset --data-dir /tmp/worker2 --ws-external -M integritee-worker-2 -T wss://integritee-worker-2 -u ws://integritee-node -U ws://integritee-worker-2 -P 2012 -w 2102 -p 9912 -h 4646 --ipfs-api-url http://ipfs-node:5001 run --dev ${ADDITIONAL_RUNTIME_FLAGS}" restart: "no" + ipfs-node: + image: ipfs/kubo:latest + hostname: ipfs-node + ports: + - "4001:4001" # Swarm + - "5001:5001" # API + - "8080:8080" # Gateway + networks: + - integritee-test-network networks: integritee-test-network: driver: bridge From 7393e0befc47375e423544f3572946fb2b0d4b72 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Sun, 21 Sep 2025 14:26:01 +0200 Subject: [PATCH 23/91] fall back to file dump if ipfs add fails for any reason to enable recovery --- service/src/ocall_bridge/component_factory.rs | 4 +- service/src/ocall_bridge/ipfs_ocall.rs | 203 ++++++++++-------- 2 files changed, 117 insertions(+), 90 deletions(-) diff --git a/service/src/ocall_bridge/component_factory.rs b/service/src/ocall_bridge/component_factory.rs index b8a728c7e..54af48d82 100644 --- a/service/src/ocall_bridge/component_factory.rs +++ b/service/src/ocall_bridge/component_factory.rs @@ -160,7 +160,8 @@ impl< PeerBlockFetcher, TokioHandle, MetricsReceiver, - > where + > +where IntegriteeRuntimeConfig: Config + 'static, TargetARuntimeConfig: Config + 'static, @@ -203,6 +204,7 @@ impl< Arc::new(IpfsOCall::new( self.maybe_ipfs_url_and_auth.0.clone(), self.maybe_ipfs_url_and_auth.1.clone(), + self.log_dir.clone(), )) } diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index 668b3a1f8..0f2dd911f 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -22,114 +22,139 @@ use ipfs_api_backend_hyper::{IpfsApi, IpfsClient, TryFromUri}; use itp_utils::IpfsCid; use log::*; use std::{ - fs::File, - io::{Cursor, Write}, - str, - sync::{mpsc::channel, Arc}, + fs::{File, create_dir_all}, + path::Path, + io::{self, Cursor, Write}, + str, + sync::{mpsc::channel, Arc}, }; +use chrono::Local; +use std::fmt::Display; pub struct IpfsOCall { - client: Option>, + client: Option>, + log_dir: Arc, } impl IpfsOCall { - pub fn new(maybe_url: Option, maybe_auth: Option) -> Self { - if let Some(url) = maybe_url { - let client = ipfs_api_backend_hyper::IpfsClient::from_str(&url).unwrap(); - let client = if let Some((user, pwd)) = maybe_auth - .and_then(|s| s.split_once(':').map(|(u, p)| (u.to_string(), p.to_string()))) - { - info!("Using IPFS node at {} with credentials ******", url); - client.with_credentials(user, pwd) - } else { - info!("Using IPFS node at {}", url); - client - }; - let version = tokio::runtime::Runtime::new().unwrap().block_on(client.version()); - match version { - Ok(v) => info!("Connected to IPFS node version: {}", v.version), - Err(e) => error!("Error getting IPFS node version: {}", e), - } - Self { client: Some(Arc::new(client)) } - } else { - info!("No IPFS URL provided, disabling IPFS."); - Self { client: None } - } - } + pub fn new(maybe_url: Option, maybe_auth: Option, log_dir: Arc) -> Self { + if let Some(url) = maybe_url { + let client = ipfs_api_backend_hyper::IpfsClient::from_str(&url).unwrap(); + let client = if let Some((user, pwd)) = maybe_auth + .and_then(|s| s.split_once(':').map(|(u, p)| (u.to_string(), p.to_string()))) + { + info!("Using IPFS node at {} with credentials ******", url); + client.with_credentials(user, pwd) + } else { + info!("Using IPFS node at {}", url); + client + }; + let version = 
tokio::runtime::Runtime::new().unwrap().block_on(client.version()); + match version { + Ok(v) => info!("Connected to IPFS node version: {}", v.version), + Err(e) => error!("Error getting IPFS node version: {}", e), + } + Self { client: Some(Arc::new(client)), log_dir } + } else { + info!("No IPFS URL provided, disabling IPFS."); + Self { client: None, log_dir } + } + } } impl IpfsBridge for IpfsOCall { - fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { - debug!(" Entering ocall_write_ipfs"); - write_to_ipfs( - self.client.as_ref().ok_or_else(|| { - OCallBridgeError::IpfsError( - "No IPFS client configured, cannot write to IPFS".to_string(), - ) - })?, - data, - ) - } + fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { + debug!(" Entering ocall_write_ipfs"); + write_to_ipfs( + self.client.as_ref().ok_or_else(|| { + let _ = log_failing_blob_to_file(data.into(), self.log_dir.clone()); + OCallBridgeError::IpfsError( + "No IPFS client configured, cannot write to IPFS".to_string(), + ) + })?, + data, + self.log_dir.clone(), + ) + } - fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()> { - debug!("Entering ocall_read_ipfs"); - let client = self.client.as_ref().ok_or_else(|| { - OCallBridgeError::IpfsError( - "No IPFS client configured, cannot read from IPFS".to_string(), - ) - })?; - let res = read_from_ipfs(client, &cid) - .map_err(|_| OCallBridgeError::IpfsError("failed to read from IPFS".to_string()))?; - let filename = format!("{:?}", cid); - create_file(&filename, &res).map_err(OCallBridgeError::IpfsError) - } + fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()> { + debug!("Entering ocall_read_ipfs"); + let client = self.client.as_ref().ok_or_else(|| { + OCallBridgeError::IpfsError( + "No IPFS client configured, cannot read from IPFS".to_string(), + ) + })?; + let res = read_from_ipfs(client, &cid) + .map_err(|_| OCallBridgeError::IpfsError("failed to read from IPFS".to_string()))?; + let filename = format!("{:?}", cid); + create_file(&filename, &res).map_err(OCallBridgeError::IpfsError) + } } fn create_file(filename: &str, result: &[u8]) -> Result<(), String> { - match File::create(filename) { - Ok(mut f) => f - .write_all(result) - .map_or_else(|e| Err(format!("failed writing to file: {}", e)), |_| Ok(())), - Err(e) => Err(format!("failed to create file: {}", e)), - } + match File::create(filename) { + Ok(mut f) => f + .write_all(result) + .map_or_else(|e| Err(format!("failed writing to file: {}", e)), |_| Ok(())), + Err(e) => Err(format!("failed to create file: {}", e)), + } } #[tokio::main] -async fn write_to_ipfs(client: &IpfsClient, data: &'static [u8]) -> OCallBridgeResult { - let datac = Cursor::new(data); - let (tx, rx) = channel(); +async fn write_to_ipfs(client: &IpfsClient, data: &'static [u8], log_dir: Arc) -> OCallBridgeResult { + let datac = Cursor::new(data); + let (tx, rx) = channel(); - match client.add(datac).await { - Ok(res) => { - debug!("Result IpfsCid {}", res.hash); - tx.send(res.hash.into_bytes()).unwrap(); - }, - Err(e) => { - error!("error adding file: {}", e); - return Err(OCallBridgeError::IpfsError(format!("error adding file: {}", e))) - }, - } - rx.recv() - .map_err(|e| OCallBridgeError::IpfsError(format!("error receiving cid: {}", e))) - .and_then(|cid_str| { - str::from_utf8(&cid_str) - .map_err(|e| OCallBridgeError::IpfsError(format!("invalid UTF-8 in cid: {}", e))) - .and_then(|cid_utf8| { - IpfsCid::try_from(cid_utf8).map_err(|e| { - OCallBridgeError::IpfsError(format!("invalid IpfsCid: {:?}", 
e)) - }) - }) - }) + match client.add(datac).await { + Ok(res) => { + debug!("Result IpfsCid {}", res.hash); + tx.send(res.hash.into_bytes()).unwrap(); + } + Err(e) => { + error!("error adding file: {}", e); + let _ = log_failing_blob_to_file(data.into(), log_dir.clone()); + return Err(OCallBridgeError::IpfsError(format!("error adding file: {}", e))); + } + } + rx.recv() + .map_err(|e| { + let _ = log_failing_blob_to_file(data.into(), log_dir.clone()); + OCallBridgeError::IpfsError(format!("error receiving cid: {}", e)) + }) + .and_then(|cid_str| { + str::from_utf8(&cid_str) + .map_err(|e| OCallBridgeError::IpfsError(format!("invalid UTF-8 in cid: {}", e))) + .and_then(|cid_utf8| { + IpfsCid::try_from(cid_utf8).map_err(|e| { + OCallBridgeError::IpfsError(format!("invalid IpfsCid: {:?}", e)) + }) + }) + }) } #[tokio::main] pub async fn read_from_ipfs(client: &IpfsClient, cid: &IpfsCid) -> Result, String> { - let h = format!("{:?}", cid); - debug!("Fetching content with cid {}", h); - client - .cat(&h) - .map_ok(|chunk| chunk.to_vec()) - .map_err(|e| e.to_string()) - .try_concat() - .await + let h = format!("{:?}", cid); + debug!("Fetching content with cid {}", h); + client + .cat(&h) + .map_ok(|chunk| chunk.to_vec()) + .map_err(|e| e.to_string()) + .try_concat() + .await +} + +fn log_failing_blob_to_file( + blob: Vec, + log_dir: Arc, +) -> io::Result<()> { + let log_dir = log_dir.join("log-ipfs-failing-add"); + create_dir_all(&log_dir)?; + let timestamp = Local::now().format("%Y%m%d-%H%M%S-%3f").to_string(); + let cid_str = IpfsCid::from_content_bytes(&blob).map(|cid| format!("{}", cid)).unwrap_or("invalid-cid".to_string()); + let file_name = format!("ipfs-{}-{}.bin", timestamp, cid_str); + let file_path = log_dir.join(file_name); + let mut file = File::create(file_path)?; + file.write_all(&blob)?; + Ok(()) } From 41de3ec01ae6c68ea339ea5fd19bc804d3fb958a Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Sun, 21 Sep 2025 14:28:35 +0200 Subject: [PATCH 24/91] CI fix port clash --- docker/README.md | 6 +++--- docker/demo-send-relayed-note.yml | 2 +- docker/docker-compose.yml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docker/README.md b/docker/README.md index ff5834344..e2fed7f6a 100644 --- a/docker/README.md +++ b/docker/README.md @@ -69,7 +69,7 @@ FLAVOR_ID=offchain-worker docker compose -f <(envsubst < docker-compose.yml) -f Build ``` -COMPOSE_DOCKER_CLI_BUILD=1 DOCKER_BUILDKIT=1 docker compose -f <(envsubst < docker-compose.yml) -f <(envsubst < demo-direct-call.yml) build --build-arg WORKER_MODE_ARG=sidechain +COMPOSE_DOCKER_CLI_BUILD=1 DOCKER_BUILDKIT=1 docker compose -f <(envsubst < docker-compose.yml) -f <(envsubst < demo-direct-call.yml) build --build-arg WORKER_MODE_ARG=sidechain --build-arg ADDITIONAL_FEATURES_ARG=dcap ``` Run @@ -83,7 +83,7 @@ docker compose -f <(envsubst < docker-compose.yml) -f <(envsubst < demo-direct-c Build ``` -COMPOSE_DOCKER_CLI_BUILD=1 DOCKER_BUILDKIT=1 docker compose -f <(envsubst < docker-compose.yml) -f <(envsubst < demo-sidechain.yml) build --build-arg WORKER_MODE_ARG=sidechain +COMPOSE_DOCKER_CLI_BUILD=1 DOCKER_BUILDKIT=1 docker compose -f <(envsubst < docker-compose.yml) -f <(envsubst < demo-sidechain.yml) build --build-arg WORKER_MODE_ARG=sidechain --build-arg ADDITIONAL_FEATURES_ARG=dcap ``` Run @@ -127,7 +127,7 @@ hosts, not on Windows with WSL unfortunately. 
Build the docker compose setup with ``` -COMPOSE_DOCKER_CLI_BUILD=1 DOCKER_BUILDKIT=1 docker compose -f <(envsubst < docker-compose.yml) -f <(envsubst < fork-inducer.yml) -f <(envsubst < demo-sidechain.yml) build --build-arg WORKER_MODE_ARG=sidechain +COMPOSE_DOCKER_CLI_BUILD=1 DOCKER_BUILDKIT=1 docker compose -f <(envsubst < docker-compose.yml) -f <(envsubst < fork-inducer.yml) -f <(envsubst < demo-sidechain.yml) build --build-arg WORKER_MODE_ARG=sidechain --build-arg ADDITIONAL_FEATURES_ARG=dcap ``` This requires the docker BuildKit (docker version >= 18.09) and support for it in docker compose (version >= 1.25.0) diff --git a/docker/demo-send-relayed-note.yml b/docker/demo-send-relayed-note.yml index 7c4af5be6..071aeb8a8 100644 --- a/docker/demo-send-relayed-note.yml +++ b/docker/demo-send-relayed-note.yml @@ -22,7 +22,7 @@ services: - integritee-test-network entrypoint: "/usr/local/worker-cli/demo_send_relayed_note.sh -p 9912 -u ws://integritee-node - -V wss://integritee-worker-1 -P 2011 -i http://ipfs-node:8080 -C /usr/local/bin/integritee-cli 2>&1" + -V wss://integritee-worker-1 -P 2011 -i http://ipfs-node:8082 -C /usr/local/bin/integritee-cli 2>&1" restart: "no" networks: integritee-test-network: diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 75a2a7923..9ce77ab13 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -84,7 +84,7 @@ services: ports: - "4001:4001" # Swarm - "5001:5001" # API - - "8080:8080" # Gateway + - "8082:8080" # Gateway networks: - integritee-test-network networks: From 57bbdbeb02be350835cddaf0e2b87b7b363124e9 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Sun, 21 Sep 2025 14:38:30 +0200 Subject: [PATCH 25/91] clippy&fmt --- service/src/ocall_bridge/component_factory.rs | 3 +- service/src/ocall_bridge/ipfs_ocall.rs | 227 +++++++++--------- 2 files changed, 116 insertions(+), 114 deletions(-) diff --git a/service/src/ocall_bridge/component_factory.rs b/service/src/ocall_bridge/component_factory.rs index 54af48d82..758e03b08 100644 --- a/service/src/ocall_bridge/component_factory.rs +++ b/service/src/ocall_bridge/component_factory.rs @@ -160,8 +160,7 @@ impl< PeerBlockFetcher, TokioHandle, MetricsReceiver, - > -where + > where IntegriteeRuntimeConfig: Config + 'static, TargetARuntimeConfig: Config + 'static, diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index 0f2dd911f..9ca70a737 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -17,144 +17,147 @@ */ use crate::ocall_bridge::bridge_api::{IpfsBridge, OCallBridgeError, OCallBridgeResult}; +use chrono::Local; use futures::TryStreamExt; use ipfs_api_backend_hyper::{IpfsApi, IpfsClient, TryFromUri}; use itp_utils::IpfsCid; use log::*; use std::{ - fs::{File, create_dir_all}, - path::Path, - io::{self, Cursor, Write}, - str, - sync::{mpsc::channel, Arc}, + fmt::Display, + fs::{create_dir_all, File}, + io::{self, Cursor, Write}, + path::Path, + str, + sync::{mpsc::channel, Arc}, }; -use chrono::Local; -use std::fmt::Display; pub struct IpfsOCall { - client: Option>, - log_dir: Arc, + client: Option>, + log_dir: Arc, } impl IpfsOCall { - pub fn new(maybe_url: Option, maybe_auth: Option, log_dir: Arc) -> Self { - if let Some(url) = maybe_url { - let client = ipfs_api_backend_hyper::IpfsClient::from_str(&url).unwrap(); - let client = if let Some((user, pwd)) = maybe_auth - .and_then(|s| s.split_once(':').map(|(u, p)| (u.to_string(), p.to_string()))) - { - info!("Using IPFS 
node at {} with credentials ******", url); - client.with_credentials(user, pwd) - } else { - info!("Using IPFS node at {}", url); - client - }; - let version = tokio::runtime::Runtime::new().unwrap().block_on(client.version()); - match version { - Ok(v) => info!("Connected to IPFS node version: {}", v.version), - Err(e) => error!("Error getting IPFS node version: {}", e), - } - Self { client: Some(Arc::new(client)), log_dir } - } else { - info!("No IPFS URL provided, disabling IPFS."); - Self { client: None, log_dir } - } - } + pub fn new(maybe_url: Option, maybe_auth: Option, log_dir: Arc) -> Self { + if let Some(url) = maybe_url { + let client = ipfs_api_backend_hyper::IpfsClient::from_str(&url).unwrap(); + let client = if let Some((user, pwd)) = maybe_auth + .and_then(|s| s.split_once(':').map(|(u, p)| (u.to_string(), p.to_string()))) + { + info!("Using IPFS node at {} with credentials ******", url); + client.with_credentials(user, pwd) + } else { + info!("Using IPFS node at {}", url); + client + }; + let version = tokio::runtime::Runtime::new().unwrap().block_on(client.version()); + match version { + Ok(v) => info!("Connected to IPFS node version: {}", v.version), + Err(e) => error!("Error getting IPFS node version: {}", e), + } + Self { client: Some(Arc::new(client)), log_dir } + } else { + info!("No IPFS URL provided, disabling IPFS."); + Self { client: None, log_dir } + } + } } impl IpfsBridge for IpfsOCall { - fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { - debug!(" Entering ocall_write_ipfs"); - write_to_ipfs( - self.client.as_ref().ok_or_else(|| { - let _ = log_failing_blob_to_file(data.into(), self.log_dir.clone()); - OCallBridgeError::IpfsError( - "No IPFS client configured, cannot write to IPFS".to_string(), - ) - })?, - data, - self.log_dir.clone(), - ) - } + fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { + debug!(" Entering ocall_write_ipfs"); + write_to_ipfs( + self.client.as_ref().ok_or_else(|| { + let _ = log_failing_blob_to_file(data.into(), self.log_dir.clone()); + OCallBridgeError::IpfsError( + "No IPFS client configured, cannot write to IPFS".to_string(), + ) + })?, + data, + self.log_dir.clone(), + ) + } - fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()> { - debug!("Entering ocall_read_ipfs"); - let client = self.client.as_ref().ok_or_else(|| { - OCallBridgeError::IpfsError( - "No IPFS client configured, cannot read from IPFS".to_string(), - ) - })?; - let res = read_from_ipfs(client, &cid) - .map_err(|_| OCallBridgeError::IpfsError("failed to read from IPFS".to_string()))?; - let filename = format!("{:?}", cid); - create_file(&filename, &res).map_err(OCallBridgeError::IpfsError) - } + fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()> { + debug!("Entering ocall_read_ipfs"); + let client = self.client.as_ref().ok_or_else(|| { + OCallBridgeError::IpfsError( + "No IPFS client configured, cannot read from IPFS".to_string(), + ) + })?; + let res = read_from_ipfs(client, &cid) + .map_err(|_| OCallBridgeError::IpfsError("failed to read from IPFS".to_string()))?; + let filename = format!("{:?}", cid); + create_file(&filename, &res).map_err(OCallBridgeError::IpfsError) + } } fn create_file(filename: &str, result: &[u8]) -> Result<(), String> { - match File::create(filename) { - Ok(mut f) => f - .write_all(result) - .map_or_else(|e| Err(format!("failed writing to file: {}", e)), |_| Ok(())), - Err(e) => Err(format!("failed to create file: {}", e)), - } + match File::create(filename) { + Ok(mut f) => f + 
.write_all(result) + .map_or_else(|e| Err(format!("failed writing to file: {}", e)), |_| Ok(())), + Err(e) => Err(format!("failed to create file: {}", e)), + } } #[tokio::main] -async fn write_to_ipfs(client: &IpfsClient, data: &'static [u8], log_dir: Arc) -> OCallBridgeResult { - let datac = Cursor::new(data); - let (tx, rx) = channel(); +async fn write_to_ipfs( + client: &IpfsClient, + data: &'static [u8], + log_dir: Arc, +) -> OCallBridgeResult { + let datac = Cursor::new(data); + let (tx, rx) = channel(); - match client.add(datac).await { - Ok(res) => { - debug!("Result IpfsCid {}", res.hash); - tx.send(res.hash.into_bytes()).unwrap(); - } - Err(e) => { - error!("error adding file: {}", e); - let _ = log_failing_blob_to_file(data.into(), log_dir.clone()); - return Err(OCallBridgeError::IpfsError(format!("error adding file: {}", e))); - } - } - rx.recv() - .map_err(|e| { - let _ = log_failing_blob_to_file(data.into(), log_dir.clone()); - OCallBridgeError::IpfsError(format!("error receiving cid: {}", e)) - }) - .and_then(|cid_str| { - str::from_utf8(&cid_str) - .map_err(|e| OCallBridgeError::IpfsError(format!("invalid UTF-8 in cid: {}", e))) - .and_then(|cid_utf8| { - IpfsCid::try_from(cid_utf8).map_err(|e| { - OCallBridgeError::IpfsError(format!("invalid IpfsCid: {:?}", e)) - }) - }) - }) + match client.add(datac).await { + Ok(res) => { + debug!("Result IpfsCid {}", res.hash); + tx.send(res.hash.into_bytes()).unwrap(); + }, + Err(e) => { + error!("error adding file: {}", e); + let _ = log_failing_blob_to_file(data.into(), log_dir.clone()); + return Err(OCallBridgeError::IpfsError(format!("error adding file: {}", e))) + }, + } + rx.recv() + .map_err(|e| { + let _ = log_failing_blob_to_file(data.into(), log_dir.clone()); + OCallBridgeError::IpfsError(format!("error receiving cid: {}", e)) + }) + .and_then(|cid_str| { + str::from_utf8(&cid_str) + .map_err(|e| OCallBridgeError::IpfsError(format!("invalid UTF-8 in cid: {}", e))) + .and_then(|cid_utf8| { + IpfsCid::try_from(cid_utf8).map_err(|e| { + OCallBridgeError::IpfsError(format!("invalid IpfsCid: {:?}", e)) + }) + }) + }) } #[tokio::main] pub async fn read_from_ipfs(client: &IpfsClient, cid: &IpfsCid) -> Result, String> { - let h = format!("{:?}", cid); - debug!("Fetching content with cid {}", h); - client - .cat(&h) - .map_ok(|chunk| chunk.to_vec()) - .map_err(|e| e.to_string()) - .try_concat() - .await + let h = format!("{:?}", cid); + debug!("Fetching content with cid {}", h); + client + .cat(&h) + .map_ok(|chunk| chunk.to_vec()) + .map_err(|e| e.to_string()) + .try_concat() + .await } -fn log_failing_blob_to_file( - blob: Vec, - log_dir: Arc, -) -> io::Result<()> { - let log_dir = log_dir.join("log-ipfs-failing-add"); - create_dir_all(&log_dir)?; - let timestamp = Local::now().format("%Y%m%d-%H%M%S-%3f").to_string(); - let cid_str = IpfsCid::from_content_bytes(&blob).map(|cid| format!("{}", cid)).unwrap_or("invalid-cid".to_string()); - let file_name = format!("ipfs-{}-{}.bin", timestamp, cid_str); - let file_path = log_dir.join(file_name); - let mut file = File::create(file_path)?; - file.write_all(&blob)?; - Ok(()) +fn log_failing_blob_to_file(blob: Vec, log_dir: Arc) -> io::Result<()> { + let log_dir = log_dir.join("log-ipfs-failing-add"); + create_dir_all(&log_dir)?; + let timestamp = Local::now().format("%Y%m%d-%H%M%S-%3f").to_string(); + let cid_str = IpfsCid::from_content_bytes(&blob) + .map(|cid| format!("{}", cid)) + .unwrap_or_else(|_| "invalid-cid".to_string()); + let file_name = format!("ipfs-{}-{}.bin", timestamp, 
cid_str); + let file_path = log_dir.join(file_name); + let mut file = File::create(file_path)?; + file.write_all(&blob)?; + Ok(()) } From bcaeb621ee39e59b056e39c6792ff5c5ef00b309 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Sun, 21 Sep 2025 15:09:20 +0200 Subject: [PATCH 26/91] CI do not map ports to host --- docker/docker-compose.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 9ce77ab13..70e798c89 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -81,10 +81,6 @@ services: ipfs-node: image: ipfs/kubo:latest hostname: ipfs-node - ports: - - "4001:4001" # Swarm - - "5001:5001" # API - - "8082:8080" # Gateway networks: - integritee-test-network networks: From 2533627847cee4f18ae02897d64d0c0279edebef Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Sun, 21 Sep 2025 15:11:05 +0200 Subject: [PATCH 27/91] CI fix internal port --- docker/demo-send-relayed-note.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/demo-send-relayed-note.yml b/docker/demo-send-relayed-note.yml index 071aeb8a8..7c4af5be6 100644 --- a/docker/demo-send-relayed-note.yml +++ b/docker/demo-send-relayed-note.yml @@ -22,7 +22,7 @@ services: - integritee-test-network entrypoint: "/usr/local/worker-cli/demo_send_relayed_note.sh -p 9912 -u ws://integritee-node - -V wss://integritee-worker-1 -P 2011 -i http://ipfs-node:8082 -C /usr/local/bin/integritee-cli 2>&1" + -V wss://integritee-worker-1 -P 2011 -i http://ipfs-node:8080 -C /usr/local/bin/integritee-cli 2>&1" restart: "no" networks: integritee-test-network: From e3154cdb752ccecb0bdf784e4aaddfb4ff862403 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Sun, 21 Sep 2025 17:25:30 +0200 Subject: [PATCH 28/91] doc fixes --- docker/README.md | 1 + docker/demo-send-relayed-note.yml | 2 ++ docker/demo-teeracle-generic.yml | 4 ++-- docker/demo-teeracle.yml | 4 ++-- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/docker/README.md b/docker/README.md index e2fed7f6a..d82a7ea40 100644 --- a/docker/README.md +++ b/docker/README.md @@ -75,6 +75,7 @@ COMPOSE_DOCKER_CLI_BUILD=1 DOCKER_BUILDKIT=1 docker compose -f <(envsubst < dock Run ``` +export ADDITIONAL_RUNTIME_FLAGS="--skip-ra" docker compose -f <(envsubst < docker-compose.yml) -f <(envsubst < demo-direct-call.yml) up demo-direct-call --exit-code-from demo-direct-call ``` diff --git a/docker/demo-send-relayed-note.yml b/docker/demo-send-relayed-note.yml index 7c4af5be6..41a2cdffa 100644 --- a/docker/demo-send-relayed-note.yml +++ b/docker/demo-send-relayed-note.yml @@ -1,3 +1,5 @@ +# Set the `ADDITIONAL_RUNTIME_FLAGS` variable to for additional flags. +# To skip remote attestation: `export ADDITIONAL_RUNTIME_FLAGS="--skip-ra"` services: demo-send-relayed-note: image: integritee-cli:${VERSION:-dev} diff --git a/docker/demo-teeracle-generic.yml b/docker/demo-teeracle-generic.yml index 1f14489bf..2a8f3605a 100644 --- a/docker/demo-teeracle-generic.yml +++ b/docker/demo-teeracle-generic.yml @@ -3,7 +3,7 @@ # The demo is parameterized with the interval that the teeracle uses to query its sources. # Set the `TEERACLE_INTERVAL_SECONDS` variable when invoking, e.g. `TEERACLE_INTERVAL_SECONDS=4 docker compose -f docker-compose.yml -f demo-teeracle-generic.yml up --exit-code-from demo-teeracle-generic` # Set the `ADDITIONAL_RUNTIME_FLAGS` variable to for additional flags. 
-# To skip remote attestation: `export ADDITIONAL_RUNTIME_FLAG="--skip-ra"` +# To skip remote attestation: `export ADDITIONAL_RUNTIME_FLAGS="--skip-ra"` services: integritee-teeracle-worker-${VERSION}: image: integritee-worker:${VERSION:-dev} @@ -31,7 +31,7 @@ services: timeout: 10s retries: 25 command: - "--clean-reset --ws-external -M integritee-teeracle-worker -T wss://integritee-teeracle-worker + "--clean-reset --ws-external -M integritee-teeracle-worker -T wss://integritee-teeracle-worker -u ws://integritee-node -U ws://integritee-teeracle-worker -P 2011 -w 2101 -p 9912 -h 4645 run --dev ${ADDITIONAL_RUNTIME_FLAGS} --teeracle-interval ${TEERACLE_INTERVAL_SECONDS}s" restart: always diff --git a/docker/demo-teeracle.yml b/docker/demo-teeracle.yml index e0bbd2a20..75759fb50 100644 --- a/docker/demo-teeracle.yml +++ b/docker/demo-teeracle.yml @@ -5,7 +5,7 @@ # This setup requires an API key for CoinMarketCap # Add the API key to the environment variable `COINMARKETCAP_KEY`, with `export COINMARKETCAP_KEY=` # Set the `ADDITIONAL_RUNTIME_FLAGS` variable to for additional flags. -# To skip remote attestation: `export ADDITIONAL_RUNTIME_FLAG="--skip-ra"` +# To skip remote attestation: `export ADDITIONAL_RUNTIME_FLAGS="--skip-ra"` services: integritee-teeracle-worker-${VERSION}: image: integritee-worker:${VERSION:-dev} @@ -34,7 +34,7 @@ services: timeout: 10s retries: 25 command: - "--clean-reset --ws-external -M integritee-teeracle-worker -T wss://integritee-teeracle-worker + "--clean-reset --ws-external -M integritee-teeracle-worker -T wss://integritee-teeracle-worker -u ws://integritee-node -U ws://integritee-teeracle-worker -P 2011 -w 2101 -p 9912 -h 4645 run --dev ${ADDITIONAL_RUNTIME_FLAGS} --teeracle-interval ${TEERACLE_INTERVAL_SECONDS}s" restart: always From 7da5c2449725bb7b21e5d80fd7abfb2ac07d955a Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Mon, 22 Sep 2025 08:46:41 +0200 Subject: [PATCH 29/91] use stack instead of heap for ocall return value --- enclave-runtime/src/ocall/ipfs_ocall.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/enclave-runtime/src/ocall/ipfs_ocall.rs b/enclave-runtime/src/ocall/ipfs_ocall.rs index 9393e011e..59b1e6c9e 100644 --- a/enclave-runtime/src/ocall/ipfs_ocall.rs +++ b/enclave-runtime/src/ocall/ipfs_ocall.rs @@ -25,15 +25,14 @@ use log::warn; use sgx_types::{sgx_status_t, SgxResult}; impl EnclaveIpfsOCallApi for OcallApi { - fn write_ipfs(&self, encoded_state: &[u8]) -> SgxResult { + fn write_ipfs(&self, content: &[u8]) -> SgxResult { let mut rt: sgx_status_t = sgx_status_t::SGX_ERROR_UNEXPECTED; - let mut cid_buf = [0u8; 46].to_vec(); - + let mut cid_buf = [0u8; 46]; //max expected length for an encoded cid let res = unsafe { ffi::ocall_write_ipfs( &mut rt as *mut sgx_status_t, - encoded_state.as_ptr(), - encoded_state.len() as u32, + content.as_ptr(), + content.len() as u32, cid_buf.as_mut_ptr(), cid_buf.len() as u32, ) From 11d3b06915d69d089d9ee322a2c4b319a6cb23a0 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Mon, 22 Sep 2025 08:47:05 +0200 Subject: [PATCH 30/91] log filepath for local file fallback when writing to IPFS --- service/src/ocall_bridge/ipfs_ocall.rs | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index 9ca70a737..dd8d9fd0b 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -64,12 +64,12 @@ impl IpfsOCall { impl IpfsBridge for 
IpfsOCall { fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { - debug!(" Entering ocall_write_ipfs"); + debug!(" Entering ocall_write_ipfs to write {}B", data.len()); write_to_ipfs( self.client.as_ref().ok_or_else(|| { - let _ = log_failing_blob_to_file(data.into(), self.log_dir.clone()); + let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); OCallBridgeError::IpfsError( - "No IPFS client configured, cannot write to IPFS".to_string(), + format!("No IPFS client configured, cannot write to IPFS. Dumped content to local file instead: {}", dumpfile) ) })?, data, @@ -115,15 +115,14 @@ async fn write_to_ipfs( tx.send(res.hash.into_bytes()).unwrap(); }, Err(e) => { - error!("error adding file: {}", e); - let _ = log_failing_blob_to_file(data.into(), log_dir.clone()); - return Err(OCallBridgeError::IpfsError(format!("error adding file: {}", e))) + let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); + return Err(OCallBridgeError::IpfsError(format!("error adding file to IPFS: {}. Dumped content to local file instead: {}", e, dumpfile))); }, } rx.recv() .map_err(|e| { - let _ = log_failing_blob_to_file(data.into(), log_dir.clone()); - OCallBridgeError::IpfsError(format!("error receiving cid: {}", e)) + let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); + OCallBridgeError::IpfsError(format!("error receiving cid: {}. Dumped contents to local file: {}", e, dumpfile)) }) .and_then(|cid_str| { str::from_utf8(&cid_str) @@ -148,7 +147,7 @@ pub async fn read_from_ipfs(client: &IpfsClient, cid: &IpfsCid) -> Result, log_dir: Arc) -> io::Result<()> { +fn log_failing_blob_to_file(blob: Vec, log_dir: Arc) -> io::Result { let log_dir = log_dir.join("log-ipfs-failing-add"); create_dir_all(&log_dir)?; let timestamp = Local::now().format("%Y%m%d-%H%M%S-%3f").to_string(); @@ -159,5 +158,5 @@ fn log_failing_blob_to_file(blob: Vec, log_dir: Arc) -> io::Result<()> let file_path = log_dir.join(file_name); let mut file = File::create(file_path)?; file.write_all(&blob)?; - Ok(()) + Ok(file_path) } From e2fb09e5dec4bd5efacac16bc7e6e4e48ba089ce Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Mon, 22 Sep 2025 09:00:48 +0200 Subject: [PATCH 31/91] fixes --- service/src/ocall_bridge/ipfs_ocall.rs | 222 ++++++++++++------------- 1 file changed, 111 insertions(+), 111 deletions(-) diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index dd8d9fd0b..09a03acf5 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -23,140 +23,140 @@ use ipfs_api_backend_hyper::{IpfsApi, IpfsClient, TryFromUri}; use itp_utils::IpfsCid; use log::*; use std::{ - fmt::Display, - fs::{create_dir_all, File}, - io::{self, Cursor, Write}, - path::Path, - str, - sync::{mpsc::channel, Arc}, + fmt::Display, + fs::{create_dir_all, File}, + io::{self, Cursor, Write}, + path::{Path, PathBuf}, + str, + sync::{mpsc::channel, Arc}, }; pub struct IpfsOCall { - client: Option>, - log_dir: Arc, + client: Option>, + log_dir: Arc, } impl IpfsOCall { - pub fn new(maybe_url: Option, maybe_auth: Option, log_dir: Arc) -> Self { - if let Some(url) = maybe_url { - let client = ipfs_api_backend_hyper::IpfsClient::from_str(&url).unwrap(); - let client = if let Some((user, pwd)) = maybe_auth - .and_then(|s| s.split_once(':').map(|(u, p)| (u.to_string(), p.to_string()))) - { - info!("Using 
IPFS node at {} with credentials ******", url); - client.with_credentials(user, pwd) - } else { - info!("Using IPFS node at {}", url); - client - }; - let version = tokio::runtime::Runtime::new().unwrap().block_on(client.version()); - match version { - Ok(v) => info!("Connected to IPFS node version: {}", v.version), - Err(e) => error!("Error getting IPFS node version: {}", e), - } - Self { client: Some(Arc::new(client)), log_dir } - } else { - info!("No IPFS URL provided, disabling IPFS."); - Self { client: None, log_dir } - } - } + pub fn new(maybe_url: Option, maybe_auth: Option, log_dir: Arc) -> Self { + if let Some(url) = maybe_url { + let client = ipfs_api_backend_hyper::IpfsClient::from_str(&url).unwrap(); + let client = if let Some((user, pwd)) = maybe_auth + .and_then(|s| s.split_once(':').map(|(u, p)| (u.to_string(), p.to_string()))) + { + info!("Using IPFS node at {} with credentials ******", url); + client.with_credentials(user, pwd) + } else { + info!("Using IPFS node at {}", url); + client + }; + let version = tokio::runtime::Runtime::new().unwrap().block_on(client.version()); + match version { + Ok(v) => info!("Connected to IPFS node version: {}", v.version), + Err(e) => error!("Error getting IPFS node version: {}", e), + } + Self { client: Some(Arc::new(client)), log_dir } + } else { + info!("No IPFS URL provided, disabling IPFS."); + Self { client: None, log_dir } + } + } } impl IpfsBridge for IpfsOCall { - fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { - debug!(" Entering ocall_write_ipfs to write {}B", data.len()); - write_to_ipfs( - self.client.as_ref().ok_or_else(|| { - let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); - OCallBridgeError::IpfsError( - format!("No IPFS client configured, cannot write to IPFS. Dumped content to local file instead: {}", dumpfile) - ) - })?, - data, - self.log_dir.clone(), - ) - } + fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { + debug!(" Entering ocall_write_ipfs to write {}B", data.len()); + write_to_ipfs( + self.client.as_ref().ok_or_else(|| { + let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); + OCallBridgeError::IpfsError( + format!("No IPFS client configured, cannot write to IPFS. 
Dumped content to local file instead: {}", dumpfile.display()) + ) + })?, + data, + self.log_dir.clone(), + ) + } - fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()> { - debug!("Entering ocall_read_ipfs"); - let client = self.client.as_ref().ok_or_else(|| { - OCallBridgeError::IpfsError( - "No IPFS client configured, cannot read from IPFS".to_string(), - ) - })?; - let res = read_from_ipfs(client, &cid) - .map_err(|_| OCallBridgeError::IpfsError("failed to read from IPFS".to_string()))?; - let filename = format!("{:?}", cid); - create_file(&filename, &res).map_err(OCallBridgeError::IpfsError) - } + fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()> { + debug!("Entering ocall_read_ipfs"); + let client = self.client.as_ref().ok_or_else(|| { + OCallBridgeError::IpfsError( + "No IPFS client configured, cannot read from IPFS".to_string(), + ) + })?; + let res = read_from_ipfs(client, &cid) + .map_err(|_| OCallBridgeError::IpfsError("failed to read from IPFS".to_string()))?; + let filename = format!("{:?}", cid); + create_file(&filename, &res).map_err(OCallBridgeError::IpfsError) + } } fn create_file(filename: &str, result: &[u8]) -> Result<(), String> { - match File::create(filename) { - Ok(mut f) => f - .write_all(result) - .map_or_else(|e| Err(format!("failed writing to file: {}", e)), |_| Ok(())), - Err(e) => Err(format!("failed to create file: {}", e)), - } + match File::create(filename) { + Ok(mut f) => f + .write_all(result) + .map_or_else(|e| Err(format!("failed writing to file: {}", e)), |_| Ok(())), + Err(e) => Err(format!("failed to create file: {}", e)), + } } #[tokio::main] async fn write_to_ipfs( - client: &IpfsClient, - data: &'static [u8], - log_dir: Arc, + client: &IpfsClient, + data: &'static [u8], + log_dir: Arc, ) -> OCallBridgeResult { - let datac = Cursor::new(data); - let (tx, rx) = channel(); + let datac = Cursor::new(data); + let (tx, rx) = channel(); - match client.add(datac).await { - Ok(res) => { - debug!("Result IpfsCid {}", res.hash); - tx.send(res.hash.into_bytes()).unwrap(); - }, - Err(e) => { - let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); - return Err(OCallBridgeError::IpfsError(format!("error adding file to IPFS: {}. Dumped content to local file instead: {}", e, dumpfile))); - }, - } - rx.recv() - .map_err(|e| { - let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); - OCallBridgeError::IpfsError(format!("error receiving cid: {}. Dumped contents to local file: {}", e, dumpfile)) - }) - .and_then(|cid_str| { - str::from_utf8(&cid_str) - .map_err(|e| OCallBridgeError::IpfsError(format!("invalid UTF-8 in cid: {}", e))) - .and_then(|cid_utf8| { - IpfsCid::try_from(cid_utf8).map_err(|e| { - OCallBridgeError::IpfsError(format!("invalid IpfsCid: {:?}", e)) - }) - }) - }) + match client.add(datac).await { + Ok(res) => { + debug!("Result IpfsCid {}", res.hash); + tx.send(res.hash.into_bytes()).unwrap(); + } + Err(e) => { + let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); + return Err(OCallBridgeError::IpfsError(format!("error adding file to IPFS: {}. Dumped content to local file instead: {}", e, dumpfile.display()))); + } + } + rx.recv() + .map_err(|e| { + let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); + OCallBridgeError::IpfsError(format!("error receiving cid: {}. 
Dumped contents to local file: {}", e, dumpfile.display())) + }) + .and_then(|cid_str| { + str::from_utf8(&cid_str) + .map_err(|e| OCallBridgeError::IpfsError(format!("invalid UTF-8 in cid: {}", e))) + .and_then(|cid_utf8| { + IpfsCid::try_from(cid_utf8).map_err(|e| { + OCallBridgeError::IpfsError(format!("invalid IpfsCid: {:?}", e)) + }) + }) + }) } #[tokio::main] pub async fn read_from_ipfs(client: &IpfsClient, cid: &IpfsCid) -> Result, String> { - let h = format!("{:?}", cid); - debug!("Fetching content with cid {}", h); - client - .cat(&h) - .map_ok(|chunk| chunk.to_vec()) - .map_err(|e| e.to_string()) - .try_concat() - .await + let h = format!("{:?}", cid); + debug!("Fetching content with cid {}", h); + client + .cat(&h) + .map_ok(|chunk| chunk.to_vec()) + .map_err(|e| e.to_string()) + .try_concat() + .await } -fn log_failing_blob_to_file(blob: Vec, log_dir: Arc) -> io::Result { - let log_dir = log_dir.join("log-ipfs-failing-add"); - create_dir_all(&log_dir)?; - let timestamp = Local::now().format("%Y%m%d-%H%M%S-%3f").to_string(); - let cid_str = IpfsCid::from_content_bytes(&blob) - .map(|cid| format!("{}", cid)) - .unwrap_or_else(|_| "invalid-cid".to_string()); - let file_name = format!("ipfs-{}-{}.bin", timestamp, cid_str); - let file_path = log_dir.join(file_name); - let mut file = File::create(file_path)?; - file.write_all(&blob)?; - Ok(file_path) +fn log_failing_blob_to_file(blob: Vec, log_dir: Arc) -> io::Result { + let log_dir = log_dir.join("log-ipfs-failing-add"); + create_dir_all(&log_dir)?; + let timestamp = Local::now().format("%Y%m%d-%H%M%S-%3f").to_string(); + let cid_str = IpfsCid::from_content_bytes(&blob) + .map(|cid| format!("{}", cid)) + .unwrap_or_else(|_| "invalid-cid".to_string()); + let file_name = format!("ipfs-{}-{}.bin", timestamp, cid_str); + let file_path = log_dir.join(file_name); + let mut file = File::create(file_path.clone())?; + file.write_all(&blob)?; + Ok(file_path.into()) } From 88985a3131854444cd55c2f5a60a7b59e2efeadd Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Mon, 22 Sep 2025 09:22:56 +0200 Subject: [PATCH 32/91] fmt&clippy --- service/src/main_impl.rs | 2 +- service/src/ocall_bridge/ipfs_ocall.rs | 20 +++++++++++++++----- 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/service/src/main_impl.rs b/service/src/main_impl.rs index fc6a7d03e..00fd0d2e7 100644 --- a/service/src/main_impl.rs +++ b/service/src/main_impl.rs @@ -190,7 +190,7 @@ pub(crate) fn main() { config.integritee_rpc_endpoint(), AccountKeyring::Alice.pair(), )); - let enclave = Arc::new(enclave_init(&config).unwrap()); + let enclave = Arc::new(enclave_init(&config).expect("Failed to initialize enclave")); let initialization_handler = Arc::new(InitializationHandler::default()); let worker = Arc::new(EnclaveWorker::new( config.clone(), diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index 09a03acf5..176802096 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -115,14 +115,24 @@ async fn write_to_ipfs( tx.send(res.hash.into_bytes()).unwrap(); } Err(e) => { - let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); - return Err(OCallBridgeError::IpfsError(format!("error adding file to IPFS: {}. 
Dumped content to local file instead: {}", e, dumpfile.display()))); + let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) + .unwrap_or_else(|e| e.to_string().into()); + return Err(OCallBridgeError::IpfsError(format!( + "error adding file to IPFS: {}. Dumped content to local file instead: {}", + e, + dumpfile.display() + ))); } } rx.recv() .map_err(|e| { - let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); - OCallBridgeError::IpfsError(format!("error receiving cid: {}. Dumped contents to local file: {}", e, dumpfile.display())) + let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) + .unwrap_or_else(|e| e.to_string().into()); + OCallBridgeError::IpfsError(format!( + "error receiving cid: {}. Dumped contents to local file: {}", + e, + dumpfile.display() + )) }) .and_then(|cid_str| { str::from_utf8(&cid_str) @@ -158,5 +168,5 @@ fn log_failing_blob_to_file(blob: Vec, log_dir: Arc) -> io::Result Date: Mon, 22 Sep 2025 10:07:03 +0200 Subject: [PATCH 33/91] add ipfs fallback unit test --- enclave-runtime/src/test/ipfs_tests.rs | 48 ++++++++++++++++++++++++-- enclave-runtime/src/test/tests_main.rs | 6 ++-- service/src/ocall_bridge/ipfs_ocall.rs | 2 +- 3 files changed, 51 insertions(+), 5 deletions(-) diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index b1333e750..cd69ad998 100644 --- a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -20,7 +20,13 @@ use crate::ocall::OcallApi; use itp_ocall_api::EnclaveIpfsOCallApi; use itp_utils::IpfsCid; use log::*; -use std::{fs::File, io::Read, vec::Vec}; +use std::{ + fs, + io::Read, + path::{Path, PathBuf}, + string::{String, ToString}, + vec::Vec, +}; #[allow(unused)] /// this test neeeds an ipfs node running and configured with cli args. here for reference but may never be called @@ -36,7 +42,7 @@ pub fn test_ocall_read_write_ipfs() { OcallApi.read_ipfs(&returned_cid).unwrap(); let cid_str = format!("{:?}", returned_cid); - let mut f = File::open(cid_str).unwrap(); + let mut f = fs::File::open(cid_str).unwrap(); let mut content_buf = Vec::new(); f.read_to_end(&mut content_buf).unwrap(); info!("reading file {:?} of size {} bytes", f, &content_buf.len()); @@ -44,3 +50,41 @@ pub fn test_ocall_read_write_ipfs() { let file_cid = IpfsCid::from_content_bytes(&content_buf).unwrap(); assert_eq!(expected_cid, file_cid); } + +pub fn test_ocall_write_ipfs_fallback() { + info!("testing IPFS write if api is unreachable. Expected to fallback to dump local file..."); + let enc_state: Vec = vec![20; 4 * 512 * 1024]; + let expected_cid = IpfsCid::from_content_bytes(&enc_state).unwrap(); + let result = OcallApi.write_ipfs(enc_state.as_slice()); + + if result.is_ok() { + panic!("write_ipfs succeeded, but was expected to fail and fallback to local file dump. 
Did you accidentally provide an ipfs api url to the test?"); + } else { + let dumpfile = + find_first_matching_file(expected_cid.to_string()).expect("dumped file not found"); + let mut f = fs::File::open(dumpfile).unwrap(); + let mut content_buf = Vec::new(); + f.read_to_end(&mut content_buf).unwrap(); + info!("reading file {:?} of size {} bytes", f, &content_buf.len()); + let file_cid = IpfsCid::from_content_bytes(&content_buf).unwrap(); + assert_eq!(expected_cid, file_cid); + return + } +} + +fn find_first_matching_file(cid_str: String) -> Option { + let dir = Path::new("log-ipfs-failing-add"); + let prefix = "ipfs-"; + let suffix = format!("-{}.bin", cid_str); + + for entry in fs::read_dir(dir).ok()? { + let entry = entry.ok()?; + let file_name = entry.file_name(); + debug!("Checking file: {:?}", file_name); + let file_name = file_name.to_string_lossy(); + if file_name.starts_with(prefix) && file_name.ends_with(suffix.as_str()) { + return Some(entry.path()); + } + } + None +} diff --git a/enclave-runtime/src/test/tests_main.rs b/enclave-runtime/src/test/tests_main.rs index 59032cc46..8b5283565 100644 --- a/enclave-runtime/src/test/tests_main.rs +++ b/enclave-runtime/src/test/tests_main.rs @@ -163,7 +163,8 @@ pub extern "C" fn test_main_entrance() -> size_t { itc_parentchain::light_client::io::sgx_tests::sealing_creates_backup, // this test needs an ipfs node running.. - // crate::test::ipfs_tests::test_ocall_read_write_ipfs, + //crate::test::ipfs_tests::test_ocall_read_write_ipfs, + crate::test::ipfs_tests::test_ocall_write_ipfs_fallback, // Teeracle tests run_teeracle_tests, @@ -426,7 +427,8 @@ fn test_create_state_diff() { assert_eq!( sender_acc_info.data.free, ita_stf::test_genesis::ENDOWED_ACC_FUNDS - - TX_AMOUNT - 1_000_000_000_000 / ita_stf::STF_TX_FEE_UNIT_DIVIDER + - TX_AMOUNT + - 1_000_000_000_000 / ita_stf::STF_TX_FEE_UNIT_DIVIDER ); } diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index 176802096..a54620d3f 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -129,7 +129,7 @@ async fn write_to_ipfs( let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) .unwrap_or_else(|e| e.to_string().into()); OCallBridgeError::IpfsError(format!( - "error receiving cid: {}. Dumped contents to local file: {}", + "error receiving cid: {}. 
Dumped content to local file instead: {}", e, dumpfile.display() )) From c890845d435130577287cb3b342cd4a07644ab0a Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Mon, 22 Sep 2025 10:07:33 +0200 Subject: [PATCH 34/91] fmt --- enclave-runtime/src/test/ipfs_tests.rs | 2 +- enclave-runtime/src/test/tests_main.rs | 3 +- service/src/ocall_bridge/ipfs_ocall.rs | 222 ++++++++++++------------- 3 files changed, 113 insertions(+), 114 deletions(-) diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index cd69ad998..e9cccc5eb 100644 --- a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -83,7 +83,7 @@ fn find_first_matching_file(cid_str: String) -> Option { debug!("Checking file: {:?}", file_name); let file_name = file_name.to_string_lossy(); if file_name.starts_with(prefix) && file_name.ends_with(suffix.as_str()) { - return Some(entry.path()); + return Some(entry.path()) } } None diff --git a/enclave-runtime/src/test/tests_main.rs b/enclave-runtime/src/test/tests_main.rs index 8b5283565..948f1c2dd 100644 --- a/enclave-runtime/src/test/tests_main.rs +++ b/enclave-runtime/src/test/tests_main.rs @@ -427,8 +427,7 @@ fn test_create_state_diff() { assert_eq!( sender_acc_info.data.free, ita_stf::test_genesis::ENDOWED_ACC_FUNDS - - TX_AMOUNT - - 1_000_000_000_000 / ita_stf::STF_TX_FEE_UNIT_DIVIDER + - TX_AMOUNT - 1_000_000_000_000 / ita_stf::STF_TX_FEE_UNIT_DIVIDER ); } diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index a54620d3f..596cc0b5d 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -23,49 +23,49 @@ use ipfs_api_backend_hyper::{IpfsApi, IpfsClient, TryFromUri}; use itp_utils::IpfsCid; use log::*; use std::{ - fmt::Display, - fs::{create_dir_all, File}, - io::{self, Cursor, Write}, - path::{Path, PathBuf}, - str, - sync::{mpsc::channel, Arc}, + fmt::Display, + fs::{create_dir_all, File}, + io::{self, Cursor, Write}, + path::{Path, PathBuf}, + str, + sync::{mpsc::channel, Arc}, }; pub struct IpfsOCall { - client: Option>, - log_dir: Arc, + client: Option>, + log_dir: Arc, } impl IpfsOCall { - pub fn new(maybe_url: Option, maybe_auth: Option, log_dir: Arc) -> Self { - if let Some(url) = maybe_url { - let client = ipfs_api_backend_hyper::IpfsClient::from_str(&url).unwrap(); - let client = if let Some((user, pwd)) = maybe_auth - .and_then(|s| s.split_once(':').map(|(u, p)| (u.to_string(), p.to_string()))) - { - info!("Using IPFS node at {} with credentials ******", url); - client.with_credentials(user, pwd) - } else { - info!("Using IPFS node at {}", url); - client - }; - let version = tokio::runtime::Runtime::new().unwrap().block_on(client.version()); - match version { - Ok(v) => info!("Connected to IPFS node version: {}", v.version), - Err(e) => error!("Error getting IPFS node version: {}", e), - } - Self { client: Some(Arc::new(client)), log_dir } - } else { - info!("No IPFS URL provided, disabling IPFS."); - Self { client: None, log_dir } - } - } + pub fn new(maybe_url: Option, maybe_auth: Option, log_dir: Arc) -> Self { + if let Some(url) = maybe_url { + let client = ipfs_api_backend_hyper::IpfsClient::from_str(&url).unwrap(); + let client = if let Some((user, pwd)) = maybe_auth + .and_then(|s| s.split_once(':').map(|(u, p)| (u.to_string(), p.to_string()))) + { + info!("Using IPFS node at {} with credentials ******", url); + client.with_credentials(user, pwd) + } else { + info!("Using IPFS node at {}", url); + client + 
}; + let version = tokio::runtime::Runtime::new().unwrap().block_on(client.version()); + match version { + Ok(v) => info!("Connected to IPFS node version: {}", v.version), + Err(e) => error!("Error getting IPFS node version: {}", e), + } + Self { client: Some(Arc::new(client)), log_dir } + } else { + info!("No IPFS URL provided, disabling IPFS."); + Self { client: None, log_dir } + } + } } impl IpfsBridge for IpfsOCall { - fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { - debug!(" Entering ocall_write_ipfs to write {}B", data.len()); - write_to_ipfs( + fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { + debug!(" Entering ocall_write_ipfs to write {}B", data.len()); + write_to_ipfs( self.client.as_ref().ok_or_else(|| { let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); OCallBridgeError::IpfsError( @@ -75,98 +75,98 @@ impl IpfsBridge for IpfsOCall { data, self.log_dir.clone(), ) - } + } - fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()> { - debug!("Entering ocall_read_ipfs"); - let client = self.client.as_ref().ok_or_else(|| { - OCallBridgeError::IpfsError( - "No IPFS client configured, cannot read from IPFS".to_string(), - ) - })?; - let res = read_from_ipfs(client, &cid) - .map_err(|_| OCallBridgeError::IpfsError("failed to read from IPFS".to_string()))?; - let filename = format!("{:?}", cid); - create_file(&filename, &res).map_err(OCallBridgeError::IpfsError) - } + fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()> { + debug!("Entering ocall_read_ipfs"); + let client = self.client.as_ref().ok_or_else(|| { + OCallBridgeError::IpfsError( + "No IPFS client configured, cannot read from IPFS".to_string(), + ) + })?; + let res = read_from_ipfs(client, &cid) + .map_err(|_| OCallBridgeError::IpfsError("failed to read from IPFS".to_string()))?; + let filename = format!("{:?}", cid); + create_file(&filename, &res).map_err(OCallBridgeError::IpfsError) + } } fn create_file(filename: &str, result: &[u8]) -> Result<(), String> { - match File::create(filename) { - Ok(mut f) => f - .write_all(result) - .map_or_else(|e| Err(format!("failed writing to file: {}", e)), |_| Ok(())), - Err(e) => Err(format!("failed to create file: {}", e)), - } + match File::create(filename) { + Ok(mut f) => f + .write_all(result) + .map_or_else(|e| Err(format!("failed writing to file: {}", e)), |_| Ok(())), + Err(e) => Err(format!("failed to create file: {}", e)), + } } #[tokio::main] async fn write_to_ipfs( - client: &IpfsClient, - data: &'static [u8], - log_dir: Arc, + client: &IpfsClient, + data: &'static [u8], + log_dir: Arc, ) -> OCallBridgeResult { - let datac = Cursor::new(data); - let (tx, rx) = channel(); + let datac = Cursor::new(data); + let (tx, rx) = channel(); - match client.add(datac).await { - Ok(res) => { - debug!("Result IpfsCid {}", res.hash); - tx.send(res.hash.into_bytes()).unwrap(); - } - Err(e) => { - let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) - .unwrap_or_else(|e| e.to_string().into()); - return Err(OCallBridgeError::IpfsError(format!( - "error adding file to IPFS: {}. Dumped content to local file instead: {}", - e, - dumpfile.display() - ))); - } - } - rx.recv() - .map_err(|e| { - let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) - .unwrap_or_else(|e| e.to_string().into()); - OCallBridgeError::IpfsError(format!( - "error receiving cid: {}. 
Dumped content to local file instead: {}", - e, - dumpfile.display() - )) - }) - .and_then(|cid_str| { - str::from_utf8(&cid_str) - .map_err(|e| OCallBridgeError::IpfsError(format!("invalid UTF-8 in cid: {}", e))) - .and_then(|cid_utf8| { - IpfsCid::try_from(cid_utf8).map_err(|e| { - OCallBridgeError::IpfsError(format!("invalid IpfsCid: {:?}", e)) - }) - }) - }) + match client.add(datac).await { + Ok(res) => { + debug!("Result IpfsCid {}", res.hash); + tx.send(res.hash.into_bytes()).unwrap(); + }, + Err(e) => { + let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) + .unwrap_or_else(|e| e.to_string().into()); + return Err(OCallBridgeError::IpfsError(format!( + "error adding file to IPFS: {}. Dumped content to local file instead: {}", + e, + dumpfile.display() + ))) + }, + } + rx.recv() + .map_err(|e| { + let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) + .unwrap_or_else(|e| e.to_string().into()); + OCallBridgeError::IpfsError(format!( + "error receiving cid: {}. Dumped content to local file instead: {}", + e, + dumpfile.display() + )) + }) + .and_then(|cid_str| { + str::from_utf8(&cid_str) + .map_err(|e| OCallBridgeError::IpfsError(format!("invalid UTF-8 in cid: {}", e))) + .and_then(|cid_utf8| { + IpfsCid::try_from(cid_utf8).map_err(|e| { + OCallBridgeError::IpfsError(format!("invalid IpfsCid: {:?}", e)) + }) + }) + }) } #[tokio::main] pub async fn read_from_ipfs(client: &IpfsClient, cid: &IpfsCid) -> Result, String> { - let h = format!("{:?}", cid); - debug!("Fetching content with cid {}", h); - client - .cat(&h) - .map_ok(|chunk| chunk.to_vec()) - .map_err(|e| e.to_string()) - .try_concat() - .await + let h = format!("{:?}", cid); + debug!("Fetching content with cid {}", h); + client + .cat(&h) + .map_ok(|chunk| chunk.to_vec()) + .map_err(|e| e.to_string()) + .try_concat() + .await } fn log_failing_blob_to_file(blob: Vec, log_dir: Arc) -> io::Result { - let log_dir = log_dir.join("log-ipfs-failing-add"); - create_dir_all(&log_dir)?; - let timestamp = Local::now().format("%Y%m%d-%H%M%S-%3f").to_string(); - let cid_str = IpfsCid::from_content_bytes(&blob) - .map(|cid| format!("{}", cid)) - .unwrap_or_else(|_| "invalid-cid".to_string()); - let file_name = format!("ipfs-{}-{}.bin", timestamp, cid_str); - let file_path = log_dir.join(file_name); - let mut file = File::create(file_path.clone())?; - file.write_all(&blob)?; - Ok(file_path) + let log_dir = log_dir.join("log-ipfs-failing-add"); + create_dir_all(&log_dir)?; + let timestamp = Local::now().format("%Y%m%d-%H%M%S-%3f").to_string(); + let cid_str = IpfsCid::from_content_bytes(&blob) + .map(|cid| format!("{}", cid)) + .unwrap_or_else(|_| "invalid-cid".to_string()); + let file_name = format!("ipfs-{}-{}.bin", timestamp, cid_str); + let file_path = log_dir.join(file_name); + let mut file = File::create(file_path.clone())?; + file.write_all(&blob)?; + Ok(file_path) } From a734b70d2a87bffdebfe9786c7c42c38ed1c70a7 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Mon, 22 Sep 2025 13:30:35 +0200 Subject: [PATCH 35/91] probe content size for OCALL --- enclave-runtime/src/test/ipfs_tests.rs | 34 ++++++++++++++------------ 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index e9cccc5eb..1707e5f9b 100644 --- a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -52,23 +52,25 @@ pub fn test_ocall_read_write_ipfs() { } pub fn test_ocall_write_ipfs_fallback() { - info!("testing 
IPFS write if api is unreachable. Expected to fallback to dump local file..."); - let enc_state: Vec = vec![20; 4 * 512 * 1024]; - let expected_cid = IpfsCid::from_content_bytes(&enc_state).unwrap(); - let result = OcallApi.write_ipfs(enc_state.as_slice()); + let payload_sizes = vec![1, 100, 1024]; + for payload_size in payload_sizes { + info!("testing IPFS write of {}kB if api is unreachable. Expected to fallback to dump local file...", payload_size); + let enc_state: Vec = vec![20; payload_size * 1024]; + let expected_cid = IpfsCid::from_content_bytes(&enc_state).unwrap(); + let result = OcallApi.write_ipfs(enc_state.as_slice()); - if result.is_ok() { - panic!("write_ipfs succeeded, but was expected to fail and fallback to local file dump. Did you accidentally provide an ipfs api url to the test?"); - } else { - let dumpfile = - find_first_matching_file(expected_cid.to_string()).expect("dumped file not found"); - let mut f = fs::File::open(dumpfile).unwrap(); - let mut content_buf = Vec::new(); - f.read_to_end(&mut content_buf).unwrap(); - info!("reading file {:?} of size {} bytes", f, &content_buf.len()); - let file_cid = IpfsCid::from_content_bytes(&content_buf).unwrap(); - assert_eq!(expected_cid, file_cid); - return + if result.is_ok() { + panic!("write_ipfs succeeded, but was expected to fail and fallback to local file dump. Did you accidentally provide an ipfs api url to the test?"); + } else { + let dumpfile = + find_first_matching_file(expected_cid.to_string()).expect("dumped file not found"); + let mut f = fs::File::open(dumpfile).unwrap(); + let mut content_buf = Vec::new(); + f.read_to_end(&mut content_buf).unwrap(); + info!("reading file {:?} of size {} bytes", f, &content_buf.len()); + let file_cid = IpfsCid::from_content_bytes(&content_buf).unwrap(); + assert_eq!(expected_cid, file_cid); + } } } From 53ec7f87bced3c31af424f99f3722aa334dcfc77 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Mon, 22 Sep 2025 13:38:17 +0200 Subject: [PATCH 36/91] better test logs --- enclave-runtime/src/test/ipfs_tests.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index 1707e5f9b..d1bc92fdb 100644 --- a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -31,7 +31,7 @@ use std::{ #[allow(unused)] /// this test neeeds an ipfs node running and configured with cli args. here for reference but may never be called pub fn test_ocall_read_write_ipfs() { - info!("testing IPFS read/write. Hopefully ipfs daemon is running..."); + println!("testing IPFS read/write. Hopefully ipfs daemon is running..."); let enc_state: Vec = vec![20; 4 * 512 * 1024]; let expected_cid = IpfsCid::from_content_bytes(&enc_state).unwrap(); @@ -54,7 +54,7 @@ pub fn test_ocall_read_write_ipfs() { pub fn test_ocall_write_ipfs_fallback() { let payload_sizes = vec![1, 100, 1024]; for payload_size in payload_sizes { - info!("testing IPFS write of {}kB if api is unreachable. Expected to fallback to dump local file...", payload_size); + println!("testing IPFS write of {}kB if api is unreachable. 
Expected to fallback to dump local file...", payload_size); let enc_state: Vec = vec![20; payload_size * 1024]; let expected_cid = IpfsCid::from_content_bytes(&enc_state).unwrap(); let result = OcallApi.write_ipfs(enc_state.as_slice()); From 8694da39a43eeedc66fbee08c592cb50d4b24c11 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Mon, 22 Sep 2025 16:19:02 +0200 Subject: [PATCH 37/91] disable cid return value to debug enclave crash --- core-primitives/utils/src/ipfs.rs | 6 ++++++ enclave-runtime/src/ocall/ipfs_ocall.rs | 8 +++++--- service/src/ocall_bridge/ffi/ipfs.rs | 9 +++++---- 3 files changed, 16 insertions(+), 7 deletions(-) diff --git a/core-primitives/utils/src/ipfs.rs b/core-primitives/utils/src/ipfs.rs index 00e48203a..0573f726a 100644 --- a/core-primitives/utils/src/ipfs.rs +++ b/core-primitives/utils/src/ipfs.rs @@ -87,6 +87,12 @@ impl Display for IpfsCid { } } +impl Default for IpfsCid { + fn default() -> Self { + IpfsCid::from_content_bytes(&Vec::new()).expect("known to work for empty vec") + } +} + #[derive(Debug, PartialEq)] pub enum IpfsError { InputCidInvalid, diff --git a/enclave-runtime/src/ocall/ipfs_ocall.rs b/enclave-runtime/src/ocall/ipfs_ocall.rs index 59b1e6c9e..3bb7b7668 100644 --- a/enclave-runtime/src/ocall/ipfs_ocall.rs +++ b/enclave-runtime/src/ocall/ipfs_ocall.rs @@ -17,7 +17,7 @@ */ use crate::ocall::{ffi, OcallApi}; use alloc::vec::Vec; -use codec::{Decode, Encode}; +use codec::Encode; use frame_support::ensure; use itp_ocall_api::EnclaveIpfsOCallApi; use itp_types::IpfsCid; @@ -40,8 +40,10 @@ impl EnclaveIpfsOCallApi for OcallApi { ensure!(rt == sgx_status_t::SGX_SUCCESS, rt); ensure!(res == sgx_status_t::SGX_SUCCESS, res); - let cid = IpfsCid::decode(&mut cid_buf.as_slice()) - .map_err(|_| sgx_status_t::SGX_ERROR_UNEXPECTED)?; + let cid = IpfsCid::default(); + // TODO: actually decode the returned cid + // cid.decode(&mut cid_buf.as_slice()) + // .map_err(|_| sgx_status_t::SGX_ERROR_UNEXPECTED)?; Ok(cid) } diff --git a/service/src/ocall_bridge/ffi/ipfs.rs b/service/src/ocall_bridge/ffi/ipfs.rs index 37be75cfb..f1135f0ae 100644 --- a/service/src/ocall_bridge/ffi/ipfs.rs +++ b/service/src/ocall_bridge/ffi/ipfs.rs @@ -52,10 +52,11 @@ fn write_ipfs( return match ipfs_api.write_to_ipfs(state) { Ok(r) => { - cid.fill(0); - let encoded = r.encode(); - let len = encoded.len().min(cid.len()); - cid[..len].copy_from_slice(&encoded[..len]); + // TODO: actually return cid + // cid.fill(0); + // let encoded = r.encode(); + // let len = encoded.len().min(cid.len()); + // cid[..len].copy_from_slice(&encoded[..len]); sgx_status_t::SGX_SUCCESS }, Err(e) => { From f6cb09761f0a5efc84d025f11ccc2b2287f47fe7 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Mon, 22 Sep 2025 17:49:16 +0200 Subject: [PATCH 38/91] add unit test for IpfsCid::default --- core-primitives/utils/src/ipfs.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/core-primitives/utils/src/ipfs.rs b/core-primitives/utils/src/ipfs.rs index 0573f726a..5a18d0bb2 100644 --- a/core-primitives/utils/src/ipfs.rs +++ b/core-primitives/utils/src/ipfs.rs @@ -150,4 +150,12 @@ mod tests { let decoded = IpfsCid::decode(&mut &encoded[..]).unwrap(); assert_eq!(decoded, expected_cid); } + + #[test] + pub fn test_default_cid_works() { + let expected_cid_str = "QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH"; + let expected_cid = IpfsCid::try_from(expected_cid_str).unwrap(); + let def = IpfsCid::default(); + assert_eq!(def, expected_cid); + } } From 544763bfea2be7ee3af6702c56c096cf9b77fab8 Mon Sep 17 
00:00:00 2001 From: Alain Brenzikofer Date: Mon, 22 Sep 2025 17:50:57 +0200 Subject: [PATCH 39/91] try to allow more threads --- enclave-runtime/Enclave.config.xml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/enclave-runtime/Enclave.config.xml b/enclave-runtime/Enclave.config.xml index b853d045a..755dfec02 100644 --- a/enclave-runtime/Enclave.config.xml +++ b/enclave-runtime/Enclave.config.xml @@ -1,12 +1,12 @@ - 0 - 0 - 0x40000 - 0x20000000 - 8 - 0 - 0 - 0 - 0xFFFFFFFF + 0 + 0 + 0x40000 + 0x20000000 + 16 + 0 + 0 + 0 + 0xFFFFFFFF From d5b65937e5b790e4d5bfa16cfa2988aa9ddae444 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Mon, 22 Sep 2025 18:05:37 +0200 Subject: [PATCH 40/91] revert more ocall stuff to be closer to master --- core-primitives/ocall-api/src/lib.rs | 4 ++-- enclave-runtime/src/ocall/ipfs_ocall.rs | 13 +++++-------- enclave-runtime/src/test/ipfs_tests.rs | 4 +++- .../src/test/mocks/propose_to_import_call_mock.rs | 8 ++++---- 4 files changed, 14 insertions(+), 15 deletions(-) diff --git a/core-primitives/ocall-api/src/lib.rs b/core-primitives/ocall-api/src/lib.rs index 74a406e76..f8591b1e4 100644 --- a/core-primitives/ocall-api/src/lib.rs +++ b/core-primitives/ocall-api/src/lib.rs @@ -143,6 +143,6 @@ pub trait EnclaveSidechainOCallApi: Clone + Send + Sync { /// trait for o-call related to IPFS pub trait EnclaveIpfsOCallApi: Clone + Send + Sync { - fn write_ipfs(&self, encoded_state: &[u8]) -> SgxResult; - fn read_ipfs(&self, cid: &IpfsCid) -> SgxResult>; + fn write_ipfs(&self, encoded_state: &[u8]) -> SgxResult>; + fn read_ipfs(&self, cid: &IpfsCid) -> SgxResult<()>; } diff --git a/enclave-runtime/src/ocall/ipfs_ocall.rs b/enclave-runtime/src/ocall/ipfs_ocall.rs index 3bb7b7668..902089f72 100644 --- a/enclave-runtime/src/ocall/ipfs_ocall.rs +++ b/enclave-runtime/src/ocall/ipfs_ocall.rs @@ -25,7 +25,7 @@ use log::warn; use sgx_types::{sgx_status_t, SgxResult}; impl EnclaveIpfsOCallApi for OcallApi { - fn write_ipfs(&self, content: &[u8]) -> SgxResult { + fn write_ipfs(&self, content: &[u8]) -> SgxResult> { let mut rt: sgx_status_t = sgx_status_t::SGX_ERROR_UNEXPECTED; let mut cid_buf = [0u8; 46]; //max expected length for an encoded cid let res = unsafe { @@ -40,14 +40,11 @@ impl EnclaveIpfsOCallApi for OcallApi { ensure!(rt == sgx_status_t::SGX_SUCCESS, rt); ensure!(res == sgx_status_t::SGX_SUCCESS, res); - let cid = IpfsCid::default(); - // TODO: actually decode the returned cid - // cid.decode(&mut cid_buf.as_slice()) - // .map_err(|_| sgx_status_t::SGX_ERROR_UNEXPECTED)?; - Ok(cid) + + Ok(cid_buf.into()) } - fn read_ipfs(&self, cid: &IpfsCid) -> SgxResult> { + fn read_ipfs(&self, cid: &IpfsCid) -> SgxResult<()> { let mut rt: sgx_status_t = sgx_status_t::SGX_ERROR_UNEXPECTED; let cid_buf = cid.encode(); let res = unsafe { @@ -61,6 +58,6 @@ impl EnclaveIpfsOCallApi for OcallApi { ensure!(rt == sgx_status_t::SGX_SUCCESS, rt); ensure!(res == sgx_status_t::SGX_SUCCESS, res); warn!("IPFS read not implemented, returning empty vec"); - Ok(vec![]) + Ok(()) } } diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index d1bc92fdb..76dcd10b8 100644 --- a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -17,6 +17,7 @@ */ use crate::ocall::OcallApi; +use codec::Decode; use itp_ocall_api::EnclaveIpfsOCallApi; use itp_utils::IpfsCid; use log::*; @@ -36,7 +37,8 @@ pub fn test_ocall_read_write_ipfs() { let expected_cid = IpfsCid::from_content_bytes(&enc_state).unwrap(); 
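The assertion that follows hinges on the CID surviving the OCALL boundary: the enclave derives a CID from the plaintext content, the untrusted side hands back the SCALE-encoded CID of whatever it actually stored, and the two must agree. A minimal sketch of that check, assuming only the IpfsCid API already used in this series (from_content_bytes, SCALE Decode, PartialEq); the helper name is illustrative and not part of the patch:

use codec::Decode;
use itp_utils::IpfsCid;

/// Illustrative helper: compare the CID derived locally from `content` with the
/// SCALE-encoded CID buffer returned by the write_ipfs OCALL.
fn cid_roundtrip_matches(content: &[u8], returned_cid_raw: &[u8]) -> bool {
	// CID computed enclave-side from the plaintext bytes.
	let expected = match IpfsCid::from_content_bytes(content) {
		Ok(cid) => cid,
		Err(_) => return false,
	};
	// CID reported by the untrusted side, decoded from the returned buffer.
	match IpfsCid::decode(&mut &returned_cid_raw[..]) {
		Ok(returned) => returned == expected,
		Err(_) => false,
	}
}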
- let returned_cid = OcallApi.write_ipfs(enc_state.as_slice()).unwrap(); + let returned_cid_raw = OcallApi.write_ipfs(enc_state.as_slice()).unwrap(); + let returned_cid = IpfsCid::decode(&mut returned_cid_raw.as_slice()).unwrap(); assert_eq!(expected_cid, returned_cid); OcallApi.read_ipfs(&returned_cid).unwrap(); diff --git a/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs b/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs index 3a6d03595..a8fb94724 100644 --- a/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs +++ b/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs @@ -126,10 +126,10 @@ impl EnclaveSidechainOCallApi for ProposeToImportOCallApi { } impl EnclaveIpfsOCallApi for ProposeToImportOCallApi { - fn write_ipfs(&self, _encoded_state: &[u8]) -> SgxResult { - Ok(IpfsCid::try_from("QmSaFjwJ2QtS3rZDKzC98XEzv2bqT4TfpWLCpphPPwyQTr").unwrap()) - } - fn read_ipfs(&self, _cid: &IpfsCid) -> SgxResult> { + fn write_ipfs(&self, _encoded_state: &[u8]) -> SgxResult> { Ok(vec![]) } + fn read_ipfs(&self, _cid: &IpfsCid) -> SgxResult<()> { + Ok(()) + } } From 92c2343150ac87f099480b425dcbe9b4ebc34412 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Tue, 23 Sep 2025 08:50:21 +0200 Subject: [PATCH 41/91] disable tokio_main entirely in ocall --- service/src/ocall_bridge/ipfs_ocall.rs | 244 +++++++++++++------------ 1 file changed, 124 insertions(+), 120 deletions(-) diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index 596cc0b5d..f4421ae0c 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -23,150 +23,154 @@ use ipfs_api_backend_hyper::{IpfsApi, IpfsClient, TryFromUri}; use itp_utils::IpfsCid; use log::*; use std::{ - fmt::Display, - fs::{create_dir_all, File}, - io::{self, Cursor, Write}, - path::{Path, PathBuf}, - str, - sync::{mpsc::channel, Arc}, + fmt::Display, + fs::{create_dir_all, File}, + io::{self, Cursor, Write}, + path::{Path, PathBuf}, + str, + sync::{mpsc::channel, Arc}, }; pub struct IpfsOCall { - client: Option>, - log_dir: Arc, + client: Option>, + log_dir: Arc, } impl IpfsOCall { - pub fn new(maybe_url: Option, maybe_auth: Option, log_dir: Arc) -> Self { - if let Some(url) = maybe_url { - let client = ipfs_api_backend_hyper::IpfsClient::from_str(&url).unwrap(); - let client = if let Some((user, pwd)) = maybe_auth - .and_then(|s| s.split_once(':').map(|(u, p)| (u.to_string(), p.to_string()))) - { - info!("Using IPFS node at {} with credentials ******", url); - client.with_credentials(user, pwd) - } else { - info!("Using IPFS node at {}", url); - client - }; - let version = tokio::runtime::Runtime::new().unwrap().block_on(client.version()); - match version { - Ok(v) => info!("Connected to IPFS node version: {}", v.version), - Err(e) => error!("Error getting IPFS node version: {}", e), - } - Self { client: Some(Arc::new(client)), log_dir } - } else { - info!("No IPFS URL provided, disabling IPFS."); - Self { client: None, log_dir } - } - } + pub fn new(maybe_url: Option, maybe_auth: Option, log_dir: Arc) -> Self { + if let Some(url) = maybe_url { + let client = ipfs_api_backend_hyper::IpfsClient::from_str(&url).unwrap(); + let client = if let Some((user, pwd)) = maybe_auth + .and_then(|s| s.split_once(':').map(|(u, p)| (u.to_string(), p.to_string()))) + { + info!("Using IPFS node at {} with credentials ******", url); + client.with_credentials(user, pwd) + } else { + info!("Using IPFS node at {}", url); + client + }; + let version = 
tokio::runtime::Runtime::new().unwrap().block_on(client.version()); + match version { + Ok(v) => info!("Connected to IPFS node version: {}", v.version), + Err(e) => error!("Error getting IPFS node version: {}", e), + } + Self { client: Some(Arc::new(client)), log_dir } + } else { + info!("No IPFS URL provided, disabling IPFS."); + Self { client: None, log_dir } + } + } } impl IpfsBridge for IpfsOCall { - fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { - debug!(" Entering ocall_write_ipfs to write {}B", data.len()); - write_to_ipfs( - self.client.as_ref().ok_or_else(|| { - let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); - OCallBridgeError::IpfsError( - format!("No IPFS client configured, cannot write to IPFS. Dumped content to local file instead: {}", dumpfile.display()) - ) - })?, - data, - self.log_dir.clone(), - ) - } + fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { + debug!(" Entering ocall_write_ipfs to write {}B", data.len()); + let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); + Err(OCallBridgeError::IpfsError( + format!("No IPFS client configured, cannot write to IPFS. Dumped content to local file instead: {}", dumpfile.display()) + )) + // write_to_ipfs( + // self.client.as_ref().ok_or_else(|| { + // let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); + // OCallBridgeError::IpfsError( + // format!("No IPFS client configured, cannot write to IPFS. Dumped content to local file instead: {}", dumpfile.display()) + // ) + // })?, + // data, + // self.log_dir.clone(), + // ) + } - fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()> { - debug!("Entering ocall_read_ipfs"); - let client = self.client.as_ref().ok_or_else(|| { - OCallBridgeError::IpfsError( - "No IPFS client configured, cannot read from IPFS".to_string(), - ) - })?; - let res = read_from_ipfs(client, &cid) - .map_err(|_| OCallBridgeError::IpfsError("failed to read from IPFS".to_string()))?; - let filename = format!("{:?}", cid); - create_file(&filename, &res).map_err(OCallBridgeError::IpfsError) - } + fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()> { + debug!("Entering ocall_read_ipfs"); + let client = self.client.as_ref().ok_or_else(|| { + OCallBridgeError::IpfsError( + "No IPFS client configured, cannot read from IPFS".to_string(), + ) + })?; + let res = read_from_ipfs(client, &cid) + .map_err(|_| OCallBridgeError::IpfsError("failed to read from IPFS".to_string()))?; + let filename = format!("{:?}", cid); + create_file(&filename, &res).map_err(OCallBridgeError::IpfsError) + } } fn create_file(filename: &str, result: &[u8]) -> Result<(), String> { - match File::create(filename) { - Ok(mut f) => f - .write_all(result) - .map_or_else(|e| Err(format!("failed writing to file: {}", e)), |_| Ok(())), - Err(e) => Err(format!("failed to create file: {}", e)), - } + match File::create(filename) { + Ok(mut f) => f + .write_all(result) + .map_or_else(|e| Err(format!("failed writing to file: {}", e)), |_| Ok(())), + Err(e) => Err(format!("failed to create file: {}", e)), + } } #[tokio::main] async fn write_to_ipfs( - client: &IpfsClient, - data: &'static [u8], - log_dir: Arc, + client: &IpfsClient, + data: &'static [u8], + log_dir: Arc, ) -> OCallBridgeResult { - let datac = Cursor::new(data); - let (tx, rx) = channel(); + let datac = Cursor::new(data); + let (tx, rx) = 
channel(); - match client.add(datac).await { - Ok(res) => { - debug!("Result IpfsCid {}", res.hash); - tx.send(res.hash.into_bytes()).unwrap(); - }, - Err(e) => { - let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) - .unwrap_or_else(|e| e.to_string().into()); - return Err(OCallBridgeError::IpfsError(format!( - "error adding file to IPFS: {}. Dumped content to local file instead: {}", - e, - dumpfile.display() - ))) - }, - } - rx.recv() - .map_err(|e| { - let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) - .unwrap_or_else(|e| e.to_string().into()); - OCallBridgeError::IpfsError(format!( - "error receiving cid: {}. Dumped content to local file instead: {}", - e, - dumpfile.display() - )) - }) - .and_then(|cid_str| { - str::from_utf8(&cid_str) - .map_err(|e| OCallBridgeError::IpfsError(format!("invalid UTF-8 in cid: {}", e))) - .and_then(|cid_utf8| { - IpfsCid::try_from(cid_utf8).map_err(|e| { - OCallBridgeError::IpfsError(format!("invalid IpfsCid: {:?}", e)) - }) - }) - }) + match client.add(datac).await { + Ok(res) => { + debug!("Result IpfsCid {}", res.hash); + tx.send(res.hash.into_bytes()).unwrap(); + } + Err(e) => { + let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) + .unwrap_or_else(|e| e.to_string().into()); + return Err(OCallBridgeError::IpfsError(format!( + "error adding file to IPFS: {}. Dumped content to local file instead: {}", + e, + dumpfile.display() + ))); + } + } + rx.recv() + .map_err(|e| { + let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) + .unwrap_or_else(|e| e.to_string().into()); + OCallBridgeError::IpfsError(format!( + "error receiving cid: {}. Dumped content to local file instead: {}", + e, + dumpfile.display() + )) + }) + .and_then(|cid_str| { + str::from_utf8(&cid_str) + .map_err(|e| OCallBridgeError::IpfsError(format!("invalid UTF-8 in cid: {}", e))) + .and_then(|cid_utf8| { + IpfsCid::try_from(cid_utf8).map_err(|e| { + OCallBridgeError::IpfsError(format!("invalid IpfsCid: {:?}", e)) + }) + }) + }) } #[tokio::main] pub async fn read_from_ipfs(client: &IpfsClient, cid: &IpfsCid) -> Result, String> { - let h = format!("{:?}", cid); - debug!("Fetching content with cid {}", h); - client - .cat(&h) - .map_ok(|chunk| chunk.to_vec()) - .map_err(|e| e.to_string()) - .try_concat() - .await + let h = format!("{:?}", cid); + debug!("Fetching content with cid {}", h); + client + .cat(&h) + .map_ok(|chunk| chunk.to_vec()) + .map_err(|e| e.to_string()) + .try_concat() + .await } fn log_failing_blob_to_file(blob: Vec, log_dir: Arc) -> io::Result { - let log_dir = log_dir.join("log-ipfs-failing-add"); - create_dir_all(&log_dir)?; - let timestamp = Local::now().format("%Y%m%d-%H%M%S-%3f").to_string(); - let cid_str = IpfsCid::from_content_bytes(&blob) - .map(|cid| format!("{}", cid)) - .unwrap_or_else(|_| "invalid-cid".to_string()); - let file_name = format!("ipfs-{}-{}.bin", timestamp, cid_str); - let file_path = log_dir.join(file_name); - let mut file = File::create(file_path.clone())?; - file.write_all(&blob)?; - Ok(file_path) + let log_dir = log_dir.join("log-ipfs-failing-add"); + create_dir_all(&log_dir)?; + let timestamp = Local::now().format("%Y%m%d-%H%M%S-%3f").to_string(); + let cid_str = IpfsCid::from_content_bytes(&blob) + .map(|cid| format!("{}", cid)) + .unwrap_or_else(|_| "invalid-cid".to_string()); + let file_name = format!("ipfs-{}-{}.bin", timestamp, cid_str); + let file_path = log_dir.join(file_name); + let mut file = File::create(file_path.clone())?; + file.write_all(&blob)?; + 
Ok(file_path) } From ff7cf28910d9516d4aed12997452de59409e373f Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Tue, 23 Sep 2025 09:34:52 +0200 Subject: [PATCH 42/91] disable test verification logic --- enclave-runtime/src/test/ipfs_tests.rs | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index 76dcd10b8..48cf4c687 100644 --- a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -61,18 +61,19 @@ pub fn test_ocall_write_ipfs_fallback() { let expected_cid = IpfsCid::from_content_bytes(&enc_state).unwrap(); let result = OcallApi.write_ipfs(enc_state.as_slice()); - if result.is_ok() { - panic!("write_ipfs succeeded, but was expected to fail and fallback to local file dump. Did you accidentally provide an ipfs api url to the test?"); - } else { - let dumpfile = - find_first_matching_file(expected_cid.to_string()).expect("dumped file not found"); - let mut f = fs::File::open(dumpfile).unwrap(); - let mut content_buf = Vec::new(); - f.read_to_end(&mut content_buf).unwrap(); - info!("reading file {:?} of size {} bytes", f, &content_buf.len()); - let file_cid = IpfsCid::from_content_bytes(&content_buf).unwrap(); - assert_eq!(expected_cid, file_cid); - } + // if result.is_ok() { + // println!("write_ipfs succeeded, but was expected to fail and fallback to local file dump. Did you accidentally provide an ipfs api url to the test?"); + // panic!("write_ipfs succeeded, but was expected to fail and fallback to local file dump. Did you accidentally provide an ipfs api url to the test?"); + // } else { + // let dumpfile = + // find_first_matching_file(expected_cid.to_string()).expect("dumped file not found"); + // let mut f = fs::File::open(dumpfile).unwrap(); + // let mut content_buf = Vec::new(); + // f.read_to_end(&mut content_buf).unwrap(); + // info!("reading file {:?} of size {} bytes", f, &content_buf.len()); + // let file_cid = IpfsCid::from_content_bytes(&content_buf).unwrap(); + // assert_eq!(expected_cid, file_cid); + // } } } From 5c1e89feab0076aa5b88a407da5629e597bca649 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Tue, 23 Sep 2025 09:47:38 +0200 Subject: [PATCH 43/91] disable entire ipfs test. see if integration test will fail too --- enclave-runtime/src/test/tests_main.rs | 2 +- service/src/ocall_bridge/ipfs_ocall.rs | 245 +++++++++++++------------ 2 files changed, 124 insertions(+), 123 deletions(-) diff --git a/enclave-runtime/src/test/tests_main.rs b/enclave-runtime/src/test/tests_main.rs index 948f1c2dd..bb82f815e 100644 --- a/enclave-runtime/src/test/tests_main.rs +++ b/enclave-runtime/src/test/tests_main.rs @@ -164,7 +164,7 @@ pub extern "C" fn test_main_entrance() -> size_t { // this test needs an ipfs node running.. 
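With the live IPFS path and its tests disabled here, a failed upload only leaves an ipfs-<timestamp>-<cid>.bin dump under log-ipfs-failing-add. A minimal sketch of how such a dump could later be re-added by hand once a node is reachable again, checking the node's answer against the locally derived CID just as the round-trip test above does; it assumes only APIs already exercised in this series (IpfsClient::from_str, add, IpfsCid::from_content_bytes / try_from), and the function name and error handling are illustrative, not part of the patch:

use ipfs_api_backend_hyper::{IpfsApi, IpfsClient, TryFromUri};
use itp_utils::IpfsCid;
use std::{fs, io::Cursor};

/// Illustrative recovery helper: re-add a dumped blob and verify the returned CID.
fn readd_dumped_blob(path: &str, api_url: &str) -> Result<IpfsCid, String> {
	// Read the dumped blob and recompute the CID it was named after.
	let blob = fs::read(path).map_err(|e| e.to_string())?;
	let expected = IpfsCid::from_content_bytes(&blob).map_err(|e| format!("{:?}", e))?;
	// Connect to the (now reachable) IPFS API and add the blob.
	let client = IpfsClient::from_str(api_url).map_err(|e| format!("{:?}", e))?;
	let rt = tokio::runtime::Runtime::new().map_err(|e| e.to_string())?;
	let res = rt.block_on(client.add(Cursor::new(blob))).map_err(|e| e.to_string())?;
	// The node's hash must match the CID derived when the dump was written.
	let returned = IpfsCid::try_from(res.hash.as_str()).map_err(|e| format!("{:?}", e))?;
	if returned == expected {
		Ok(returned)
	} else {
		Err(format!("CID mismatch: expected {}, node returned {}", expected, returned))
	}
}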
//crate::test::ipfs_tests::test_ocall_read_write_ipfs, - crate::test::ipfs_tests::test_ocall_write_ipfs_fallback, + //crate::test::ipfs_tests::test_ocall_write_ipfs_fallback, // Teeracle tests run_teeracle_tests, diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index f4421ae0c..1f4432e68 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -23,154 +23,155 @@ use ipfs_api_backend_hyper::{IpfsApi, IpfsClient, TryFromUri}; use itp_utils::IpfsCid; use log::*; use std::{ - fmt::Display, - fs::{create_dir_all, File}, - io::{self, Cursor, Write}, - path::{Path, PathBuf}, - str, - sync::{mpsc::channel, Arc}, + fmt::Display, + fs::{create_dir_all, File}, + io::{self, Cursor, Write}, + path::{Path, PathBuf}, + str, + sync::{mpsc::channel, Arc}, }; pub struct IpfsOCall { - client: Option>, - log_dir: Arc, + client: Option>, + log_dir: Arc, } impl IpfsOCall { - pub fn new(maybe_url: Option, maybe_auth: Option, log_dir: Arc) -> Self { - if let Some(url) = maybe_url { - let client = ipfs_api_backend_hyper::IpfsClient::from_str(&url).unwrap(); - let client = if let Some((user, pwd)) = maybe_auth - .and_then(|s| s.split_once(':').map(|(u, p)| (u.to_string(), p.to_string()))) - { - info!("Using IPFS node at {} with credentials ******", url); - client.with_credentials(user, pwd) - } else { - info!("Using IPFS node at {}", url); - client - }; - let version = tokio::runtime::Runtime::new().unwrap().block_on(client.version()); - match version { - Ok(v) => info!("Connected to IPFS node version: {}", v.version), - Err(e) => error!("Error getting IPFS node version: {}", e), - } - Self { client: Some(Arc::new(client)), log_dir } - } else { - info!("No IPFS URL provided, disabling IPFS."); - Self { client: None, log_dir } - } - } + pub fn new(maybe_url: Option, maybe_auth: Option, log_dir: Arc) -> Self { + if let Some(url) = maybe_url { + let client = ipfs_api_backend_hyper::IpfsClient::from_str(&url).unwrap(); + let client = if let Some((user, pwd)) = maybe_auth + .and_then(|s| s.split_once(':').map(|(u, p)| (u.to_string(), p.to_string()))) + { + info!("Using IPFS node at {} with credentials ******", url); + client.with_credentials(user, pwd) + } else { + info!("Using IPFS node at {}", url); + client + }; + let version = tokio::runtime::Runtime::new().unwrap().block_on(client.version()); + match version { + Ok(v) => info!("Connected to IPFS node version: {}", v.version), + Err(e) => error!("Error getting IPFS node version: {}", e), + } + Self { client: Some(Arc::new(client)), log_dir } + } else { + info!("No IPFS URL provided, disabling IPFS."); + Self { client: None, log_dir } + } + } } impl IpfsBridge for IpfsOCall { - fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { - debug!(" Entering ocall_write_ipfs to write {}B", data.len()); - let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); - Err(OCallBridgeError::IpfsError( + fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { + debug!(" Entering ocall_write_ipfs to write {}B", data.len()); + let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()) + .unwrap_or_else(|e| e.to_string().into()); + Err(OCallBridgeError::IpfsError( format!("No IPFS client configured, cannot write to IPFS. 
Dumped content to local file instead: {}", dumpfile.display()) )) - // write_to_ipfs( - // self.client.as_ref().ok_or_else(|| { - // let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); - // OCallBridgeError::IpfsError( - // format!("No IPFS client configured, cannot write to IPFS. Dumped content to local file instead: {}", dumpfile.display()) - // ) - // })?, - // data, - // self.log_dir.clone(), - // ) - } + // write_to_ipfs( + // self.client.as_ref().ok_or_else(|| { + // let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); + // OCallBridgeError::IpfsError( + // format!("No IPFS client configured, cannot write to IPFS. Dumped content to local file instead: {}", dumpfile.display()) + // ) + // })?, + // data, + // self.log_dir.clone(), + // ) + } - fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()> { - debug!("Entering ocall_read_ipfs"); - let client = self.client.as_ref().ok_or_else(|| { - OCallBridgeError::IpfsError( - "No IPFS client configured, cannot read from IPFS".to_string(), - ) - })?; - let res = read_from_ipfs(client, &cid) - .map_err(|_| OCallBridgeError::IpfsError("failed to read from IPFS".to_string()))?; - let filename = format!("{:?}", cid); - create_file(&filename, &res).map_err(OCallBridgeError::IpfsError) - } + fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()> { + debug!("Entering ocall_read_ipfs"); + let client = self.client.as_ref().ok_or_else(|| { + OCallBridgeError::IpfsError( + "No IPFS client configured, cannot read from IPFS".to_string(), + ) + })?; + let res = read_from_ipfs(client, &cid) + .map_err(|_| OCallBridgeError::IpfsError("failed to read from IPFS".to_string()))?; + let filename = format!("{:?}", cid); + create_file(&filename, &res).map_err(OCallBridgeError::IpfsError) + } } fn create_file(filename: &str, result: &[u8]) -> Result<(), String> { - match File::create(filename) { - Ok(mut f) => f - .write_all(result) - .map_or_else(|e| Err(format!("failed writing to file: {}", e)), |_| Ok(())), - Err(e) => Err(format!("failed to create file: {}", e)), - } + match File::create(filename) { + Ok(mut f) => f + .write_all(result) + .map_or_else(|e| Err(format!("failed writing to file: {}", e)), |_| Ok(())), + Err(e) => Err(format!("failed to create file: {}", e)), + } } #[tokio::main] async fn write_to_ipfs( - client: &IpfsClient, - data: &'static [u8], - log_dir: Arc, + client: &IpfsClient, + data: &'static [u8], + log_dir: Arc, ) -> OCallBridgeResult { - let datac = Cursor::new(data); - let (tx, rx) = channel(); + let datac = Cursor::new(data); + let (tx, rx) = channel(); - match client.add(datac).await { - Ok(res) => { - debug!("Result IpfsCid {}", res.hash); - tx.send(res.hash.into_bytes()).unwrap(); - } - Err(e) => { - let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) - .unwrap_or_else(|e| e.to_string().into()); - return Err(OCallBridgeError::IpfsError(format!( - "error adding file to IPFS: {}. Dumped content to local file instead: {}", - e, - dumpfile.display() - ))); - } - } - rx.recv() - .map_err(|e| { - let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) - .unwrap_or_else(|e| e.to_string().into()); - OCallBridgeError::IpfsError(format!( - "error receiving cid: {}. 
Dumped content to local file instead: {}", - e, - dumpfile.display() - )) - }) - .and_then(|cid_str| { - str::from_utf8(&cid_str) - .map_err(|e| OCallBridgeError::IpfsError(format!("invalid UTF-8 in cid: {}", e))) - .and_then(|cid_utf8| { - IpfsCid::try_from(cid_utf8).map_err(|e| { - OCallBridgeError::IpfsError(format!("invalid IpfsCid: {:?}", e)) - }) - }) - }) + match client.add(datac).await { + Ok(res) => { + debug!("Result IpfsCid {}", res.hash); + tx.send(res.hash.into_bytes()).unwrap(); + }, + Err(e) => { + let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) + .unwrap_or_else(|e| e.to_string().into()); + return Err(OCallBridgeError::IpfsError(format!( + "error adding file to IPFS: {}. Dumped content to local file instead: {}", + e, + dumpfile.display() + ))) + }, + } + rx.recv() + .map_err(|e| { + let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) + .unwrap_or_else(|e| e.to_string().into()); + OCallBridgeError::IpfsError(format!( + "error receiving cid: {}. Dumped content to local file instead: {}", + e, + dumpfile.display() + )) + }) + .and_then(|cid_str| { + str::from_utf8(&cid_str) + .map_err(|e| OCallBridgeError::IpfsError(format!("invalid UTF-8 in cid: {}", e))) + .and_then(|cid_utf8| { + IpfsCid::try_from(cid_utf8).map_err(|e| { + OCallBridgeError::IpfsError(format!("invalid IpfsCid: {:?}", e)) + }) + }) + }) } #[tokio::main] pub async fn read_from_ipfs(client: &IpfsClient, cid: &IpfsCid) -> Result, String> { - let h = format!("{:?}", cid); - debug!("Fetching content with cid {}", h); - client - .cat(&h) - .map_ok(|chunk| chunk.to_vec()) - .map_err(|e| e.to_string()) - .try_concat() - .await + let h = format!("{:?}", cid); + debug!("Fetching content with cid {}", h); + client + .cat(&h) + .map_ok(|chunk| chunk.to_vec()) + .map_err(|e| e.to_string()) + .try_concat() + .await } fn log_failing_blob_to_file(blob: Vec, log_dir: Arc) -> io::Result { - let log_dir = log_dir.join("log-ipfs-failing-add"); - create_dir_all(&log_dir)?; - let timestamp = Local::now().format("%Y%m%d-%H%M%S-%3f").to_string(); - let cid_str = IpfsCid::from_content_bytes(&blob) - .map(|cid| format!("{}", cid)) - .unwrap_or_else(|_| "invalid-cid".to_string()); - let file_name = format!("ipfs-{}-{}.bin", timestamp, cid_str); - let file_path = log_dir.join(file_name); - let mut file = File::create(file_path.clone())?; - file.write_all(&blob)?; - Ok(file_path) + let log_dir = log_dir.join("log-ipfs-failing-add"); + create_dir_all(&log_dir)?; + let timestamp = Local::now().format("%Y%m%d-%H%M%S-%3f").to_string(); + let cid_str = IpfsCid::from_content_bytes(&blob) + .map(|cid| format!("{}", cid)) + .unwrap_or_else(|_| "invalid-cid".to_string()); + let file_name = format!("ipfs-{}-{}.bin", timestamp, cid_str); + let file_path = log_dir.join(file_name); + let mut file = File::create(file_path.clone())?; + file.write_all(&blob)?; + Ok(file_path) } From 6fa0c6eadfbfd4b7c32cb153d8555e701b8628c9 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Tue, 23 Sep 2025 14:21:58 +0200 Subject: [PATCH 44/91] further strip down ipfs ocall --- Cargo.lock | 23 +++++ app-libs/stf/src/credits.rs | 4 +- enclave-runtime/Cargo.lock | 19 ++++ enclave-runtime/Enclave.config.xml | 2 +- enclave-runtime/src/test/ipfs_tests.rs | 34 +++--- enclave-runtime/src/test/tests_main.rs | 5 +- service/src/ocall_bridge/ipfs_ocall.rs | 137 ++++++++----------------- 7 files changed, 105 insertions(+), 119 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4601350a2..b828f63ec 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -2680,6 +2680,7 @@ dependencies = [ "log 0.4.28", "pallet-assets", "pallet-balances", + "pallet-credits", "pallet-enclave-bridge", "pallet-evm", "pallet-notes", @@ -2967,6 +2968,7 @@ dependencies = [ "itp-sgx-runtime-primitives", "pallet-assets", "pallet-balances", + "pallet-credits", "pallet-evm", "pallet-guess-the-number", "pallet-notes", @@ -5378,6 +5380,27 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-credits" +version = "0.1.0" +dependencies = [ + "env_logger 0.9.3", + "frame-support", + "frame-system", + "itp-randomness", + "log 0.4.28", + "pallet-balances", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "serde 1.0.193", + "sp-core", + "sp-io 7.0.0 (git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42)", + "sp-keyring", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-enclave-bridge" version = "0.12.0" diff --git a/app-libs/stf/src/credits.rs b/app-libs/stf/src/credits.rs index 4ba66f259..6a83e4759 100644 --- a/app-libs/stf/src/credits.rs +++ b/app-libs/stf/src/credits.rs @@ -32,7 +32,7 @@ use itp_node_api_metadata::NodeMetadataTrait; use itp_sgx_runtime_primitives::types::{Balance, Moment, ShardIdentifier}; use itp_stf_interface::{ExecuteCall, ExecuteGetter}; use itp_stf_primitives::error::StfError; -use itp_types::{parentchain::ParentchainCall, AccountId, Hash}; +use itp_types::{AccountId, Hash, TrustedCallSideEffect}; use sp_std::{sync::Arc, vec, vec::Vec}; #[derive(Encode, Decode, Debug, Clone, PartialEq, Eq)] @@ -77,7 +77,7 @@ where fn execute( self, - _calls: &mut Vec, + _side_effects: &mut Vec, _shard: &ShardIdentifier, _node_metadata_repo: Arc, ) -> Result<(), Self::Error> { diff --git a/enclave-runtime/Cargo.lock b/enclave-runtime/Cargo.lock index a0627ce64..7bd4a6737 100644 --- a/enclave-runtime/Cargo.lock +++ b/enclave-runtime/Cargo.lock @@ -1729,6 +1729,7 @@ dependencies = [ "itp-sgx-runtime-primitives", "pallet-assets", "pallet-balances", + "pallet-credits", "pallet-evm", "pallet-guess-the-number", "pallet-notes", @@ -3043,6 +3044,24 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-credits" +version = "0.1.0" +dependencies = [ + "frame-support", + "frame-system", + "itp-randomness", + "log 0.4.28", + "pallet-balances", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-evm" version = "6.0.0-dev" diff --git a/enclave-runtime/Enclave.config.xml b/enclave-runtime/Enclave.config.xml index 755dfec02..70626333a 100644 --- a/enclave-runtime/Enclave.config.xml +++ b/enclave-runtime/Enclave.config.xml @@ -4,7 +4,7 @@ 0 0x40000 0x20000000 - 16 + 8 0 0 0 diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index 48cf4c687..928f4727a 100644 --- a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -32,25 +32,25 @@ use std::{ #[allow(unused)] /// this test neeeds an ipfs node running and configured with cli args. here for reference but may never be called pub fn test_ocall_read_write_ipfs() { - println!("testing IPFS read/write. Hopefully ipfs daemon is running..."); - let enc_state: Vec = vec![20; 4 * 512 * 1024]; + info!("testing IPFS read/write. 
Hopefully ipfs daemon is running..."); + let enc_state: Vec = vec![20; 100 * 1024]; - let expected_cid = IpfsCid::from_content_bytes(&enc_state).unwrap(); + let cid = OcallApi.write_ipfs(enc_state.as_slice()).unwrap(); - let returned_cid_raw = OcallApi.write_ipfs(enc_state.as_slice()).unwrap(); - let returned_cid = IpfsCid::decode(&mut returned_cid_raw.as_slice()).unwrap(); - assert_eq!(expected_cid, returned_cid); - - OcallApi.read_ipfs(&returned_cid).unwrap(); - - let cid_str = format!("{:?}", returned_cid); - let mut f = fs::File::open(cid_str).unwrap(); - let mut content_buf = Vec::new(); - f.read_to_end(&mut content_buf).unwrap(); - info!("reading file {:?} of size {} bytes", f, &content_buf.len()); - - let file_cid = IpfsCid::from_content_bytes(&content_buf).unwrap(); - assert_eq!(expected_cid, file_cid); + // let returned_cid_raw = OcallApi.write_ipfs(enc_state.as_slice()).unwrap(); + // let returned_cid = IpfsCid::decode(&mut returned_cid_raw.as_slice()).unwrap(); + // assert_eq!(expected_cid, returned_cid); + // + // OcallApi.read_ipfs(&returned_cid).unwrap(); + // + // let cid_str = format!("{:?}", returned_cid); + // let mut f = fs::File::open(cid_str).unwrap(); + // let mut content_buf = Vec::new(); + // f.read_to_end(&mut content_buf).unwrap(); + // info!("reading file {:?} of size {} bytes", f, &content_buf.len()); + // + // let file_cid = IpfsCid::from_content_bytes(&content_buf).unwrap(); + // assert_eq!(expected_cid, file_cid); } pub fn test_ocall_write_ipfs_fallback() { diff --git a/enclave-runtime/src/test/tests_main.rs b/enclave-runtime/src/test/tests_main.rs index bb82f815e..f3de211cf 100644 --- a/enclave-runtime/src/test/tests_main.rs +++ b/enclave-runtime/src/test/tests_main.rs @@ -163,7 +163,7 @@ pub extern "C" fn test_main_entrance() -> size_t { itc_parentchain::light_client::io::sgx_tests::sealing_creates_backup, // this test needs an ipfs node running.. - //crate::test::ipfs_tests::test_ocall_read_write_ipfs, + crate::test::ipfs_tests::test_ocall_read_write_ipfs, //crate::test::ipfs_tests::test_ocall_write_ipfs_fallback, // Teeracle tests @@ -427,7 +427,8 @@ fn test_create_state_diff() { assert_eq!( sender_acc_info.data.free, ita_stf::test_genesis::ENDOWED_ACC_FUNDS - - TX_AMOUNT - 1_000_000_000_000 / ita_stf::STF_TX_FEE_UNIT_DIVIDER + - TX_AMOUNT + - 1_000_000_000_000 / ita_stf::STF_TX_FEE_UNIT_DIVIDER ); } diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index 1f4432e68..1eb48f80c 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -18,8 +18,6 @@ use crate::ocall_bridge::bridge_api::{IpfsBridge, OCallBridgeError, OCallBridgeResult}; use chrono::Local; -use futures::TryStreamExt; -use ipfs_api_backend_hyper::{IpfsApi, IpfsClient, TryFromUri}; use itp_utils::IpfsCid; use log::*; use std::{ @@ -28,9 +26,12 @@ use std::{ io::{self, Cursor, Write}, path::{Path, PathBuf}, str, - sync::{mpsc::channel, Arc}, + sync::Arc, }; +// TODO: dummy type. 
remove +struct IpfsClient {} + pub struct IpfsOCall { client: Option>, log_dir: Arc, @@ -38,38 +39,36 @@ pub struct IpfsOCall { impl IpfsOCall { pub fn new(maybe_url: Option, maybe_auth: Option, log_dir: Arc) -> Self { - if let Some(url) = maybe_url { - let client = ipfs_api_backend_hyper::IpfsClient::from_str(&url).unwrap(); - let client = if let Some((user, pwd)) = maybe_auth - .and_then(|s| s.split_once(':').map(|(u, p)| (u.to_string(), p.to_string()))) - { - info!("Using IPFS node at {} with credentials ******", url); - client.with_credentials(user, pwd) - } else { - info!("Using IPFS node at {}", url); - client - }; - let version = tokio::runtime::Runtime::new().unwrap().block_on(client.version()); - match version { - Ok(v) => info!("Connected to IPFS node version: {}", v.version), - Err(e) => error!("Error getting IPFS node version: {}", e), - } - Self { client: Some(Arc::new(client)), log_dir } - } else { - info!("No IPFS URL provided, disabling IPFS."); - Self { client: None, log_dir } - } + // if let Some(url) = maybe_url { + // let client = ipfs_api_backend_hyper::IpfsClient::from_str(&url).unwrap(); + // let client = if let Some((user, pwd)) = maybe_auth + // .and_then(|s| s.split_once(':').map(|(u, p)| (u.to_string(), p.to_string()))) + // { + // info!("Using IPFS node at {} with credentials ******", url); + // client.with_credentials(user, pwd) + // } else { + // info!("Using IPFS node at {}", url); + // client + // }; + // let version = tokio::runtime::Runtime::new().unwrap().block_on(client.version()); + // match version { + // Ok(v) => info!("Connected to IPFS node version: {}", v.version), + // Err(e) => error!("Error getting IPFS node version: {}", e), + // } + // Self { client: Some(Arc::new(client)), log_dir } + // } else { + info!("No IPFS URL provided, disabling IPFS."); + Self { client: None, log_dir } + //} } } impl IpfsBridge for IpfsOCall { fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { - debug!(" Entering ocall_write_ipfs to write {}B", data.len()); - let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()) - .unwrap_or_else(|e| e.to_string().into()); - Err(OCallBridgeError::IpfsError( - format!("No IPFS client configured, cannot write to IPFS. 
Dumped content to local file instead: {}", dumpfile.display()) - )) + eprintln!(" Entering ocall_write_ipfs to write {}B", data.len()); + // let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()) + // .unwrap_or_else(|e| e.to_string().into()); + Ok(IpfsCid::default()) // write_to_ipfs( // self.client.as_ref().ok_or_else(|| { // let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); @@ -83,16 +82,17 @@ impl IpfsBridge for IpfsOCall { } fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()> { - debug!("Entering ocall_read_ipfs"); - let client = self.client.as_ref().ok_or_else(|| { - OCallBridgeError::IpfsError( - "No IPFS client configured, cannot read from IPFS".to_string(), - ) - })?; - let res = read_from_ipfs(client, &cid) - .map_err(|_| OCallBridgeError::IpfsError("failed to read from IPFS".to_string()))?; - let filename = format!("{:?}", cid); - create_file(&filename, &res).map_err(OCallBridgeError::IpfsError) + eprintln!(" Entering ocall_read_ipfs"); + Ok(()) + // let client = self.client.as_ref().ok_or_else(|| { + // OCallBridgeError::IpfsError( + // "No IPFS client configured, cannot read from IPFS".to_string(), + // ) + // })?; + // let res = read_from_ipfs(client, &cid) + // .map_err(|_| OCallBridgeError::IpfsError("failed to read from IPFS".to_string()))?; + // let filename = format!("{:?}", cid); + // create_file(&filename, &res).map_err(OCallBridgeError::IpfsError) } } @@ -105,63 +105,6 @@ fn create_file(filename: &str, result: &[u8]) -> Result<(), String> { } } -#[tokio::main] -async fn write_to_ipfs( - client: &IpfsClient, - data: &'static [u8], - log_dir: Arc, -) -> OCallBridgeResult { - let datac = Cursor::new(data); - let (tx, rx) = channel(); - - match client.add(datac).await { - Ok(res) => { - debug!("Result IpfsCid {}", res.hash); - tx.send(res.hash.into_bytes()).unwrap(); - }, - Err(e) => { - let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) - .unwrap_or_else(|e| e.to_string().into()); - return Err(OCallBridgeError::IpfsError(format!( - "error adding file to IPFS: {}. Dumped content to local file instead: {}", - e, - dumpfile.display() - ))) - }, - } - rx.recv() - .map_err(|e| { - let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) - .unwrap_or_else(|e| e.to_string().into()); - OCallBridgeError::IpfsError(format!( - "error receiving cid: {}. 
Dumped content to local file instead: {}", - e, - dumpfile.display() - )) - }) - .and_then(|cid_str| { - str::from_utf8(&cid_str) - .map_err(|e| OCallBridgeError::IpfsError(format!("invalid UTF-8 in cid: {}", e))) - .and_then(|cid_utf8| { - IpfsCid::try_from(cid_utf8).map_err(|e| { - OCallBridgeError::IpfsError(format!("invalid IpfsCid: {:?}", e)) - }) - }) - }) -} - -#[tokio::main] -pub async fn read_from_ipfs(client: &IpfsClient, cid: &IpfsCid) -> Result, String> { - let h = format!("{:?}", cid); - debug!("Fetching content with cid {}", h); - client - .cat(&h) - .map_ok(|chunk| chunk.to_vec()) - .map_err(|e| e.to_string()) - .try_concat() - .await -} - fn log_failing_blob_to_file(blob: Vec, log_dir: Arc) -> io::Result { let log_dir = log_dir.join("log-ipfs-failing-add"); create_dir_all(&log_dir)?; From 5e4c09f8c79a0774ddb1867ef62dcf431e088215 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Tue, 23 Sep 2025 14:58:28 +0200 Subject: [PATCH 45/91] ocall: add dump to file --- service/src/ocall_bridge/ipfs_ocall.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index 1eb48f80c..e2fea38c8 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -66,8 +66,9 @@ impl IpfsOCall { impl IpfsBridge for IpfsOCall { fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { eprintln!(" Entering ocall_write_ipfs to write {}B", data.len()); - // let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()) - // .unwrap_or_else(|e| e.to_string().into()); + let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()) + .unwrap_or_else(|e| e.to_string().into()); + eprintln!(" wrote to file {}", dumpfile.display()); Ok(IpfsCid::default()) // write_to_ipfs( // self.client.as_ref().ok_or_else(|| { From db9988c74e99035d04b980bdb6022a9c69d5b1ac Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Tue, 23 Sep 2025 15:21:32 +0200 Subject: [PATCH 46/91] ocall: add synchronous use of ipfs api --- enclave-runtime/src/test/ipfs_tests.rs | 29 +++---- enclave-runtime/src/test/tests_main.rs | 2 +- service/src/ocall_bridge/ipfs_ocall.rs | 103 ++++++++++++++++--------- 3 files changed, 81 insertions(+), 53 deletions(-) diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index 928f4727a..675ba9404 100644 --- a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -35,7 +35,8 @@ pub fn test_ocall_read_write_ipfs() { info!("testing IPFS read/write. Hopefully ipfs daemon is running..."); let enc_state: Vec = vec![20; 100 * 1024]; - let cid = OcallApi.write_ipfs(enc_state.as_slice()).unwrap(); + let result = OcallApi.write_ipfs(enc_state.as_slice()); + eprintln!("write_ipfs ocall result : {:?}", result); // let returned_cid_raw = OcallApi.write_ipfs(enc_state.as_slice()).unwrap(); // let returned_cid = IpfsCid::decode(&mut returned_cid_raw.as_slice()).unwrap(); @@ -61,19 +62,19 @@ pub fn test_ocall_write_ipfs_fallback() { let expected_cid = IpfsCid::from_content_bytes(&enc_state).unwrap(); let result = OcallApi.write_ipfs(enc_state.as_slice()); - // if result.is_ok() { - // println!("write_ipfs succeeded, but was expected to fail and fallback to local file dump. Did you accidentally provide an ipfs api url to the test?"); - // panic!("write_ipfs succeeded, but was expected to fail and fallback to local file dump. 
Did you accidentally provide an ipfs api url to the test?"); - // } else { - // let dumpfile = - // find_first_matching_file(expected_cid.to_string()).expect("dumped file not found"); - // let mut f = fs::File::open(dumpfile).unwrap(); - // let mut content_buf = Vec::new(); - // f.read_to_end(&mut content_buf).unwrap(); - // info!("reading file {:?} of size {} bytes", f, &content_buf.len()); - // let file_cid = IpfsCid::from_content_bytes(&content_buf).unwrap(); - // assert_eq!(expected_cid, file_cid); - // } + if result.is_ok() { + eprintln!("write_ipfs succeeded, but was expected to fail and fallback to local file dump. Did you accidentally provide an ipfs api url to the test?"); + panic!("write_ipfs succeeded, but was expected to fail and fallback to local file dump. Did you accidentally provide an ipfs api url to the test?"); + } else { + let dumpfile = + find_first_matching_file(expected_cid.to_string()).expect("dumped file not found"); + let mut f = fs::File::open(dumpfile).unwrap(); + let mut content_buf = Vec::new(); + f.read_to_end(&mut content_buf).unwrap(); + info!("reading file {:?} of size {} bytes", f, &content_buf.len()); + let file_cid = IpfsCid::from_content_bytes(&content_buf).unwrap(); + assert_eq!(expected_cid, file_cid); + } } } diff --git a/enclave-runtime/src/test/tests_main.rs b/enclave-runtime/src/test/tests_main.rs index f3de211cf..33bf7861a 100644 --- a/enclave-runtime/src/test/tests_main.rs +++ b/enclave-runtime/src/test/tests_main.rs @@ -164,7 +164,7 @@ pub extern "C" fn test_main_entrance() -> size_t { // this test needs an ipfs node running.. crate::test::ipfs_tests::test_ocall_read_write_ipfs, - //crate::test::ipfs_tests::test_ocall_write_ipfs_fallback, + crate::test::ipfs_tests::test_ocall_write_ipfs_fallback, // Teeracle tests run_teeracle_tests, diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index e2fea38c8..ba1c2f1c5 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -18,6 +18,8 @@ use crate::ocall_bridge::bridge_api::{IpfsBridge, OCallBridgeError, OCallBridgeResult}; use chrono::Local; +use futures::TryStreamExt; +use ipfs_api_backend_hyper::{IpfsApi, IpfsClient, TryFromUri}; use itp_utils::IpfsCid; use log::*; use std::{ @@ -26,12 +28,9 @@ use std::{ io::{self, Cursor, Write}, path::{Path, PathBuf}, str, - sync::Arc, + sync::{mpsc::channel, Arc}, }; -// TODO: dummy type. 
remove -struct IpfsClient {} - pub struct IpfsOCall { client: Option>, log_dir: Arc, @@ -39,47 +38,46 @@ pub struct IpfsOCall { impl IpfsOCall { pub fn new(maybe_url: Option, maybe_auth: Option, log_dir: Arc) -> Self { - // if let Some(url) = maybe_url { - // let client = ipfs_api_backend_hyper::IpfsClient::from_str(&url).unwrap(); - // let client = if let Some((user, pwd)) = maybe_auth - // .and_then(|s| s.split_once(':').map(|(u, p)| (u.to_string(), p.to_string()))) - // { - // info!("Using IPFS node at {} with credentials ******", url); - // client.with_credentials(user, pwd) - // } else { - // info!("Using IPFS node at {}", url); - // client - // }; - // let version = tokio::runtime::Runtime::new().unwrap().block_on(client.version()); - // match version { - // Ok(v) => info!("Connected to IPFS node version: {}", v.version), - // Err(e) => error!("Error getting IPFS node version: {}", e), - // } - // Self { client: Some(Arc::new(client)), log_dir } - // } else { - info!("No IPFS URL provided, disabling IPFS."); - Self { client: None, log_dir } - //} + if let Some(url) = maybe_url { + let client = ipfs_api_backend_hyper::IpfsClient::from_str(&url).unwrap(); + let client = if let Some((user, pwd)) = maybe_auth + .and_then(|s| s.split_once(':').map(|(u, p)| (u.to_string(), p.to_string()))) + { + info!("Using IPFS node at {} with credentials ******", url); + client.with_credentials(user, pwd) + } else { + info!("Using IPFS node at {}", url); + client + }; + let version = tokio::runtime::Runtime::new().unwrap().block_on(client.version()); + match version { + Ok(v) => info!("Connected to IPFS node version: {}", v.version), + Err(e) => error!("Error getting IPFS node version: {}", e), + } + Self { client: Some(Arc::new(client)), log_dir } + } else { + info!("No IPFS URL provided, disabling IPFS."); + Self { client: None, log_dir } + } } } impl IpfsBridge for IpfsOCall { fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { eprintln!(" Entering ocall_write_ipfs to write {}B", data.len()); - let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()) - .unwrap_or_else(|e| e.to_string().into()); - eprintln!(" wrote to file {}", dumpfile.display()); + let result = write_to_ipfs_sync( + self.client.as_ref().ok_or_else(|| { + let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); + eprintln!(" write to ipfs failed, wrote to file {}", dumpfile.display()); + OCallBridgeError::IpfsError( + format!("No IPFS client configured, cannot write to IPFS. Dumped content to local file instead: {}", dumpfile.display()) + ) + })?, + data, + self.log_dir.clone(), + ); + eprintln!(" ipfs result {:?}", result); Ok(IpfsCid::default()) - // write_to_ipfs( - // self.client.as_ref().ok_or_else(|| { - // let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); - // OCallBridgeError::IpfsError( - // format!("No IPFS client configured, cannot write to IPFS. 
Dumped content to local file instead: {}", dumpfile.display()) - // ) - // })?, - // data, - // self.log_dir.clone(), - // ) } fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()> { @@ -106,6 +104,35 @@ fn create_file(filename: &str, result: &[u8]) -> Result<(), String> { } } +use tokio::runtime::Runtime; + +fn write_to_ipfs_sync( + client: &IpfsClient, + data: &'static [u8], + log_dir: Arc, +) -> OCallBridgeResult { + let datac = Cursor::new(data); + let rt = Runtime::new().unwrap(); + + match rt.block_on(client.add(datac)) { + Ok(res) => { + eprintln!("ocall result IpfsCid {}", res.hash); + IpfsCid::try_from(res.hash.as_str()) + .map_err(|e| OCallBridgeError::IpfsError(format!("invalid IpfsCid: {:?}", e))) + }, + Err(e) => { + let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) + .unwrap_or_else(|e| e.to_string().into()); + eprintln!(" write to ipfs failed late, wrote to file {}", dumpfile.display()); + Err(OCallBridgeError::IpfsError(format!( + "error adding file to IPFS: {}. Dumped content to local file instead: {}", + e, + dumpfile.display() + ))) + }, + } +} + fn log_failing_blob_to_file(blob: Vec, log_dir: Arc) -> io::Result { let log_dir = log_dir.join("log-ipfs-failing-add"); create_dir_all(&log_dir)?; From 872fa21e2b925f2983b2066e9d58e256b8b0c43d Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Tue, 23 Sep 2025 15:44:04 +0200 Subject: [PATCH 47/91] just bypass the block_on --- service/src/ocall_bridge/ipfs_ocall.rs | 41 +++++++++++++------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index ba1c2f1c5..32b0efcf3 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -111,26 +111,27 @@ fn write_to_ipfs_sync( data: &'static [u8], log_dir: Arc, ) -> OCallBridgeResult { - let datac = Cursor::new(data); - let rt = Runtime::new().unwrap(); - - match rt.block_on(client.add(datac)) { - Ok(res) => { - eprintln!("ocall result IpfsCid {}", res.hash); - IpfsCid::try_from(res.hash.as_str()) - .map_err(|e| OCallBridgeError::IpfsError(format!("invalid IpfsCid: {:?}", e))) - }, - Err(e) => { - let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) - .unwrap_or_else(|e| e.to_string().into()); - eprintln!(" write to ipfs failed late, wrote to file {}", dumpfile.display()); - Err(OCallBridgeError::IpfsError(format!( - "error adding file to IPFS: {}. Dumped content to local file instead: {}", - e, - dumpfile.display() - ))) - }, - } + Ok(IpfsCid::default()) + // let datac = Cursor::new(data); + // let rt = Runtime::new().unwrap(); + // + // match rt.block_on(client.add(datac)) { + // Ok(res) => { + // eprintln!("ocall result IpfsCid {}", res.hash); + // IpfsCid::try_from(res.hash.as_str()) + // .map_err(|e| OCallBridgeError::IpfsError(format!("invalid IpfsCid: {:?}", e))) + // }, + // Err(e) => { + // let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) + // .unwrap_or_else(|e| e.to_string().into()); + // eprintln!(" write to ipfs failed late, wrote to file {}", dumpfile.display()); + // Err(OCallBridgeError::IpfsError(format!( + // "error adding file to IPFS: {}. 
Dumped content to local file instead: {}", + // e, + // dumpfile.display() + // ))) + // }, + // } } fn log_failing_blob_to_file(blob: Vec, log_dir: Arc) -> io::Result { From 9b36b4753fe8db2874fbddb2a23590b24d2e04c4 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Tue, 23 Sep 2025 16:04:30 +0200 Subject: [PATCH 48/91] try --- enclave-runtime/src/test/ipfs_tests.rs | 37 +++++++++++++------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index 675ba9404..5f2e4e7bc 100644 --- a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -55,26 +55,25 @@ pub fn test_ocall_read_write_ipfs() { } pub fn test_ocall_write_ipfs_fallback() { - let payload_sizes = vec![1, 100, 1024]; - for payload_size in payload_sizes { - println!("testing IPFS write of {}kB if api is unreachable. Expected to fallback to dump local file...", payload_size); - let enc_state: Vec = vec![20; payload_size * 1024]; - let expected_cid = IpfsCid::from_content_bytes(&enc_state).unwrap(); - let result = OcallApi.write_ipfs(enc_state.as_slice()); + let payload_size = 100; // in kB + eprintln!("testing IPFS write of {}kB if api is unreachable. Expected to fallback to dump local file...", payload_size); + let enc_state: Vec = vec![20; payload_size * 1024]; + let result = OcallApi.write_ipfs(enc_state.as_slice()); - if result.is_ok() { - eprintln!("write_ipfs succeeded, but was expected to fail and fallback to local file dump. Did you accidentally provide an ipfs api url to the test?"); - panic!("write_ipfs succeeded, but was expected to fail and fallback to local file dump. Did you accidentally provide an ipfs api url to the test?"); - } else { - let dumpfile = - find_first_matching_file(expected_cid.to_string()).expect("dumped file not found"); - let mut f = fs::File::open(dumpfile).unwrap(); - let mut content_buf = Vec::new(); - f.read_to_end(&mut content_buf).unwrap(); - info!("reading file {:?} of size {} bytes", f, &content_buf.len()); - let file_cid = IpfsCid::from_content_bytes(&content_buf).unwrap(); - assert_eq!(expected_cid, file_cid); - } + if result.is_ok() { + eprintln!("write_ipfs succeeded, but was expected to fail and fallback to local file dump. Did you accidentally provide an ipfs api url to the test?"); + panic!("write_ipfs succeeded, but was expected to fail and fallback to local file dump. 
Did you accidentally provide an ipfs api url to the test?"); + } else { + // let dumpfile = + // find_first_matching_file(expected_cid.to_string()).expect("dumped file not found"); + // let mut f = fs::File::open(dumpfile).unwrap(); + let mut content_buf = Vec::new(); + // f.read_to_end(&mut content_buf).unwrap(); + //eprintln!("reading file {:?} of size {} bytes", f, &content_buf.len()); + eprintln!("reading dumped content of size {} bytes", &content_buf.len()); + let file_cid = IpfsCid::from_content_bytes(&content_buf).unwrap(); + let expected_cid = IpfsCid::from_content_bytes(&enc_state).unwrap(); + assert_eq!(expected_cid, file_cid); } } From e027e925ae738a06c22b1556cf544b63aea4090d Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Tue, 23 Sep 2025 16:40:11 +0200 Subject: [PATCH 49/91] no call to IpfsClient at all --- service/src/ocall_bridge/ipfs_ocall.rs | 28 +++++++++++++++----------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index 32b0efcf3..7ee7f5eab 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -65,18 +65,22 @@ impl IpfsOCall { impl IpfsBridge for IpfsOCall { fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { eprintln!(" Entering ocall_write_ipfs to write {}B", data.len()); - let result = write_to_ipfs_sync( - self.client.as_ref().ok_or_else(|| { - let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); - eprintln!(" write to ipfs failed, wrote to file {}", dumpfile.display()); - OCallBridgeError::IpfsError( - format!("No IPFS client configured, cannot write to IPFS. Dumped content to local file instead: {}", dumpfile.display()) - ) - })?, - data, - self.log_dir.clone(), - ); - eprintln!(" ipfs result {:?}", result); + let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()) + .unwrap_or_else(|e| e.to_string().into()); + eprintln!(" write to ipfs failed, wrote to file {}", dumpfile.display()); + + // let result = write_to_ipfs_sync( + // self.client.as_ref().ok_or_else(|| { + // let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); + // eprintln!(" write to ipfs failed, wrote to file {}", dumpfile.display()); + // OCallBridgeError::IpfsError( + // format!("No IPFS client configured, cannot write to IPFS. 
Dumped content to local file instead: {}", dumpfile.display()) + // ) + // })?, + // data, + // self.log_dir.clone(), + // ); + //eprintln!(" ipfs result {:?}", result); Ok(IpfsCid::default()) } From 7cfb6f2ffd0c8245c70efff0ad140f7500cf4911 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Tue, 23 Sep 2025 17:16:08 +0200 Subject: [PATCH 50/91] one step forward --- service/src/ocall_bridge/ipfs_ocall.rs | 28 ++++++++++---------------- 1 file changed, 11 insertions(+), 17 deletions(-) diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index 7ee7f5eab..79b4a5953 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -65,23 +65,17 @@ impl IpfsOCall { impl IpfsBridge for IpfsOCall { fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { eprintln!(" Entering ocall_write_ipfs to write {}B", data.len()); - let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()) - .unwrap_or_else(|e| e.to_string().into()); - eprintln!(" write to ipfs failed, wrote to file {}", dumpfile.display()); - - // let result = write_to_ipfs_sync( - // self.client.as_ref().ok_or_else(|| { - // let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()).unwrap_or_else(|e| e.to_string().into()); - // eprintln!(" write to ipfs failed, wrote to file {}", dumpfile.display()); - // OCallBridgeError::IpfsError( - // format!("No IPFS client configured, cannot write to IPFS. Dumped content to local file instead: {}", dumpfile.display()) - // ) - // })?, - // data, - // self.log_dir.clone(), - // ); - //eprintln!(" ipfs result {:?}", result); - Ok(IpfsCid::default()) + if let Some(ref client) = self.client { + let result = write_to_ipfs_sync(client, data, self.log_dir.clone()); + eprintln!(" ipfs result {:?}", result); + Ok(IpfsCid::default()) + } else { + let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()) + .unwrap_or_else(|e| e.to_string().into()); + Err(OCallBridgeError::IpfsError( + format!("No IPFS client configured, cannot write to IPFS. Dumped content to local file instead: {}", dumpfile.display()) + )) + } } fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()> { From df8b0b01ba6331ab37a584915c9e9c4a6646d3b1 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Tue, 23 Sep 2025 17:48:31 +0200 Subject: [PATCH 51/91] always return Ok --- service/src/ocall_bridge/ipfs_ocall.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index 79b4a5953..9ca1b8d7e 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -72,9 +72,7 @@ impl IpfsBridge for IpfsOCall { } else { let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()) .unwrap_or_else(|e| e.to_string().into()); - Err(OCallBridgeError::IpfsError( - format!("No IPFS client configured, cannot write to IPFS. 
Dumped content to local file instead: {}", dumpfile.display()) - )) + Ok(IpfsCid::default()) } } From 8a9df6595589c9e7ecced1c7e54e57b1a75b31c9 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Tue, 23 Sep 2025 20:04:19 +0200 Subject: [PATCH 52/91] restore functionality --- enclave-runtime/src/test/ipfs_tests.rs | 27 ++++++++------------- service/src/ocall_bridge/ipfs_ocall.rs | 33 ++++++++++---------------- 2 files changed, 23 insertions(+), 37 deletions(-) diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index 5f2e4e7bc..d68224551 100644 --- a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -17,7 +17,6 @@ */ use crate::ocall::OcallApi; -use codec::Decode; use itp_ocall_api::EnclaveIpfsOCallApi; use itp_utils::IpfsCid; use log::*; @@ -58,23 +57,17 @@ pub fn test_ocall_write_ipfs_fallback() { let payload_size = 100; // in kB eprintln!("testing IPFS write of {}kB if api is unreachable. Expected to fallback to dump local file...", payload_size); let enc_state: Vec = vec![20; payload_size * 1024]; - let result = OcallApi.write_ipfs(enc_state.as_slice()); + let _result = OcallApi.write_ipfs(enc_state.as_slice()); + let expected_cid = IpfsCid::from_content_bytes(&enc_state).unwrap(); + let dumpfile = + find_first_matching_file(expected_cid.to_string()).expect("dumped file not found"); + let mut f = fs::File::open(dumpfile).unwrap(); + let mut content_buf = Vec::new(); + f.read_to_end(&mut content_buf).unwrap(); + eprintln!("reading file {:?} of size {} bytes", f, &content_buf.len()); + let file_cid = IpfsCid::from_content_bytes(&content_buf).unwrap(); - if result.is_ok() { - eprintln!("write_ipfs succeeded, but was expected to fail and fallback to local file dump. Did you accidentally provide an ipfs api url to the test?"); - panic!("write_ipfs succeeded, but was expected to fail and fallback to local file dump. 
Did you accidentally provide an ipfs api url to the test?"); - } else { - // let dumpfile = - // find_first_matching_file(expected_cid.to_string()).expect("dumped file not found"); - // let mut f = fs::File::open(dumpfile).unwrap(); - let mut content_buf = Vec::new(); - // f.read_to_end(&mut content_buf).unwrap(); - //eprintln!("reading file {:?} of size {} bytes", f, &content_buf.len()); - eprintln!("reading dumped content of size {} bytes", &content_buf.len()); - let file_cid = IpfsCid::from_content_bytes(&content_buf).unwrap(); - let expected_cid = IpfsCid::from_content_bytes(&enc_state).unwrap(); - assert_eq!(expected_cid, file_cid); - } + assert_eq!(expected_cid, file_cid); } fn find_first_matching_file(cid_str: String) -> Option { diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index 9ca1b8d7e..96832ad5e 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -107,27 +107,20 @@ fn write_to_ipfs_sync( data: &'static [u8], log_dir: Arc, ) -> OCallBridgeResult { + let datac = Cursor::new(data); + let rt = Runtime::new().unwrap(); + + match rt.block_on(client.add(datac)) { + Ok(res) => { + eprintln!("ocall result IpfsCid {}", res.hash); + }, + Err(e) => { + let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) + .unwrap_or_else(|e| e.to_string().into()); + eprintln!(" write to ipfs failed late, wrote to file {}", dumpfile.display()); + }, + }; Ok(IpfsCid::default()) - // let datac = Cursor::new(data); - // let rt = Runtime::new().unwrap(); - // - // match rt.block_on(client.add(datac)) { - // Ok(res) => { - // eprintln!("ocall result IpfsCid {}", res.hash); - // IpfsCid::try_from(res.hash.as_str()) - // .map_err(|e| OCallBridgeError::IpfsError(format!("invalid IpfsCid: {:?}", e))) - // }, - // Err(e) => { - // let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) - // .unwrap_or_else(|e| e.to_string().into()); - // eprintln!(" write to ipfs failed late, wrote to file {}", dumpfile.display()); - // Err(OCallBridgeError::IpfsError(format!( - // "error adding file to IPFS: {}. 
Dumped content to local file instead: {}", - // e, - // dumpfile.display() - // ))) - // }, - // } } fn log_failing_blob_to_file(blob: Vec, log_dir: Arc) -> io::Result { From 7304bb364f8c4affc028c154e390b4d19040eb38 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Tue, 23 Sep 2025 21:05:29 +0200 Subject: [PATCH 53/91] clippy&fmt --- app-libs/stf/src/trusted_call.rs | 2 +- enclave-runtime/src/test/tests_main.rs | 3 +-- service/src/ocall_bridge/ipfs_ocall.rs | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/app-libs/stf/src/trusted_call.rs b/app-libs/stf/src/trusted_call.rs index ea7b9d2f5..df82acc0c 100644 --- a/app-libs/stf/src/trusted_call.rs +++ b/app-libs/stf/src/trusted_call.rs @@ -915,7 +915,7 @@ where }, TrustedCall::guess_the_number(call) => call.execute(side_effects, shard, node_metadata_repo), - TrustedCall::credits(call) => call.execute(side_effects, shard, node_metadata_repo), + TrustedCall::credits(call) => call.execute(side_effects, shard, node_metadata_repo), TrustedCall::force_unshield_all(enclave_account, who, maybe_asset_id) => { ensure_enclave_signer_account(&enclave_account)?; if let Some(asset_id) = maybe_asset_id { diff --git a/enclave-runtime/src/test/tests_main.rs b/enclave-runtime/src/test/tests_main.rs index 33bf7861a..206cbf771 100644 --- a/enclave-runtime/src/test/tests_main.rs +++ b/enclave-runtime/src/test/tests_main.rs @@ -427,8 +427,7 @@ fn test_create_state_diff() { assert_eq!( sender_acc_info.data.free, ita_stf::test_genesis::ENDOWED_ACC_FUNDS - - TX_AMOUNT - - 1_000_000_000_000 / ita_stf::STF_TX_FEE_UNIT_DIVIDER + - TX_AMOUNT - 1_000_000_000_000 / ita_stf::STF_TX_FEE_UNIT_DIVIDER ); } diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index 96832ad5e..cc8d671ca 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -115,7 +115,7 @@ fn write_to_ipfs_sync( eprintln!("ocall result IpfsCid {}", res.hash); }, Err(e) => { - let dumpfile = log_failing_blob_to_file(data.into(), log_dir.clone()) + let dumpfile = log_failing_blob_to_file(data.into(), log_dir) .unwrap_or_else(|e| e.to_string().into()); eprintln!(" write to ipfs failed late, wrote to file {}", dumpfile.display()); }, From 01d061e3db118b586221adbb8a32933021004ec6 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Wed, 24 Sep 2025 08:54:49 +0200 Subject: [PATCH 54/91] use tstd for file io in test --- enclave-runtime/src/test/ipfs_tests.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index d68224551..66ce23b2e 100644 --- a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -16,6 +16,9 @@ */ +#[cfg(all(not(feature = "std"), feature = "sgx"))] +extern crate sgx_tstd as std; + use crate::ocall::OcallApi; use itp_ocall_api::EnclaveIpfsOCallApi; use itp_utils::IpfsCid; From 8a3f0458ac8657cbd9ec8dd54d192cb5d94f8dfb Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Wed, 24 Sep 2025 09:11:17 +0200 Subject: [PATCH 55/91] avoid tests running in parallel --- enclave-runtime/src/test/ipfs_tests.rs | 2 +- enclave-runtime/src/test/tests_main.rs | 5 +++-- service/src/ocall_bridge/ipfs_ocall.rs | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index 66ce23b2e..da8280c26 100644 --- a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -57,7 +57,7 @@ 
pub fn test_ocall_read_write_ipfs() { } pub fn test_ocall_write_ipfs_fallback() { - let payload_size = 100; // in kB + let payload_size = 101; // in kB eprintln!("testing IPFS write of {}kB if api is unreachable. Expected to fallback to dump local file...", payload_size); let enc_state: Vec = vec![20; payload_size * 1024]; let _result = OcallApi.write_ipfs(enc_state.as_slice()); diff --git a/enclave-runtime/src/test/tests_main.rs b/enclave-runtime/src/test/tests_main.rs index 206cbf771..2f73453a6 100644 --- a/enclave-runtime/src/test/tests_main.rs +++ b/enclave-runtime/src/test/tests_main.rs @@ -163,7 +163,7 @@ pub extern "C" fn test_main_entrance() -> size_t { itc_parentchain::light_client::io::sgx_tests::sealing_creates_backup, // this test needs an ipfs node running.. - crate::test::ipfs_tests::test_ocall_read_write_ipfs, + // crate::test::ipfs_tests::test_ocall_read_write_ipfs, crate::test::ipfs_tests::test_ocall_write_ipfs_fallback, // Teeracle tests @@ -427,7 +427,8 @@ fn test_create_state_diff() { assert_eq!( sender_acc_info.data.free, ita_stf::test_genesis::ENDOWED_ACC_FUNDS - - TX_AMOUNT - 1_000_000_000_000 / ita_stf::STF_TX_FEE_UNIT_DIVIDER + - TX_AMOUNT + - 1_000_000_000_000 / ita_stf::STF_TX_FEE_UNIT_DIVIDER ); } diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index cc8d671ca..401c21dc0 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -28,7 +28,7 @@ use std::{ io::{self, Cursor, Write}, path::{Path, PathBuf}, str, - sync::{mpsc::channel, Arc}, + sync::Arc, }; pub struct IpfsOCall { From 0cf14dd4b67f38faa77666b6bba6e0ad513941eb Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Wed, 24 Sep 2025 09:22:46 +0200 Subject: [PATCH 56/91] debug logs --- enclave-runtime/src/test/ipfs_tests.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index da8280c26..c283d3d7e 100644 --- a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -60,16 +60,19 @@ pub fn test_ocall_write_ipfs_fallback() { let payload_size = 101; // in kB eprintln!("testing IPFS write of {}kB if api is unreachable. 
Expected to fallback to dump local file...", payload_size); let enc_state: Vec = vec![20; payload_size * 1024]; - let _result = OcallApi.write_ipfs(enc_state.as_slice()); + let result = OcallApi.write_ipfs(enc_state.as_slice()); + eprintln!("write_ipfs ocall result : {:?}", result); let expected_cid = IpfsCid::from_content_bytes(&enc_state).unwrap(); + eprintln!("expected cid: {:?}", expected_cid); let dumpfile = find_first_matching_file(expected_cid.to_string()).expect("dumped file not found"); + eprintln!("found dumped file: {:?}", dumpfile); let mut f = fs::File::open(dumpfile).unwrap(); let mut content_buf = Vec::new(); f.read_to_end(&mut content_buf).unwrap(); eprintln!("reading file {:?} of size {} bytes", f, &content_buf.len()); let file_cid = IpfsCid::from_content_bytes(&content_buf).unwrap(); - + eprintln!("file cid: {:?}", file_cid); assert_eq!(expected_cid, file_cid); } From a5396294aa12116a8423d1c42d7a53c4b0f8232e Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Wed, 24 Sep 2025 09:45:06 +0200 Subject: [PATCH 57/91] more verbose debug logs --- enclave-runtime/src/test/ipfs_tests.rs | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index c283d3d7e..29b8322ff 100644 --- a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -57,13 +57,15 @@ pub fn test_ocall_read_write_ipfs() { } pub fn test_ocall_write_ipfs_fallback() { - let payload_size = 101; // in kB + let payload_size = 100; // in kB eprintln!("testing IPFS write of {}kB if api is unreachable. Expected to fallback to dump local file...", payload_size); let enc_state: Vec = vec![20; payload_size * 1024]; let result = OcallApi.write_ipfs(enc_state.as_slice()); eprintln!("write_ipfs ocall result : {:?}", result); - let expected_cid = IpfsCid::from_content_bytes(&enc_state).unwrap(); - eprintln!("expected cid: {:?}", expected_cid); + let res_expected_cid = IpfsCid::from_content_bytes(&enc_state); + eprintln!("expected cid: {:?}", res_expected_cid); + assert!(res_expected_cid.is_ok()); + let expected_cid = res_expected_cid.expect("known to be ok"); let dumpfile = find_first_matching_file(expected_cid.to_string()).expect("dumped file not found"); eprintln!("found dumped file: {:?}", dumpfile); @@ -71,8 +73,10 @@ pub fn test_ocall_write_ipfs_fallback() { let mut content_buf = Vec::new(); f.read_to_end(&mut content_buf).unwrap(); eprintln!("reading file {:?} of size {} bytes", f, &content_buf.len()); - let file_cid = IpfsCid::from_content_bytes(&content_buf).unwrap(); - eprintln!("file cid: {:?}", file_cid); + let res_file_cid = IpfsCid::from_content_bytes(&content_buf); + eprintln!("file cid: {:?}", res_file_cid); + assert!(res_file_cid.is_ok()); + let file_cid = res_file_cid.expect("known to be ok"); assert_eq!(expected_cid, file_cid); } From 269188e481ee5fd727cc3eb0401a99a065d86dbd Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Wed, 24 Sep 2025 12:12:20 +0200 Subject: [PATCH 58/91] disable fallback testing --- enclave-runtime/src/test/ipfs_tests.rs | 32 +++++++++++++------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index 29b8322ff..95bccf5c7 100644 --- a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -62,22 +62,22 @@ pub fn test_ocall_write_ipfs_fallback() { let enc_state: Vec = vec![20; payload_size * 1024]; let result = 
OcallApi.write_ipfs(enc_state.as_slice()); eprintln!("write_ipfs ocall result : {:?}", result); - let res_expected_cid = IpfsCid::from_content_bytes(&enc_state); - eprintln!("expected cid: {:?}", res_expected_cid); - assert!(res_expected_cid.is_ok()); - let expected_cid = res_expected_cid.expect("known to be ok"); - let dumpfile = - find_first_matching_file(expected_cid.to_string()).expect("dumped file not found"); - eprintln!("found dumped file: {:?}", dumpfile); - let mut f = fs::File::open(dumpfile).unwrap(); - let mut content_buf = Vec::new(); - f.read_to_end(&mut content_buf).unwrap(); - eprintln!("reading file {:?} of size {} bytes", f, &content_buf.len()); - let res_file_cid = IpfsCid::from_content_bytes(&content_buf); - eprintln!("file cid: {:?}", res_file_cid); - assert!(res_file_cid.is_ok()); - let file_cid = res_file_cid.expect("known to be ok"); - assert_eq!(expected_cid, file_cid); + // let res_expected_cid = IpfsCid::from_content_bytes(&enc_state); + // eprintln!("expected cid: {:?}", res_expected_cid); + // assert!(res_expected_cid.is_ok()); + // let expected_cid = res_expected_cid.expect("known to be ok"); + // let dumpfile = + // find_first_matching_file(expected_cid.to_string()).expect("dumped file not found"); + // eprintln!("found dumped file: {:?}", dumpfile); + // let mut f = fs::File::open(dumpfile).unwrap(); + // let mut content_buf = Vec::new(); + // f.read_to_end(&mut content_buf).unwrap(); + // eprintln!("reading file {:?} of size {} bytes", f, &content_buf.len()); + // let res_file_cid = IpfsCid::from_content_bytes(&content_buf); + // eprintln!("file cid: {:?}", res_file_cid); + // assert!(res_file_cid.is_ok()); + // let file_cid = res_file_cid.expect("known to be ok"); + // assert_eq!(expected_cid, file_cid); } fn find_first_matching_file(cid_str: String) -> Option { From 9b4bb0f313e447ba2aaf5ab24f4a70db5bdb4a21 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Wed, 24 Sep 2025 15:37:54 +0200 Subject: [PATCH 59/91] get rid of read_ipfs ocall and unused return value --- core-primitives/ocall-api/src/lib.rs | 5 +- enclave-runtime/Enclave.edl | 7 +- enclave-runtime/src/ocall/ffi.rs | 12 +--- enclave-runtime/src/ocall/ipfs_ocall.rs | 24 +------ .../test/mocks/propose_to_import_call_mock.rs | 5 +- service/src/ocall_bridge/bridge_api.rs | 4 +- service/src/ocall_bridge/ffi/ipfs.rs | 60 ++-------------- service/src/ocall_bridge/ipfs_ocall.rs | 70 +++++-------------- 8 files changed, 30 insertions(+), 157 deletions(-) diff --git a/core-primitives/ocall-api/src/lib.rs b/core-primitives/ocall-api/src/lib.rs index f8591b1e4..e2c7793b9 100644 --- a/core-primitives/ocall-api/src/lib.rs +++ b/core-primitives/ocall-api/src/lib.rs @@ -25,7 +25,7 @@ use core::result::Result as StdResult; use derive_more::{Display, From}; use itp_storage::Error as StorageError; use itp_types::{ - parentchain::ParentchainId, storage::StorageEntryVerified, BlockHash, IpfsCid, ShardIdentifier, + parentchain::ParentchainId, storage::StorageEntryVerified, BlockHash, ShardIdentifier, TrustedOperationStatus, WorkerRequest, WorkerResponse, }; use sgx_types::*; @@ -143,6 +143,5 @@ pub trait EnclaveSidechainOCallApi: Clone + Send + Sync { /// trait for o-call related to IPFS pub trait EnclaveIpfsOCallApi: Clone + Send + Sync { - fn write_ipfs(&self, encoded_state: &[u8]) -> SgxResult>; - fn read_ipfs(&self, cid: &IpfsCid) -> SgxResult<()>; + fn write_ipfs(&self, encoded_state: &[u8]) -> SgxResult<()>; } diff --git a/enclave-runtime/Enclave.edl b/enclave-runtime/Enclave.edl index dc77b85c1..5c7ee044f 
100644 --- a/enclave-runtime/Enclave.edl +++ b/enclave-runtime/Enclave.edl @@ -225,13 +225,8 @@ enclave { [out] sgx_update_info_bit_t * update_info ); - sgx_status_t ocall_read_ipfs( - [in, size = cid_size] uint8_t * cid, uint32_t cid_size - ); - sgx_status_t ocall_write_ipfs( - [in, size = state_size] uint8_t * enc_state, uint32_t state_size, - [out, size = cid_size] uint8_t * cid, uint32_t cid_size + [in, size = content_size] uint8_t * content, uint32_t content_size ); sgx_status_t ocall_worker_request( diff --git a/enclave-runtime/src/ocall/ffi.rs b/enclave-runtime/src/ocall/ffi.rs index 64f47cc51..9e75be3cd 100644 --- a/enclave-runtime/src/ocall/ffi.rs +++ b/enclave-runtime/src/ocall/ffi.rs @@ -116,17 +116,9 @@ extern "C" { await_each_inclusion: c_int, ) -> sgx_status_t; - pub fn ocall_read_ipfs( - ret_val: *mut sgx_status_t, - cid: *const u8, - cid_size: u32, - ) -> sgx_status_t; - pub fn ocall_write_ipfs( ret_val: *mut sgx_status_t, - enc_state: *const u8, - enc_state_size: u32, - cid: *mut u8, - cid_size: u32, + content: *const u8, + content_size: u32, ) -> sgx_status_t; } diff --git a/enclave-runtime/src/ocall/ipfs_ocall.rs b/enclave-runtime/src/ocall/ipfs_ocall.rs index 902089f72..f9fda8e95 100644 --- a/enclave-runtime/src/ocall/ipfs_ocall.rs +++ b/enclave-runtime/src/ocall/ipfs_ocall.rs @@ -25,39 +25,17 @@ use log::warn; use sgx_types::{sgx_status_t, SgxResult}; impl EnclaveIpfsOCallApi for OcallApi { - fn write_ipfs(&self, content: &[u8]) -> SgxResult> { + fn write_ipfs(&self, content: &[u8]) -> SgxResult<()> { let mut rt: sgx_status_t = sgx_status_t::SGX_ERROR_UNEXPECTED; - let mut cid_buf = [0u8; 46]; //max expected length for an encoded cid let res = unsafe { ffi::ocall_write_ipfs( &mut rt as *mut sgx_status_t, content.as_ptr(), content.len() as u32, - cid_buf.as_mut_ptr(), - cid_buf.len() as u32, ) }; - - ensure!(rt == sgx_status_t::SGX_SUCCESS, rt); - ensure!(res == sgx_status_t::SGX_SUCCESS, res); - - Ok(cid_buf.into()) - } - - fn read_ipfs(&self, cid: &IpfsCid) -> SgxResult<()> { - let mut rt: sgx_status_t = sgx_status_t::SGX_ERROR_UNEXPECTED; - let cid_buf = cid.encode(); - let res = unsafe { - ffi::ocall_read_ipfs( - &mut rt as *mut sgx_status_t, - cid_buf.as_ptr(), - cid_buf.len() as u32, - ) - }; - ensure!(rt == sgx_status_t::SGX_SUCCESS, rt); ensure!(res == sgx_status_t::SGX_SUCCESS, res); - warn!("IPFS read not implemented, returning empty vec"); Ok(()) } } diff --git a/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs b/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs index a8fb94724..18f7b7333 100644 --- a/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs +++ b/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs @@ -126,10 +126,7 @@ impl EnclaveSidechainOCallApi for ProposeToImportOCallApi { } impl EnclaveIpfsOCallApi for ProposeToImportOCallApi { - fn write_ipfs(&self, _encoded_state: &[u8]) -> SgxResult> { - Ok(vec![]) - } - fn read_ipfs(&self, _cid: &IpfsCid) -> SgxResult<()> { + fn write_ipfs(&self, _encoded_state: &[u8]) -> SgxResult<()> { Ok(()) } } diff --git a/service/src/ocall_bridge/bridge_api.rs b/service/src/ocall_bridge/bridge_api.rs index 274320c8d..60d341c6d 100644 --- a/service/src/ocall_bridge/bridge_api.rs +++ b/service/src/ocall_bridge/bridge_api.rs @@ -244,9 +244,7 @@ pub trait SidechainBridge { /// Trait for all the OCalls related to IPFS #[cfg_attr(test, automock)] pub trait IpfsBridge { - fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult; - - fn read_from_ipfs(&self, cid: 
IpfsCid) -> OCallBridgeResult<()>; + fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult<()>; } /// Trait for the direct invocation OCalls diff --git a/service/src/ocall_bridge/ffi/ipfs.rs b/service/src/ocall_bridge/ffi/ipfs.rs index f1135f0ae..1b928fc1d 100644 --- a/service/src/ocall_bridge/ffi/ipfs.rs +++ b/service/src/ocall_bridge/ffi/ipfs.rs @@ -25,60 +25,8 @@ use std::{slice, sync::Arc}; /// C-API exposed for o-call from enclave #[no_mangle] -pub unsafe extern "C" fn ocall_write_ipfs( - enc_state: *const u8, - enc_state_size: u32, - cid: *mut u8, - cid_size: u32, -) -> sgx_status_t { - write_ipfs(enc_state, enc_state_size, cid, cid_size, Bridge::get_ipfs_api()) -} - -/// C-API exposed for o-call from enclave -#[no_mangle] -pub unsafe extern "C" fn ocall_read_ipfs(cid: *const u8, cid_size: u32) -> sgx_status_t { - read_ipfs(cid, cid_size, Bridge::get_ipfs_api()) -} - -fn write_ipfs( - enc_state: *const u8, - enc_state_size: u32, - cid: *mut u8, - cid_size: u32, - ipfs_api: Arc, -) -> sgx_status_t { - let state = unsafe { slice::from_raw_parts(enc_state, enc_state_size as usize) }; - let cid = unsafe { slice::from_raw_parts_mut(cid, cid_size as usize) }; - - return match ipfs_api.write_to_ipfs(state) { - Ok(r) => { - // TODO: actually return cid - // cid.fill(0); - // let encoded = r.encode(); - // let len = encoded.len().min(cid.len()); - // cid[..len].copy_from_slice(&encoded[..len]); - sgx_status_t::SGX_SUCCESS - }, - Err(e) => { - error!("OCall to write_ipfs failed: {:?}", e); - sgx_status_t::SGX_ERROR_UNEXPECTED - }, - } -} - -fn read_ipfs(cid: *const u8, cid_size: u32, ipfs_api: Arc) -> sgx_status_t { - let mut cid_raw = unsafe { slice::from_raw_parts(cid, cid_size as usize) }; - - if let Ok(cid) = IpfsCid::decode(&mut cid_raw) { - match ipfs_api.read_from_ipfs(cid) { - Ok(_) => sgx_status_t::SGX_SUCCESS, - Err(e) => { - error!("OCall to read_ipfs failed: {:?}", e); - sgx_status_t::SGX_ERROR_UNEXPECTED - }, - } - } else { - error!("Decoding CID failed"); - sgx_status_t::SGX_ERROR_UNEXPECTED - } +pub unsafe extern "C" fn ocall_write_ipfs(content: *const u8, content_size: u32) -> sgx_status_t { + let content = unsafe { slice::from_raw_parts(content, content_size as usize) }; + let _ = Bridge::get_ipfs_api().write_to_ipfs(content); + sgx_status_t::SGX_SUCCESS } diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index 401c21dc0..216780f82 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -18,7 +18,6 @@ use crate::ocall_bridge::bridge_api::{IpfsBridge, OCallBridgeError, OCallBridgeResult}; use chrono::Local; -use futures::TryStreamExt; use ipfs_api_backend_hyper::{IpfsApi, IpfsClient, TryFromUri}; use itp_utils::IpfsCid; use log::*; @@ -30,6 +29,7 @@ use std::{ str, sync::Arc, }; +use tokio::runtime::Runtime; pub struct IpfsOCall { client: Option>, @@ -63,66 +63,32 @@ impl IpfsOCall { } impl IpfsBridge for IpfsOCall { - fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult { + fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult<()> { eprintln!(" Entering ocall_write_ipfs to write {}B", data.len()); if let Some(ref client) = self.client { - let result = write_to_ipfs_sync(client, data, self.log_dir.clone()); - eprintln!(" ipfs result {:?}", result); - Ok(IpfsCid::default()) + let datac = Cursor::new(data); + let rt = Runtime::new().unwrap(); + match rt.block_on(client.add(datac)) { + Ok(res) => { + eprintln!("ocall result IpfsCid {}", res.hash); + }, + 
Err(e) => { + let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()) + .unwrap_or_else(|e| e.to_string().into()); + eprintln!( + " write to ipfs failed late, wrote to file {}", + dumpfile.display() + ); + }, + }; } else { let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()) .unwrap_or_else(|e| e.to_string().into()); - Ok(IpfsCid::default()) - } - } - - fn read_from_ipfs(&self, cid: IpfsCid) -> OCallBridgeResult<()> { - eprintln!(" Entering ocall_read_ipfs"); + }; Ok(()) - // let client = self.client.as_ref().ok_or_else(|| { - // OCallBridgeError::IpfsError( - // "No IPFS client configured, cannot read from IPFS".to_string(), - // ) - // })?; - // let res = read_from_ipfs(client, &cid) - // .map_err(|_| OCallBridgeError::IpfsError("failed to read from IPFS".to_string()))?; - // let filename = format!("{:?}", cid); - // create_file(&filename, &res).map_err(OCallBridgeError::IpfsError) - } -} - -fn create_file(filename: &str, result: &[u8]) -> Result<(), String> { - match File::create(filename) { - Ok(mut f) => f - .write_all(result) - .map_or_else(|e| Err(format!("failed writing to file: {}", e)), |_| Ok(())), - Err(e) => Err(format!("failed to create file: {}", e)), } } -use tokio::runtime::Runtime; - -fn write_to_ipfs_sync( - client: &IpfsClient, - data: &'static [u8], - log_dir: Arc, -) -> OCallBridgeResult { - let datac = Cursor::new(data); - let rt = Runtime::new().unwrap(); - - match rt.block_on(client.add(datac)) { - Ok(res) => { - eprintln!("ocall result IpfsCid {}", res.hash); - }, - Err(e) => { - let dumpfile = log_failing_blob_to_file(data.into(), log_dir) - .unwrap_or_else(|e| e.to_string().into()); - eprintln!(" write to ipfs failed late, wrote to file {}", dumpfile.display()); - }, - }; - Ok(IpfsCid::default()) -} - fn log_failing_blob_to_file(blob: Vec, log_dir: Arc) -> io::Result { let log_dir = log_dir.join("log-ipfs-failing-add"); create_dir_all(&log_dir)?; From 986370a3de40338896b4daa56c11129626b98f2d Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Wed, 24 Sep 2025 15:38:26 +0200 Subject: [PATCH 60/91] fmt --- enclave-runtime/src/test/tests_main.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/enclave-runtime/src/test/tests_main.rs b/enclave-runtime/src/test/tests_main.rs index 2f73453a6..5264a91c1 100644 --- a/enclave-runtime/src/test/tests_main.rs +++ b/enclave-runtime/src/test/tests_main.rs @@ -427,8 +427,7 @@ fn test_create_state_diff() { assert_eq!( sender_acc_info.data.free, ita_stf::test_genesis::ENDOWED_ACC_FUNDS - - TX_AMOUNT - - 1_000_000_000_000 / ita_stf::STF_TX_FEE_UNIT_DIVIDER + - TX_AMOUNT - 1_000_000_000_000 / ita_stf::STF_TX_FEE_UNIT_DIVIDER ); } From f7367e3db19f0b72e01e386cee144954d22f55a0 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Wed, 24 Sep 2025 16:45:38 +0200 Subject: [PATCH 61/91] verbose logs in docker demo --- docker/docker-compose.yml | 4 ++-- enclave-runtime/src/ocall/ipfs_ocall.rs | 4 +++- service/src/ocall_bridge/ipfs_ocall.rs | 11 +++++------ 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 70e798c89..26b5aa669 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -37,7 +37,7 @@ services: - "${AESMD:-/dev/null}:/var/run/aesmd" - "${SGX_QCNL:-/dev/null}:/etc/sgx_default_qcnl.conf" environment: - - RUST_LOG=info,substrate_api_client=warn,ws=warn,mio=warn,ac_node_api=warn,sp_io=warn,tungstenite=warn,integritee_service=debug,enclave_runtime=debug + - 
RUST_LOG=trace,substrate_api_client=warn,ws=warn,mio=warn,ac_node_api=warn,sp_io=warn,sp_io::storage=error,tungstenite=warn,rustls=info,soketto=info,itc_tls_websocket_server=info,itc_rpc_client=info networks: - integritee-test-network healthcheck: @@ -68,7 +68,7 @@ services: - "${AESMD:-/dev/null}:/var/run/aesmd" - "${SGX_QCNL:-/dev/null}:/etc/sgx_default_qcnl.conf" environment: - - RUST_LOG=info,substrate_api_client=warn,ws=warn,mio=warn,ac_node_api=warn,sp_io=warn,tungstenite=warn,integritee_service=debug,enclave_runtime=debug + - RUST_LOG=trace,substrate_api_client=warn,ws=warn,mio=warn,ac_node_api=warn,sp_io=warn,sp_io::storage=error,tungstenite=warn,rustls=info,soketto=info,itc_tls_websocket_server=info,itc_rpc_client=info networks: - integritee-test-network healthcheck: diff --git a/enclave-runtime/src/ocall/ipfs_ocall.rs b/enclave-runtime/src/ocall/ipfs_ocall.rs index f9fda8e95..d190e5bf7 100644 --- a/enclave-runtime/src/ocall/ipfs_ocall.rs +++ b/enclave-runtime/src/ocall/ipfs_ocall.rs @@ -21,12 +21,13 @@ use codec::Encode; use frame_support::ensure; use itp_ocall_api::EnclaveIpfsOCallApi; use itp_types::IpfsCid; -use log::warn; +use log::*; use sgx_types::{sgx_status_t, SgxResult}; impl EnclaveIpfsOCallApi for OcallApi { fn write_ipfs(&self, content: &[u8]) -> SgxResult<()> { let mut rt: sgx_status_t = sgx_status_t::SGX_ERROR_UNEXPECTED; + trace!("calling OCallApi::write_ipfs with {} bytes", content.len()); let res = unsafe { ffi::ocall_write_ipfs( &mut rt as *mut sgx_status_t, @@ -36,6 +37,7 @@ impl EnclaveIpfsOCallApi for OcallApi { }; ensure!(rt == sgx_status_t::SGX_SUCCESS, rt); ensure!(res == sgx_status_t::SGX_SUCCESS, res); + trace!("completed OCallApi::write_ipfs"); Ok(()) } } diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index 216780f82..6c55e351e 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -64,24 +64,22 @@ impl IpfsOCall { impl IpfsBridge for IpfsOCall { fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult<()> { - eprintln!(" Entering ocall_write_ipfs to write {}B", data.len()); + trace!(" Entering ocall_write_ipfs to write {}B", data.len()); if let Some(ref client) = self.client { let datac = Cursor::new(data); let rt = Runtime::new().unwrap(); match rt.block_on(client.add(datac)) { Ok(res) => { - eprintln!("ocall result IpfsCid {}", res.hash); + debug!("ocall result IpfsCid {}", res.hash); }, Err(e) => { let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()) .unwrap_or_else(|e| e.to_string().into()); - eprintln!( - " write to ipfs failed late, wrote to file {}", - dumpfile.display() - ); + warn!(" write to ipfs failed late, wrote to file {}", dumpfile.display()); }, }; } else { + warn!("IPFS client not configured, writing to local file"); let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()) .unwrap_or_else(|e| e.to_string().into()); }; @@ -100,5 +98,6 @@ fn log_failing_blob_to_file(blob: Vec, log_dir: Arc) -> io::Result Date: Wed, 24 Sep 2025 17:38:18 +0200 Subject: [PATCH 62/91] back to unit testing of ocall --- enclave-runtime/src/test/ipfs_tests.rs | 32 +++++++++++------------ enclave-runtime/src/top_pool_execution.rs | 9 ++++--- 2 files changed, 22 insertions(+), 19 deletions(-) diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index 95bccf5c7..29b8322ff 100644 --- a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -62,22 +62,22 @@ 
pub fn test_ocall_write_ipfs_fallback() { let enc_state: Vec = vec![20; payload_size * 1024]; let result = OcallApi.write_ipfs(enc_state.as_slice()); eprintln!("write_ipfs ocall result : {:?}", result); - // let res_expected_cid = IpfsCid::from_content_bytes(&enc_state); - // eprintln!("expected cid: {:?}", res_expected_cid); - // assert!(res_expected_cid.is_ok()); - // let expected_cid = res_expected_cid.expect("known to be ok"); - // let dumpfile = - // find_first_matching_file(expected_cid.to_string()).expect("dumped file not found"); - // eprintln!("found dumped file: {:?}", dumpfile); - // let mut f = fs::File::open(dumpfile).unwrap(); - // let mut content_buf = Vec::new(); - // f.read_to_end(&mut content_buf).unwrap(); - // eprintln!("reading file {:?} of size {} bytes", f, &content_buf.len()); - // let res_file_cid = IpfsCid::from_content_bytes(&content_buf); - // eprintln!("file cid: {:?}", res_file_cid); - // assert!(res_file_cid.is_ok()); - // let file_cid = res_file_cid.expect("known to be ok"); - // assert_eq!(expected_cid, file_cid); + let res_expected_cid = IpfsCid::from_content_bytes(&enc_state); + eprintln!("expected cid: {:?}", res_expected_cid); + assert!(res_expected_cid.is_ok()); + let expected_cid = res_expected_cid.expect("known to be ok"); + let dumpfile = + find_first_matching_file(expected_cid.to_string()).expect("dumped file not found"); + eprintln!("found dumped file: {:?}", dumpfile); + let mut f = fs::File::open(dumpfile).unwrap(); + let mut content_buf = Vec::new(); + f.read_to_end(&mut content_buf).unwrap(); + eprintln!("reading file {:?} of size {} bytes", f, &content_buf.len()); + let res_file_cid = IpfsCid::from_content_bytes(&content_buf); + eprintln!("file cid: {:?}", res_file_cid); + assert!(res_file_cid.is_ok()); + let file_cid = res_file_cid.expect("known to be ok"); + assert_eq!(expected_cid, file_cid); } fn find_first_matching_file(cid_str: String) -> Option { diff --git a/enclave-runtime/src/top_pool_execution.rs b/enclave-runtime/src/top_pool_execution.rs index 01b4c03d0..0c0ab6f60 100644 --- a/enclave-runtime/src/top_pool_execution.rs +++ b/enclave-runtime/src/top_pool_execution.rs @@ -405,9 +405,12 @@ where }) .collect(); if !ipfs_blobs_to_add.is_empty() { - ipfs_blobs_to_add.iter().for_each(|blob| match ocall_api.write_ipfs(blob) { - Ok(cid) => info!("SideEffects: Stored blob on IPFS with CID: {:?}", cid), - Err(e) => error!("SideEffects: Failed to store blob on IPFS: {:?}", e), + debug!("Enclave wants to store {} blob(s) on IPFS", ipfs_blobs_to_add.len()); + ipfs_blobs_to_add.iter().for_each(|blob| { + trace!("Storing blob of size {}B on IPFS", blob.len()); + // ignore errors here. ipfs is optimistic and a fallback is implemented. 
+ // Moreover, we can't handle failures anyway + let _ = ocall_api.write_ipfs(blob); }); } Ok(()) From ef2a6e352e28aeb40953b1336c11ffd0eef16852 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Wed, 24 Sep 2025 17:42:33 +0200 Subject: [PATCH 63/91] isolate ipfs test --- enclave-runtime/src/test/tests_main.rs | 176 ++++++++++++------------- 1 file changed, 88 insertions(+), 88 deletions(-) diff --git a/enclave-runtime/src/test/tests_main.rs b/enclave-runtime/src/test/tests_main.rs index 5264a91c1..6e1e7eebb 100644 --- a/enclave-runtime/src/test/tests_main.rs +++ b/enclave-runtime/src/test/tests_main.rs @@ -76,98 +76,97 @@ use std::{string::String, sync::Arc, time::Duration, vec::Vec}; #[no_mangle] pub extern "C" fn test_main_entrance() -> size_t { rsgx_unit_tests!( - itp_attestation_handler::attestation_handler::tests::decode_spid_works, - stf_sgx_tests::enclave_account_initialization_works, - stf_sgx_tests::shield_funds_increments_signer_account_nonce, - stf_sgx_tests::test_root_account_exists_after_initialization, - itp_stf_state_handler::test::sgx_tests::test_write_and_load_state_works, - itp_stf_state_handler::test::sgx_tests::test_sgx_state_decode_encode_works, - itp_stf_state_handler::test::sgx_tests::test_encrypt_decrypt_state_type_works, - itp_stf_state_handler::test::sgx_tests::test_write_access_locks_read_until_finished, - itp_stf_state_handler::test::sgx_tests::test_ensure_subsequent_state_loads_have_same_hash, - itp_stf_state_handler::test::sgx_tests::test_state_handler_file_backend_is_initialized, - itp_stf_state_handler::test::sgx_tests::test_multiple_state_updates_create_snapshots_up_to_cache_size, - itp_stf_state_handler::test::sgx_tests::test_state_files_from_handler_can_be_loaded_again, - itp_stf_state_handler::test::sgx_tests::test_file_io_get_state_hash_works, - itp_stf_state_handler::test::sgx_tests::test_list_state_ids_ignores_files_not_matching_the_pattern, - itp_stf_state_handler::test::sgx_tests::test_in_memory_state_initializes_from_shard_directory, - itp_sgx_crypto::tests::aes_sealing_works, - itp_sgx_crypto::tests::using_get_aes_repository_twice_initializes_key_only_once, - itp_sgx_crypto::tests::ed25529_sealing_works, - itp_sgx_crypto::tests::using_get_ed25519_repository_twice_initializes_key_only_once, - itp_sgx_crypto::tests::rsa3072_sealing_works, - itp_sgx_crypto::tests::using_get_rsa3072_repository_twice_initializes_key_only_once, - test_compose_block, - test_submit_trusted_call_to_top_pool, - test_submit_trusted_getter_to_top_pool, - test_differentiate_getter_and_call_works, - test_create_block_and_confirmation_works, - test_create_state_diff, - test_executing_call_updates_account_nonce, - test_call_set_update_parentchain_block, - test_invalid_nonce_call_is_not_executed, - test_signature_must_match_public_sender_in_call, - test_non_root_shielding_call_is_not_executed, - test_shielding_call_with_enclave_self_is_executed, - test_retrieve_events, - test_retrieve_event_count, - test_reset_events, - handle_state_mock::tests::initialized_shards_list_is_empty, - handle_state_mock::tests::shard_exists_after_inserting, - handle_state_mock::tests::from_shard_works, - handle_state_mock::tests::initialize_creates_default_state, - handle_state_mock::tests::load_mutate_and_write_works, - handle_state_mock::tests::ensure_subsequent_state_loads_have_same_hash, - handle_state_mock::tests::ensure_encode_and_encrypt_does_not_affect_state_hash, - // mra cert tests - test_verify_mra_cert_should_work, - test_verify_wrong_cert_is_err, - 
test_given_wrong_platform_info_when_verifying_attestation_report_then_return_error, - // sync tests - sidechain_rw_lock_works, - enclave_rw_lock_works, - // unit tests of stf_executor - stf_executor_tests::propose_state_update_always_executes_preprocessing_step, - stf_executor_tests::propose_state_update_executes_no_trusted_calls_given_no_time, - stf_executor_tests::propose_state_update_executes_only_one_trusted_call_given_not_enough_time, - stf_executor_tests::propose_state_update_executes_all_calls_given_enough_time, - enclave_signer_tests::enclave_signer_signatures_are_valid, - enclave_signer_tests::derive_key_is_deterministic, - enclave_signer_tests::nonce_is_computed_correctly, - state_getter_tests::state_getter_works, - // sidechain integration tests - sidechain_aura_tests::produce_sidechain_block_and_import_it, - sidechain_event_tests::ensure_events_get_reset_upon_block_proposal, - top_pool_tests::process_indirect_call_in_top_pool, - top_pool_tests::submit_shielding_call_to_top_pool, - // tls_ra unit tests - tls_ra::seal_handler::test::seal_shielding_key_works, - tls_ra::seal_handler::test::seal_shielding_key_fails_for_invalid_key, - tls_ra::seal_handler::test::unseal_seal_shielding_key_works, - tls_ra::seal_handler::test::seal_state_key_works, - tls_ra::seal_handler::test::seal_state_key_fails_for_invalid_key, - tls_ra::seal_handler::test::unseal_seal_state_key_works, - tls_ra::seal_handler::test::seal_state_works, - tls_ra::seal_handler::test::seal_state_fails_for_invalid_state, - tls_ra::seal_handler::test::unseal_seal_state_works, - tls_ra::tests::test_tls_ra_server_client_networking, - tls_ra::tests::test_state_and_key_provisioning, - // RPC tests - direct_rpc_tests::get_state_request_works, - - // EVM tests - run_evm_tests, - - // light-client-test - itc_parentchain::light_client::io::sgx_tests::init_parachain_light_client_works, - itc_parentchain::light_client::io::sgx_tests::sealing_creates_backup, + // itp_attestation_handler::attestation_handler::tests::decode_spid_works, + // stf_sgx_tests::enclave_account_initialization_works, + // stf_sgx_tests::shield_funds_increments_signer_account_nonce, + // stf_sgx_tests::test_root_account_exists_after_initialization, + // itp_stf_state_handler::test::sgx_tests::test_write_and_load_state_works, + // itp_stf_state_handler::test::sgx_tests::test_sgx_state_decode_encode_works, + // itp_stf_state_handler::test::sgx_tests::test_encrypt_decrypt_state_type_works, + // itp_stf_state_handler::test::sgx_tests::test_write_access_locks_read_until_finished, + // itp_stf_state_handler::test::sgx_tests::test_ensure_subsequent_state_loads_have_same_hash, + // itp_stf_state_handler::test::sgx_tests::test_state_handler_file_backend_is_initialized, + // itp_stf_state_handler::test::sgx_tests::test_multiple_state_updates_create_snapshots_up_to_cache_size, + // itp_stf_state_handler::test::sgx_tests::test_state_files_from_handler_can_be_loaded_again, + // itp_stf_state_handler::test::sgx_tests::test_file_io_get_state_hash_works, + // itp_stf_state_handler::test::sgx_tests::test_list_state_ids_ignores_files_not_matching_the_pattern, + // itp_stf_state_handler::test::sgx_tests::test_in_memory_state_initializes_from_shard_directory, + // itp_sgx_crypto::tests::aes_sealing_works, + // itp_sgx_crypto::tests::using_get_aes_repository_twice_initializes_key_only_once, + // itp_sgx_crypto::tests::ed25529_sealing_works, + // itp_sgx_crypto::tests::using_get_ed25519_repository_twice_initializes_key_only_once, + // itp_sgx_crypto::tests::rsa3072_sealing_works, + 
// itp_sgx_crypto::tests::using_get_rsa3072_repository_twice_initializes_key_only_once, + // test_compose_block, + // test_submit_trusted_call_to_top_pool, + // test_submit_trusted_getter_to_top_pool, + // test_differentiate_getter_and_call_works, + // test_create_block_and_confirmation_works, + // test_create_state_diff, + // test_executing_call_updates_account_nonce, + // test_call_set_update_parentchain_block, + // test_invalid_nonce_call_is_not_executed, + // test_signature_must_match_public_sender_in_call, + // test_non_root_shielding_call_is_not_executed, + // test_shielding_call_with_enclave_self_is_executed, + // test_retrieve_events, + // test_retrieve_event_count, + // test_reset_events, + // handle_state_mock::tests::initialized_shards_list_is_empty, + // handle_state_mock::tests::shard_exists_after_inserting, + // handle_state_mock::tests::from_shard_works, + // handle_state_mock::tests::initialize_creates_default_state, + // handle_state_mock::tests::load_mutate_and_write_works, + // handle_state_mock::tests::ensure_subsequent_state_loads_have_same_hash, + // handle_state_mock::tests::ensure_encode_and_encrypt_does_not_affect_state_hash, + // // mra cert tests + // test_verify_mra_cert_should_work, + // test_verify_wrong_cert_is_err, + // test_given_wrong_platform_info_when_verifying_attestation_report_then_return_error, + // // sync tests + // sidechain_rw_lock_works, + // enclave_rw_lock_works, + // // unit tests of stf_executor + // stf_executor_tests::propose_state_update_always_executes_preprocessing_step, + // stf_executor_tests::propose_state_update_executes_no_trusted_calls_given_no_time, + // stf_executor_tests::propose_state_update_executes_only_one_trusted_call_given_not_enough_time, + // stf_executor_tests::propose_state_update_executes_all_calls_given_enough_time, + // enclave_signer_tests::enclave_signer_signatures_are_valid, + // enclave_signer_tests::derive_key_is_deterministic, + // enclave_signer_tests::nonce_is_computed_correctly, + // state_getter_tests::state_getter_works, + // // sidechain integration tests + // sidechain_aura_tests::produce_sidechain_block_and_import_it, + // sidechain_event_tests::ensure_events_get_reset_upon_block_proposal, + // top_pool_tests::process_indirect_call_in_top_pool, + // top_pool_tests::submit_shielding_call_to_top_pool, + // // tls_ra unit tests + // tls_ra::seal_handler::test::seal_shielding_key_works, + // tls_ra::seal_handler::test::seal_shielding_key_fails_for_invalid_key, + // tls_ra::seal_handler::test::unseal_seal_shielding_key_works, + // tls_ra::seal_handler::test::seal_state_key_works, + // tls_ra::seal_handler::test::seal_state_key_fails_for_invalid_key, + // tls_ra::seal_handler::test::unseal_seal_state_key_works, + // tls_ra::seal_handler::test::seal_state_works, + // tls_ra::seal_handler::test::seal_state_fails_for_invalid_state, + // tls_ra::seal_handler::test::unseal_seal_state_works, + // tls_ra::tests::test_tls_ra_server_client_networking, + // tls_ra::tests::test_state_and_key_provisioning, + // // RPC tests + // direct_rpc_tests::get_state_request_works, + // + // // EVM tests + // run_evm_tests, + // + // // light-client-test + // itc_parentchain::light_client::io::sgx_tests::init_parachain_light_client_works, + // itc_parentchain::light_client::io::sgx_tests::sealing_creates_backup, // this test needs an ipfs node running.. 
// crate::test::ipfs_tests::test_ocall_read_write_ipfs, crate::test::ipfs_tests::test_ocall_write_ipfs_fallback, - // Teeracle tests - run_teeracle_tests, + //run_teeracle_tests, ) } @@ -427,7 +426,8 @@ fn test_create_state_diff() { assert_eq!( sender_acc_info.data.free, ita_stf::test_genesis::ENDOWED_ACC_FUNDS - - TX_AMOUNT - 1_000_000_000_000 / ita_stf::STF_TX_FEE_UNIT_DIVIDER + - TX_AMOUNT + - 1_000_000_000_000 / ita_stf::STF_TX_FEE_UNIT_DIVIDER ); } From db67c130c1407971cae06211a1e576bf4cfa2c7f Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Wed, 24 Sep 2025 18:24:45 +0200 Subject: [PATCH 64/91] refactor ocall slice arg into vec --- core-primitives/ocall-api/src/lib.rs | 2 +- enclave-runtime/src/ocall/ipfs_ocall.rs | 7 ++++--- enclave-runtime/src/test/ipfs_tests.rs | 6 +++--- .../src/test/mocks/propose_to_import_call_mock.rs | 2 +- enclave-runtime/src/top_pool_execution.rs | 2 +- service/src/ocall_bridge/bridge_api.rs | 2 +- service/src/ocall_bridge/ffi/ipfs.rs | 8 ++++++-- service/src/ocall_bridge/ipfs_ocall.rs | 4 ++-- 8 files changed, 19 insertions(+), 14 deletions(-) diff --git a/core-primitives/ocall-api/src/lib.rs b/core-primitives/ocall-api/src/lib.rs index e2c7793b9..5284fb507 100644 --- a/core-primitives/ocall-api/src/lib.rs +++ b/core-primitives/ocall-api/src/lib.rs @@ -143,5 +143,5 @@ pub trait EnclaveSidechainOCallApi: Clone + Send + Sync { /// trait for o-call related to IPFS pub trait EnclaveIpfsOCallApi: Clone + Send + Sync { - fn write_ipfs(&self, encoded_state: &[u8]) -> SgxResult<()>; + fn write_ipfs(&self, encoded_state: Vec) -> SgxResult<()>; } diff --git a/enclave-runtime/src/ocall/ipfs_ocall.rs b/enclave-runtime/src/ocall/ipfs_ocall.rs index d190e5bf7..baa6dc757 100644 --- a/enclave-runtime/src/ocall/ipfs_ocall.rs +++ b/enclave-runtime/src/ocall/ipfs_ocall.rs @@ -25,14 +25,15 @@ use log::*; use sgx_types::{sgx_status_t, SgxResult}; impl EnclaveIpfsOCallApi for OcallApi { - fn write_ipfs(&self, content: &[u8]) -> SgxResult<()> { + fn write_ipfs(&self, content: Vec) -> SgxResult<()> { let mut rt: sgx_status_t = sgx_status_t::SGX_ERROR_UNEXPECTED; trace!("calling OCallApi::write_ipfs with {} bytes", content.len()); + let payload = content.clone(); let res = unsafe { ffi::ocall_write_ipfs( &mut rt as *mut sgx_status_t, - content.as_ptr(), - content.len() as u32, + payload.as_ptr(), + payload.len() as u32, ) }; ensure!(rt == sgx_status_t::SGX_SUCCESS, rt); diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index 29b8322ff..850848e6a 100644 --- a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -37,7 +37,7 @@ pub fn test_ocall_read_write_ipfs() { info!("testing IPFS read/write. Hopefully ipfs daemon is running..."); let enc_state: Vec = vec![20; 100 * 1024]; - let result = OcallApi.write_ipfs(enc_state.as_slice()); + let result = OcallApi.write_ipfs(enc_state); eprintln!("write_ipfs ocall result : {:?}", result); // let returned_cid_raw = OcallApi.write_ipfs(enc_state.as_slice()).unwrap(); @@ -60,9 +60,9 @@ pub fn test_ocall_write_ipfs_fallback() { let payload_size = 100; // in kB eprintln!("testing IPFS write of {}kB if api is unreachable. 
Expected to fallback to dump local file...", payload_size); let enc_state: Vec = vec![20; payload_size * 1024]; - let result = OcallApi.write_ipfs(enc_state.as_slice()); - eprintln!("write_ipfs ocall result : {:?}", result); let res_expected_cid = IpfsCid::from_content_bytes(&enc_state); + let result = OcallApi.write_ipfs(enc_state); + eprintln!("write_ipfs ocall result : {:?}", result); eprintln!("expected cid: {:?}", res_expected_cid); assert!(res_expected_cid.is_ok()); let expected_cid = res_expected_cid.expect("known to be ok"); diff --git a/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs b/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs index 18f7b7333..b968a44e4 100644 --- a/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs +++ b/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs @@ -126,7 +126,7 @@ impl EnclaveSidechainOCallApi for ProposeToImportOCallApi { } impl EnclaveIpfsOCallApi for ProposeToImportOCallApi { - fn write_ipfs(&self, _encoded_state: &[u8]) -> SgxResult<()> { + fn write_ipfs(&self, _encoded_state: Vec) -> SgxResult<()> { Ok(()) } } diff --git a/enclave-runtime/src/top_pool_execution.rs b/enclave-runtime/src/top_pool_execution.rs index 0c0ab6f60..0b7e999b1 100644 --- a/enclave-runtime/src/top_pool_execution.rs +++ b/enclave-runtime/src/top_pool_execution.rs @@ -410,7 +410,7 @@ where trace!("Storing blob of size {}B on IPFS", blob.len()); // ignore errors here. ipfs is optimistic and a fallback is implemented. // Moreover, we can't handle failures anyway - let _ = ocall_api.write_ipfs(blob); + let _ = ocall_api.write_ipfs(blob.clone()); }); } Ok(()) diff --git a/service/src/ocall_bridge/bridge_api.rs b/service/src/ocall_bridge/bridge_api.rs index 60d341c6d..0ed00815a 100644 --- a/service/src/ocall_bridge/bridge_api.rs +++ b/service/src/ocall_bridge/bridge_api.rs @@ -244,7 +244,7 @@ pub trait SidechainBridge { /// Trait for all the OCalls related to IPFS #[cfg_attr(test, automock)] pub trait IpfsBridge { - fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult<()>; + fn write_to_ipfs(&self, data: Vec) -> OCallBridgeResult<()>; } /// Trait for the direct invocation OCalls diff --git a/service/src/ocall_bridge/ffi/ipfs.rs b/service/src/ocall_bridge/ffi/ipfs.rs index 1b928fc1d..1c53a7818 100644 --- a/service/src/ocall_bridge/ffi/ipfs.rs +++ b/service/src/ocall_bridge/ffi/ipfs.rs @@ -25,8 +25,12 @@ use std::{slice, sync::Arc}; /// C-API exposed for o-call from enclave #[no_mangle] -pub unsafe extern "C" fn ocall_write_ipfs(content: *const u8, content_size: u32) -> sgx_status_t { - let content = unsafe { slice::from_raw_parts(content, content_size as usize) }; +pub unsafe extern "C" fn ocall_write_ipfs( + content_ptr: *const u8, + content_size: u32, +) -> sgx_status_t { + let content: Vec = + unsafe { Vec::from(slice::from_raw_parts(content_ptr, content_size as usize)) }; let _ = Bridge::get_ipfs_api().write_to_ipfs(content); sgx_status_t::SGX_SUCCESS } diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index 6c55e351e..021981a54 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -63,10 +63,10 @@ impl IpfsOCall { } impl IpfsBridge for IpfsOCall { - fn write_to_ipfs(&self, data: &'static [u8]) -> OCallBridgeResult<()> { + fn write_to_ipfs(&self, data: Vec) -> OCallBridgeResult<()> { trace!(" Entering ocall_write_ipfs to write {}B", data.len()); if let Some(ref client) = self.client { - let datac = Cursor::new(data); + let 
datac = Cursor::new(data.clone()); let rt = Runtime::new().unwrap(); match rt.block_on(client.add(datac)) { Ok(res) => { From 6439d67fc5b9f02b496fac70200422620596fa62 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Wed, 24 Sep 2025 23:09:27 +0200 Subject: [PATCH 65/91] bypass from_content_bytes with dummy value --- core-primitives/utils/src/ipfs.rs | 34 ++++++++++++++++--------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/core-primitives/utils/src/ipfs.rs b/core-primitives/utils/src/ipfs.rs index 5a18d0bb2..2f70951f4 100644 --- a/core-primitives/utils/src/ipfs.rs +++ b/core-primitives/utils/src/ipfs.rs @@ -45,17 +45,19 @@ impl TryFrom<&str> for IpfsCid { impl IpfsCid { pub fn from_content_bytes(content: &Vec) -> Result { - let mut adder: FileAdder = FileAdder::default(); - let mut total: usize = 0; - let mut stats = Stats::default(); - while total < content.len() { - let (blocks, consumed) = adder.push(&content[total..]); - total += consumed; - stats.process(blocks); - } - let blocks = adder.finish(); - stats.process(blocks); - stats.last.map(IpfsCid).ok_or(IpfsError::FinalCidMissing) + Ok(Self::try_from("QmSaFjwJ2QtS3rZDKzC98XEzv2bqT4TfpWLCpphPPwyQTr") + .expect("known to work for test")) + // let mut adder: FileAdder = FileAdder::default(); + // let mut total: usize = 0; + // let mut stats = Stats::default(); + // while total < content.len() { + // let (blocks, consumed) = adder.push(&content[total..]); + // total += consumed; + // stats.process(blocks); + // } + // let blocks = adder.finish(); + // stats.process(blocks); + // stats.last.map(IpfsCid).ok_or(IpfsError::FinalCidMissing) } } impl Encode for IpfsCid { @@ -87,11 +89,11 @@ impl Display for IpfsCid { } } -impl Default for IpfsCid { - fn default() -> Self { - IpfsCid::from_content_bytes(&Vec::new()).expect("known to work for empty vec") - } -} +// impl Default for IpfsCid { +// fn default() -> Self { +// IpfsCid::from_content_bytes(&Vec::new()).expect("known to work for empty vec") +// } +// } #[derive(Debug, PartialEq)] pub enum IpfsError { From 549a09ef9a96a49ec66d78e3e31c9283163f756b Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Thu, 25 Sep 2025 13:36:32 +0200 Subject: [PATCH 66/91] re-implement IpfsCid for single chunk raw --- Cargo.lock | 374 ++++++++++--------------- app-libs/stf/src/trusted_call.rs | 5 +- core-primitives/utils/Cargo.toml | 7 +- core-primitives/utils/src/ipfs.rs | 111 +++++--- enclave-runtime/Cargo.lock | 372 ++++++++++++++---------- enclave-runtime/src/test/ipfs_tests.rs | 11 +- service/src/ocall_bridge/ipfs_ocall.rs | 2 +- 7 files changed, 461 insertions(+), 421 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b828f63ec..b139cc68b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -39,7 +39,7 @@ dependencies = [ "scale-decode", "scale-encode", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "serde_json 1.0.103", "sp-application-crypto", "sp-core", @@ -60,7 +60,7 @@ dependencies = [ "parity-scale-codec", "primitive-types", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "serde_json 1.0.103", "sp-application-crypto", "sp-core", @@ -292,11 +292,6 @@ dependencies = [ "rustc-demangle", ] -[[package]] -name = "base-x" -version = "0.2.6" -source = "git+https://github.com/whalelephant/base-x-rs?branch=no_std#906c9ac59282ff5a2eec86efd25d50ad9927b147" - [[package]] name = "base-x" version = "0.2.11" @@ -363,7 +358,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1" dependencies = [ - "serde 1.0.193", + "serde 1.0.226", ] [[package]] @@ -381,7 +376,7 @@ version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" dependencies = [ - "serde 1.0.193", + "serde 1.0.226", ] [[package]] @@ -443,17 +438,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "blake2b_simd" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "constant_time_eq 0.1.5", -] - [[package]] name = "blake2b_simd" version = "1.0.1" @@ -462,18 +446,7 @@ checksum = "3c2f0dc9a68c6317d884f97cc36cf5a3d20ba14ce404227df55e1af708ab04bc" dependencies = [ "arrayref", "arrayvec 0.7.4", - "constant_time_eq 0.2.6", -] - -[[package]] -name = "blake2s_simd" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e461a7034e85b211a4acb57ee2e6730b32912b06c08cc242243c39fc21ae6a2" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "constant_time_eq 0.1.5", + "constant_time_eq", ] [[package]] @@ -482,7 +455,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ - "block-padding 0.1.5", + "block-padding", "byte-tools", "byteorder 1.4.3", "generic-array 0.12.4", @@ -494,7 +467,6 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "block-padding 0.2.1", "generic-array 0.14.7", ] @@ -516,12 +488,6 @@ dependencies = [ "byte-tools", ] -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - [[package]] name = "bounded-collections" version = "0.1.8" @@ -531,7 +497,7 @@ dependencies = [ "log 0.4.28", "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", ] [[package]] @@ -547,7 +513,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6798148dccfbff0fae41c7574d2fa8f1ef3492fba0face179de5d8d447d67b05" dependencies = [ "memchr 2.6.3", - "serde 1.0.193", + "serde 1.0.226", ] [[package]] @@ -682,7 +648,7 @@ dependencies = [ "iana-time-zone", "js-sys", "num-traits 0.2.16", - "serde 1.0.193", + "serde 1.0.226", "time", "wasm-bindgen", "winapi 0.3.9", @@ -690,12 +656,15 @@ dependencies = [ [[package]] name = "cid" -version = "0.5.1" -source = "git+https://github.com/whalelephant/rust-cid?branch=nstd#cca87467c46106c801ca3727500477258b0f13b0" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd94671561e36e4e7de75f753f577edafb0e7c05d6e4547229fdf7938fbcd2c3" dependencies = [ - "multibase 0.8.0", - "multihash 0.11.4", - "unsigned-varint 0.5.1", + "core2", + "multibase", + "multihash 0.18.1", + "serde 1.0.226", + "unsigned-varint", ] [[package]] @@ -808,12 +777,6 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "795bc6e66a8e340f075fcf6227e417a2dc976b92b91f3cdc778bb858778b6747" -[[package]] -name = "constant_time_eq" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" - [[package]] name = "constant_time_eq" version = "0.2.6" @@ -875,7 +838,7 @@ version = "0.93.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f42ea692c7b450ad18b8c9889661505d51c09ec4380cf1c2d278dbb2da22cae1" dependencies = [ - "serde 1.0.193", + "serde 1.0.226", ] [[package]] @@ -1324,7 +1287,7 @@ dependencies = [ "log 0.4.28", "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sp-core", "sp-io 7.0.0 (git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42)", "sp-runtime", @@ -1434,8 +1397,8 @@ dependencies = [ "parity-scale-codec", "rlp", "scale-info", - "serde 1.0.193", - "sha3 0.10.8", + "serde 1.0.226", + "sha3", "triehash", ] @@ -1472,8 +1435,8 @@ dependencies = [ "primitive-types", "rlp", "scale-info", - "serde 1.0.193", - "sha3 0.10.8", + "serde 1.0.226", + "sha3", ] [[package]] @@ -1485,7 +1448,7 @@ dependencies = [ "parity-scale-codec", "primitive-types", "scale-info", - "serde 1.0.193", + "serde 1.0.226", ] [[package]] @@ -1510,7 +1473,7 @@ dependencies = [ "environmental 1.1.4", "evm-core", "primitive-types", - "sha3 0.10.8", + "sha3", ] [[package]] @@ -1664,7 +1627,7 @@ dependencies = [ "log 0.4.28", "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sp-core", "sp-io 7.0.0 (git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42)", "sp-runtime", @@ -1680,7 +1643,7 @@ dependencies = [ "frame-support", "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sp-core", "sp-runtime", "sp-std", @@ -1705,7 +1668,7 @@ dependencies = [ "parity-scale-codec", "paste", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sp-api", "sp-application-crypto", "sp-core", @@ -1742,7 +1705,7 @@ dependencies = [ "cfg-if 1.0.0", "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", ] [[package]] @@ -1761,7 +1724,7 @@ dependencies = [ "parity-scale-codec", "paste", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "smallvec 1.11.0", "sp-api", "sp-arithmetic", @@ -1825,7 +1788,7 @@ dependencies = [ "log 0.4.28", "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sp-core", "sp-io 7.0.0 (git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42)", "sp-runtime", @@ -2226,9 +2189,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" [[package]] name = "hashbrown_tstd" @@ -2589,7 +2552,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebc88fc67028ae3db0c853baa36269d398d5f45b6982f95549ff5def78c935cd" dependencies = [ - "serde 1.0.193", + "serde 1.0.226", ] [[package]] @@ -2621,17 +2584,17 @@ checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg 1.1.0", "hashbrown 0.12.3", - "serde 1.0.193", + "serde 1.0.226", ] [[package]] name = "indexmap" -version = "2.0.0" +version = "2.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" dependencies = [ "equivalent", - "hashbrown 0.14.0", + "hashbrown 0.16.0", ] [[package]] @@ 
-2694,7 +2657,7 @@ dependencies = [ "rayon", "regex 1.9.5", "reqwest", - "serde 1.0.193", + "serde 1.0.226", "serde_json 1.0.103", "sgx_crypto_helper", "sp-application-crypto", @@ -2764,8 +2727,8 @@ dependencies = [ "prometheus 0.13.3", "regex 1.9.5", "scale-info", - "serde 1.0.193", - "serde_derive 1.0.193", + "serde 1.0.226", + "serde_derive 1.0.226", "serde_json 1.0.103", "sgx-verify", "sgx_crypto_helper", @@ -2843,8 +2806,8 @@ dependencies = [ "futures 0.3.28", "http 0.2.9", "multiaddr", - "multibase 0.9.1", - "serde 1.0.193", + "multibase", + "serde 1.0.226", "serde_json 1.0.103", "serde_urlencoded", "thiserror 1.0.44", @@ -2854,18 +2817,6 @@ dependencies = [ "walkdir", ] -[[package]] -name = "ipfs-unixfs" -version = "0.0.1" -source = "git+https://github.com/whalelephant/rust-ipfs?branch=w-nstd#52f84dceea7065bb4ee2c24da53b3bedf162241a" -dependencies = [ - "cid", - "either", - "multihash 0.11.4", - "quick-protobuf", - "sha2 0.9.9", -] - [[package]] name = "ipnet" version = "2.7.2" @@ -2893,7 +2844,7 @@ dependencies = [ "itp-types", "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", ] [[package]] @@ -2906,7 +2857,7 @@ dependencies = [ "lazy_static", "log 0.4.28", "parity-scale-codec", - "serde 1.0.193", + "serde 1.0.226", "sgx_tstd", "substrate-fixed", "thiserror 1.0.44", @@ -3022,7 +2973,7 @@ dependencies = [ "parity-scale-codec", "rlp", "sgx_tstd", - "sha3 0.10.8", + "sha3", "sp-core", "sp-io 7.0.0", "sp-keyring", @@ -3188,7 +3139,7 @@ dependencies = [ "http_req 0.8.1 (git+https://github.com/integritee-network/http_req?branch=master)", "http_req 0.8.1 (git+https://github.com/integritee-network/http_req)", "log 0.4.28", - "serde 1.0.193", + "serde 1.0.226", "serde_json 1.0.103", "sgx_tstd", "thiserror 1.0.44", @@ -3367,7 +3318,7 @@ version = "0.8.0" dependencies = [ "binary-merkle-tree", "parity-scale-codec", - "serde 1.0.193", + "serde 1.0.226", ] [[package]] @@ -3561,7 +3512,7 @@ version = "0.9.0" dependencies = [ "itp-types", "parity-scale-codec", - "serde 1.0.193", + "serde 1.0.226", "serde_json 1.0.103", "sgx_tstd", ] @@ -3600,7 +3551,7 @@ dependencies = [ "log 0.4.28", "parity-scale-codec", "postcard", - "serde 1.0.193", + "serde 1.0.226", "sgx_tstd", "sp-core", ] @@ -3795,7 +3746,7 @@ dependencies = [ "log 0.4.28", "parity-scale-codec", "parity-util-mem", - "serde 1.0.193", + "serde 1.0.226", "sgx_tstd", "sp-application-crypto", "sp-core", @@ -3840,7 +3791,7 @@ dependencies = [ "pallet-assets", "pallet-balances", "parity-scale-codec", - "serde 1.0.193", + "serde 1.0.226", "sidechain-primitives", "sp-core", "sp-runtime", @@ -3856,9 +3807,9 @@ version = "0.9.0" dependencies = [ "cid", "hex", - "ipfs-unixfs", "log 0.4.28", - "multibase 0.8.0", + "multibase", + "multihash 0.18.1", "parity-scale-codec", ] @@ -4030,7 +3981,7 @@ dependencies = [ "its-test", "jsonrpsee", "log 0.4.28", - "serde 1.0.193", + "serde 1.0.226", "serde_json 1.0.103", "thiserror 1.0.44", "tokio", @@ -4043,7 +3994,7 @@ dependencies = [ "itp-types", "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sp-core", "sp-runtime", "sp-std", @@ -4179,8 +4130,8 @@ dependencies = [ "futures-executor 0.3.28", "futures-util 0.3.28", "log 0.4.28", - "serde 1.0.193", - "serde_derive 1.0.193", + "serde 1.0.226", + "serde_derive 1.0.226", "serde_json 1.0.103", ] @@ -4224,7 +4175,7 @@ dependencies = [ "jsonrpsee-types", "jsonrpsee-utils", "log 0.4.28", - "serde 1.0.193", + "serde 1.0.226", "serde_json 1.0.103", "thiserror 1.0.44", "url 2.5.0", @@ -4244,7 +4195,7 @@ dependencies = [ 
"jsonrpsee-utils", "lazy_static", "log 0.4.28", - "serde 1.0.193", + "serde 1.0.226", "serde_json 1.0.103", "socket2", "thiserror 1.0.44", @@ -4277,7 +4228,7 @@ dependencies = [ "futures-util 0.3.28", "hyper", "log 0.4.28", - "serde 1.0.193", + "serde 1.0.226", "serde_json 1.0.103", "soketto", "thiserror 1.0.44", @@ -4297,7 +4248,7 @@ dependencies = [ "parking_lot 0.11.2", "rand 0.8.5", "rustc-hash", - "serde 1.0.193", + "serde 1.0.226", "serde_json 1.0.103", "thiserror 1.0.44", ] @@ -4316,7 +4267,7 @@ dependencies = [ "pin-project", "rustls 0.19.1", "rustls-native-certs", - "serde 1.0.193", + "serde 1.0.226", "serde_json 1.0.103", "soketto", "thiserror 1.0.44", @@ -4338,7 +4289,7 @@ dependencies = [ "jsonrpsee-utils", "log 0.4.28", "rustc-hash", - "serde 1.0.193", + "serde 1.0.226", "serde_json 1.0.103", "soketto", "thiserror 1.0.44", @@ -4357,7 +4308,7 @@ dependencies = [ "ecdsa", "elliptic-curve", "once_cell 1.18.0", - "sha2 0.10.7", + "sha2 0.10.9", ] [[package]] @@ -4446,7 +4397,7 @@ dependencies = [ "libsecp256k1-gen-ecmult", "libsecp256k1-gen-genmult", "rand 0.8.5", - "serde 1.0.193", + "serde 1.0.226", "sha2 0.9.9", "typenum 1.16.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -4858,59 +4809,49 @@ dependencies = [ "byteorder 1.4.3", "data-encoding", "log 0.4.28", - "multibase 0.9.1", + "multibase", "multihash 0.17.0", "percent-encoding 2.3.1", - "serde 1.0.193", + "serde 1.0.226", "static_assertions", - "unsigned-varint 0.7.1", + "unsigned-varint", "url 2.5.0", ] -[[package]] -name = "multibase" -version = "0.8.0" -source = "git+https://github.com/whalelephant/rust-multibase?branch=nstd#df67fb30e86998f7c10d4eea16a1cd480d2448c0" -dependencies = [ - "base-x 0.2.6", - "data-encoding", - "lazy_static", -] - [[package]] name = "multibase" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b3539ec3c1f04ac9748a260728e855f261b4977f5c3406612c884564f329404" dependencies = [ - "base-x 0.2.11", + "base-x", "data-encoding", "data-encoding-macro", ] [[package]] name = "multihash" -version = "0.11.4" -source = "git+https://github.com/whalelephant/rust-multihash?branch=nstd#2c8aca8fa1fcbcba26951d925de40fa81696020a" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40" dependencies = [ - "blake2b_simd 0.5.11", - "blake2s_simd", - "digest 0.9.0", - "sha-1 0.9.8", - "sha2 0.9.9", - "sha3 0.9.1", - "unsigned-varint 0.5.1", + "core2", + "multihash-derive", + "unsigned-varint", ] [[package]] name = "multihash" -version = "0.17.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40" +checksum = "cfd8a792c1694c6da4f68db0a9d707c72bd260994da179e6030a5dcee00bb815" dependencies = [ "core2", + "digest 0.10.7", "multihash-derive", - "unsigned-varint 0.7.1", + "parity-scale-codec", + "sha2 0.10.9", + "unsigned-varint", ] [[package]] @@ -5393,7 +5334,7 @@ dependencies = [ "pallet-timestamp", "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sp-core", "sp-io 7.0.0 (git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42)", "sp-keyring", @@ -5414,7 +5355,7 @@ dependencies = [ "pallet-timestamp", "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sp-core", "sp-io 7.0.0 (git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42)", "sp-runtime", @@ -5459,7 
+5400,7 @@ dependencies = [ "pallet-timestamp", "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sp-core", "sp-io 7.0.0 (git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42)", "sp-keyring", @@ -5481,7 +5422,7 @@ dependencies = [ "pallet-timestamp", "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sp-core", "sp-io 7.0.0 (git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42)", "sp-keyring", @@ -5500,7 +5441,7 @@ dependencies = [ "pallet-balances", "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sp-core", "sp-io 7.0.0 (git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42)", "sp-keyring", @@ -5522,7 +5463,7 @@ dependencies = [ "pallet-timestamp", "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sp-core", "sp-io 7.0.0 (git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42)", "sp-keyring", @@ -5561,7 +5502,7 @@ dependencies = [ "pallet-timestamp", "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sidechain-primitives", "sp-core", "sp-io 7.0.0 (git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42)", @@ -5617,7 +5558,7 @@ dependencies = [ "parity-scale-codec", "rustls-webpki", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sgx-verify", "sp-core", "sp-io 7.0.0 (git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42)", @@ -5653,7 +5594,7 @@ dependencies = [ "frame-system", "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sp-core", "sp-io 7.0.0 (git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42)", "sp-runtime", @@ -5672,7 +5613,7 @@ dependencies = [ "bytes 1.4.0", "impl-trait-for-tuples", "parity-scale-codec-derive", - "serde 1.0.193", + "serde 1.0.226", ] [[package]] @@ -5892,7 +5833,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a25c0b0ae06fcffe600ad392aabfa535696c8973f2253d9ac83171924c58a858" dependencies = [ "postcard-cobs", - "serde 1.0.193", + "serde 1.0.226", ] [[package]] @@ -6101,15 +6042,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "quick-protobuf" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e489d4a83c17ea69b0291630229b5d4c92a94a3bf0165f7f72f506e94cda8b4b" -dependencies = [ - "byteorder 1.4.3", -] - [[package]] name = "quote" version = "1.0.40" @@ -6428,7 +6360,7 @@ dependencies = [ "once_cell 1.18.0", "percent-encoding 2.3.1", "pin-project-lite", - "serde 1.0.193", + "serde 1.0.226", "serde_json 1.0.103", "serde_urlencoded", "tokio", @@ -6759,7 +6691,7 @@ checksum = "036575c29af9b6e4866ffb7fa055dbf623fe7a9cc159b33786de6013a6969d89" dependencies = [ "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", ] [[package]] @@ -6827,7 +6759,7 @@ dependencies = [ "derive_more", "parity-scale-codec", "scale-info-derive", - "serde 1.0.193", + "serde 1.0.226", ] [[package]] @@ -6993,11 +6925,12 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.193" +version = "1.0.226" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" +checksum = "0dca6411025b24b60bfa7ec1fe1f8e710ac09782dca409ee8237ba74b51295fd" dependencies = [ - "serde_derive 1.0.193", + "serde_core", + "serde_derive 1.0.226", ] [[package]] @@ -7006,8 +6939,8 @@ version = "0.1.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b926cfbabfe8011609dda0350cb24d884955d294909ac71c0db7027366c77e3e" dependencies = [ - "serde 1.0.193", - "serde_derive 1.0.193", + "serde 1.0.226", + "serde_derive 1.0.226", ] [[package]] @@ -7019,6 +6952,15 @@ dependencies = [ "serde_derive 1.0.118", ] +[[package]] +name = "serde_core" +version = "1.0.226" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba2ba63999edb9dac981fb34b3e5c0d111a69b0924e253ed29d83f7c99e966a4" +dependencies = [ + "serde_derive 1.0.226", +] + [[package]] name = "serde_derive" version = "1.0.118" @@ -7031,9 +6973,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.193" +version = "1.0.226" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" +checksum = "8db53ae22f34573731bafa1db20f04027b2d25e02d8205921b569171699cdb33" dependencies = [ "proc-macro2", "quote", @@ -7069,10 +7011,10 @@ version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d03b412469450d4404fe8499a268edd7f8b79fecb074b0d812ad64ca21f4031b" dependencies = [ - "indexmap 2.0.0", + "indexmap 2.11.4", "itoa 1.0.9", "ryu", - "serde 1.0.193", + "serde 1.0.226", ] [[package]] @@ -7084,7 +7026,7 @@ dependencies = [ "form_urlencoded", "itoa 1.0.9", "ryu", - "serde 1.0.193", + "serde 1.0.226", ] [[package]] @@ -7103,7 +7045,7 @@ dependencies = [ "ring 0.16.20", "rustls-webpki", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "serde_json 1.0.103", "sp-core", "sp-io 7.0.0 (git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42)", @@ -7140,11 +7082,11 @@ dependencies = [ "itertools 0.11.0", "libc", "serde 1.0.118", - "serde 1.0.193", + "serde 1.0.226", "serde-big-array 0.1.5", "serde-big-array 0.3.0", "serde_derive 1.0.118", - "serde_derive 1.0.193", + "serde_derive 1.0.226", "sgx_tcrypto", "sgx_tstd", "sgx_types", @@ -7328,27 +7270,15 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.7" +version = "0.10.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ "cfg-if 1.0.0", "cpufeatures", "digest 0.10.7", ] -[[package]] -name = "sha3" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "keccak", - "opaque-debug 0.3.0", -] - [[package]] name = "sha3" version = "0.10.8" @@ -7381,7 +7311,7 @@ source = "git+https://github.com/integritee-network/pallets.git?branch=sdk-v0.13 dependencies = [ "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sp-core", "sp-io 7.0.0 (git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42)", "sp-runtime", @@ -7523,7 +7453,7 @@ source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.4 dependencies = [ "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sp-core", "sp-io 7.0.0 (git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42)", "sp-std", @@ -7538,7 +7468,7 @@ dependencies = [ "num-traits 0.2.16", "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sp-std", "static_assertions", ] @@ -7585,7 +7515,7 @@ dependencies = [ "log 0.4.28", 
"parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sp-api", "sp-application-crypto", "sp-core", @@ -7601,7 +7531,7 @@ source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.4 dependencies = [ "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sp-std", "sp-timestamp", ] @@ -7636,7 +7566,7 @@ dependencies = [ "schnorrkel", "secp256k1", "secrecy", - "serde 1.0.193", + "serde 1.0.226", "sp-core-hashing", "sp-debug-derive", "sp-externalities", @@ -7655,11 +7585,11 @@ name = "sp-core-hashing" version = "5.0.0" source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42#ff24c60ac7d9f87727ecdd0ded9a80c56e4f4b65" dependencies = [ - "blake2b_simd 1.0.1", + "blake2b_simd", "byteorder 1.4.3", "digest 0.10.7", - "sha2 0.10.7", - "sha3 0.10.8", + "sha2 0.10.9", + "sha3", "sp-std", "twox-hash", ] @@ -7768,7 +7698,7 @@ dependencies = [ "futures 0.3.28", "parity-scale-codec", "parking_lot 0.12.1", - "serde 1.0.193", + "serde 1.0.226", "sp-core", "sp-externalities", "thiserror 1.0.44", @@ -7808,7 +7738,7 @@ dependencies = [ "paste", "rand 0.8.5", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sp-application-crypto", "sp-arithmetic", "sp-core", @@ -7854,7 +7784,7 @@ source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.4 dependencies = [ "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sp-core", "sp-runtime", "sp-std", @@ -7893,7 +7823,7 @@ dependencies = [ "impl-serde", "parity-scale-codec", "ref-cast", - "serde 1.0.193", + "serde 1.0.226", "sp-debug-derive", "sp-std", ] @@ -7957,7 +7887,7 @@ dependencies = [ "parity-scale-codec", "parity-wasm", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sp-core-hashing-proc-macro", "sp-runtime", "sp-std", @@ -7997,7 +7927,7 @@ source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.4 dependencies = [ "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "smallvec 1.11.0", "sp-arithmetic", "sp-core", @@ -8047,7 +7977,7 @@ dependencies = [ "num-format", "proc-macro2", "quote", - "serde 1.0.193", + "serde 1.0.226", "serde_json 1.0.103", "unicode-xid", ] @@ -8114,7 +8044,7 @@ dependencies = [ "log 0.4.28", "maybe-async", "parity-scale-codec", - "serde 1.0.193", + "serde 1.0.226", "serde_json 1.0.103", "sp-core", "sp-runtime", @@ -8159,7 +8089,7 @@ source = "git+https://github.com/encointer/substrate-fixed?tag=v0.5.9#a4fb461aae dependencies = [ "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "typenum 1.16.0 (git+https://github.com/encointer/typenum?tag=v1.16.0)", ] @@ -8235,7 +8165,7 @@ dependencies = [ "log 0.4.28", "parity-scale-codec", "scale-info", - "serde 1.0.193", + "serde 1.0.226", "sp-core", "sp-runtime", "sp-std", @@ -8362,7 +8292,7 @@ dependencies = [ "pbkdf2 0.11.0", "rand 0.8.5", "rustc-hash", - "sha2 0.10.7", + "sha2 0.10.9", "thiserror 1.0.44", "unicode-normalization 0.1.22", "wasm-bindgen", @@ -8503,7 +8433,7 @@ version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ - "serde 1.0.193", + "serde 1.0.226", ] [[package]] @@ -8563,7 +8493,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" dependencies = [ - "serde 1.0.193", + "serde 1.0.226", "tracing-core", ] @@ -8578,7 +8508,7 @@ dependencies = [ 
"lazy_static", "matchers", "regex 1.9.5", - "serde 1.0.193", + "serde 1.0.226", "serde_json 1.0.103", "sharded-slab", "smallvec 1.11.0", @@ -8823,12 +8753,6 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" -[[package]] -name = "unsigned-varint" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fdeedbf205afadfe39ae559b75c3240f24e257d0ca27e85f85cb82aa19ac35" - [[package]] name = "unsigned-varint" version = "0.7.1" @@ -8951,7 +8875,7 @@ dependencies = [ "percent-encoding 2.3.1", "pin-project", "scoped-tls", - "serde 1.0.193", + "serde 1.0.226", "serde_json 1.0.103", "serde_urlencoded", "tokio", @@ -9104,7 +9028,7 @@ dependencies = [ "once_cell 1.18.0", "paste", "psm", - "serde 1.0.193", + "serde 1.0.226", "target-lexicon", "wasmparser", "wasmtime-environ", @@ -9134,7 +9058,7 @@ dependencies = [ "indexmap 1.9.3", "log 0.4.28", "object 0.29.0", - "serde 1.0.193", + "serde 1.0.226", "target-lexicon", "thiserror 1.0.44", "wasmparser", @@ -9156,7 +9080,7 @@ dependencies = [ "log 0.4.28", "object 0.29.0", "rustc-demangle", - "serde 1.0.193", + "serde 1.0.226", "target-lexicon", "wasmtime-environ", "wasmtime-jit-icache-coherence", @@ -9215,7 +9139,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83e5572c5727c1ee7e8f28717aaa8400e4d22dcbd714ea5457d85b5005206568" dependencies = [ "cranelift-entity", - "serde 1.0.193", + "serde 1.0.226", "thiserror 1.0.44", "wasmparser", ] diff --git a/app-libs/stf/src/trusted_call.rs b/app-libs/stf/src/trusted_call.rs index df82acc0c..26f49e692 100644 --- a/app-libs/stf/src/trusted_call.rs +++ b/app-libs/stf/src/trusted_call.rs @@ -653,7 +653,7 @@ where Ok(RelayedNoteRetrievalInfo::Here { msg: request.msg }) } else if request.relay_type == NoteRelayType::Ipfs { let (ciphertext, encryption_key) = encrypt_with_fresh_key(request.msg)?; - let cid = IpfsCid::from_content_bytes(&ciphertext) + let cid = IpfsCid::from_chunk(&ciphertext) .map_err(|e| StfError::Dispatch(format!("IPFS error: {:?}", e)))?; info!("storing relayed note to IPFS with CID {:?}", cid); side_effects.push(TrustedCallSideEffect::IpfsAdd(ciphertext)); @@ -969,7 +969,8 @@ where let unshield_amount = balance.saturating_sub( MinimalChainSpec::one_unit( shielding_target_genesis_hash().unwrap_or_default(), - ) / STF_TX_FEE_UNIT_DIVIDER * 3, + ) / STF_TX_FEE_UNIT_DIVIDER + * 3, ); let parentchain_call = parentchain_vault_proxy_call( unshield_native_from_vault_parentchain_call( diff --git a/core-primitives/utils/Cargo.toml b/core-primitives/utils/Cargo.toml index 3592ca899..b90f82af1 100644 --- a/core-primitives/utils/Cargo.toml +++ b/core-primitives/utils/Cargo.toml @@ -8,19 +8,18 @@ license = "Apache-2.0" edition = "2021" [dependencies] -cid = { default-features = false, git = "https://github.com/whalelephant/rust-cid", branch = "nstd" } +cid = { version = "0.10.1", default-features = false, features = ["alloc"] } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } hex = { version = "0.4.3", default-features = false, features = ["alloc"] } -ipfs-unixfs = { default-features = false, git = "https://github.com/whalelephant/rust-ipfs", branch = "w-nstd" } log = "0.4.28" -multibase = { default-features = false, git = "https://github.com/whalelephant/rust-multibase", branch = "nstd" } +multibase = { version = "0.9.1", default-features = false } +multihash = { 
version = "0.18.0", default-features = false, features = ["alloc", "scale-codec", "multihash-impl", "sha2"] } [features] default = ["std"] std = [ "cid/std", "multibase/std", - "ipfs-unixfs/std", "codec/std", "hex/std", ] diff --git a/core-primitives/utils/src/ipfs.rs b/core-primitives/utils/src/ipfs.rs index 2f70951f4..886a47178 100644 --- a/core-primitives/utils/src/ipfs.rs +++ b/core-primitives/utils/src/ipfs.rs @@ -15,16 +15,20 @@ */ -use alloc::vec::Vec; -use cid::Cid; +use alloc::{format, vec::Vec}; +use cid::{ + multihash::{Code, MultihashDigest}, + Cid, +}; use codec::{Decode, Encode}; use core::{ convert::TryFrom, fmt::{Debug, Display}, }; -use ipfs_unixfs::file::adder::FileAdder; use multibase::Base; +const RAW: u64 = 0x55; + /// IPFS content identifier helper: https://docs.ipfs.tech/concepts/content-addressing/ #[derive(Clone, PartialEq, Eq)] pub struct IpfsCid(pub Cid); @@ -34,6 +38,8 @@ impl From for IpfsCid { IpfsCid(value) } } + +#[cfg(feature = "std")] impl TryFrom<&str> for IpfsCid { type Error = cid::Error; @@ -44,22 +50,18 @@ impl TryFrom<&str> for IpfsCid { } impl IpfsCid { - pub fn from_content_bytes(content: &Vec) -> Result { - Ok(Self::try_from("QmSaFjwJ2QtS3rZDKzC98XEzv2bqT4TfpWLCpphPPwyQTr") - .expect("known to work for test")) - // let mut adder: FileAdder = FileAdder::default(); - // let mut total: usize = 0; - // let mut stats = Stats::default(); - // while total < content.len() { - // let (blocks, consumed) = adder.push(&content[total..]); - // total += consumed; - // stats.process(blocks); - // } - // let blocks = adder.finish(); - // stats.process(blocks); - // stats.last.map(IpfsCid).ok_or(IpfsError::FinalCidMissing) + pub fn from_chunk(chunk: &[u8]) -> Result { + if chunk.len() > 256 * 1024 { + return Err(IpfsError::InputTooLarge); + }; + //let h = Sha256::digest(chunk); + let h = Code::Sha2_256.digest(chunk); + //let mh = multihash::Sha2_256::digest(chunk); + let cid = Cid::new_v1(RAW, h.into()); + Ok(IpfsCid(cid)) } } + impl Encode for IpfsCid { fn encode(&self) -> Vec { self.0.to_bytes().encode() @@ -77,26 +79,38 @@ impl Decode for IpfsCid { impl Debug for IpfsCid { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let cid_str = Base::Base58Btc.encode(self.0.hash().as_bytes()); - write!(f, "{}", cid_str) + let cid = &self.0; + let version = cid.version(); + let codec = cid.codec(); + let mh = cid.hash(); + let mh_code = mh.code(); + let mh_size = mh.size(); + let mh_digest = mh.digest(); + + f.debug_struct("IpfsCid") + .field("version", &version) + .field("codec", &codec) + .field("multihash_code", &mh_code) + .field("multihash_size", &mh_size) + .field("multihash_digest", &hex::encode(mh_digest)) + .finish() } } impl Display for IpfsCid { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let cid_str = Base::Base58Btc.encode(self.0.hash().as_bytes()); + let cid_str = if self.0.codec() == RAW { + multibase::encode(Base::Base32Lower, self.0.to_bytes()) + } else { + multibase::encode(Base::Base58Btc, self.0.to_bytes()) + }; write!(f, "{}", cid_str) } } -// impl Default for IpfsCid { -// fn default() -> Self { -// IpfsCid::from_content_bytes(&Vec::new()).expect("known to work for empty vec") -// } -// } - #[derive(Debug, PartialEq)] pub enum IpfsError { + InputTooLarge, InputCidInvalid, FinalCidMissing, Verification, @@ -125,23 +139,44 @@ impl Stats { mod tests { use super::*; use alloc::vec; + #[test] - pub fn test_from_multichunk_content_works() { - let expected_cid_str = 
"QmSaFjwJ2QtS3rZDKzC98XEzv2bqT4TfpWLCpphPPwyQTr"; + pub fn test_from_max_chunk_content_works() { + // cross-check with ipfs cli: + // head -c 262144 /dev/zero | tr '\0' 'A' | ipfs block put --format=raw + // bafkreiexul6fkqo4zhagxgnsvbgdjfq7udb26ig3uoli34xznjlmnpaaze + let expected_cid_str = "bafkreiexul6fkqo4zhagxgnsvbgdjfq7udb26ig3uoli34xznjlmnpaaze"; let expected_cid = IpfsCid::try_from(expected_cid_str).unwrap(); - let content: Vec = vec![20; 512 * 1024]; // bigger than one chunk of 256kB - let derived_cid = IpfsCid::from_content_bytes(&content).unwrap(); + let content: Vec = vec![65; 256 * 1024]; // exactly one chunk of 256kB of "A" chars + let derived_cid = IpfsCid::from_chunk(&content).unwrap(); assert_eq!(derived_cid, expected_cid); } #[test] - pub fn test_cid_verification_fails_for_incorrect_multichunk_content() { - let expected_cid_str = "QmSaFjwJ2QtS3rZDKzC98XEzv2bqT4TfpWLCpphPPwyQTr"; + pub fn test_cid_verification_fails_for_incorrect_single_chunk_content() { + let expected_cid_str = "bafkreihdcgl5emugcgwjavoknx76kmfdahpzz3jyghg5mhslvhbrznfkky"; let expected_cid = IpfsCid::try_from(expected_cid_str).unwrap(); - let content: Vec = vec![99; 512 * 1024]; // bigger than one chunk of 256kB - let wrong_cid = IpfsCid::from_content_bytes(&content).unwrap(); + let content: Vec = vec![99; 256 * 1024]; + let wrong_cid = IpfsCid::from_chunk(&content).unwrap(); assert!(wrong_cid != expected_cid); } + #[test] + pub fn test_from_text_works() { + // cross-check with ipfs cli: + // echo -n "FooBar" | ipfs block put --format=raw + // bafkreianosnl4e3xk42jhyg7otpy2euc4ruwo5kkd26hzrrsher2pcfnlq + let expected_cid_str = "bafkreianosnl4e3xk42jhyg7otpy2euc4ruwo5kkd26hzrrsher2pcfnlq"; + let expected_cid = IpfsCid::try_from(expected_cid_str).unwrap(); + let content = "FooBar".as_bytes(); + let derived_cid = IpfsCid::from_chunk(content).unwrap(); + assert_eq!(derived_cid, expected_cid); + } + + #[test] + pub fn test_cid_verification_fails_for_oversize_chunk_content() { + let content: Vec = vec![99; 256 * 1024 + 1]; + assert!(IpfsCid::from_chunk(&content) == Err(IpfsError::InputTooLarge)); + } #[test] pub fn test_encode_decode_ipfscid_works() { @@ -152,12 +187,4 @@ mod tests { let decoded = IpfsCid::decode(&mut &encoded[..]).unwrap(); assert_eq!(decoded, expected_cid); } - - #[test] - pub fn test_default_cid_works() { - let expected_cid_str = "QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH"; - let expected_cid = IpfsCid::try_from(expected_cid_str).unwrap(); - let def = IpfsCid::default(); - assert_eq!(def, expected_cid); - } } diff --git a/enclave-runtime/Cargo.lock b/enclave-runtime/Cargo.lock index 7bd4a6737..866f0557c 100644 --- a/enclave-runtime/Cargo.lock +++ b/enclave-runtime/Cargo.lock @@ -18,7 +18,7 @@ version = "0.4.2" source = "git+https://github.com/encointer/substrate-api-client.git?branch=v0.9.42-tag-v0.14.0-integritee-patch#946f3ae82c5d48023107c1890728582561e94725" dependencies = [ "ac-primitives", - "log 0.4.17", + "log 0.4.28", "maybe-async", ] @@ -33,7 +33,7 @@ dependencies = [ "either", "frame-metadata", "hex", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "scale-bits", "scale-decode", @@ -219,6 +219,12 @@ name = "base-x" version = "0.2.6" source = "git+https://github.com/whalelephant/base-x-rs?branch=no_std#906c9ac59282ff5a2eec86efd25d50ad9927b147" +[[package]] +name = "base-x" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" + [[package]] name = "base16ct" 
version = "0.2.0" @@ -381,7 +387,7 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb5b05133427c07c4776906f673ccf36c21b102c9829c641a5b56bd151d44fd6" dependencies = [ - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "scale-info", ] @@ -484,9 +490,22 @@ name = "cid" version = "0.5.1" source = "git+https://github.com/whalelephant/rust-cid?branch=nstd#cca87467c46106c801ca3727500477258b0f13b0" dependencies = [ - "multibase", - "multihash", - "unsigned-varint", + "multibase 0.8.0", + "multihash 0.11.4", + "unsigned-varint 0.5.1", +] + +[[package]] +name = "cid" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd94671561e36e4e7de75f753f577edafb0e7c05d6e4547229fdf7938fbcd2c3" +dependencies = [ + "core2", + "multibase 0.9.1", + "multihash 0.18.1", + "serde 1.0.192", + "unsigned-varint 0.7.2", ] [[package]] @@ -535,6 +554,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" +[[package]] +name = "core2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b49ba7ef1ad6107f8824dbe97de947cbaac53c44e7f9756a1fba0d37c1eec505" +dependencies = [ + "memchr 2.5.0", +] + [[package]] name = "cpufeatures" version = "0.2.9" @@ -639,6 +667,26 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" +[[package]] +name = "data-encoding-macro" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c904b33cc60130e1aeea4956ab803d08a3f4a0ca82d64ed757afac3891f2bb99" +dependencies = [ + "data-encoding", + "data-encoding-macro-internal", +] + +[[package]] +name = "data-encoding-macro-internal" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fdf3fce3ce863539ec1d7fd1b6dcc3c645663376b43ed376bbf887733e4f772" +dependencies = [ + "data-encoding", + "syn 1.0.109", +] + [[package]] name = "der" version = "0.7.8" @@ -760,7 +808,7 @@ version = "0.1.0" source = "git+https://github.com/integritee-network/pallets.git?branch=sdk-v0.13.0-polkadot-v0.9.42#abf29acd41a0fca9cd7025b297b6a9fa272a122f" dependencies = [ "common-primitives", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "scale-info", "serde 1.0.192", @@ -775,7 +823,7 @@ name = "enclave-runtime" version = "0.17.0" dependencies = [ "array-bytes 6.2.2", - "cid", + "cid 0.5.1", "derive_more", "enclave-bridge-primitives", "env_logger", @@ -830,7 +878,7 @@ dependencies = [ "jsonrpc-core", "lazy_static", "log 0.4.17", - "multibase", + "multibase 0.8.0", "once_cell 1.4.0", "parity-scale-codec", "primitive-types", @@ -879,12 +927,6 @@ version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e48c92028aaa870e83d51c64e5d4e0b6981b360c522198c23959f219a4e1b15b" -[[package]] -name = "equivalent" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" - [[package]] name = "ethbloom" version = "0.13.0" @@ -942,7 +984,7 @@ dependencies = [ "evm-core", "evm-gasometer", "evm-runtime", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "primitive-types", "rlp", @@ -1066,7 +1108,7 @@ source = "git+https://github.com/integritee-network/frontier.git?branch=bar/polk dependencies = [ "hex", 
"libsecp256k1", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "scale-info", "sp-core", @@ -1128,7 +1170,7 @@ dependencies = [ "frame-support-procedural", "impl-trait-for-tuples", "k256", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "paste", "scale-info", @@ -1191,7 +1233,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42#ff24c60ac7d9f87727ecdd0ded9a80c56e4f4b65" dependencies = [ "frame-support", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "scale-info", "sp-core", @@ -1476,12 +1518,6 @@ dependencies = [ "ahash 0.8.3", ] -[[package]] -name = "hashbrown" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" - [[package]] name = "hashbrown_tstd" version = "0.12.0" @@ -1524,7 +1560,7 @@ name = "http_req" version = "0.8.1" source = "git+https://github.com/integritee-network/http_req#3723e88235f2b29bc1a31835853b072ffd0455fd" dependencies = [ - "log 0.4.17", + "log 0.4.28", "rustls 0.19.0 (git+https://github.com/mesalock-linux/rustls?branch=mesalock_sgx)", "sgx_tstd", "unicase", @@ -1614,16 +1650,6 @@ dependencies = [ "sgx_tstd", ] -[[package]] -name = "indexmap" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" -dependencies = [ - "equivalent", - "hashbrown 0.14.0", -] - [[package]] name = "integer-sqrt" version = "0.1.5" @@ -1646,9 +1672,9 @@ name = "ipfs-unixfs" version = "0.0.1" source = "git+https://github.com/whalelephant/rust-ipfs?branch=w-nstd#52f84dceea7065bb4ee2c24da53b3bedf162241a" dependencies = [ - "cid", + "cid 0.5.1", "either", - "multihash", + "multihash 0.11.4", "quick-protobuf", "sha2 0.9.9", ] @@ -1674,12 +1700,12 @@ dependencies = [ "itp-enclave-metrics", "itp-ocall-api", "lazy_static", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "serde 1.0.192", "sgx_tstd", "substrate-fixed", - "thiserror", + "thiserror 1.0.9", "url", ] @@ -1699,7 +1725,7 @@ dependencies = [ "itp-stf-primitives", "itp-types", "itp-utils", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sp-core", @@ -1714,7 +1740,7 @@ version = "0.1.0" dependencies = [ "hex-literal", "itp-types", - "log 0.4.17", + "log 0.4.28", ] [[package]] @@ -1771,7 +1797,7 @@ dependencies = [ "itp-storage", "itp-types", "itp-utils", - "log 0.4.17", + "log 0.4.28", "pallet-assets", "pallet-balances", "pallet-notes", @@ -1798,12 +1824,12 @@ dependencies = [ "itp-types", "itp-utils", "jsonrpc-core", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "serde_json 1.0.108", "sgx_tstd", "sp-runtime", - "thiserror", + "thiserror 1.0.9", ] [[package]] @@ -1818,11 +1844,11 @@ dependencies = [ "itp-stf-state-handler", "itp-top-pool-author", "itp-types", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sp-runtime", - "thiserror", + "thiserror 1.0.9", ] [[package]] @@ -1844,10 +1870,10 @@ version = "0.9.0" dependencies = [ "itc-parentchain-block-importer", "itp-import-queue", - "log 0.4.17", + "log 0.4.28", "sgx_tstd", "sgx_types", - "thiserror", + "thiserror 1.0.9", ] [[package]] @@ -1861,12 +1887,12 @@ dependencies = [ "itp-stf-executor", "itp-stf-interface", "itp-types", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sgx_types", "sp-runtime", - "thiserror", + "thiserror 1.0.9", ] [[package]] @@ -1885,13 +1911,13 @@ dependencies = [ "itp-test", "itp-top-pool-author", 
"itp-types", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sgx_types", "sp-core", "sp-runtime", - "thiserror", + "thiserror 1.0.9", ] [[package]] @@ -1906,13 +1932,13 @@ dependencies = [ "itp-storage", "itp-test", "itp-types", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sgx_types", "sp-consensus-grandpa", "sp-runtime", - "thiserror", + "thiserror 1.0.9", ] [[package]] @@ -1930,11 +1956,11 @@ dependencies = [ "base64 0.13.1", "http", "http_req", - "log 0.4.17", + "log 0.4.28", "serde 1.0.192", "serde_json 1.0.108", "sgx_tstd", - "thiserror", + "thiserror 1.0.9", "url", ] @@ -1944,14 +1970,14 @@ version = "0.9.0" dependencies = [ "bit-vec", "chrono 0.4.26", - "log 0.4.17", + "log 0.4.28", "mio", "mio-extras", "rcgen", "rustls 0.19.0 (git+https://github.com/mesalock-linux/rustls?branch=mesalock_sgx)", "sgx_tstd", "sp-core", - "thiserror", + "thiserror 1.0.9", "tungstenite", "webpki", "yasna", @@ -2014,7 +2040,7 @@ dependencies = [ "itp-sgx-crypto", "itp-sgx-io", "itp-time-utils", - "log 0.4.17", + "log 0.4.28", "num-bigint", "parity-scale-codec", "rustls 0.19.0 (git+https://github.com/mesalock-linux/rustls?rev=sgx_1.1.3)", @@ -2025,7 +2051,7 @@ dependencies = [ "sgx_tstd", "sgx_types", "sp-core", - "thiserror", + "thiserror 1.0.9", "webpki", "webpki-roots 0.21.0 (git+https://github.com/mesalock-linux/webpki-roots?branch=mesalock_sgx)", "yasna", @@ -2036,7 +2062,7 @@ name = "itp-component-container" version = "0.8.0" dependencies = [ "sgx_tstd", - "thiserror", + "thiserror 1.0.9", ] [[package]] @@ -2056,14 +2082,14 @@ dependencies = [ "itp-node-api", "itp-nonce-cache", "itp-types", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sgx_types", "sp-core", "sp-runtime", "substrate-api-client", - "thiserror", + "thiserror 1.0.9", ] [[package]] @@ -2079,7 +2105,7 @@ version = "0.8.0" dependencies = [ "sgx_tstd", "sgx_types", - "thiserror", + "thiserror 1.0.9", ] [[package]] @@ -2108,7 +2134,7 @@ version = "0.9.0" dependencies = [ "itp-node-api-metadata", "sgx_tstd", - "thiserror", + "thiserror 1.0.9", ] [[package]] @@ -2116,7 +2142,7 @@ name = "itp-nonce-cache" version = "0.8.0" dependencies = [ "sgx_tstd", - "thiserror", + "thiserror 1.0.9", ] [[package]] @@ -2151,7 +2177,7 @@ version = "0.9.0" dependencies = [ "lazy_static", "sgx_tstd", - "thiserror", + "thiserror 1.0.9", ] [[package]] @@ -2184,7 +2210,7 @@ dependencies = [ "derive_more", "itp-sgx-io", "itp-sgx-temp-dir", - "log 0.4.17", + "log 0.4.28", "ofb", "parity-scale-codec", "serde_json 1.0.60 (git+https://github.com/mesalock-linux/serde-json-sgx?tag=sgx_1.1.3)", @@ -2202,7 +2228,7 @@ dependencies = [ "derive_more", "environmental 1.1.3", "itp-hashing", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "postcard", "serde 1.0.192", @@ -2256,13 +2282,13 @@ dependencies = [ "itp-time-utils", "itp-top-pool-author", "itp-types", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sgx_types", "sp-core", "sp-runtime", - "thiserror", + "thiserror 1.0.9", ] [[package]] @@ -2302,13 +2328,13 @@ dependencies = [ "itp-stf-state-observer", "itp-time-utils", "itp-types", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "rust-base58", "sgx_tstd", "sgx_types", "sp-core", - "thiserror", + "thiserror 1.0.9", ] [[package]] @@ -2316,9 +2342,9 @@ name = "itp-stf-state-observer" version = "0.9.0" dependencies = [ "itp-types", - "log 0.4.17", + "log 0.4.28", "sgx_tstd", - "thiserror", + "thiserror 1.0.9", ] [[package]] @@ -2336,7 +2362,7 @@ dependencies = [ "sp-runtime", 
"sp-std", "sp-trie", - "thiserror", + "thiserror 1.0.9", ] [[package]] @@ -2356,7 +2382,7 @@ dependencies = [ "itp-time-utils", "itp-types", "jsonrpc-core", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "sgx_crypto_helper", "sgx_tstd", @@ -2386,7 +2412,7 @@ dependencies = [ "its-primitives", "jsonrpc-core", "linked-hash-map", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "serde 1.0.192", "sgx_tstd", @@ -2409,7 +2435,7 @@ dependencies = [ "itp-top-pool", "itp-types", "jsonrpc-core", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sp-core", @@ -2443,11 +2469,11 @@ dependencies = [ name = "itp-utils" version = "0.9.0" dependencies = [ - "cid", + "cid 0.10.1", "hex", - "ipfs-unixfs", "log 0.4.28", - "multibase", + "multibase 0.9.1", + "multihash 0.18.1", "parity-scale-codec", ] @@ -2466,13 +2492,13 @@ dependencies = [ "itp-types", "its-primitives", "its-state", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sgx_types", "sp-core", "sp-runtime", - "thiserror", + "thiserror 1.0.9", ] [[package]] @@ -2481,7 +2507,7 @@ version = "0.1.0" dependencies = [ "its-primitives", "sgx_tstd", - "thiserror", + "thiserror 1.0.9", ] [[package]] @@ -2492,12 +2518,12 @@ dependencies = [ "itp-types", "itp-utils", "its-primitives", - "log 0.4.17", + "log 0.4.28", "sgx_tstd", "sp-consensus-slots", "sp-core", "sp-runtime", - "thiserror", + "thiserror 1.0.9", ] [[package]] @@ -2527,7 +2553,7 @@ dependencies = [ "its-primitives", "its-state", "its-validateer-fetch", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sp-core", @@ -2554,12 +2580,12 @@ dependencies = [ "its-block-verification", "its-primitives", "its-state", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sgx_types", "sp-runtime", - "thiserror", + "thiserror 1.0.9", ] [[package]] @@ -2574,7 +2600,7 @@ dependencies = [ "its-consensus-common", "its-primitives", "lazy_static", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sp-consensus-slots", @@ -2608,7 +2634,7 @@ dependencies = [ "itp-utils", "its-primitives", "jsonrpc-core", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "rust-base58", "sgx_tstd", @@ -2637,12 +2663,12 @@ dependencies = [ "itp-sgx-externalities", "itp-storage", "its-primitives", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sp-core", "sp-io", - "thiserror", + "thiserror 1.0.9", ] [[package]] @@ -2654,7 +2680,7 @@ dependencies = [ "itp-pallet-storage", "itp-types", "its-primitives", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "sp-core", "sp-runtime", @@ -2856,7 +2882,7 @@ version = "2.0.6" source = "git+https://github.com/integritee-network/mio-extras-sgx?rev=963234b#963234bf55e44f9efff921938255126c48deef3a" dependencies = [ "lazycell", - "log 0.4.17", + "log 0.4.28", "mio", "sgx_tstd", "sgx_types", @@ -2868,11 +2894,22 @@ name = "multibase" version = "0.8.0" source = "git+https://github.com/whalelephant/rust-multibase?branch=nstd#df67fb30e86998f7c10d4eea16a1cd480d2448c0" dependencies = [ - "base-x", + "base-x 0.2.6", "data-encoding", "lazy_static", ] +[[package]] +name = "multibase" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b3539ec3c1f04ac9748a260728e855f261b4977f5c3406612c884564f329404" +dependencies = [ + "base-x 0.2.11", + "data-encoding", + "data-encoding-macro", +] + [[package]] name = "multihash" version = "0.11.4" @@ -2884,7 +2921,35 @@ dependencies = [ "sha-1", "sha2 0.9.9", "sha3 0.9.1", - "unsigned-varint", + "unsigned-varint 
0.5.1", +] + +[[package]] +name = "multihash" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfd8a792c1694c6da4f68db0a9d707c72bd260994da179e6030a5dcee00bb815" +dependencies = [ + "core2", + "digest 0.10.7", + "multihash-derive", + "parity-scale-codec", + "sha2 0.10.7", + "unsigned-varint 0.7.2", +] + +[[package]] +name = "multihash-derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6d4752e6230d8ef7adf7bd5d8c4b1f6561c1014c5ba9a37445ccefe18aa1db" +dependencies = [ + "proc-macro-crate", + "proc-macro-error", + "proc-macro2", + "quote 1.0.40", + "syn 1.0.109", + "synstructure", ] [[package]] @@ -3037,7 +3102,7 @@ source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.4 dependencies = [ "frame-support", "frame-system", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "scale-info", "sp-runtime", @@ -3074,7 +3139,7 @@ dependencies = [ "frame-system", "hex", "impl-trait-for-tuples", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "rlp", "scale-info", @@ -3091,7 +3156,7 @@ dependencies = [ "frame-support", "frame-system", "itp-randomness", - "log 0.4.17", + "log 0.4.28", "pallet-balances", "pallet-timestamp", "parity-scale-codec", @@ -3109,7 +3174,7 @@ dependencies = [ "frame-support", "frame-system", "itp-randomness", - "log 0.4.17", + "log 0.4.28", "pallet-balances", "pallet-timestamp", "parity-scale-codec", @@ -3126,7 +3191,7 @@ version = "0.11.0" dependencies = [ "frame-support", "frame-system", - "log 0.4.17", + "log 0.4.28", "pallet-balances", "parity-scale-codec", "scale-info", @@ -3143,7 +3208,7 @@ dependencies = [ "frame-support", "frame-system", "itp-randomness", - "log 0.4.17", + "log 0.4.28", "pallet-balances", "pallet-timestamp", "parity-scale-codec", @@ -3161,7 +3226,7 @@ dependencies = [ "enclave-bridge-primitives", "frame-support", "frame-system", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "scale-info", "sp-core", @@ -3189,7 +3254,7 @@ source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.4 dependencies = [ "frame-support", "frame-system", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "scale-info", "sp-inherents", @@ -3311,12 +3376,12 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.3.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" +checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" dependencies = [ - "once_cell 1.18.0", - "toml_edit", + "thiserror 1.0.69", + "toml", ] [[package]] @@ -3870,7 +3935,7 @@ name = "serde_json" version = "1.0.60" source = "git+https://github.com/mesalock-linux/serde-json-sgx?tag=sgx_1.1.3#380893814ad2a057758d825bab798aa117f7362a" dependencies = [ - "indexmap 1.6.1", + "indexmap", "itoa 0.4.5", "ryu", "serde 1.0.118", @@ -4211,7 +4276,7 @@ name = "sp-api" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42#ff24c60ac7d9f87727ecdd0ded9a80c56e4f4b65" dependencies = [ - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "scale-info", "sp-api-proc-macro", @@ -4267,7 +4332,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate.git?branch=polkadot-v0.9.42#ff24c60ac7d9f87727ecdd0ded9a80c56e4f4b65" dependencies = [ "finality-grandpa", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "scale-info", "sp-api", @@ -4301,7 +4366,7 @@ 
dependencies = [ "hash-db 0.16.0", "hash256-std-hasher", "libsecp256k1", - "log 0.4.17", + "log 0.4.28", "merlin", "parity-scale-codec", "paste", @@ -4383,7 +4448,7 @@ version = "7.0.0" dependencies = [ "itp-sgx-externalities", "libsecp256k1", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "sgx_tstd", "sp-core", @@ -4408,7 +4473,7 @@ dependencies = [ "either", "hash256-std-hasher", "impl-trait-for-tuples", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "paste", "scale-info", @@ -4607,7 +4672,7 @@ dependencies = [ "derive_more", "frame-metadata", "hex", - "log 0.4.17", + "log 0.4.28", "maybe-async", "parity-scale-codec", "serde 1.0.192", @@ -4675,6 +4740,18 @@ dependencies = [ "unicode-xid 0.0.4", ] +[[package]] +name = "synstructure" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" +dependencies = [ + "proc-macro2", + "quote 1.0.40", + "syn 1.0.109", + "unicode-xid 0.2.4", +] + [[package]] name = "tap" version = "1.0.1" @@ -4698,7 +4775,7 @@ source = "git+https://github.com/integritee-network/pallets.git?branch=sdk-v0.13 dependencies = [ "common-primitives", "derive_more", - "log 0.4.17", + "log 0.4.28", "parity-scale-codec", "scale-info", "serde 1.0.192", @@ -4721,7 +4798,16 @@ version = "1.0.9" source = "git+https://github.com/mesalock-linux/thiserror-sgx?tag=sgx_1.1.3#c2f806b88616e06aab0af770366a76885d974fdc" dependencies = [ "sgx_tstd", - "thiserror-impl", + "thiserror-impl 1.0.9", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", ] [[package]] @@ -4734,6 +4820,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote 1.0.40", + "syn 2.0.106", +] + [[package]] name = "thread_local" version = "1.0.0" @@ -4753,20 +4850,12 @@ dependencies = [ ] [[package]] -name = "toml_datetime" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" - -[[package]] -name = "toml_edit" -version = "0.19.14" +name = "toml" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ - "indexmap 2.0.0", - "toml_datetime", - "winnow", + "serde 1.0.192", ] [[package]] @@ -4794,7 +4883,7 @@ checksum = "767abe6ffed88a1889671a102c2861ae742726f52e0a5a425b92c9fbfa7e9c85" dependencies = [ "hash-db 0.16.0", "hashbrown 0.13.2", - "log 0.4.17", + "log 0.4.28", "smallvec 1.11.0", ] @@ -4838,7 +4927,7 @@ dependencies = [ "rustls 0.19.0 (git+https://github.com/mesalock-linux/rustls?tag=sgx_1.1.3)", "sgx_tstd", "sha1", - "thiserror", + "thiserror 1.0.9", "url", "utf-8", "webpki", @@ -4933,6 +5022,12 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7fdeedbf205afadfe39ae559b75c3240f24e257d0ca27e85f85cb82aa19ac35" +[[package]] +name = "unsigned-varint" +version = "0.7.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" + [[package]] name = "untrusted" version = "0.7.1" @@ -4998,15 +5093,6 @@ dependencies = [ "webpki", ] -[[package]] -name = "winnow" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25b5872fa2e10bd067ae946f927e726d7d603eaeb6e02fa6a350e0722d2b8c11" -dependencies = [ - "memchr 2.5.0", -] - [[package]] name = "wyz" version = "0.5.1" diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index 850848e6a..ef59142d0 100644 --- a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -60,12 +60,13 @@ pub fn test_ocall_write_ipfs_fallback() { let payload_size = 100; // in kB eprintln!("testing IPFS write of {}kB if api is unreachable. Expected to fallback to dump local file...", payload_size); let enc_state: Vec = vec![20; payload_size * 1024]; - let res_expected_cid = IpfsCid::from_content_bytes(&enc_state); + let res_expected_cid = IpfsCid::from_chunk(&enc_state); let result = OcallApi.write_ipfs(enc_state); eprintln!("write_ipfs ocall result : {:?}", result); - eprintln!("expected cid: {:?}", res_expected_cid); + eprintln!("expected cid details: {:?}", res_expected_cid); assert!(res_expected_cid.is_ok()); let expected_cid = res_expected_cid.expect("known to be ok"); + eprintln!("expected cid: {}", expected_cid); let dumpfile = find_first_matching_file(expected_cid.to_string()).expect("dumped file not found"); eprintln!("found dumped file: {:?}", dumpfile); @@ -73,10 +74,12 @@ pub fn test_ocall_write_ipfs_fallback() { let mut content_buf = Vec::new(); f.read_to_end(&mut content_buf).unwrap(); eprintln!("reading file {:?} of size {} bytes", f, &content_buf.len()); - let res_file_cid = IpfsCid::from_content_bytes(&content_buf); - eprintln!("file cid: {:?}", res_file_cid); + let res_file_cid = IpfsCid::from_chunk(&content_buf); + eprintln!("file cid details: {:?}", res_file_cid); assert!(res_file_cid.is_ok()); let file_cid = res_file_cid.expect("known to be ok"); + eprintln!("file cid: {}", file_cid); + assert_eq!(expected_cid, file_cid); } diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index 021981a54..30ca06015 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -91,7 +91,7 @@ fn log_failing_blob_to_file(blob: Vec, log_dir: Arc) -> io::Result Date: Thu, 25 Sep 2025 13:56:03 +0200 Subject: [PATCH 67/91] debug logging --- Cargo.lock | 1 - core-primitives/utils/Cargo.toml | 2 +- core-primitives/utils/src/ipfs.rs | 6 +++++- enclave-runtime/Cargo.lock | 1 - 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b139cc68b..36d300b05 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4849,7 +4849,6 @@ dependencies = [ "core2", "digest 0.10.7", "multihash-derive", - "parity-scale-codec", "sha2 0.10.9", "unsigned-varint", ] diff --git a/core-primitives/utils/Cargo.toml b/core-primitives/utils/Cargo.toml index b90f82af1..adbad380f 100644 --- a/core-primitives/utils/Cargo.toml +++ b/core-primitives/utils/Cargo.toml @@ -13,7 +13,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = hex = { version = "0.4.3", default-features = false, features = ["alloc"] } log = "0.4.28" multibase = { version = "0.9.1", default-features = false } -multihash = { version = "0.18.0", default-features = false, features = 
["alloc", "scale-codec", "multihash-impl", "sha2"] } +multihash = { version = "0.18.0", default-features = false, features = ["alloc", "multihash-impl", "sha2"] } [features] default = ["std"] diff --git a/core-primitives/utils/src/ipfs.rs b/core-primitives/utils/src/ipfs.rs index 886a47178..4882cab3b 100644 --- a/core-primitives/utils/src/ipfs.rs +++ b/core-primitives/utils/src/ipfs.rs @@ -24,9 +24,10 @@ use codec::{Decode, Encode}; use core::{ convert::TryFrom, fmt::{Debug, Display}, + hash::Hash, }; +use log::*; use multibase::Base; - const RAW: u64 = 0x55; /// IPFS content identifier helper: https://docs.ipfs.tech/concepts/content-addressing/ @@ -54,10 +55,13 @@ impl IpfsCid { if chunk.len() > 256 * 1024 { return Err(IpfsError::InputTooLarge); }; + info!("Deriving CID from chunk of size {} bytes", chunk.len()); //let h = Sha256::digest(chunk); let h = Code::Sha2_256.digest(chunk); + info!(" multihash digest: {}", hex::encode(h.digest())); //let mh = multihash::Sha2_256::digest(chunk); let cid = Cid::new_v1(RAW, h.into()); + info!(" returning CID: {}", cid); Ok(IpfsCid(cid)) } } diff --git a/enclave-runtime/Cargo.lock b/enclave-runtime/Cargo.lock index 866f0557c..587a98df5 100644 --- a/enclave-runtime/Cargo.lock +++ b/enclave-runtime/Cargo.lock @@ -2933,7 +2933,6 @@ dependencies = [ "core2", "digest 0.10.7", "multihash-derive", - "parity-scale-codec", "sha2 0.10.7", "unsigned-varint 0.7.2", ] From 32c7c7c5f6517a669d0e5c805508aefdca4cab73 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Thu, 25 Sep 2025 14:15:42 +0200 Subject: [PATCH 68/91] fake cid again but derive multihash --- core-primitives/utils/src/ipfs.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/core-primitives/utils/src/ipfs.rs b/core-primitives/utils/src/ipfs.rs index 4882cab3b..d01dfac0d 100644 --- a/core-primitives/utils/src/ipfs.rs +++ b/core-primitives/utils/src/ipfs.rs @@ -40,7 +40,6 @@ impl From for IpfsCid { } } -#[cfg(feature = "std")] impl TryFrom<&str> for IpfsCid { type Error = cid::Error; @@ -60,9 +59,11 @@ impl IpfsCid { let h = Code::Sha2_256.digest(chunk); info!(" multihash digest: {}", hex::encode(h.digest())); //let mh = multihash::Sha2_256::digest(chunk); - let cid = Cid::new_v1(RAW, h.into()); - info!(" returning CID: {}", cid); - Ok(IpfsCid(cid)) + //let cid = Cid::new_v1(RAW, h.into()); + //info!(" returning CID: {}", cid); + Ok(Self::try_from("QmSaFjwJ2QtS3rZDKzC98XEzv2bqT4TfpWLCpphPPwyQTr") + .expect("known to work for test")) + //Ok(IpfsCid(cid)) } } From 73f10a12153175fc26a1026bb43421e26d1e78c3 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Thu, 25 Sep 2025 14:21:07 +0200 Subject: [PATCH 69/91] fake cid again --- core-primitives/utils/src/ipfs.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core-primitives/utils/src/ipfs.rs b/core-primitives/utils/src/ipfs.rs index d01dfac0d..235604e9e 100644 --- a/core-primitives/utils/src/ipfs.rs +++ b/core-primitives/utils/src/ipfs.rs @@ -56,8 +56,8 @@ impl IpfsCid { }; info!("Deriving CID from chunk of size {} bytes", chunk.len()); //let h = Sha256::digest(chunk); - let h = Code::Sha2_256.digest(chunk); - info!(" multihash digest: {}", hex::encode(h.digest())); + //let h = Code::Sha2_256.digest(chunk); + //info!(" multihash digest: {}", hex::encode(h.digest())); //let mh = multihash::Sha2_256::digest(chunk); //let cid = Cid::new_v1(RAW, h.into()); //info!(" returning CID: {}", cid); From 87c5ce0ab7b4a47f2283cfd027677feda68f550e Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Thu, 25 Sep 
2025 14:42:02 +0200 Subject: [PATCH 70/91] reproduce sha2 256 --- Cargo.lock | 1 + core-primitives/utils/Cargo.toml | 3 +++ core-primitives/utils/src/ipfs.rs | 5 +++++ enclave-runtime/Cargo.lock | 11 ++++++----- 4 files changed, 15 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 36d300b05..4d9d1128e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3811,6 +3811,7 @@ dependencies = [ "multibase", "multihash 0.18.1", "parity-scale-codec", + "sha2 0.10.9", ] [[package]] diff --git a/core-primitives/utils/Cargo.toml b/core-primitives/utils/Cargo.toml index adbad380f..7720f84ad 100644 --- a/core-primitives/utils/Cargo.toml +++ b/core-primitives/utils/Cargo.toml @@ -9,11 +9,14 @@ edition = "2021" [dependencies] cid = { version = "0.10.1", default-features = false, features = ["alloc"] } +sha2 = { version = "0.10.9", default-features = false } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } hex = { version = "0.4.3", default-features = false, features = ["alloc"] } log = "0.4.28" multibase = { version = "0.9.1", default-features = false } multihash = { version = "0.18.0", default-features = false, features = ["alloc", "multihash-impl", "sha2"] } +#sp-io = { path = "../../core-primitives/substrate-sgx/sp-io" } +# sgx_tcrypto = { branch = "master", git = "https://github.com/apache/teaclave-sgx-sdk.git" } [features] default = ["std"] diff --git a/core-primitives/utils/src/ipfs.rs b/core-primitives/utils/src/ipfs.rs index 235604e9e..ac8217445 100644 --- a/core-primitives/utils/src/ipfs.rs +++ b/core-primitives/utils/src/ipfs.rs @@ -28,6 +28,9 @@ use core::{ }; use log::*; use multibase::Base; +use sha2::{Digest, Sha256}; +// use sgx_tcrypto::{rsgx_sha256_slice, SgxEccHandle}; +// use sp_io::hashing::sha2_256; const RAW: u64 = 0x55; /// IPFS content identifier helper: https://docs.ipfs.tech/concepts/content-addressing/ @@ -55,6 +58,8 @@ impl IpfsCid { return Err(IpfsError::InputTooLarge); }; info!("Deriving CID from chunk of size {} bytes", chunk.len()); + let hash = Sha256::digest(b"hello world"); + info!(" sha2-256 digest: {}", hex::encode(hash)); //let h = Sha256::digest(chunk); //let h = Code::Sha2_256.digest(chunk); //info!(" multihash digest: {}", hex::encode(h.digest())); diff --git a/enclave-runtime/Cargo.lock b/enclave-runtime/Cargo.lock index 587a98df5..2579511b2 100644 --- a/enclave-runtime/Cargo.lock +++ b/enclave-runtime/Cargo.lock @@ -2475,6 +2475,7 @@ dependencies = [ "multibase 0.9.1", "multihash 0.18.1", "parity-scale-codec", + "sha2 0.10.9", ] [[package]] @@ -2708,7 +2709,7 @@ dependencies = [ "cfg-if 1.0.0", "ecdsa", "elliptic-curve", - "sha2 0.10.7", + "sha2 0.10.9", ] [[package]] @@ -2933,7 +2934,7 @@ dependencies = [ "core2", "digest 0.10.7", "multihash-derive", - "sha2 0.10.7", + "sha2 0.10.9", "unsigned-varint 0.7.2", ] @@ -4184,9 +4185,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.7" +version = "0.10.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ "cfg-if 1.0.0", "cpufeatures", "digest 0.10.7", ] @@ -4391,7 +4392,7 @@ dependencies = [ "blake2b_simd 1.0.1", "byteorder 1.4.3", "digest 0.10.7", - "sha2 0.10.7", + "sha2 0.10.9", "sha3 0.10.8", "sp-std", "twox-hash", ] From a1ad2cc9e7fe01f84df25089ade5a0290039a100 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Thu, 25 Sep 2025 14:48:03 +0200 Subject: 
[PATCH 71/91] simple remove sha2_256 call --- core-primitives/utils/src/ipfs.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core-primitives/utils/src/ipfs.rs b/core-primitives/utils/src/ipfs.rs index ac8217445..cda03cb37 100644 --- a/core-primitives/utils/src/ipfs.rs +++ b/core-primitives/utils/src/ipfs.rs @@ -58,8 +58,8 @@ impl IpfsCid { return Err(IpfsError::InputTooLarge); }; info!("Deriving CID from chunk of size {} bytes", chunk.len()); - let hash = Sha256::digest(b"hello world"); - info!(" sha2-256 digest: {}", hex::encode(hash)); + //let hash = Sha256::digest(b"hello world"); + //info!(" sha2-256 digest: {}", hex::encode(hash)); //let h = Sha256::digest(chunk); //let h = Code::Sha2_256.digest(chunk); //info!(" multihash digest: {}", hex::encode(h.digest())); From 3f622c4b7694177a67cb49570949d5cc36b77fd3 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Thu, 25 Sep 2025 15:21:57 +0200 Subject: [PATCH 72/91] derive sha256 with sgx primitive --- Cargo.lock | 38 ++++++++++++++ Cargo.toml | 1 + core-primitives/ipfs-cid/src/lib.rs | 70 ++++++++++++++++++++++++++ core-primitives/utils/src/ipfs.rs | 4 +- enclave-runtime/Cargo.lock | 33 ++++++++++++ enclave-runtime/Cargo.toml | 1 + enclave-runtime/src/test/ipfs_tests.rs | 5 ++ 7 files changed, 151 insertions(+), 1 deletion(-) create mode 100644 core-primitives/ipfs-cid/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 4d9d1128e..c988e2ea8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3406,6 +3406,44 @@ dependencies = [ "thiserror 1.0.9", ] +[[package]] +name = "itp-ipfs-cid" +version = "0.1.0" +dependencies = [ + "arrayvec 0.7.4", + "base64 0.13.0 (git+https://github.com/mesalock-linux/rust-base64-sgx?rev=sgx_1.1.3)", + "base64 0.13.1", + "bit-vec", + "chrono 0.4.11", + "chrono 0.4.26", + "hex", + "httparse 1.4.1", + "itertools 0.10.5", + "itp-ocall-api", + "itp-settings", + "itp-sgx-crypto", + "itp-sgx-io", + "log 0.4.28", + "num-bigint 0.2.5", + "parity-scale-codec", + "rustls 0.19.0 (git+https://github.com/mesalock-linux/rustls?rev=sgx_1.1.3)", + "rustls 0.19.1", + "serde_json 1.0.103", + "serde_json 1.0.60 (git+https://github.com/mesalock-linux/serde-json-sgx?tag=sgx_1.1.3)", + "sgx_rand", + "sgx_tcrypto", + "sgx_tse", + "sgx_tstd", + "sgx_types", + "sp-core", + "thiserror 1.0.44", + "thiserror 1.0.9", + "webpki 0.21.4 (registry+https://github.com/rust-lang/crates.io-index)", + "webpki 0.21.4 (git+https://github.com/mesalock-linux/webpki?branch=mesalock_sgx)", + "webpki-roots 0.21.0 (git+https://github.com/mesalock-linux/webpki-roots?branch=mesalock_sgx)", + "yasna 0.3.1", +] + [[package]] name = "itp-networking-utils" version = "0.9.0" diff --git a/Cargo.toml b/Cargo.toml index 95c637d1b..3ca2ebe48 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,6 +34,7 @@ members = [ "core-primitives/enclave-metrics", "core-primitives/extrinsics-factory", "core-primitives/hashing", + "core-primitives/ipfs-cid", "core-primitives/networking-utils", "core-primitives/node-api", "core-primitives/node-api/api-client-extensions", diff --git a/core-primitives/ipfs-cid/src/lib.rs b/core-primitives/ipfs-cid/src/lib.rs new file mode 100644 index 000000000..0be002ed8 --- /dev/null +++ b/core-primitives/ipfs-cid/src/lib.rs @@ -0,0 +1,70 @@ +/* + Copyright 2021 Integritee AG + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ + +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(all(feature = "std", feature = "sgx"))] +compile_error!("feature \"std\" and feature \"sgx\" cannot be enabled at the same time"); + +#[cfg(all(not(feature = "std"), feature = "sgx"))] +#[macro_use] +extern crate sgx_tstd as std; + +use core::fmt::Debug; +use log::*; +use sgx_tcrypto::{rsgx_sha256_slice, SgxEccHandle}; +// re-export module to properly feature gate sgx and regular std environment +#[cfg(all(not(feature = "std"), feature = "sgx"))] +pub mod sgx_reexport_prelude { + pub use base64_sgx as base64; + pub use chrono_sgx as chrono; + pub use rustls_sgx as rustls; + pub use serde_json_sgx as serde_json; + pub use thiserror_sgx as thiserror; + pub use webpki_sgx as webpki; + pub use yasna_sgx as yasna; +} + +#[derive(Clone, PartialEq, Eq)] +pub struct IpfsCid { + hash: [u8; 32], +} + +impl IpfsCid { + pub fn from_chunk(chunk: &[u8]) -> Result { + if chunk.len() > 256 * 1024 { + return Err(IpfsError::InputTooLarge); + }; + let hash = rsgx_sha256_slice(&chunk).map_err(|_| IpfsError::InputTooLarge)?; + info!("hash: {:?}", hash); + Ok(Self { hash }) + } +} + +impl Debug for IpfsCid { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "IpfsCid: hash: {} ", hex::encode(self.hash)) + } +} + +#[derive(Debug, PartialEq)] +pub enum IpfsError { + InputTooLarge, + InputCidInvalid, + FinalCidMissing, + Verification, +} diff --git a/core-primitives/utils/src/ipfs.rs b/core-primitives/utils/src/ipfs.rs index cda03cb37..59a2218e4 100644 --- a/core-primitives/utils/src/ipfs.rs +++ b/core-primitives/utils/src/ipfs.rs @@ -15,6 +15,9 @@ */ +#[cfg(all(not(feature = "std"), feature = "sgx"))] +use crate::sgx_reexport_prelude::*; + use alloc::{format, vec::Vec}; use cid::{ multihash::{Code, MultihashDigest}, @@ -29,7 +32,6 @@ use core::{ use log::*; use multibase::Base; use sha2::{Digest, Sha256}; -// use sgx_tcrypto::{rsgx_sha256_slice, SgxEccHandle}; // use sp_io::hashing::sha2_256; const RAW: u64 = 0x55; diff --git a/enclave-runtime/Cargo.lock b/enclave-runtime/Cargo.lock index 2579511b2..0f6caa17f 100644 --- a/enclave-runtime/Cargo.lock +++ b/enclave-runtime/Cargo.lock @@ -847,6 +847,7 @@ dependencies = [ "itp-enclave-metrics", "itp-extrinsics-factory", "itp-import-queue", + "itp-ipfs-cid", "itp-node-api", "itp-node-api-metadata", "itp-nonce-cache", @@ -2108,6 +2109,38 @@ dependencies = [ "thiserror 1.0.9", ] +[[package]] +name = "itp-ipfs-cid" +version = "0.1.0" +dependencies = [ + "arrayvec 0.7.4", + "base64 0.13.0 (git+https://github.com/mesalock-linux/rust-base64-sgx?rev=sgx_1.1.3)", + "bit-vec", + "chrono 0.4.11", + "hex", + "httparse", + "itertools 0.10.5", + "itp-ocall-api", + "itp-settings", + "itp-sgx-crypto", + "itp-sgx-io", + "log 0.4.28", + "num-bigint", + "parity-scale-codec", + "rustls 0.19.0 (git+https://github.com/mesalock-linux/rustls?rev=sgx_1.1.3)", + "serde_json 1.0.60 (git+https://github.com/mesalock-linux/serde-json-sgx?tag=sgx_1.1.3)", + "sgx_rand", + "sgx_tcrypto", + "sgx_tse", + "sgx_tstd", + "sgx_types", + "sp-core", + "thiserror 1.0.9", + "webpki", + "webpki-roots 0.21.0 
(git+https://github.com/mesalock-linux/webpki-roots?branch=mesalock_sgx)", + "yasna", +] + [[package]] name = "itp-node-api" version = "0.9.0" diff --git a/enclave-runtime/Cargo.toml b/enclave-runtime/Cargo.toml index b781cf5bd..444bb8bbe 100644 --- a/enclave-runtime/Cargo.toml +++ b/enclave-runtime/Cargo.toml @@ -108,6 +108,7 @@ itp-component-container = { path = "../core-primitives/component-container", def itp-enclave-metrics = { path = "../core-primitives/enclave-metrics", default-features = false, features = ["sgx"] } itp-extrinsics-factory = { path = "../core-primitives/extrinsics-factory", default-features = false, features = ["sgx"] } itp-import-queue = { path = "../core-primitives/import-queue", default-features = false, features = ["sgx"] } +itp-ipfs-cid = { path = "../core-primitives/ipfs-cid", default-features = false, features = ["sgx"] } itp-node-api = { path = "../core-primitives/node-api", default-features = false, features = ["sgx"] } itp-node-api-metadata = { path = "../core-primitives/node-api/metadata", default-features = false } itp-nonce-cache = { path = "../core-primitives/nonce-cache", default-features = false, features = ["sgx"] } diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index ef59142d0..cad35d1c3 100644 --- a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -20,6 +20,7 @@ extern crate sgx_tstd as std; use crate::ocall::OcallApi; +use itp_ipfs_cid::IpfsCid as ItpIpfsCid; use itp_ocall_api::EnclaveIpfsOCallApi; use itp_utils::IpfsCid; use log::*; @@ -81,6 +82,10 @@ pub fn test_ocall_write_ipfs_fallback() { eprintln!("file cid: {}", file_cid); assert_eq!(expected_cid, file_cid); + + // now try the alternative: + let alt_cid = ItpIpfsCid::from_chunk(&content_buf).expect("known to be ok"); + eprintln!("alternative file cid: {:?}", alt_cid); } fn find_first_matching_file(cid_str: String) -> Option { From 96660bfb98b7ec46cbc7b32b8e8b220a3ce93a84 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Thu, 25 Sep 2025 15:22:57 +0200 Subject: [PATCH 73/91] add missing toml --- core-primitives/ipfs-cid/Cargo.toml | 103 ++++++++++++++++++++++++++++ 1 file changed, 103 insertions(+) create mode 100644 core-primitives/ipfs-cid/Cargo.toml diff --git a/core-primitives/ipfs-cid/Cargo.toml b/core-primitives/ipfs-cid/Cargo.toml new file mode 100644 index 000000000..45eeaaf0b --- /dev/null +++ b/core-primitives/ipfs-cid/Cargo.toml @@ -0,0 +1,103 @@ +[package] +name = "itp-ipfs-cid" +version = "0.1.0" +authors = ["Integritee AG "] +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +# crates-io no_std deps +arrayvec = { version = "0.7.1", default-features = false } +bit-vec = { version = "0.6", default-features = false } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +hex = { version = "0.4.3", default-features = false, features = ["alloc"] } +itertools = { default-features = false, version = "0.10.1" } +log = { version = "0.4", default-features = false } + +# std only deps +base64 = { version = "0.13", features = ["alloc"], optional = true } +chrono = { version = "0.4.19", features = ["alloc"], optional = true } +rustls = { version = "0.19", optional = true } +serde_json = { version = "1.0", features = ["preserve_order"], optional = true } +thiserror = { version = "1.0", optional = true } +webpki = { version = "0.21", optional = true } + +# mesalock 
+base64_sgx = { package = "base64", rev = "sgx_1.1.3", git = "https://github.com/mesalock-linux/rust-base64-sgx", optional = true } +chrono_sgx = { package = "chrono", git = "https://github.com/mesalock-linux/chrono-sgx", optional = true } +num-bigint = { optional = true, git = "https://github.com/mesalock-linux/num-bigint-sgx" } +rustls_sgx = { package = "rustls", rev = "sgx_1.1.3", features = ["dangerous_configuration"], git = "https://github.com/mesalock-linux/rustls", optional = true } +serde_json_sgx = { package = "serde_json", tag = "sgx_1.1.3", features = ["preserve_order"], git = "https://github.com/mesalock-linux/serde-json-sgx", optional = true } +thiserror_sgx = { package = "thiserror", git = "https://github.com/mesalock-linux/thiserror-sgx", tag = "sgx_1.1.3", optional = true } +webpki-roots = { git = "https://github.com/mesalock-linux/webpki-roots", branch = "mesalock_sgx" } +webpki_sgx = { package = "webpki", git = "https://github.com/mesalock-linux/webpki", branch = "mesalock_sgx", optional = true } +yasna_sgx = { package = "yasna", optional = true, default-features = false, features = ["bit-vec", "num-bigint", "chrono", "mesalock_sgx"], git = "https://github.com/mesalock-linux/yasna.rs-sgx", rev = "sgx_1.1.3" } + +# sgx +sgx_rand = { branch = "master", git = "https://github.com/apache/teaclave-sgx-sdk.git", optional = true } +sgx_tcrypto = { branch = "master", git = "https://github.com/apache/teaclave-sgx-sdk.git", optional = true } +sgx_tse = { branch = "master", git = "https://github.com/apache/teaclave-sgx-sdk.git", optional = true } +sgx_tstd = { branch = "master", git = "https://github.com/apache/teaclave-sgx-sdk.git", features = ["untrusted_fs", "net", "backtrace"], optional = true } +sgx_types = { branch = "master", git = "https://github.com/apache/teaclave-sgx-sdk.git", features = ["extra_traits"] } + +# local deps +itp-ocall-api = { path = "../ocall-api", default-features = false } +itp-settings = { path = "../settings" } +itp-sgx-crypto = { path = "../sgx/crypto", default-features = false } +itp-sgx-io = { path = "../sgx/io", default-features = false } + +# integritee +httparse = { default-features = false, git = "https://github.com/integritee-network/httparse-sgx", branch = "sgx-experimental" } + +# substrate deps +sp-core = { default-features = false, features = ["full_crypto"], git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.42" } + +[features] +default = ["std"] +std = [ + # crates-io no_std + "arrayvec/std", + "codec/std", + "hex/std", + "log/std", + "itertools/use_std", + # optional std only + "base64", + "chrono", + "rustls", + "serde_json", + "thiserror", + "webpki", + # local + "itp-ocall-api/std", + "itp-sgx-io/std", + "itp-sgx-crypto/std", + # substrate + "sp-core/std", + # integritee + "httparse/std", +] + +sgx = [ + # sgx-only + "base64_sgx", + "chrono_sgx", + "rustls_sgx", + "serde_json_sgx", + "thiserror_sgx", + "webpki_sgx", + "yasna_sgx", + "sgx_tse", + "sgx_tstd", + "sgx_rand", + "sgx_tcrypto", + "num-bigint", + # local + "itp-sgx-io/sgx", + "itp-sgx-crypto/sgx", + # integritee + "httparse/mesalock_sgx", +] +test = [] +production = [] From 513e7220456e9291f9767c8a456609404fdede29 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Thu, 25 Sep 2025 15:38:02 +0200 Subject: [PATCH 74/91] wrap cid manually --- Cargo.lock | 3 +++ core-primitives/ipfs-cid/Cargo.toml | 4 +++- core-primitives/ipfs-cid/src/lib.rs | 8 +++++++- enclave-runtime/Cargo.lock | 3 +++ 4 files changed, 16 insertions(+), 2 deletions(-) diff --git 
a/Cargo.lock b/Cargo.lock index c988e2ea8..d2ff6cdc6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3416,6 +3416,7 @@ dependencies = [ "bit-vec", "chrono 0.4.11", "chrono 0.4.26", + "cid", "hex", "httparse 1.4.1", "itertools 0.10.5", @@ -3424,6 +3425,8 @@ dependencies = [ "itp-sgx-crypto", "itp-sgx-io", "log 0.4.28", + "multibase", + "multihash 0.18.1", "num-bigint 0.2.5", "parity-scale-codec", "rustls 0.19.0 (git+https://github.com/mesalock-linux/rustls?rev=sgx_1.1.3)", diff --git a/core-primitives/ipfs-cid/Cargo.toml b/core-primitives/ipfs-cid/Cargo.toml index 45eeaaf0b..36ff1310b 100644 --- a/core-primitives/ipfs-cid/Cargo.toml +++ b/core-primitives/ipfs-cid/Cargo.toml @@ -14,7 +14,9 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = hex = { version = "0.4.3", default-features = false, features = ["alloc"] } itertools = { default-features = false, version = "0.10.1" } log = { version = "0.4", default-features = false } - +cid = { version = "0.10.1", default-features = false, features = ["alloc"] } +multibase = { version = "0.9.1", default-features = false } +multihash = { version = "0.18.0", default-features = false, features = ["alloc"] } # std only deps base64 = { version = "0.13", features = ["alloc"], optional = true } chrono = { version = "0.4.19", features = ["alloc"], optional = true } diff --git a/core-primitives/ipfs-cid/src/lib.rs b/core-primitives/ipfs-cid/src/lib.rs index 0be002ed8..cdc0f4dee 100644 --- a/core-primitives/ipfs-cid/src/lib.rs +++ b/core-primitives/ipfs-cid/src/lib.rs @@ -38,7 +38,10 @@ pub mod sgx_reexport_prelude { pub use webpki_sgx as webpki; pub use yasna_sgx as yasna; } - +use cid::Cid; +use multihash::Multihash; +const SHA2_256: u64 = 0x12; +const RAW: u64 = 0x55; #[derive(Clone, PartialEq, Eq)] pub struct IpfsCid { hash: [u8; 32], @@ -51,6 +54,9 @@ impl IpfsCid { }; let hash = rsgx_sha256_slice(&chunk).map_err(|_| IpfsError::InputTooLarge)?; info!("hash: {:?}", hash); + let mh = Multihash::wrap(SHA2_256, &hash).map_err(|_| IpfsError::InputTooLarge)?; + let cid = Cid::new_v1(RAW, mh); + info!("cid: {:?}", cid); Ok(Self { hash }) } } diff --git a/enclave-runtime/Cargo.lock b/enclave-runtime/Cargo.lock index 0f6caa17f..cdb03c7d5 100644 --- a/enclave-runtime/Cargo.lock +++ b/enclave-runtime/Cargo.lock @@ -2117,6 +2117,7 @@ dependencies = [ "base64 0.13.0 (git+https://github.com/mesalock-linux/rust-base64-sgx?rev=sgx_1.1.3)", "bit-vec", "chrono 0.4.11", + "cid 0.10.1", "hex", "httparse", "itertools 0.10.5", @@ -2125,6 +2126,8 @@ dependencies = [ "itp-sgx-crypto", "itp-sgx-io", "log 0.4.28", + "multibase 0.9.1", + "multihash 0.18.1", "num-bigint", "parity-scale-codec", "rustls 0.19.0 (git+https://github.com/mesalock-linux/rustls?rev=sgx_1.1.3)", From 9f3766dc9f0e1c813e8abb84cbd3fe72a3cfc3c0 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Thu, 25 Sep 2025 16:11:44 +0200 Subject: [PATCH 75/91] refactor into fully functional itp-ipfs-cid --- Cargo.lock | 1 + core-primitives/ipfs-cid/Cargo.toml | 4 + core-primitives/ipfs-cid/src/lib.rs | 144 ++++++++++++++++++++++++++-- 3 files changed, 140 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d2ff6cdc6..c81160ff1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3438,6 +3438,7 @@ dependencies = [ "sgx_tse", "sgx_tstd", "sgx_types", + "sha2 0.10.9", "sp-core", "thiserror 1.0.44", "thiserror 1.0.9", diff --git a/core-primitives/ipfs-cid/Cargo.toml b/core-primitives/ipfs-cid/Cargo.toml index 36ff1310b..692bb37b5 100644 --- a/core-primitives/ipfs-cid/Cargo.toml +++ 
b/core-primitives/ipfs-cid/Cargo.toml @@ -55,6 +55,8 @@ httparse = { default-features = false, git = "https://github.com/integritee-netw # substrate deps sp-core = { default-features = false, features = ["full_crypto"], git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.42" } +sha2 = { version = "0.10.9", default-features = false, optional = true } + [features] default = ["std"] std = [ @@ -71,6 +73,8 @@ std = [ "serde_json", "thiserror", "webpki", + "sgx_tcrypto", + "sha2", # local "itp-ocall-api/std", "itp-sgx-io/std", diff --git a/core-primitives/ipfs-cid/src/lib.rs b/core-primitives/ipfs-cid/src/lib.rs index cdc0f4dee..2b405dff2 100644 --- a/core-primitives/ipfs-cid/src/lib.rs +++ b/core-primitives/ipfs-cid/src/lib.rs @@ -24,9 +24,12 @@ compile_error!("feature \"std\" and feature \"sgx\" cannot be enabled at the sam #[macro_use] extern crate sgx_tstd as std; -use core::fmt::Debug; use log::*; + +#[cfg(all(not(feature = "std"), feature = "sgx"))] use sgx_tcrypto::{rsgx_sha256_slice, SgxEccHandle}; +#[cfg(not(all(not(feature = "std"), feature = "sgx")))] +use sha2::{Digest, Sha256}; // re-export module to properly feature gate sgx and regular std environment #[cfg(all(not(feature = "std"), feature = "sgx"))] pub mod sgx_reexport_prelude { @@ -39,32 +42,101 @@ pub mod sgx_reexport_prelude { pub use yasna_sgx as yasna; } use cid::Cid; +use codec::{Decode, Encode}; +use multibase::Base; use multihash::Multihash; +use std::{ + convert::TryFrom, + fmt::{Debug, Display}, + vec::Vec, +}; const SHA2_256: u64 = 0x12; const RAW: u64 = 0x55; #[derive(Clone, PartialEq, Eq)] -pub struct IpfsCid { - hash: [u8; 32], +pub struct IpfsCid(pub Cid); + +impl From for IpfsCid { + fn from(value: Cid) -> Self { + IpfsCid(value) + } +} + +impl TryFrom<&str> for IpfsCid { + type Error = cid::Error; + + fn try_from(value: &str) -> Result { + let cid = Cid::try_from(value)?; + Ok(IpfsCid(cid)) + } +} + +impl Encode for IpfsCid { + fn encode(&self) -> Vec { + self.0.to_bytes().encode() + } +} + +impl Decode for IpfsCid { + fn decode(input: &mut I) -> Result { + let bytes: Vec = Decode::decode(input)?; + let cid = Cid::try_from(bytes) + .map_err(|_| codec::Error::from("Failed to decode IpfsCid from bytes"))?; + Ok(IpfsCid(cid)) + } +} + +impl Debug for IpfsCid { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let cid = &self.0; + let version = cid.version(); + let codec = cid.codec(); + let mh = cid.hash(); + let mh_code = mh.code(); + let mh_size = mh.size(); + let mh_digest = mh.digest(); + + f.debug_struct("IpfsCid") + .field("version", &version) + .field("codec", &codec) + .field("multihash_code", &mh_code) + .field("multihash_size", &mh_size) + .field("multihash_digest", &hex::encode(mh_digest)) + .finish() + } } +impl Display for IpfsCid { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let cid_str = if self.0.codec() == RAW { + multibase::encode(Base::Base32Lower, self.0.to_bytes()) + } else { + multibase::encode(Base::Base58Btc, self.0.to_bytes()) + }; + write!(f, "{}", cid_str) + } +} impl IpfsCid { pub fn from_chunk(chunk: &[u8]) -> Result { if chunk.len() > 256 * 1024 { return Err(IpfsError::InputTooLarge); }; - let hash = rsgx_sha256_slice(&chunk).map_err(|_| IpfsError::InputTooLarge)?; + let hash = hasher(chunk)?; info!("hash: {:?}", hash); let mh = Multihash::wrap(SHA2_256, &hash).map_err(|_| IpfsError::InputTooLarge)?; let cid = Cid::new_v1(RAW, mh); info!("cid: {:?}", cid); - Ok(Self { hash }) + Ok(Self(cid)) } } -impl 
Debug for IpfsCid { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "IpfsCid: hash: {} ", hex::encode(self.hash)) - } +#[cfg(all(not(feature = "std"), feature = "sgx"))] +// sha2 crashes enclaves. therefore we need to use this SDK-provided hasher for sgx builds +fn hasher(chunk: &[u8]) -> Result<[u8; 32], IpfsError> { + rsgx_sha256_slice(&chunk).map_err(|_| IpfsError::InputTooLarge) +} +#[cfg(not(all(not(feature = "std"), feature = "sgx")))] +fn hasher(chunk: &[u8]) -> Result<[u8; 32], IpfsError> { + Ok(Sha256::digest(chunk).into()) } #[derive(Debug, PartialEq)] @@ -74,3 +146,57 @@ pub enum IpfsError { FinalCidMissing, Verification, } + +#[cfg(test)] +mod tests { + use super::*; + use std::vec; + + #[test] + pub fn test_from_max_chunk_content_works() { + // cross-check with ipfs cli: + // head -c 262144 /dev/zero | tr '\0' 'A' | ipfs block put --format=raw + // bafkreiexul6fkqo4zhagxgnsvbgdjfq7udb26ig3uoli34xznjlmnpaaze + let expected_cid_str = "bafkreiexul6fkqo4zhagxgnsvbgdjfq7udb26ig3uoli34xznjlmnpaaze"; + let expected_cid = IpfsCid::try_from(expected_cid_str).unwrap(); + let content: Vec = vec![65; 256 * 1024]; // exactly one chunk of 256kB of "A" chars + let derived_cid = IpfsCid::from_chunk(&content).unwrap(); + assert_eq!(derived_cid, expected_cid); + } + + #[test] + pub fn test_cid_verification_fails_for_incorrect_single_chunk_content() { + let expected_cid_str = "bafkreihdcgl5emugcgwjavoknx76kmfdahpzz3jyghg5mhslvhbrznfkky"; + let expected_cid = IpfsCid::try_from(expected_cid_str).unwrap(); + let content: Vec = vec![99; 256 * 1024]; + let wrong_cid = IpfsCid::from_chunk(&content).unwrap(); + assert!(wrong_cid != expected_cid); + } + #[test] + pub fn test_from_text_works() { + // cross-check with ipfs cli: + // echo -n "FooBar" | ipfs block put --format=raw + // bafkreianosnl4e3xk42jhyg7otpy2euc4ruwo5kkd26hzrrsher2pcfnlq + let expected_cid_str = "bafkreianosnl4e3xk42jhyg7otpy2euc4ruwo5kkd26hzrrsher2pcfnlq"; + let expected_cid = IpfsCid::try_from(expected_cid_str).unwrap(); + let content = "FooBar".as_bytes(); + let derived_cid = IpfsCid::from_chunk(content).unwrap(); + assert_eq!(derived_cid, expected_cid); + } + + #[test] + pub fn test_cid_verification_fails_for_oversize_chunk_content() { + let content: Vec = vec![99; 256 * 1024 + 1]; + assert!(IpfsCid::from_chunk(&content) == Err(IpfsError::InputTooLarge)); + } + + #[test] + pub fn test_encode_decode_ipfscid_works() { + let expected_cid_str = "QmSaFjwJ2QtS3rZDKzC98XEzv2bqT4TfpWLCpphPPwyQTr"; + let expected_cid = IpfsCid::try_from(expected_cid_str).unwrap(); + let encoded = expected_cid.encode(); + assert_eq!(encoded.len(), 34 + 1); + let decoded = IpfsCid::decode(&mut &encoded[..]).unwrap(); + assert_eq!(decoded, expected_cid); + } +} From 016dd8cfa7746f016a273a1fd9d7beffa7419edf Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Thu, 25 Sep 2025 16:30:35 +0200 Subject: [PATCH 76/91] swap all usages of IpfsCid for new crate --- Cargo.lock | 3 + app-libs/stf/Cargo.toml | 3 + app-libs/stf/src/relayed_note.rs | 2 +- app-libs/stf/src/trusted_call.rs | 3 +- .../node-api/api-client-extensions/Cargo.toml | 1 + .../src/pallet_teerex.rs | 5 +- core-primitives/types/src/lib.rs | 1 - core-primitives/utils/src/ipfs.rs | 202 ------------------ core-primitives/utils/src/lib.rs | 2 - enclave-runtime/Cargo.lock | 1 + enclave-runtime/src/ocall/ipfs_ocall.rs | 2 +- enclave-runtime/src/test/ipfs_tests.rs | 8 +- .../test/mocks/propose_to_import_call_mock.rs | 5 +- service/Cargo.toml | 1 + 
service/src/ocall_bridge/bridge_api.rs | 2 +- service/src/ocall_bridge/ffi/ipfs.rs | 2 +- service/src/ocall_bridge/ipfs_ocall.rs | 2 +- service/src/tests/mock.rs | 2 +- 18 files changed, 23 insertions(+), 224 deletions(-) delete mode 100644 core-primitives/utils/src/ipfs.rs diff --git a/Cargo.lock b/Cargo.lock index c81160ff1..26676b51d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2701,6 +2701,7 @@ dependencies = [ "itp-api-client-types", "itp-enclave-api", "itp-enclave-metrics", + "itp-ipfs-cid", "itp-node-api", "itp-settings", "itp-sgx-temp-dir", @@ -2950,6 +2951,7 @@ dependencies = [ "ita-parentchain-specs", "ita-sgx-runtime", "itp-hashing", + "itp-ipfs-cid", "itp-node-api", "itp-node-api-metadata", "itp-pallet-storage", @@ -3257,6 +3259,7 @@ name = "itp-api-client-extensions" version = "0.9.0" dependencies = [ "itp-api-client-types", + "itp-ipfs-cid", "itp-types", "log 0.4.28", "sp-consensus-grandpa", diff --git a/app-libs/stf/Cargo.toml b/app-libs/stf/Cargo.toml index ef89c677a..ce58a0300 100644 --- a/app-libs/stf/Cargo.toml +++ b/app-libs/stf/Cargo.toml @@ -20,6 +20,7 @@ ita-assets-map = { path = "../assets-map", default-features = false } ita-parentchain-specs = { default-features = false, path = "../parentchain-specs" } ita-sgx-runtime = { default-features = false, path = "../sgx-runtime" } itp-hashing = { default-features = false, path = "../../core-primitives/hashing" } +itp-ipfs-cid = { default-features = false, path = "../../core-primitives/ipfs-cid" } itp-node-api = { default-features = false, path = "../../core-primitives/node-api" } itp-node-api-metadata = { default-features = false, path = "../../core-primitives/node-api/metadata" } itp-pallet-storage = { path = "../../core-primitives/pallet-storage", default-features = false } @@ -59,6 +60,7 @@ default = ["std"] evm = ["ita-sgx-runtime/evm"] sgx = [ "sgx_tstd", + "itp-ipfs-cid/sgx", "itp-sgx-externalities/sgx", "sp-io/sgx", "itp-node-api/sgx", @@ -72,6 +74,7 @@ std = [ "ita-sgx-runtime/std", "ita-parentchain-specs/std", "itp-hashing/std", + "itp-ipfs-cid/std", "itp-pallet-storage/std", "itp-sgx-externalities/std", "itp-stf-interface/std", diff --git a/app-libs/stf/src/relayed_note.rs b/app-libs/stf/src/relayed_note.rs index 1d15d89b1..d1baebcdf 100644 --- a/app-libs/stf/src/relayed_note.rs +++ b/app-libs/stf/src/relayed_note.rs @@ -17,7 +17,7 @@ use codec::{Decode, Encode}; use core::fmt::Debug; -use itp_utils::IpfsCid; +use itp_ipfs_cid::IpfsCid; use sp_std::vec::Vec; pub type ConversationId = u32; diff --git a/app-libs/stf/src/trusted_call.rs b/app-libs/stf/src/trusted_call.rs index 26f49e692..e4cef8016 100644 --- a/app-libs/stf/src/trusted_call.rs +++ b/app-libs/stf/src/trusted_call.rs @@ -52,6 +52,7 @@ use ita_sgx_runtime::{ SessionProxyRole, ShardManagement, System, }; pub use ita_sgx_runtime::{Balance, Index}; +use itp_ipfs_cid::IpfsCid; use itp_node_api::metadata::{provider::AccessNodeMetadata, NodeMetadataTrait}; use itp_node_api_metadata::{ frame_system::SystemCallIndexes, @@ -70,7 +71,7 @@ use itp_types::{ parentchain::{GenericMortality, ParentchainCall, ParentchainId, ProxyType}, Address, Moment, OpaqueCall, TrustedCallSideEffect, }; -use itp_utils::{stringify::account_id_to_string, IpfsCid}; +use itp_utils::stringify::account_id_to_string; use log::*; use pallet_notes::{TimestampedTrustedNote, TrustedNote}; use sp_core::{ diff --git a/core-primitives/node-api/api-client-extensions/Cargo.toml b/core-primitives/node-api/api-client-extensions/Cargo.toml index d54f56075..e3b84909d 100644 --- 
a/core-primitives/node-api/api-client-extensions/Cargo.toml +++ b/core-primitives/node-api/api-client-extensions/Cargo.toml @@ -17,6 +17,7 @@ substrate-api-client = { default-features = false, features = ["std", "sync-api" # local deps itp-api-client-types = { path = "../api-client-types" } +itp-ipfs-cid = { path = "../../ipfs-cid" } itp-types = { path = "../../types" } [features] diff --git a/core-primitives/node-api/api-client-extensions/src/pallet_teerex.rs b/core-primitives/node-api/api-client-extensions/src/pallet_teerex.rs index 9d3c73aea..69bbbf0b4 100644 --- a/core-primitives/node-api/api-client-extensions/src/pallet_teerex.rs +++ b/core-primitives/node-api/api-client-extensions/src/pallet_teerex.rs @@ -17,9 +17,8 @@ use crate::ApiResult; use itp_api_client_types::{traits::GetStorage, Api, Config, Request}; -use itp_types::{ - AccountId, IpfsCid, MultiEnclave, ShardIdentifier, ShardSignerStatus, ShardStatus, -}; +use itp_ipfs_cid::IpfsCid; +use itp_types::{AccountId, MultiEnclave, ShardIdentifier, ShardSignerStatus, ShardStatus}; use log::error; pub const TEEREX: &str = "Teerex"; diff --git a/core-primitives/types/src/lib.rs b/core-primitives/types/src/lib.rs index dcd0364ed..b36b3f0d5 100644 --- a/core-primitives/types/src/lib.rs +++ b/core-primitives/types/src/lib.rs @@ -41,7 +41,6 @@ pub type Nonce = u32; pub use itp_sgx_runtime_primitives::types::*; -pub use itp_utils::IpfsCid; pub type MrEnclave = [u8; 32]; pub type ConfirmCallFn = ([u8; 2], ShardIdentifier, H256, Vec); diff --git a/core-primitives/utils/src/ipfs.rs b/core-primitives/utils/src/ipfs.rs deleted file mode 100644 index 59a2218e4..000000000 --- a/core-primitives/utils/src/ipfs.rs +++ /dev/null @@ -1,202 +0,0 @@ -/* - Copyright 2021 Integritee AG - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- -*/ - -#[cfg(all(not(feature = "std"), feature = "sgx"))] -use crate::sgx_reexport_prelude::*; - -use alloc::{format, vec::Vec}; -use cid::{ - multihash::{Code, MultihashDigest}, - Cid, -}; -use codec::{Decode, Encode}; -use core::{ - convert::TryFrom, - fmt::{Debug, Display}, - hash::Hash, -}; -use log::*; -use multibase::Base; -use sha2::{Digest, Sha256}; -// use sp_io::hashing::sha2_256; -const RAW: u64 = 0x55; - -/// IPFS content identifier helper: https://docs.ipfs.tech/concepts/content-addressing/ -#[derive(Clone, PartialEq, Eq)] -pub struct IpfsCid(pub Cid); - -impl From for IpfsCid { - fn from(value: Cid) -> Self { - IpfsCid(value) - } -} - -impl TryFrom<&str> for IpfsCid { - type Error = cid::Error; - - fn try_from(value: &str) -> Result { - let cid = Cid::try_from(value)?; - Ok(IpfsCid(cid)) - } -} - -impl IpfsCid { - pub fn from_chunk(chunk: &[u8]) -> Result { - if chunk.len() > 256 * 1024 { - return Err(IpfsError::InputTooLarge); - }; - info!("Deriving CID from chunk of size {} bytes", chunk.len()); - //let hash = Sha256::digest(b"hello world"); - //info!(" sha2-256 digest: {}", hex::encode(hash)); - //let h = Sha256::digest(chunk); - //let h = Code::Sha2_256.digest(chunk); - //info!(" multihash digest: {}", hex::encode(h.digest())); - //let mh = multihash::Sha2_256::digest(chunk); - //let cid = Cid::new_v1(RAW, h.into()); - //info!(" returning CID: {}", cid); - Ok(Self::try_from("QmSaFjwJ2QtS3rZDKzC98XEzv2bqT4TfpWLCpphPPwyQTr") - .expect("known to work for test")) - //Ok(IpfsCid(cid)) - } -} - -impl Encode for IpfsCid { - fn encode(&self) -> Vec { - self.0.to_bytes().encode() - } -} - -impl Decode for IpfsCid { - fn decode(input: &mut I) -> Result { - let bytes: Vec = Decode::decode(input)?; - let cid = Cid::try_from(bytes) - .map_err(|_| codec::Error::from("Failed to decode IpfsCid from bytes"))?; - Ok(IpfsCid(cid)) - } -} - -impl Debug for IpfsCid { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let cid = &self.0; - let version = cid.version(); - let codec = cid.codec(); - let mh = cid.hash(); - let mh_code = mh.code(); - let mh_size = mh.size(); - let mh_digest = mh.digest(); - - f.debug_struct("IpfsCid") - .field("version", &version) - .field("codec", &codec) - .field("multihash_code", &mh_code) - .field("multihash_size", &mh_size) - .field("multihash_digest", &hex::encode(mh_digest)) - .finish() - } -} - -impl Display for IpfsCid { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let cid_str = if self.0.codec() == RAW { - multibase::encode(Base::Base32Lower, self.0.to_bytes()) - } else { - multibase::encode(Base::Base58Btc, self.0.to_bytes()) - }; - write!(f, "{}", cid_str) - } -} - -#[derive(Debug, PartialEq)] -pub enum IpfsError { - InputTooLarge, - InputCidInvalid, - FinalCidMissing, - Verification, -} - -/// IPFS chunk blocks helper -/// See https://ipfs-search.readthedocs.io/en/latest/ipfs_datatypes.html#files -#[derive(Default)] -pub struct Stats { - pub blocks: usize, - pub block_bytes: u64, - pub last: Option, -} - -impl Stats { - fn process)>>(&mut self, new_blocks: I) { - for (cid, block) in new_blocks { - self.last = Some(cid); - self.blocks += 1; - self.block_bytes += block.len() as u64; - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloc::vec; - - #[test] - pub fn test_from_max_chunk_content_works() { - // cross-check with ipfs cli: - // head -c 262144 /dev/zero | tr '\0' 'A' | ipfs block put --format=raw - // bafkreiexul6fkqo4zhagxgnsvbgdjfq7udb26ig3uoli34xznjlmnpaaze - let 
expected_cid_str = "bafkreiexul6fkqo4zhagxgnsvbgdjfq7udb26ig3uoli34xznjlmnpaaze"; - let expected_cid = IpfsCid::try_from(expected_cid_str).unwrap(); - let content: Vec = vec![65; 256 * 1024]; // exactly one chunk of 256kB of "A" chars - let derived_cid = IpfsCid::from_chunk(&content).unwrap(); - assert_eq!(derived_cid, expected_cid); - } - - #[test] - pub fn test_cid_verification_fails_for_incorrect_single_chunk_content() { - let expected_cid_str = "bafkreihdcgl5emugcgwjavoknx76kmfdahpzz3jyghg5mhslvhbrznfkky"; - let expected_cid = IpfsCid::try_from(expected_cid_str).unwrap(); - let content: Vec = vec![99; 256 * 1024]; - let wrong_cid = IpfsCid::from_chunk(&content).unwrap(); - assert!(wrong_cid != expected_cid); - } - #[test] - pub fn test_from_text_works() { - // cross-check with ipfs cli: - // echo -n "FooBar" | ipfs block put --format=raw - // bafkreianosnl4e3xk42jhyg7otpy2euc4ruwo5kkd26hzrrsher2pcfnlq - let expected_cid_str = "bafkreianosnl4e3xk42jhyg7otpy2euc4ruwo5kkd26hzrrsher2pcfnlq"; - let expected_cid = IpfsCid::try_from(expected_cid_str).unwrap(); - let content = "FooBar".as_bytes(); - let derived_cid = IpfsCid::from_chunk(content).unwrap(); - assert_eq!(derived_cid, expected_cid); - } - - #[test] - pub fn test_cid_verification_fails_for_oversize_chunk_content() { - let content: Vec = vec![99; 256 * 1024 + 1]; - assert!(IpfsCid::from_chunk(&content) == Err(IpfsError::InputTooLarge)); - } - - #[test] - pub fn test_encode_decode_ipfscid_works() { - let expected_cid_str = "QmSaFjwJ2QtS3rZDKzC98XEzv2bqT4TfpWLCpphPPwyQTr"; - let expected_cid = IpfsCid::try_from(expected_cid_str).unwrap(); - let encoded = expected_cid.encode(); - assert_eq!(encoded.len(), 34 + 1); - let decoded = IpfsCid::decode(&mut &encoded[..]).unwrap(); - assert_eq!(decoded, expected_cid); - } -} diff --git a/core-primitives/utils/src/lib.rs b/core-primitives/utils/src/lib.rs index 2bc5c3bc6..297ff5090 100644 --- a/core-primitives/utils/src/lib.rs +++ b/core-primitives/utils/src/lib.rs @@ -25,12 +25,10 @@ pub mod buffer; pub mod error; pub mod hex; pub mod hex_display; -pub mod ipfs; pub mod stringify; // Public re-exports. 
pub use self::{ buffer::write_slice_and_whitespace_pad, hex::{FromHexPrefixed, ToHexPrefixed}, - ipfs::IpfsCid, }; diff --git a/enclave-runtime/Cargo.lock b/enclave-runtime/Cargo.lock index cdb03c7d5..60fc7e4c5 100644 --- a/enclave-runtime/Cargo.lock +++ b/enclave-runtime/Cargo.lock @@ -1786,6 +1786,7 @@ dependencies = [ "ita-parentchain-specs", "ita-sgx-runtime", "itp-hashing", + "itp-ipfs-cid", "itp-node-api", "itp-node-api-metadata", "itp-pallet-storage", diff --git a/enclave-runtime/src/ocall/ipfs_ocall.rs b/enclave-runtime/src/ocall/ipfs_ocall.rs index baa6dc757..e3d737809 100644 --- a/enclave-runtime/src/ocall/ipfs_ocall.rs +++ b/enclave-runtime/src/ocall/ipfs_ocall.rs @@ -19,8 +19,8 @@ use crate::ocall::{ffi, OcallApi}; use alloc::vec::Vec; use codec::Encode; use frame_support::ensure; +use itp_ipfs_cid::IpfsCid; use itp_ocall_api::EnclaveIpfsOCallApi; -use itp_types::IpfsCid; use log::*; use sgx_types::{sgx_status_t, SgxResult}; diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index cad35d1c3..35df0d0ba 100644 --- a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -20,9 +20,8 @@ extern crate sgx_tstd as std; use crate::ocall::OcallApi; -use itp_ipfs_cid::IpfsCid as ItpIpfsCid; +use itp_ipfs_cid::IpfsCid; use itp_ocall_api::EnclaveIpfsOCallApi; -use itp_utils::IpfsCid; use log::*; use std::{ fs, @@ -80,12 +79,7 @@ pub fn test_ocall_write_ipfs_fallback() { assert!(res_file_cid.is_ok()); let file_cid = res_file_cid.expect("known to be ok"); eprintln!("file cid: {}", file_cid); - assert_eq!(expected_cid, file_cid); - - // now try the alternative: - let alt_cid = ItpIpfsCid::from_chunk(&content_buf).expect("known to be ok"); - eprintln!("alternative file cid: {:?}", alt_cid); } fn find_first_matching_file(cid_str: String) -> Option { diff --git a/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs b/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs index b968a44e4..e56869979 100644 --- a/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs +++ b/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs @@ -19,12 +19,13 @@ use crate::test::mocks::types::TestBlockImporter; use codec::{Decode, Encode}; use itc_parentchain::primitives::ParentchainId; +use itp_ipfs_cid::IpfsCid; use itp_ocall_api::{ EnclaveIpfsOCallApi, EnclaveOnChainOCallApi, EnclaveSidechainOCallApi, Result, }; use itp_types::{ - storage::StorageEntryVerified, BlockHash, Header as ParentchainHeader, IpfsCid, - ShardIdentifier, WorkerRequest, WorkerResponse, H256, + storage::StorageEntryVerified, BlockHash, Header as ParentchainHeader, ShardIdentifier, + WorkerRequest, WorkerResponse, H256, }; use its_primitives::types::block::SignedBlock as SignedSidechainBlockType; use its_sidechain::consensus_common::BlockImport; diff --git a/service/Cargo.toml b/service/Cargo.toml index a5efdde9e..93dfc34f4 100644 --- a/service/Cargo.toml +++ b/service/Cargo.toml @@ -48,6 +48,7 @@ itc-rpc-server = { path = "../core/rpc-server" } itp-api-client-types = { path = "../core-primitives/node-api/api-client-types" } itp-enclave-api = { path = "../core-primitives/enclave-api" } itp-enclave-metrics = { path = "../core-primitives/enclave-metrics" } +itp-ipfs-cid = { path = "../core-primitives/ipfs-cid" } itp-node-api = { path = "../core-primitives/node-api" } itp-settings = { path = "../core-primitives/settings" } itp-stf-interface = { path = "../core-primitives/stf-interface" } diff --git a/service/src/ocall_bridge/bridge_api.rs 
b/service/src/ocall_bridge/bridge_api.rs index 0ed00815a..b86a561cf 100644 --- a/service/src/ocall_bridge/bridge_api.rs +++ b/service/src/ocall_bridge/bridge_api.rs @@ -17,7 +17,7 @@ */ use itp_enclave_api::remote_attestation::QveReport; -use itp_types::IpfsCid; +use itp_ipfs_cid::IpfsCid; use lazy_static::lazy_static; use log::*; use parking_lot::RwLock; diff --git a/service/src/ocall_bridge/ffi/ipfs.rs b/service/src/ocall_bridge/ffi/ipfs.rs index 1c53a7818..043b1103b 100644 --- a/service/src/ocall_bridge/ffi/ipfs.rs +++ b/service/src/ocall_bridge/ffi/ipfs.rs @@ -18,7 +18,7 @@ use crate::ocall_bridge::bridge_api::{Bridge, IpfsBridge}; use codec::{Decode, Encode}; -use itp_utils::IpfsCid; +use itp_ipfs_cid::IpfsCid; use log::*; use sgx_types::sgx_status_t; use std::{slice, sync::Arc}; diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index 30ca06015..cd4fb7c64 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -19,7 +19,7 @@ use crate::ocall_bridge::bridge_api::{IpfsBridge, OCallBridgeError, OCallBridgeResult}; use chrono::Local; use ipfs_api_backend_hyper::{IpfsApi, IpfsClient, TryFromUri}; -use itp_utils::IpfsCid; +use itp_ipfs_cid::IpfsCid; use log::*; use std::{ fmt::Display, diff --git a/service/src/tests/mock.rs b/service/src/tests/mock.rs index f06a2bf74..3a9f98d6d 100644 --- a/service/src/tests/mock.rs +++ b/service/src/tests/mock.rs @@ -17,12 +17,12 @@ use codec::Encode; use enclave_bridge_primitives::ShardSignerStatus; +use itp_ipfs_cid::IpfsCid; use itp_node_api::api_client::{ApiResult, PalletTeerexApi}; use itp_types::{ parentchain::BlockNumber, AccountId, MultiEnclave, SgxBuildMode, SgxEnclave, SgxReportData, SgxStatus, ShardIdentifier, H256 as Hash, }; -use itp_utils::IpfsCid; pub struct TestNodeApi; From 13b6f268795268ef19cf561bd7ece7ba4b0e770d Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Thu, 25 Sep 2025 16:38:39 +0200 Subject: [PATCH 77/91] clean up test logging --- enclave-runtime/src/test/ipfs_tests.rs | 41 +++++--------------------- enclave-runtime/src/test/tests_main.rs | 3 -- 2 files changed, 8 insertions(+), 36 deletions(-) diff --git a/enclave-runtime/src/test/ipfs_tests.rs b/enclave-runtime/src/test/ipfs_tests.rs index 35df0d0ba..1af760a5c 100644 --- a/enclave-runtime/src/test/ipfs_tests.rs +++ b/enclave-runtime/src/test/ipfs_tests.rs @@ -31,54 +31,29 @@ use std::{ vec::Vec, }; -#[allow(unused)] -/// this test neeeds an ipfs node running and configured with cli args. here for reference but may never be called -pub fn test_ocall_read_write_ipfs() { - info!("testing IPFS read/write. 
Hopefully ipfs daemon is running..."); - let enc_state: Vec = vec![20; 100 * 1024]; - - let result = OcallApi.write_ipfs(enc_state); - eprintln!("write_ipfs ocall result : {:?}", result); - - // let returned_cid_raw = OcallApi.write_ipfs(enc_state.as_slice()).unwrap(); - // let returned_cid = IpfsCid::decode(&mut returned_cid_raw.as_slice()).unwrap(); - // assert_eq!(expected_cid, returned_cid); - // - // OcallApi.read_ipfs(&returned_cid).unwrap(); - // - // let cid_str = format!("{:?}", returned_cid); - // let mut f = fs::File::open(cid_str).unwrap(); - // let mut content_buf = Vec::new(); - // f.read_to_end(&mut content_buf).unwrap(); - // info!("reading file {:?} of size {} bytes", f, &content_buf.len()); - // - // let file_cid = IpfsCid::from_content_bytes(&content_buf).unwrap(); - // assert_eq!(expected_cid, file_cid); -} - pub fn test_ocall_write_ipfs_fallback() { let payload_size = 100; // in kB - eprintln!("testing IPFS write of {}kB if api is unreachable. Expected to fallback to dump local file...", payload_size); + info!("testing IPFS write of {}kB if api is unreachable. Expected to fallback to dump local file...", payload_size); let enc_state: Vec = vec![20; payload_size * 1024]; let res_expected_cid = IpfsCid::from_chunk(&enc_state); let result = OcallApi.write_ipfs(enc_state); - eprintln!("write_ipfs ocall result : {:?}", result); - eprintln!("expected cid details: {:?}", res_expected_cid); + debug!("write_ipfs ocall result : {:?}", result); + debug!("expected cid details: {:?}", res_expected_cid); assert!(res_expected_cid.is_ok()); let expected_cid = res_expected_cid.expect("known to be ok"); - eprintln!("expected cid: {}", expected_cid); + info!("expected cid: {}", expected_cid); let dumpfile = find_first_matching_file(expected_cid.to_string()).expect("dumped file not found"); - eprintln!("found dumped file: {:?}", dumpfile); + info!("found dumped file: {:?}", dumpfile); let mut f = fs::File::open(dumpfile).unwrap(); let mut content_buf = Vec::new(); f.read_to_end(&mut content_buf).unwrap(); - eprintln!("reading file {:?} of size {} bytes", f, &content_buf.len()); + debug!("reading file {:?} of size {} bytes", f, &content_buf.len()); let res_file_cid = IpfsCid::from_chunk(&content_buf); - eprintln!("file cid details: {:?}", res_file_cid); + debug!("file cid details: {:?}", res_file_cid); assert!(res_file_cid.is_ok()); let file_cid = res_file_cid.expect("known to be ok"); - eprintln!("file cid: {}", file_cid); + debug!("file cid: {}", file_cid); assert_eq!(expected_cid, file_cid); } diff --git a/enclave-runtime/src/test/tests_main.rs b/enclave-runtime/src/test/tests_main.rs index 6e1e7eebb..008dab7b5 100644 --- a/enclave-runtime/src/test/tests_main.rs +++ b/enclave-runtime/src/test/tests_main.rs @@ -161,9 +161,6 @@ pub extern "C" fn test_main_entrance() -> size_t { // // light-client-test // itc_parentchain::light_client::io::sgx_tests::init_parachain_light_client_works, // itc_parentchain::light_client::io::sgx_tests::sealing_creates_backup, - - // this test needs an ipfs node running.. 
- // crate::test::ipfs_tests::test_ocall_read_write_ipfs, crate::test::ipfs_tests::test_ocall_write_ipfs_fallback, // Teeracle tests //run_teeracle_tests, From 5f887359537682602dc349fc6897b934f13ac726 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Thu, 25 Sep 2025 16:40:18 +0200 Subject: [PATCH 78/91] revert enclave test isolation --- enclave-runtime/src/test/tests_main.rs | 175 +++++++++++++------------ 1 file changed, 89 insertions(+), 86 deletions(-) diff --git a/enclave-runtime/src/test/tests_main.rs b/enclave-runtime/src/test/tests_main.rs index 008dab7b5..1c8e0a235 100644 --- a/enclave-runtime/src/test/tests_main.rs +++ b/enclave-runtime/src/test/tests_main.rs @@ -76,94 +76,97 @@ use std::{string::String, sync::Arc, time::Duration, vec::Vec}; #[no_mangle] pub extern "C" fn test_main_entrance() -> size_t { rsgx_unit_tests!( - // itp_attestation_handler::attestation_handler::tests::decode_spid_works, - // stf_sgx_tests::enclave_account_initialization_works, - // stf_sgx_tests::shield_funds_increments_signer_account_nonce, - // stf_sgx_tests::test_root_account_exists_after_initialization, - // itp_stf_state_handler::test::sgx_tests::test_write_and_load_state_works, - // itp_stf_state_handler::test::sgx_tests::test_sgx_state_decode_encode_works, - // itp_stf_state_handler::test::sgx_tests::test_encrypt_decrypt_state_type_works, - // itp_stf_state_handler::test::sgx_tests::test_write_access_locks_read_until_finished, - // itp_stf_state_handler::test::sgx_tests::test_ensure_subsequent_state_loads_have_same_hash, - // itp_stf_state_handler::test::sgx_tests::test_state_handler_file_backend_is_initialized, - // itp_stf_state_handler::test::sgx_tests::test_multiple_state_updates_create_snapshots_up_to_cache_size, - // itp_stf_state_handler::test::sgx_tests::test_state_files_from_handler_can_be_loaded_again, - // itp_stf_state_handler::test::sgx_tests::test_file_io_get_state_hash_works, - // itp_stf_state_handler::test::sgx_tests::test_list_state_ids_ignores_files_not_matching_the_pattern, - // itp_stf_state_handler::test::sgx_tests::test_in_memory_state_initializes_from_shard_directory, - // itp_sgx_crypto::tests::aes_sealing_works, - // itp_sgx_crypto::tests::using_get_aes_repository_twice_initializes_key_only_once, - // itp_sgx_crypto::tests::ed25529_sealing_works, - // itp_sgx_crypto::tests::using_get_ed25519_repository_twice_initializes_key_only_once, - // itp_sgx_crypto::tests::rsa3072_sealing_works, - // itp_sgx_crypto::tests::using_get_rsa3072_repository_twice_initializes_key_only_once, - // test_compose_block, - // test_submit_trusted_call_to_top_pool, - // test_submit_trusted_getter_to_top_pool, - // test_differentiate_getter_and_call_works, - // test_create_block_and_confirmation_works, - // test_create_state_diff, - // test_executing_call_updates_account_nonce, - // test_call_set_update_parentchain_block, - // test_invalid_nonce_call_is_not_executed, - // test_signature_must_match_public_sender_in_call, - // test_non_root_shielding_call_is_not_executed, - // test_shielding_call_with_enclave_self_is_executed, - // test_retrieve_events, - // test_retrieve_event_count, - // test_reset_events, - // handle_state_mock::tests::initialized_shards_list_is_empty, - // handle_state_mock::tests::shard_exists_after_inserting, - // handle_state_mock::tests::from_shard_works, - // handle_state_mock::tests::initialize_creates_default_state, - // handle_state_mock::tests::load_mutate_and_write_works, - // handle_state_mock::tests::ensure_subsequent_state_loads_have_same_hash, - // 
handle_state_mock::tests::ensure_encode_and_encrypt_does_not_affect_state_hash, - // // mra cert tests - // test_verify_mra_cert_should_work, - // test_verify_wrong_cert_is_err, - // test_given_wrong_platform_info_when_verifying_attestation_report_then_return_error, - // // sync tests - // sidechain_rw_lock_works, - // enclave_rw_lock_works, - // // unit tests of stf_executor - // stf_executor_tests::propose_state_update_always_executes_preprocessing_step, - // stf_executor_tests::propose_state_update_executes_no_trusted_calls_given_no_time, - // stf_executor_tests::propose_state_update_executes_only_one_trusted_call_given_not_enough_time, - // stf_executor_tests::propose_state_update_executes_all_calls_given_enough_time, - // enclave_signer_tests::enclave_signer_signatures_are_valid, - // enclave_signer_tests::derive_key_is_deterministic, - // enclave_signer_tests::nonce_is_computed_correctly, - // state_getter_tests::state_getter_works, - // // sidechain integration tests - // sidechain_aura_tests::produce_sidechain_block_and_import_it, - // sidechain_event_tests::ensure_events_get_reset_upon_block_proposal, - // top_pool_tests::process_indirect_call_in_top_pool, - // top_pool_tests::submit_shielding_call_to_top_pool, - // // tls_ra unit tests - // tls_ra::seal_handler::test::seal_shielding_key_works, - // tls_ra::seal_handler::test::seal_shielding_key_fails_for_invalid_key, - // tls_ra::seal_handler::test::unseal_seal_shielding_key_works, - // tls_ra::seal_handler::test::seal_state_key_works, - // tls_ra::seal_handler::test::seal_state_key_fails_for_invalid_key, - // tls_ra::seal_handler::test::unseal_seal_state_key_works, - // tls_ra::seal_handler::test::seal_state_works, - // tls_ra::seal_handler::test::seal_state_fails_for_invalid_state, - // tls_ra::seal_handler::test::unseal_seal_state_works, - // tls_ra::tests::test_tls_ra_server_client_networking, - // tls_ra::tests::test_state_and_key_provisioning, - // // RPC tests - // direct_rpc_tests::get_state_request_works, - // - // // EVM tests - // run_evm_tests, - // - // // light-client-test - // itc_parentchain::light_client::io::sgx_tests::init_parachain_light_client_works, - // itc_parentchain::light_client::io::sgx_tests::sealing_creates_backup, + itp_attestation_handler::attestation_handler::tests::decode_spid_works, + stf_sgx_tests::enclave_account_initialization_works, + stf_sgx_tests::shield_funds_increments_signer_account_nonce, + stf_sgx_tests::test_root_account_exists_after_initialization, + itp_stf_state_handler::test::sgx_tests::test_write_and_load_state_works, + itp_stf_state_handler::test::sgx_tests::test_sgx_state_decode_encode_works, + itp_stf_state_handler::test::sgx_tests::test_encrypt_decrypt_state_type_works, + itp_stf_state_handler::test::sgx_tests::test_write_access_locks_read_until_finished, + itp_stf_state_handler::test::sgx_tests::test_ensure_subsequent_state_loads_have_same_hash, + itp_stf_state_handler::test::sgx_tests::test_state_handler_file_backend_is_initialized, + itp_stf_state_handler::test::sgx_tests::test_multiple_state_updates_create_snapshots_up_to_cache_size, + itp_stf_state_handler::test::sgx_tests::test_state_files_from_handler_can_be_loaded_again, + itp_stf_state_handler::test::sgx_tests::test_file_io_get_state_hash_works, + itp_stf_state_handler::test::sgx_tests::test_list_state_ids_ignores_files_not_matching_the_pattern, + itp_stf_state_handler::test::sgx_tests::test_in_memory_state_initializes_from_shard_directory, + itp_sgx_crypto::tests::aes_sealing_works, + 
itp_sgx_crypto::tests::using_get_aes_repository_twice_initializes_key_only_once, + itp_sgx_crypto::tests::ed25529_sealing_works, + itp_sgx_crypto::tests::using_get_ed25519_repository_twice_initializes_key_only_once, + itp_sgx_crypto::tests::rsa3072_sealing_works, + itp_sgx_crypto::tests::using_get_rsa3072_repository_twice_initializes_key_only_once, + test_compose_block, + test_submit_trusted_call_to_top_pool, + test_submit_trusted_getter_to_top_pool, + test_differentiate_getter_and_call_works, + test_create_block_and_confirmation_works, + test_create_state_diff, + test_executing_call_updates_account_nonce, + test_call_set_update_parentchain_block, + test_invalid_nonce_call_is_not_executed, + test_signature_must_match_public_sender_in_call, + test_non_root_shielding_call_is_not_executed, + test_shielding_call_with_enclave_self_is_executed, + test_retrieve_events, + test_retrieve_event_count, + test_reset_events, + handle_state_mock::tests::initialized_shards_list_is_empty, + handle_state_mock::tests::shard_exists_after_inserting, + handle_state_mock::tests::from_shard_works, + handle_state_mock::tests::initialize_creates_default_state, + handle_state_mock::tests::load_mutate_and_write_works, + handle_state_mock::tests::ensure_subsequent_state_loads_have_same_hash, + handle_state_mock::tests::ensure_encode_and_encrypt_does_not_affect_state_hash, + // mra cert tests + test_verify_mra_cert_should_work, + test_verify_wrong_cert_is_err, + test_given_wrong_platform_info_when_verifying_attestation_report_then_return_error, + // sync tests + sidechain_rw_lock_works, + enclave_rw_lock_works, + // unit tests of stf_executor + stf_executor_tests::propose_state_update_always_executes_preprocessing_step, + stf_executor_tests::propose_state_update_executes_no_trusted_calls_given_no_time, + stf_executor_tests::propose_state_update_executes_only_one_trusted_call_given_not_enough_time, + stf_executor_tests::propose_state_update_executes_all_calls_given_enough_time, + enclave_signer_tests::enclave_signer_signatures_are_valid, + enclave_signer_tests::derive_key_is_deterministic, + enclave_signer_tests::nonce_is_computed_correctly, + state_getter_tests::state_getter_works, + // sidechain integration tests + sidechain_aura_tests::produce_sidechain_block_and_import_it, + sidechain_event_tests::ensure_events_get_reset_upon_block_proposal, + top_pool_tests::process_indirect_call_in_top_pool, + top_pool_tests::submit_shielding_call_to_top_pool, + // tls_ra unit tests + tls_ra::seal_handler::test::seal_shielding_key_works, + tls_ra::seal_handler::test::seal_shielding_key_fails_for_invalid_key, + tls_ra::seal_handler::test::unseal_seal_shielding_key_works, + tls_ra::seal_handler::test::seal_state_key_works, + tls_ra::seal_handler::test::seal_state_key_fails_for_invalid_key, + tls_ra::seal_handler::test::unseal_seal_state_key_works, + tls_ra::seal_handler::test::seal_state_works, + tls_ra::seal_handler::test::seal_state_fails_for_invalid_state, + tls_ra::seal_handler::test::unseal_seal_state_works, + tls_ra::tests::test_tls_ra_server_client_networking, + tls_ra::tests::test_state_and_key_provisioning, + // RPC tests + direct_rpc_tests::get_state_request_works, + + // EVM tests + run_evm_tests, + + // light-client-test + itc_parentchain::light_client::io::sgx_tests::init_parachain_light_client_works, + itc_parentchain::light_client::io::sgx_tests::sealing_creates_backup, + + // IPFS tests crate::test::ipfs_tests::test_ocall_write_ipfs_fallback, + // Teeracle tests - //run_teeracle_tests, + run_teeracle_tests, ) } From 
c2a65153ba3017348d675927fd3c2112d1e9f25f Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Thu, 25 Sep 2025 16:51:38 +0200 Subject: [PATCH 79/91] clippy --- core-primitives/ipfs-cid/src/lib.rs | 2 +- enclave-runtime/src/ocall/ipfs_ocall.rs | 4 +--- enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs | 1 - service/src/ocall_bridge/ipfs_ocall.rs | 4 ++-- 4 files changed, 4 insertions(+), 7 deletions(-) diff --git a/core-primitives/ipfs-cid/src/lib.rs b/core-primitives/ipfs-cid/src/lib.rs index 2b405dff2..2ce98752b 100644 --- a/core-primitives/ipfs-cid/src/lib.rs +++ b/core-primitives/ipfs-cid/src/lib.rs @@ -27,7 +27,7 @@ extern crate sgx_tstd as std; use log::*; #[cfg(all(not(feature = "std"), feature = "sgx"))] -use sgx_tcrypto::{rsgx_sha256_slice, SgxEccHandle}; +use sgx_tcrypto::rsgx_sha256_slice; #[cfg(not(all(not(feature = "std"), feature = "sgx")))] use sha2::{Digest, Sha256}; // re-export module to properly feature gate sgx and regular std environment diff --git a/enclave-runtime/src/ocall/ipfs_ocall.rs b/enclave-runtime/src/ocall/ipfs_ocall.rs index e3d737809..f17a74c82 100644 --- a/enclave-runtime/src/ocall/ipfs_ocall.rs +++ b/enclave-runtime/src/ocall/ipfs_ocall.rs @@ -17,9 +17,7 @@ */ use crate::ocall::{ffi, OcallApi}; use alloc::vec::Vec; -use codec::Encode; use frame_support::ensure; -use itp_ipfs_cid::IpfsCid; use itp_ocall_api::EnclaveIpfsOCallApi; use log::*; use sgx_types::{sgx_status_t, SgxResult}; @@ -28,7 +26,7 @@ impl EnclaveIpfsOCallApi for OcallApi { fn write_ipfs(&self, content: Vec) -> SgxResult<()> { let mut rt: sgx_status_t = sgx_status_t::SGX_ERROR_UNEXPECTED; trace!("calling OCallApi::write_ipfs with {} bytes", content.len()); - let payload = content.clone(); + let payload = content; let res = unsafe { ffi::ocall_write_ipfs( &mut rt as *mut sgx_status_t, diff --git a/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs b/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs index e56869979..2627131e9 100644 --- a/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs +++ b/enclave-runtime/src/test/mocks/propose_to_import_call_mock.rs @@ -19,7 +19,6 @@ use crate::test::mocks::types::TestBlockImporter; use codec::{Decode, Encode}; use itc_parentchain::primitives::ParentchainId; -use itp_ipfs_cid::IpfsCid; use itp_ocall_api::{ EnclaveIpfsOCallApi, EnclaveOnChainOCallApi, EnclaveSidechainOCallApi, Result, }; diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index cd4fb7c64..aa66d2720 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -73,14 +73,14 @@ impl IpfsBridge for IpfsOCall { debug!("ocall result IpfsCid {}", res.hash); }, Err(e) => { - let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()) + let dumpfile = log_failing_blob_to_file(data, self.log_dir.clone()) .unwrap_or_else(|e| e.to_string().into()); warn!(" write to ipfs failed late, wrote to file {}", dumpfile.display()); }, }; } else { warn!("IPFS client not configured, writing to local file"); - let dumpfile = log_failing_blob_to_file(data.into(), self.log_dir.clone()) + let dumpfile = log_failing_blob_to_file(data, self.log_dir.clone()) .unwrap_or_else(|e| e.to_string().into()); }; Ok(()) From f1edf6d005529a8065dbb18140d73794ff60a771 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Thu, 25 Sep 2025 17:08:52 +0200 Subject: [PATCH 80/91] cleanup deps --- Cargo.lock | 26 ------- core-primitives/ipfs-cid/Cargo.toml | 75 --------------------- 
core-primitives/ipfs-cid/src/lib.rs | 101 ++++++++++++---------------- enclave-runtime/Cargo.lock | 20 ------ 4 files changed, 44 insertions(+), 178 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 26676b51d..e47b7b5bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3413,42 +3413,16 @@ dependencies = [ name = "itp-ipfs-cid" version = "0.1.0" dependencies = [ - "arrayvec 0.7.4", - "base64 0.13.0 (git+https://github.com/mesalock-linux/rust-base64-sgx?rev=sgx_1.1.3)", - "base64 0.13.1", - "bit-vec", - "chrono 0.4.11", - "chrono 0.4.26", "cid", "hex", - "httparse 1.4.1", - "itertools 0.10.5", - "itp-ocall-api", - "itp-settings", - "itp-sgx-crypto", - "itp-sgx-io", "log 0.4.28", "multibase", "multihash 0.18.1", - "num-bigint 0.2.5", "parity-scale-codec", - "rustls 0.19.0 (git+https://github.com/mesalock-linux/rustls?rev=sgx_1.1.3)", - "rustls 0.19.1", - "serde_json 1.0.103", - "serde_json 1.0.60 (git+https://github.com/mesalock-linux/serde-json-sgx?tag=sgx_1.1.3)", - "sgx_rand", "sgx_tcrypto", - "sgx_tse", "sgx_tstd", "sgx_types", "sha2 0.10.9", - "sp-core", - "thiserror 1.0.44", - "thiserror 1.0.9", - "webpki 0.21.4 (registry+https://github.com/rust-lang/crates.io-index)", - "webpki 0.21.4 (git+https://github.com/mesalock-linux/webpki?branch=mesalock_sgx)", - "webpki-roots 0.21.0 (git+https://github.com/mesalock-linux/webpki-roots?branch=mesalock_sgx)", - "yasna 0.3.1", ] [[package]] diff --git a/core-primitives/ipfs-cid/Cargo.toml b/core-primitives/ipfs-cid/Cargo.toml index 692bb37b5..9c81c34a2 100644 --- a/core-primitives/ipfs-cid/Cargo.toml +++ b/core-primitives/ipfs-cid/Cargo.toml @@ -4,106 +4,31 @@ version = "0.1.0" authors = ["Integritee AG "] edition = "2021" -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - [dependencies] -# crates-io no_std deps -arrayvec = { version = "0.7.1", default-features = false } -bit-vec = { version = "0.6", default-features = false } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } hex = { version = "0.4.3", default-features = false, features = ["alloc"] } -itertools = { default-features = false, version = "0.10.1" } log = { version = "0.4", default-features = false } cid = { version = "0.10.1", default-features = false, features = ["alloc"] } multibase = { version = "0.9.1", default-features = false } multihash = { version = "0.18.0", default-features = false, features = ["alloc"] } -# std only deps -base64 = { version = "0.13", features = ["alloc"], optional = true } -chrono = { version = "0.4.19", features = ["alloc"], optional = true } -rustls = { version = "0.19", optional = true } -serde_json = { version = "1.0", features = ["preserve_order"], optional = true } -thiserror = { version = "1.0", optional = true } -webpki = { version = "0.21", optional = true } - -# mesalock -base64_sgx = { package = "base64", rev = "sgx_1.1.3", git = "https://github.com/mesalock-linux/rust-base64-sgx", optional = true } -chrono_sgx = { package = "chrono", git = "https://github.com/mesalock-linux/chrono-sgx", optional = true } -num-bigint = { optional = true, git = "https://github.com/mesalock-linux/num-bigint-sgx" } -rustls_sgx = { package = "rustls", rev = "sgx_1.1.3", features = ["dangerous_configuration"], git = "https://github.com/mesalock-linux/rustls", optional = true } -serde_json_sgx = { package = "serde_json", tag = "sgx_1.1.3", features = ["preserve_order"], git = "https://github.com/mesalock-linux/serde-json-sgx", optional = true } -thiserror_sgx 
= { package = "thiserror", git = "https://github.com/mesalock-linux/thiserror-sgx", tag = "sgx_1.1.3", optional = true } -webpki-roots = { git = "https://github.com/mesalock-linux/webpki-roots", branch = "mesalock_sgx" } -webpki_sgx = { package = "webpki", git = "https://github.com/mesalock-linux/webpki", branch = "mesalock_sgx", optional = true } -yasna_sgx = { package = "yasna", optional = true, default-features = false, features = ["bit-vec", "num-bigint", "chrono", "mesalock_sgx"], git = "https://github.com/mesalock-linux/yasna.rs-sgx", rev = "sgx_1.1.3" } - -# sgx -sgx_rand = { branch = "master", git = "https://github.com/apache/teaclave-sgx-sdk.git", optional = true } sgx_tcrypto = { branch = "master", git = "https://github.com/apache/teaclave-sgx-sdk.git", optional = true } -sgx_tse = { branch = "master", git = "https://github.com/apache/teaclave-sgx-sdk.git", optional = true } sgx_tstd = { branch = "master", git = "https://github.com/apache/teaclave-sgx-sdk.git", features = ["untrusted_fs", "net", "backtrace"], optional = true } sgx_types = { branch = "master", git = "https://github.com/apache/teaclave-sgx-sdk.git", features = ["extra_traits"] } - -# local deps -itp-ocall-api = { path = "../ocall-api", default-features = false } -itp-settings = { path = "../settings" } -itp-sgx-crypto = { path = "../sgx/crypto", default-features = false } -itp-sgx-io = { path = "../sgx/io", default-features = false } - -# integritee -httparse = { default-features = false, git = "https://github.com/integritee-network/httparse-sgx", branch = "sgx-experimental" } - -# substrate deps -sp-core = { default-features = false, features = ["full_crypto"], git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v0.9.42" } - sha2 = { version = "0.10.9", default-features = false, optional = true } [features] default = ["std"] std = [ - # crates-io no_std - "arrayvec/std", "codec/std", "hex/std", "log/std", - "itertools/use_std", - # optional std only - "base64", - "chrono", - "rustls", - "serde_json", - "thiserror", - "webpki", "sgx_tcrypto", "sha2", - # local - "itp-ocall-api/std", - "itp-sgx-io/std", - "itp-sgx-crypto/std", - # substrate - "sp-core/std", - # integritee - "httparse/std", ] sgx = [ - # sgx-only - "base64_sgx", - "chrono_sgx", - "rustls_sgx", - "serde_json_sgx", - "thiserror_sgx", - "webpki_sgx", - "yasna_sgx", - "sgx_tse", "sgx_tstd", - "sgx_rand", "sgx_tcrypto", - "num-bigint", - # local - "itp-sgx-io/sgx", - "itp-sgx-crypto/sgx", - # integritee - "httparse/mesalock_sgx", ] test = [] production = [] diff --git a/core-primitives/ipfs-cid/src/lib.rs b/core-primitives/ipfs-cid/src/lib.rs index 2ce98752b..b8f51282e 100644 --- a/core-primitives/ipfs-cid/src/lib.rs +++ b/core-primitives/ipfs-cid/src/lib.rs @@ -26,21 +26,6 @@ extern crate sgx_tstd as std; use log::*; -#[cfg(all(not(feature = "std"), feature = "sgx"))] -use sgx_tcrypto::rsgx_sha256_slice; -#[cfg(not(all(not(feature = "std"), feature = "sgx")))] -use sha2::{Digest, Sha256}; -// re-export module to properly feature gate sgx and regular std environment -#[cfg(all(not(feature = "std"), feature = "sgx"))] -pub mod sgx_reexport_prelude { - pub use base64_sgx as base64; - pub use chrono_sgx as chrono; - pub use rustls_sgx as rustls; - pub use serde_json_sgx as serde_json; - pub use thiserror_sgx as thiserror; - pub use webpki_sgx as webpki; - pub use yasna_sgx as yasna; -} use cid::Cid; use codec::{Decode, Encode}; use multibase::Base; @@ -52,9 +37,42 @@ use std::{ }; const SHA2_256: u64 = 0x12; const RAW: u64 = 0x55; + 
+#[cfg(all(not(feature = "std"), feature = "sgx"))] +// sha2 crashes enclaves. therefore we need to use this SDK-provided hasher for sgx builds +fn hasher(chunk: &[u8]) -> Result<[u8; 32], IpfsError> { + use sgx_tcrypto::rsgx_sha256_slice; + rsgx_sha256_slice(&chunk).map_err(|_| IpfsError::InputTooLarge) +} +#[cfg(not(all(not(feature = "std"), feature = "sgx")))] +fn hasher(chunk: &[u8]) -> Result<[u8; 32], IpfsError> { + use sha2::{Digest, Sha256}; + Ok(Sha256::digest(chunk).into()) +} + #[derive(Clone, PartialEq, Eq)] pub struct IpfsCid(pub Cid); +impl IpfsCid { + pub fn from_chunk(chunk: &[u8]) -> Result { + if chunk.len() > 256 * 1024 { + return Err(IpfsError::InputTooLarge); + }; + let hash = hasher(chunk)?; + info!("hash: {:?}", hash); + let mh = Multihash::wrap(SHA2_256, &hash).map_err(|_| IpfsError::MultiHashFailure)?; + let cid = Cid::new_v1(RAW, mh); + info!("cid: {:?}", cid); + Ok(Self(cid)) + } +} + +#[derive(Debug, PartialEq)] +pub enum IpfsError { + InputTooLarge, + MultiHashFailure, +} + impl From for IpfsCid { fn from(value: Cid) -> Self { IpfsCid(value) @@ -85,6 +103,17 @@ impl Decode for IpfsCid { } } +impl Display for IpfsCid { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let cid_str = if self.0.codec() == RAW { + multibase::encode(Base::Base32Lower, self.0.to_bytes()) + } else { + multibase::encode(Base::Base58Btc, self.0.to_bytes()) + }; + write!(f, "{}", cid_str) + } +} + impl Debug for IpfsCid { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { let cid = &self.0; @@ -105,48 +134,6 @@ impl Debug for IpfsCid { } } -impl Display for IpfsCid { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let cid_str = if self.0.codec() == RAW { - multibase::encode(Base::Base32Lower, self.0.to_bytes()) - } else { - multibase::encode(Base::Base58Btc, self.0.to_bytes()) - }; - write!(f, "{}", cid_str) - } -} -impl IpfsCid { - pub fn from_chunk(chunk: &[u8]) -> Result { - if chunk.len() > 256 * 1024 { - return Err(IpfsError::InputTooLarge); - }; - let hash = hasher(chunk)?; - info!("hash: {:?}", hash); - let mh = Multihash::wrap(SHA2_256, &hash).map_err(|_| IpfsError::InputTooLarge)?; - let cid = Cid::new_v1(RAW, mh); - info!("cid: {:?}", cid); - Ok(Self(cid)) - } -} - -#[cfg(all(not(feature = "std"), feature = "sgx"))] -// sha2 crashes enclaves. 
therefore we need to use this SDK-provided hasher for sgx builds -fn hasher(chunk: &[u8]) -> Result<[u8; 32], IpfsError> { - rsgx_sha256_slice(&chunk).map_err(|_| IpfsError::InputTooLarge) -} -#[cfg(not(all(not(feature = "std"), feature = "sgx")))] -fn hasher(chunk: &[u8]) -> Result<[u8; 32], IpfsError> { - Ok(Sha256::digest(chunk).into()) -} - -#[derive(Debug, PartialEq)] -pub enum IpfsError { - InputTooLarge, - InputCidInvalid, - FinalCidMissing, - Verification, -} - #[cfg(test)] mod tests { use super::*; diff --git a/enclave-runtime/Cargo.lock b/enclave-runtime/Cargo.lock index 60fc7e4c5..d59c5215a 100644 --- a/enclave-runtime/Cargo.lock +++ b/enclave-runtime/Cargo.lock @@ -2114,35 +2114,15 @@ dependencies = [ name = "itp-ipfs-cid" version = "0.1.0" dependencies = [ - "arrayvec 0.7.4", - "base64 0.13.0 (git+https://github.com/mesalock-linux/rust-base64-sgx?rev=sgx_1.1.3)", - "bit-vec", - "chrono 0.4.11", "cid 0.10.1", "hex", - "httparse", - "itertools 0.10.5", - "itp-ocall-api", - "itp-settings", - "itp-sgx-crypto", - "itp-sgx-io", "log 0.4.28", "multibase 0.9.1", "multihash 0.18.1", - "num-bigint", "parity-scale-codec", - "rustls 0.19.0 (git+https://github.com/mesalock-linux/rustls?rev=sgx_1.1.3)", - "serde_json 1.0.60 (git+https://github.com/mesalock-linux/serde-json-sgx?tag=sgx_1.1.3)", - "sgx_rand", "sgx_tcrypto", - "sgx_tse", "sgx_tstd", "sgx_types", - "sp-core", - "thiserror 1.0.9", - "webpki", - "webpki-roots 0.21.0 (git+https://github.com/mesalock-linux/webpki-roots?branch=mesalock_sgx)", - "yasna", ] [[package]] From 28b08d956c73097f13d7a31090e4d5860718ac0f Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Thu, 25 Sep 2025 17:19:04 +0200 Subject: [PATCH 81/91] save our tomorrow --- core-primitives/substrate-sgx/sp-io/src/lib.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/core-primitives/substrate-sgx/sp-io/src/lib.rs b/core-primitives/substrate-sgx/sp-io/src/lib.rs index 9e3914337..6031cd85a 100644 --- a/core-primitives/substrate-sgx/sp-io/src/lib.rs +++ b/core-primitives/substrate-sgx/sp-io/src/lib.rs @@ -694,10 +694,7 @@ pub mod hashing { /// Conduct a 256-bit Sha2 hash. pub fn sha2_256(data: &[u8]) -> [u8; 32] { - debug!("sha2_256 of {}", encode_hex(data)); - let hash = sp_core::hashing::sha2_256(data); - debug!(" returning hash {}", encode_hex(&hash)); - hash + unimplemented!("sha2_256 is unimplemented because it is known to crash SGX enclaves. If you need this, use rsgx_sha256_slice"); } /// Conduct a 128-bit Blake2 hash. 
From 69690e01e23a6bd322d7bb348ae291c42210d726 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Thu, 25 Sep 2025 17:19:47 +0200 Subject: [PATCH 82/91] fmt --- app-libs/stf/src/trusted_call.rs | 3 +-- core-primitives/ipfs-cid/src/lib.rs | 2 +- enclave-runtime/src/test/tests_main.rs | 3 +-- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/app-libs/stf/src/trusted_call.rs b/app-libs/stf/src/trusted_call.rs index e4cef8016..072c5fd5e 100644 --- a/app-libs/stf/src/trusted_call.rs +++ b/app-libs/stf/src/trusted_call.rs @@ -970,8 +970,7 @@ where let unshield_amount = balance.saturating_sub( MinimalChainSpec::one_unit( shielding_target_genesis_hash().unwrap_or_default(), - ) / STF_TX_FEE_UNIT_DIVIDER - * 3, + ) / STF_TX_FEE_UNIT_DIVIDER * 3, ); let parentchain_call = parentchain_vault_proxy_call( unshield_native_from_vault_parentchain_call( diff --git a/core-primitives/ipfs-cid/src/lib.rs b/core-primitives/ipfs-cid/src/lib.rs index b8f51282e..6e8490d6a 100644 --- a/core-primitives/ipfs-cid/src/lib.rs +++ b/core-primitives/ipfs-cid/src/lib.rs @@ -56,7 +56,7 @@ pub struct IpfsCid(pub Cid); impl IpfsCid { pub fn from_chunk(chunk: &[u8]) -> Result { if chunk.len() > 256 * 1024 { - return Err(IpfsError::InputTooLarge); + return Err(IpfsError::InputTooLarge) }; let hash = hasher(chunk)?; info!("hash: {:?}", hash); diff --git a/enclave-runtime/src/test/tests_main.rs b/enclave-runtime/src/test/tests_main.rs index 1c8e0a235..6ae06ab4b 100644 --- a/enclave-runtime/src/test/tests_main.rs +++ b/enclave-runtime/src/test/tests_main.rs @@ -426,8 +426,7 @@ fn test_create_state_diff() { assert_eq!( sender_acc_info.data.free, ita_stf::test_genesis::ENDOWED_ACC_FUNDS - - TX_AMOUNT - - 1_000_000_000_000 / ita_stf::STF_TX_FEE_UNIT_DIVIDER + - TX_AMOUNT - 1_000_000_000_000 / ita_stf::STF_TX_FEE_UNIT_DIVIDER ); } From ee62c164b16f2ce1fdbab96c9dcb163abc3d50a4 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Thu, 25 Sep 2025 17:25:31 +0200 Subject: [PATCH 83/91] taplo --- core-primitives/ipfs-cid/Cargo.toml | 2 +- core-primitives/utils/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core-primitives/ipfs-cid/Cargo.toml b/core-primitives/ipfs-cid/Cargo.toml index 9c81c34a2..1afdf1e10 100644 --- a/core-primitives/ipfs-cid/Cargo.toml +++ b/core-primitives/ipfs-cid/Cargo.toml @@ -5,10 +5,10 @@ authors = ["Integritee AG "] edition = "2021" [dependencies] +cid = { version = "0.10.1", default-features = false, features = ["alloc"] } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } hex = { version = "0.4.3", default-features = false, features = ["alloc"] } log = { version = "0.4", default-features = false } -cid = { version = "0.10.1", default-features = false, features = ["alloc"] } multibase = { version = "0.9.1", default-features = false } multihash = { version = "0.18.0", default-features = false, features = ["alloc"] } sgx_tcrypto = { branch = "master", git = "https://github.com/apache/teaclave-sgx-sdk.git", optional = true } diff --git a/core-primitives/utils/Cargo.toml b/core-primitives/utils/Cargo.toml index 7720f84ad..5abb67be6 100644 --- a/core-primitives/utils/Cargo.toml +++ b/core-primitives/utils/Cargo.toml @@ -9,12 +9,12 @@ edition = "2021" [dependencies] cid = { version = "0.10.1", default-features = false, features = ["alloc"] } -sha2 = { version = "0.10.9", default-features = false } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } hex = 
{ version = "0.4.3", default-features = false, features = ["alloc"] } log = "0.4.28" multibase = { version = "0.9.1", default-features = false } multihash = { version = "0.18.0", default-features = false, features = ["alloc", "multihash-impl", "sha2"] } +sha2 = { version = "0.10.9", default-features = false } #sp-io = { path = "../../core-primitives/substrate-sgx/sp-io" } # sgx_tcrypto = { branch = "master", git = "https://github.com/apache/teaclave-sgx-sdk.git" } From 75613201d857ce385fc82987b3abcebfe948e203 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Thu, 25 Sep 2025 18:05:03 +0200 Subject: [PATCH 84/91] ocall ipfs add raw leaves --- Cargo.lock | 12 ++++++++++++ service/Cargo.toml | 2 +- service/src/ocall_bridge/ipfs_ocall.rs | 6 +++++- 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e47b7b5bc..9c4d4df6f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2815,6 +2815,7 @@ dependencies = [ "tokio", "tokio-util 0.7.8", "tracing", + "typed-builder", "walkdir", ] @@ -8677,6 +8678,17 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "typed-builder" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89851716b67b937e393b3daa8423e67ddfc4bbbf1654bcf05488e95e0828db0c" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "typenum" version = "1.16.0" diff --git a/service/Cargo.toml b/service/Cargo.toml index 93dfc34f4..f1b9da744 100644 --- a/service/Cargo.toml +++ b/service/Cargo.toml @@ -31,7 +31,7 @@ url = "2.5.0" warp = "0.3" # ipfs -ipfs-api-backend-hyper = { version = "0.6.0", features = ["with-hyper-tls"] } +ipfs-api-backend-hyper = { version = "0.6.0", features = ["with-hyper-tls", "with-builder"] } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } primitive-types = { version = "0.12.1", default-features = false, features = ["codec"] } diff --git a/service/src/ocall_bridge/ipfs_ocall.rs b/service/src/ocall_bridge/ipfs_ocall.rs index aa66d2720..6d870b852 100644 --- a/service/src/ocall_bridge/ipfs_ocall.rs +++ b/service/src/ocall_bridge/ipfs_ocall.rs @@ -67,8 +67,12 @@ impl IpfsBridge for IpfsOCall { trace!(" Entering ocall_write_ipfs to write {}B", data.len()); if let Some(ref client) = self.client { let datac = Cursor::new(data.clone()); + let add_options = ipfs_api_backend_hyper::request::Add::builder() + .raw_leaves(true) + .cid_version(1) + .build(); let rt = Runtime::new().unwrap(); - match rt.block_on(client.add(datac)) { + match rt.block_on(client.add_with_options(datac, add_options)) { Ok(res) => { debug!("ocall result IpfsCid {}", res.hash); }, From b3ec49870dca94efc2606d770f0442e52795414e Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Fri, 26 Sep 2025 09:23:04 +0200 Subject: [PATCH 85/91] fix and test decode for OpaqueCall --- core-primitives/types/src/lib.rs | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/core-primitives/types/src/lib.rs b/core-primitives/types/src/lib.rs index b36b3f0d5..15c2ba7b6 100644 --- a/core-primitives/types/src/lib.rs +++ b/core-primitives/types/src/lib.rs @@ -65,7 +65,7 @@ pub use teerex_primitives::{ pub type Enclave = MultiEnclave>; /// Simple blob to hold an encoded call -#[derive(Decode, Debug, PartialEq, Eq, Clone, Default)] +#[derive(Debug, PartialEq, Eq, Clone, Default)] pub struct OpaqueCall(pub Vec); impl OpaqueCall { @@ -81,6 +81,16 @@ impl Encode for OpaqueCall { } } +impl Decode for OpaqueCall { 
+ fn decode(input: &mut I) -> Result { + let mut bytes = Vec::new(); + while let Ok(byte) = input.read_byte() { + bytes.push(byte); + } + Ok(OpaqueCall(bytes)) + } +} + #[derive(Debug, Clone, PartialEq, Encode, Decode)] pub enum DirectRequestStatus { /// Direct request was successfully executed @@ -154,9 +164,12 @@ mod tests { use super::*; #[test] - fn opaque_call_encodes_correctly() { + fn opaque_call_encodes_and decodes_correctly() { let call_tuple = ([1u8, 2u8], 5u8); let call = OpaqueCall::from_tuple(&call_tuple); - assert_eq!(call.encode(), call_tuple.encode()) + let encoded_call = call.encode(); + assert_eq!(encoded_call, call_tuple.encode()); + let decoded_call = OpaqueCall::decode(&mut encoded_call.as_slice()).unwrap(); + assert_eq!(decoded_call, call); } } From cec3b750aa010944928344f211c7cdfd6e9b6fb2 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Fri, 26 Sep 2025 09:23:18 +0200 Subject: [PATCH 86/91] review comments --- core-primitives/ipfs-cid/src/lib.rs | 2 +- core-primitives/utils/Cargo.toml | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/core-primitives/ipfs-cid/src/lib.rs b/core-primitives/ipfs-cid/src/lib.rs index 6e8490d6a..837a13fde 100644 --- a/core-primitives/ipfs-cid/src/lib.rs +++ b/core-primitives/ipfs-cid/src/lib.rs @@ -50,7 +50,7 @@ fn hasher(chunk: &[u8]) -> Result<[u8; 32], IpfsError> { Ok(Sha256::digest(chunk).into()) } -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Copy, PartialEq, Eq)] pub struct IpfsCid(pub Cid); impl IpfsCid { diff --git a/core-primitives/utils/Cargo.toml b/core-primitives/utils/Cargo.toml index 5abb67be6..fa5343592 100644 --- a/core-primitives/utils/Cargo.toml +++ b/core-primitives/utils/Cargo.toml @@ -15,8 +15,6 @@ log = "0.4.28" multibase = { version = "0.9.1", default-features = false } multihash = { version = "0.18.0", default-features = false, features = ["alloc", "multihash-impl", "sha2"] } sha2 = { version = "0.10.9", default-features = false } -#sp-io = { path = "../../core-primitives/substrate-sgx/sp-io" } -# sgx_tcrypto = { branch = "master", git = "https://github.com/apache/teaclave-sgx-sdk.git" } [features] default = ["std"] From ee215c1828d80c504de8e6446ea771bf4eed4a87 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Fri, 26 Sep 2025 09:24:52 +0200 Subject: [PATCH 87/91] fixes --- core-primitives/types/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core-primitives/types/src/lib.rs b/core-primitives/types/src/lib.rs index 15c2ba7b6..9f51b9458 100644 --- a/core-primitives/types/src/lib.rs +++ b/core-primitives/types/src/lib.rs @@ -164,7 +164,7 @@ mod tests { use super::*; #[test] - fn opaque_call_encodes_and decodes_correctly() { + fn opaque_call_encodes_and_decodes_correctly() { let call_tuple = ([1u8, 2u8], 5u8); let call = OpaqueCall::from_tuple(&call_tuple); let encoded_call = call.encode(); From cf141674be6fd1ad7d2d589b3acad51b3b65b4c6 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Fri, 26 Sep 2025 10:21:00 +0200 Subject: [PATCH 88/91] add e2e test for 80kB payload --- cli/demo_send_relayed_note.sh | 23 +- .../rpc-handler/src/direct_top_pool_api.rs | 252 +++++++++--------- 2 files changed, 148 insertions(+), 127 deletions(-) diff --git a/cli/demo_send_relayed_note.sh b/cli/demo_send_relayed_note.sh index 3b3d292e6..ddcf5efaf 100755 --- a/cli/demo_send_relayed_note.sh +++ b/cli/demo_send_relayed_note.sh @@ -71,9 +71,26 @@ echo "Bob received:" echo $RECEIVED_NOTE if echo "$RECEIVED_NOTE" | grep -qF "$NOTE"; then - echo "NOTE found in RECEIVED_NOTE" - 
exit 0 + echo "✔ NOTE found in RECEIVED_NOTE" else - echo "NOTE not found in RECEIVED_NOTE" + echo "✗ NOTE not found in RECEIVED_NOTE" exit 1 fi + +echo "Alice will send an 80kB heavy note to Bob" + +HEAVY_NOTE_PLAINTEXT_LENGTH=81920 +HEAVY_NOTE=$(head -c ${HEAVY_NOTE_PLAINTEXT_LENGTH} /dev/zero | tr '\0' 'A') +${TCLIENT} send-note --ipfs-proxy //Alice //Bob "${HEAVY_NOTE}" +echo "Alice sent heavy note to Bob" +RECEIVED_HEAVY_NOTE=$(${TCLIENT} get-notes //Bob 0 | grep "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" | tail -n 1) +RECEIVED_HEAVY_NOTE_LENGTH=$(echo -n "$RECEIVED_HEAVY_NOTE" | wc -c) +echo "Bob received $RECEIVED_HEAVY_NOTE_LENGTH bytes" + +if [ "$RECEIVED_HEAVY_NOTE_LENGTH" -gt $HEAVY_NOTE_PLAINTEXT_LENGTH ]; then + echo "✔ heavy note found" + exit 0 +else + echo "✗ heavy note not found" + exit 1 +fi \ No newline at end of file diff --git a/sidechain/rpc-handler/src/direct_top_pool_api.rs b/sidechain/rpc-handler/src/direct_top_pool_api.rs index 7e2dba33f..00d20107f 100644 --- a/sidechain/rpc-handler/src/direct_top_pool_api.rs +++ b/sidechain/rpc-handler/src/direct_top_pool_api.rs @@ -40,142 +40,146 @@ use std::{borrow::ToOwned, format, string::String, sync::Arc, vec, vec::Vec}; type Hash = sp_core::H256; pub fn add_top_pool_direct_rpc_methods( - top_pool_author: Arc, - io_handler: &mut IoHandler, - ocall_api: Arc, + top_pool_author: Arc, + io_handler: &mut IoHandler, + ocall_api: Arc, ) where - R: AuthorApi + Send + Sync + 'static, - TCS: PartialEq + Encode + Decode + Debug + Send + Sync + 'static, - G: PartialEq + Encode + Decode + Debug + Send + Sync + 'static, - OCallApi: EnclaveMetricsOCallApi + Send + Sync + 'static, + R: AuthorApi + Send + Sync + 'static, + TCS: PartialEq + Encode + Decode + Debug + Send + Sync + 'static, + G: PartialEq + Encode + Decode + Debug + Send + Sync + 'static, + OCallApi: EnclaveMetricsOCallApi + Send + Sync + 'static, { - let local_author = top_pool_author.clone(); - let local_ocall_api = ocall_api.clone(); - io_handler.add_sync_method("author_submitAndWatchExtrinsic", move |params: Params| { - debug!("worker_api_direct rpc was called: author_submitAndWatchExtrinsic"); - local_ocall_api - .update_metrics(vec![ - EnclaveMetric::RpcRequestsIncrement, - EnclaveMetric::RpcTrustedCallsIncrement, - ]) - .unwrap_or_else(|e| error!("failed to update prometheus metric: {:?}", e)); - let json_value = match author_submit_extrinsic_inner(local_author.clone(), params) { - // Only return hash to support JSON RPC 2.0. - // Other methods will follow this pattern when - // we tackle #1624. 
- Ok(hash_value) => hash_value.to_hex(), - Err(error) => compute_hex_encoded_return_error(error.as_str()), - }; - Ok(json!(json_value)) - }); - - let local_author = top_pool_author.clone(); - let local_ocall_api = ocall_api.clone(); - io_handler.add_sync_method("author_submitExtrinsic", move |params: Params| { - debug!("worker_api_direct rpc was called: author_submitExtrinsic"); - local_ocall_api - .update_metrics(vec![ - EnclaveMetric::RpcRequestsIncrement, - EnclaveMetric::RpcTrustedCallsIncrement, - ]) - .unwrap_or_else(|e| error!("failed to update prometheus metric: {:?}", e)); - let json_value = match author_submit_extrinsic_inner(local_author.clone(), params) { - Ok(hash_value) => RpcReturnValue { - do_watch: false, - value: hash_value.encode(), - status: DirectRequestStatus::TrustedOperationStatus( - TrustedOperationStatus::Submitted, - ), - } - .to_hex(), - Err(error) => compute_hex_encoded_return_error(error.as_str()), - }; - Ok(json!(json_value)) - }); - - let local_author = top_pool_author; - let local_ocall_api = ocall_api; - io_handler.add_sync_method("author_pendingExtrinsics", move |params: Params| { - debug!("worker_api_direct rpc was called: author_pendingExtrinsics"); - local_ocall_api - .update_metrics(vec![EnclaveMetric::RpcRequestsIncrement]) - .unwrap_or_else(|e| error!("failed to update prometheus metric: {:?}", e)); - match params.parse::>() { - Ok(shards) => { - let mut retrieved_operations = vec![]; - for shard_base58 in shards.iter() { - let shard = match decode_shard_from_base58(shard_base58.as_str()) { - Ok(id) => id, - Err(msg) => { - let error_msg: String = - format!("Could not retrieve pending calls due to: {}", msg); - return Ok(json!(compute_hex_encoded_return_error(error_msg.as_str()))) - }, - }; - if let Ok(vec_of_operations) = local_author.pending_tops(shard) { - retrieved_operations.push(vec_of_operations); - } - } - let json_value = RpcReturnValue { - do_watch: false, - value: retrieved_operations.encode(), - status: DirectRequestStatus::Ok, - }; - Ok(json!(json_value.to_hex())) - }, - Err(e) => { - let error_msg: String = format!("Could not retrieve pending calls due to: {}", e); - Ok(json!(compute_hex_encoded_return_error(error_msg.as_str()))) - }, - } - }); + let local_author = top_pool_author.clone(); + let local_ocall_api = ocall_api.clone(); + io_handler.add_sync_method("author_submitAndWatchExtrinsic", move |params: Params| { + debug!("worker_api_direct rpc was called: author_submitAndWatchExtrinsic"); + local_ocall_api + .update_metrics(vec![ + EnclaveMetric::RpcRequestsIncrement, + EnclaveMetric::RpcTrustedCallsIncrement, + ]) + .unwrap_or_else(|e| error!("failed to update prometheus metric: {:?}", e)); + let json_value = match author_submit_extrinsic_inner(local_author.clone(), params) { + // Only return hash to support JSON RPC 2.0. + // Other methods will follow this pattern when + // we tackle #1624. 
+ Ok(hash_value) => hash_value.to_hex(), + Err(error) => compute_hex_encoded_return_error(error.as_str()), + }; + Ok(json!(json_value)) + }); + + let local_author = top_pool_author.clone(); + let local_ocall_api = ocall_api.clone(); + io_handler.add_sync_method("author_submitExtrinsic", move |params: Params| { + debug!("worker_api_direct rpc was called: author_submitExtrinsic"); + local_ocall_api + .update_metrics(vec![ + EnclaveMetric::RpcRequestsIncrement, + EnclaveMetric::RpcTrustedCallsIncrement, + ]) + .unwrap_or_else(|e| error!("failed to update prometheus metric: {:?}", e)); + let json_value = match author_submit_extrinsic_inner(local_author.clone(), params) { + Ok(hash_value) => RpcReturnValue { + do_watch: false, + value: hash_value.encode(), + status: DirectRequestStatus::TrustedOperationStatus( + TrustedOperationStatus::Submitted, + ), + } + .to_hex(), + Err(error) => compute_hex_encoded_return_error(error.as_str()), + }; + Ok(json!(json_value)) + }); + + let local_author = top_pool_author; + let local_ocall_api = ocall_api; + io_handler.add_sync_method("author_pendingExtrinsics", move |params: Params| { + debug!("worker_api_direct rpc was called: author_pendingExtrinsics"); + local_ocall_api + .update_metrics(vec![EnclaveMetric::RpcRequestsIncrement]) + .unwrap_or_else(|e| error!("failed to update prometheus metric: {:?}", e)); + match params.parse::>() { + Ok(shards) => { + let mut retrieved_operations = vec![]; + for shard_base58 in shards.iter() { + let shard = match decode_shard_from_base58(shard_base58.as_str()) { + Ok(id) => id, + Err(msg) => { + let error_msg: String = + format!("Could not retrieve pending calls due to: {}", msg); + return Ok(json!(compute_hex_encoded_return_error(error_msg.as_str()))); + } + }; + if let Ok(vec_of_operations) = local_author.pending_tops(shard) { + retrieved_operations.push(vec_of_operations); + } + } + let json_value = RpcReturnValue { + do_watch: false, + value: retrieved_operations.encode(), + status: DirectRequestStatus::Ok, + }; + Ok(json!(json_value.to_hex())) + } + Err(e) => { + let error_msg: String = format!("Could not retrieve pending calls due to: {}", e); + Ok(json!(compute_hex_encoded_return_error(error_msg.as_str()))) + } + } + }); } // converts the rpc methods vector to a string and adds commas and brackets for readability fn decode_shard_from_base58(shard_base58: &str) -> Result { - let shard_vec = match shard_base58.from_base58() { - Ok(vec) => vec, - Err(_) => return Err("Invalid base58 format of shard id".to_owned()), - }; - let shard = match ShardIdentifier::decode(&mut shard_vec.as_slice()) { - Ok(hash) => hash, - Err(_) => return Err("Shard ID is not of type H256".to_owned()), - }; - Ok(shard) + let shard_vec = match shard_base58.from_base58() { + Ok(vec) => vec, + Err(_) => return Err("Invalid base58 format of shard id".to_owned()), + }; + let shard = match ShardIdentifier::decode(&mut shard_vec.as_slice()) { + Ok(hash) => hash, + Err(_) => return Err("Shard ID is not of type H256".to_owned()), + }; + Ok(shard) } fn compute_hex_encoded_return_error(error_msg: &str) -> String { - RpcReturnValue::from_error_message(error_msg).to_hex() + RpcReturnValue::from_error_message(error_msg).to_hex() } fn author_submit_extrinsic_inner(author: Arc, params: Params) -> Result where - R: AuthorApi + Send + Sync + 'static, - TCS: PartialEq + Encode + Decode + Debug + Send + Sync + 'static, - G: PartialEq + Encode + Decode + Debug + Send + Sync + 'static, + R: AuthorApi + Send + Sync + 'static, + TCS: PartialEq + Encode + Decode + 
Debug + Send + Sync + 'static, + G: PartialEq + Encode + Decode + Debug + Send + Sync + 'static, { - debug!("Author submit and watch trusted operation.."); - let hex_encoded_params = params.parse::>().map_err(|e| format!("{:?}", e))?; - - let request = - Request::from_hex(&hex_encoded_params[0].clone()).map_err(|e| format!("{:?}", e))?; - - let shard: ShardIdentifier = request.shard; - let encrypted_trusted_call: Vec = request.cyphertext; - - if encrypted_trusted_call.len() > MAX_TOP_SIZE_TO_ENTER_POOL { - let error_msg = "Trusted operation too large"; - error!("{}", error_msg); - return Err(error_msg.into()) - } - - let result = async { author.watch_top(encrypted_trusted_call, shard).await }; - let response: Result = executor::block_on(result); - - match &response { - Ok(h) => debug!("Trusted operation submitted successfully ({:?})", h), - Err(e) => warn!("Submitting trusted operation failed: {:?}", e), - } - - response.map_err(|e| format!("{:?}", e)) + debug!("Author submit and watch trusted operation.."); + let hex_encoded_params = params.parse::>().map_err(|e| format!("{:?}", e))?; + + let request = + Request::from_hex(&hex_encoded_params[0].clone()).map_err(|e| format!("{:?}", e))?; + + let shard: ShardIdentifier = request.shard; + let encrypted_trusted_call: Vec = request.cyphertext; + trace!( + "Submitting trusted operation to TOP pool for shard: {:?}, with encrypted call size: {} bytes", + shard, + encrypted_trusted_call.len() + ); + if encrypted_trusted_call.len() > MAX_TOP_SIZE_TO_ENTER_POOL { + let error_msg = "Trusted operation too large"; + error!("{}", error_msg); + return Err(error_msg.into()); + } + + let result = async { author.watch_top(encrypted_trusted_call, shard).await }; + let response: Result = executor::block_on(result); + + match &response { + Ok(h) => debug!("Trusted operation submitted successfully ({:?})", h), + Err(e) => warn!("Submitting trusted operation failed: {:?}", e), + } + + response.map_err(|e| format!("{:?}", e)) } From 60ddaa95643573ecfe78a3f6bc99055eaa932552 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Fri, 26 Sep 2025 11:26:38 +0200 Subject: [PATCH 89/91] fmt --- .../rpc-handler/src/direct_top_pool_api.rs | 238 +++++++++--------- 1 file changed, 119 insertions(+), 119 deletions(-) diff --git a/sidechain/rpc-handler/src/direct_top_pool_api.rs b/sidechain/rpc-handler/src/direct_top_pool_api.rs index 00d20107f..e30eaf7f6 100644 --- a/sidechain/rpc-handler/src/direct_top_pool_api.rs +++ b/sidechain/rpc-handler/src/direct_top_pool_api.rs @@ -40,146 +40,146 @@ use std::{borrow::ToOwned, format, string::String, sync::Arc, vec, vec::Vec}; type Hash = sp_core::H256; pub fn add_top_pool_direct_rpc_methods( - top_pool_author: Arc, - io_handler: &mut IoHandler, - ocall_api: Arc, + top_pool_author: Arc, + io_handler: &mut IoHandler, + ocall_api: Arc, ) where - R: AuthorApi + Send + Sync + 'static, - TCS: PartialEq + Encode + Decode + Debug + Send + Sync + 'static, - G: PartialEq + Encode + Decode + Debug + Send + Sync + 'static, - OCallApi: EnclaveMetricsOCallApi + Send + Sync + 'static, + R: AuthorApi + Send + Sync + 'static, + TCS: PartialEq + Encode + Decode + Debug + Send + Sync + 'static, + G: PartialEq + Encode + Decode + Debug + Send + Sync + 'static, + OCallApi: EnclaveMetricsOCallApi + Send + Sync + 'static, { - let local_author = top_pool_author.clone(); - let local_ocall_api = ocall_api.clone(); - io_handler.add_sync_method("author_submitAndWatchExtrinsic", move |params: Params| { - debug!("worker_api_direct rpc was called: 
author_submitAndWatchExtrinsic"); - local_ocall_api - .update_metrics(vec![ - EnclaveMetric::RpcRequestsIncrement, - EnclaveMetric::RpcTrustedCallsIncrement, - ]) - .unwrap_or_else(|e| error!("failed to update prometheus metric: {:?}", e)); - let json_value = match author_submit_extrinsic_inner(local_author.clone(), params) { - // Only return hash to support JSON RPC 2.0. - // Other methods will follow this pattern when - // we tackle #1624. - Ok(hash_value) => hash_value.to_hex(), - Err(error) => compute_hex_encoded_return_error(error.as_str()), - }; - Ok(json!(json_value)) - }); - - let local_author = top_pool_author.clone(); - let local_ocall_api = ocall_api.clone(); - io_handler.add_sync_method("author_submitExtrinsic", move |params: Params| { - debug!("worker_api_direct rpc was called: author_submitExtrinsic"); - local_ocall_api - .update_metrics(vec![ - EnclaveMetric::RpcRequestsIncrement, - EnclaveMetric::RpcTrustedCallsIncrement, - ]) - .unwrap_or_else(|e| error!("failed to update prometheus metric: {:?}", e)); - let json_value = match author_submit_extrinsic_inner(local_author.clone(), params) { - Ok(hash_value) => RpcReturnValue { - do_watch: false, - value: hash_value.encode(), - status: DirectRequestStatus::TrustedOperationStatus( - TrustedOperationStatus::Submitted, - ), - } - .to_hex(), - Err(error) => compute_hex_encoded_return_error(error.as_str()), - }; - Ok(json!(json_value)) - }); - - let local_author = top_pool_author; - let local_ocall_api = ocall_api; - io_handler.add_sync_method("author_pendingExtrinsics", move |params: Params| { - debug!("worker_api_direct rpc was called: author_pendingExtrinsics"); - local_ocall_api - .update_metrics(vec![EnclaveMetric::RpcRequestsIncrement]) - .unwrap_or_else(|e| error!("failed to update prometheus metric: {:?}", e)); - match params.parse::>() { - Ok(shards) => { - let mut retrieved_operations = vec![]; - for shard_base58 in shards.iter() { - let shard = match decode_shard_from_base58(shard_base58.as_str()) { - Ok(id) => id, - Err(msg) => { - let error_msg: String = - format!("Could not retrieve pending calls due to: {}", msg); - return Ok(json!(compute_hex_encoded_return_error(error_msg.as_str()))); - } - }; - if let Ok(vec_of_operations) = local_author.pending_tops(shard) { - retrieved_operations.push(vec_of_operations); - } - } - let json_value = RpcReturnValue { - do_watch: false, - value: retrieved_operations.encode(), - status: DirectRequestStatus::Ok, - }; - Ok(json!(json_value.to_hex())) - } - Err(e) => { - let error_msg: String = format!("Could not retrieve pending calls due to: {}", e); - Ok(json!(compute_hex_encoded_return_error(error_msg.as_str()))) - } - } - }); + let local_author = top_pool_author.clone(); + let local_ocall_api = ocall_api.clone(); + io_handler.add_sync_method("author_submitAndWatchExtrinsic", move |params: Params| { + debug!("worker_api_direct rpc was called: author_submitAndWatchExtrinsic"); + local_ocall_api + .update_metrics(vec![ + EnclaveMetric::RpcRequestsIncrement, + EnclaveMetric::RpcTrustedCallsIncrement, + ]) + .unwrap_or_else(|e| error!("failed to update prometheus metric: {:?}", e)); + let json_value = match author_submit_extrinsic_inner(local_author.clone(), params) { + // Only return hash to support JSON RPC 2.0. + // Other methods will follow this pattern when + // we tackle #1624. 
+ Ok(hash_value) => hash_value.to_hex(), + Err(error) => compute_hex_encoded_return_error(error.as_str()), + }; + Ok(json!(json_value)) + }); + + let local_author = top_pool_author.clone(); + let local_ocall_api = ocall_api.clone(); + io_handler.add_sync_method("author_submitExtrinsic", move |params: Params| { + debug!("worker_api_direct rpc was called: author_submitExtrinsic"); + local_ocall_api + .update_metrics(vec![ + EnclaveMetric::RpcRequestsIncrement, + EnclaveMetric::RpcTrustedCallsIncrement, + ]) + .unwrap_or_else(|e| error!("failed to update prometheus metric: {:?}", e)); + let json_value = match author_submit_extrinsic_inner(local_author.clone(), params) { + Ok(hash_value) => RpcReturnValue { + do_watch: false, + value: hash_value.encode(), + status: DirectRequestStatus::TrustedOperationStatus( + TrustedOperationStatus::Submitted, + ), + } + .to_hex(), + Err(error) => compute_hex_encoded_return_error(error.as_str()), + }; + Ok(json!(json_value)) + }); + + let local_author = top_pool_author; + let local_ocall_api = ocall_api; + io_handler.add_sync_method("author_pendingExtrinsics", move |params: Params| { + debug!("worker_api_direct rpc was called: author_pendingExtrinsics"); + local_ocall_api + .update_metrics(vec![EnclaveMetric::RpcRequestsIncrement]) + .unwrap_or_else(|e| error!("failed to update prometheus metric: {:?}", e)); + match params.parse::>() { + Ok(shards) => { + let mut retrieved_operations = vec![]; + for shard_base58 in shards.iter() { + let shard = match decode_shard_from_base58(shard_base58.as_str()) { + Ok(id) => id, + Err(msg) => { + let error_msg: String = + format!("Could not retrieve pending calls due to: {}", msg); + return Ok(json!(compute_hex_encoded_return_error(error_msg.as_str()))) + }, + }; + if let Ok(vec_of_operations) = local_author.pending_tops(shard) { + retrieved_operations.push(vec_of_operations); + } + } + let json_value = RpcReturnValue { + do_watch: false, + value: retrieved_operations.encode(), + status: DirectRequestStatus::Ok, + }; + Ok(json!(json_value.to_hex())) + }, + Err(e) => { + let error_msg: String = format!("Could not retrieve pending calls due to: {}", e); + Ok(json!(compute_hex_encoded_return_error(error_msg.as_str()))) + }, + } + }); } // converts the rpc methods vector to a string and adds commas and brackets for readability fn decode_shard_from_base58(shard_base58: &str) -> Result { - let shard_vec = match shard_base58.from_base58() { - Ok(vec) => vec, - Err(_) => return Err("Invalid base58 format of shard id".to_owned()), - }; - let shard = match ShardIdentifier::decode(&mut shard_vec.as_slice()) { - Ok(hash) => hash, - Err(_) => return Err("Shard ID is not of type H256".to_owned()), - }; - Ok(shard) + let shard_vec = match shard_base58.from_base58() { + Ok(vec) => vec, + Err(_) => return Err("Invalid base58 format of shard id".to_owned()), + }; + let shard = match ShardIdentifier::decode(&mut shard_vec.as_slice()) { + Ok(hash) => hash, + Err(_) => return Err("Shard ID is not of type H256".to_owned()), + }; + Ok(shard) } fn compute_hex_encoded_return_error(error_msg: &str) -> String { - RpcReturnValue::from_error_message(error_msg).to_hex() + RpcReturnValue::from_error_message(error_msg).to_hex() } fn author_submit_extrinsic_inner(author: Arc, params: Params) -> Result where - R: AuthorApi + Send + Sync + 'static, - TCS: PartialEq + Encode + Decode + Debug + Send + Sync + 'static, - G: PartialEq + Encode + Decode + Debug + Send + Sync + 'static, + R: AuthorApi + Send + Sync + 'static, + TCS: PartialEq + Encode + Decode + 
Debug + Send + Sync + 'static, + G: PartialEq + Encode + Decode + Debug + Send + Sync + 'static, { - debug!("Author submit and watch trusted operation.."); - let hex_encoded_params = params.parse::>().map_err(|e| format!("{:?}", e))?; + debug!("Author submit and watch trusted operation.."); + let hex_encoded_params = params.parse::>().map_err(|e| format!("{:?}", e))?; - let request = - Request::from_hex(&hex_encoded_params[0].clone()).map_err(|e| format!("{:?}", e))?; + let request = + Request::from_hex(&hex_encoded_params[0].clone()).map_err(|e| format!("{:?}", e))?; - let shard: ShardIdentifier = request.shard; - let encrypted_trusted_call: Vec = request.cyphertext; - trace!( + let shard: ShardIdentifier = request.shard; + let encrypted_trusted_call: Vec = request.cyphertext; + trace!( "Submitting trusted operation to TOP pool for shard: {:?}, with encrypted call size: {} bytes", shard, encrypted_trusted_call.len() ); - if encrypted_trusted_call.len() > MAX_TOP_SIZE_TO_ENTER_POOL { - let error_msg = "Trusted operation too large"; - error!("{}", error_msg); - return Err(error_msg.into()); - } + if encrypted_trusted_call.len() > MAX_TOP_SIZE_TO_ENTER_POOL { + let error_msg = "Trusted operation too large"; + error!("{}", error_msg); + return Err(error_msg.into()) + } - let result = async { author.watch_top(encrypted_trusted_call, shard).await }; - let response: Result = executor::block_on(result); + let result = async { author.watch_top(encrypted_trusted_call, shard).await }; + let response: Result = executor::block_on(result); - match &response { - Ok(h) => debug!("Trusted operation submitted successfully ({:?})", h), - Err(e) => warn!("Submitting trusted operation failed: {:?}", e), - } + match &response { + Ok(h) => debug!("Trusted operation submitted successfully ({:?})", h), + Err(e) => warn!("Submitting trusted operation failed: {:?}", e), + } - response.map_err(|e| format!("{:?}", e)) + response.map_err(|e| format!("{:?}", e)) } From 7f287805c25e25a6b57e9ae6530ea3daa2338eda Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Fri, 26 Sep 2025 11:37:36 +0200 Subject: [PATCH 90/91] log cosmetics --- cli/src/trusted_base_cli/commands/send_note.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cli/src/trusted_base_cli/commands/send_note.rs b/cli/src/trusted_base_cli/commands/send_note.rs index 8cf727199..960f99b3c 100644 --- a/cli/src/trusted_base_cli/commands/send_note.rs +++ b/cli/src/trusted_base_cli/commands/send_note.rs @@ -62,7 +62,13 @@ impl SendNoteCommand { get_basic_signing_info_from_args!(self.sender, self.session_proxy, cli, trusted_args); let to = get_accountid_from_str(&self.recipient); - println!("send trusted call send-note to {}: {}", to, self.message); + let trimmed_msg = if self.message.len() > 100 { + let short = &self.message[..100]; + format!("[{} bytes]: {}...", self.message.len(), short) + } else { + self.message.clone() + }; + println!("send trusted call send-note to {}: {}", to, trimmed_msg); let nonce = get_trusted_account_info(cli, trusted_args, &sender, &signer) .map(|info| info.nonce) From 2c7551e811eabe8956486b86de2f386d0f77b891 Mon Sep 17 00:00:00 2001 From: Alain Brenzikofer Date: Fri, 26 Sep 2025 12:03:53 +0200 Subject: [PATCH 91/91] allow client-provided encryption key for relaying messages --- app-libs/stf/src/helpers.rs | 13 +++++++++++++ app-libs/stf/src/trusted_call.rs | 13 +++++++++---- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/app-libs/stf/src/helpers.rs b/app-libs/stf/src/helpers.rs index 
062c9e150..25c8c9c15 100644 --- a/app-libs/stf/src/helpers.rs +++ b/app-libs/stf/src/helpers.rs @@ -273,3 +273,16 @@ pub fn encrypt_with_fresh_key(mut data: Vec) -> StfResult<(Vec, [u8; 32] [key.as_ref(), iv.as_ref()].concat().try_into().expect("2x16=32. q.e.d."); Ok((data, full_encryption_key)) } + +/// Encrypt data with AES-128-OFB with a provided key and IV. +/// Encrypts data in-place and returns the ciphertext and the full encryption key (key + iv). +/// The full encryption key is 32 bytes: first 16 bytes are the AES key, +/// the last 16 bytes are the IV. +pub fn encrypt_with_key(mut data: Vec, full_key: [u8; 32]) -> StfResult> { + let key: [u8; 16] = full_key[..16].try_into().expect("Slice with 16 bytes"); + let iv: [u8; 16] = full_key[16..].try_into().expect("Slice with 16 bytes"); + let aes = Aes::new(key, iv); + aes.encrypt(&mut data) + .map_err(|e| StfError::Dispatch(format!("AES encrypt error: {:?}", e)))?; + Ok(data) +} diff --git a/app-libs/stf/src/trusted_call.rs b/app-libs/stf/src/trusted_call.rs index 072c5fd5e..1cac6bca7 100644 --- a/app-libs/stf/src/trusted_call.rs +++ b/app-libs/stf/src/trusted_call.rs @@ -29,9 +29,9 @@ use crate::{ guess_the_number, guess_the_number::GuessTheNumberTrustedCall, helpers::{ - enclave_signer_account, encrypt_with_fresh_key, ensure_enclave_signer_account, - ensure_maintainer_account, get_mortality, shard_vault, shielding_target_genesis_hash, - store_note, wrap_bytes, + enclave_signer_account, encrypt_with_fresh_key, encrypt_with_key, + ensure_enclave_signer_account, ensure_maintainer_account, get_mortality, shard_vault, + shielding_target_genesis_hash, store_note, wrap_bytes, }, relayed_note::{ConversationId, NoteRelayType, RelayedNoteRequest, RelayedNoteRetrievalInfo}, Getter, STF_BYTE_FEE_UNIT_DIVIDER, STF_SESSION_PROXY_DEPOSIT_DIVIDER, @@ -653,7 +653,12 @@ where { Ok(RelayedNoteRetrievalInfo::Here { msg: request.msg }) } else if request.relay_type == NoteRelayType::Ipfs { - let (ciphertext, encryption_key) = encrypt_with_fresh_key(request.msg)?; + let (ciphertext, encryption_key) = + if let Some(key) = request.maybe_encryption_key { + (encrypt_with_key(request.msg, key)?, key) + } else { + encrypt_with_fresh_key(request.msg)? + }; let cid = IpfsCid::from_chunk(&ciphertext) .map_err(|e| StfError::Dispatch(format!("IPFS error: {:?}", e)))?; info!("storing relayed note to IPFS with CID {:?}", cid);
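
With a client-provided key, whoever holds those 32 bytes can fetch the ciphertext by its CID and recover the note off-chain. A minimal sketch of that counterpart step, assuming the same layout as encrypt_with_key (first 16 bytes AES-128 key, last 16 bytes IV) and using the RustCrypto aes/ofb crates as a stand-in for the enclave's Aes helper:

// Illustrative sketch: recipient-side decryption of a relayed note, assuming the
// 32-byte full key is split as key || iv exactly as in encrypt_with_key above.
use aes::Aes128;
use ofb::cipher::{KeyIvInit, StreamCipher};

type Aes128Ofb = ofb::Ofb<Aes128>;

fn decrypt_with_key(mut ciphertext: Vec<u8>, full_key: [u8; 32]) -> Vec<u8> {
    let (key, iv) = full_key.split_at(16);
    let mut cipher = Aes128Ofb::new_from_slices(key, iv).expect("16-byte key and IV");
    // OFB is a stream mode, so applying the keystream again undoes the encryption.
    cipher.apply_keystream(&mut ciphertext);
    ciphertext
}

Because OFB encryption and decryption are the same operation, the enclave-side encrypt_with_key round-trips with a function like this as long as both sides agree on the key/IV split.
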