From c42793f9172e72efde7de60aad34cc7dbba251b9 Mon Sep 17 00:00:00 2001 From: Jannik Straube Date: Thu, 6 Feb 2025 01:51:03 +0100 Subject: [PATCH 01/85] ability to automatically set shm size based on sys memory --- miner/src/cli/command.rs | 6 ++++++ miner/src/docker/docker_manager.rs | 2 ++ miner/src/docker/service.rs | 16 ++++++++++++++-- 3 files changed, 22 insertions(+), 2 deletions(-) diff --git a/miner/src/cli/command.rs b/miner/src/cli/command.rs index b21cc791..f06f5124 100644 --- a/miner/src/cli/command.rs +++ b/miner/src/cli/command.rs @@ -225,9 +225,15 @@ pub async fn execute_command( None => None, }; + let system_memory = node_config + .compute_specs + .as_ref() + .map(|specs| specs.ram_mb.unwrap_or(0)); + let docker_service = Arc::new(DockerService::new( cancellation_token.clone(), has_gpu, + system_memory, task_bridge.socket_path.clone(), docker_storage_path, )); diff --git a/miner/src/docker/docker_manager.rs b/miner/src/docker/docker_manager.rs index 01a472b8..998378d8 100644 --- a/miner/src/docker/docker_manager.rs +++ b/miner/src/docker/docker_manager.rs @@ -105,6 +105,7 @@ impl DockerManager { gpu_enabled: bool, // Simple Vec of (host_path, container_path, read_only) volumes: Option>, + shm_size: Option, ) -> Result { println!("Starting to pull image: {}", image); @@ -178,6 +179,7 @@ impl DockerManager { options: Some(HashMap::new()), }]), binds: volume_binds, + shm_size: shm_size.map(|s| s as i64), ..Default::default() }) } else { diff --git a/miner/src/docker/service.rs b/miner/src/docker/service.rs index 874d1f6c..d6595bff 100644 --- a/miner/src/docker/service.rs +++ b/miner/src/docker/service.rs @@ -18,6 +18,7 @@ pub struct DockerService { cancellation_token: CancellationToken, pub state: Arc, has_gpu: bool, + system_memory_mb: Option, task_bridge_socket_path: String, } @@ -27,6 +28,7 @@ impl DockerService { pub fn new( cancellation_token: CancellationToken, has_gpu: bool, + system_memory_mb: Option, task_bridge_socket_path: String, 
storage_path: Option, ) -> Self { @@ -36,6 +38,7 @@ impl DockerService { cancellation_token, state: Arc::new(DockerState::new()), has_gpu, + system_memory_mb, task_bridge_socket_path, } } @@ -142,6 +145,7 @@ impl DockerService { let manager_clone = manager_clone.clone(); let state_clone = state.clone(); let has_gpu = self.has_gpu; + let system_memory_mb = self.system_memory_mb.clone(); let task_bridge_socket_path = self.task_bridge_socket_path.clone(); let handle = tokio::spawn(async move { let payload = task_clone.unwrap(); @@ -170,8 +174,14 @@ impl DockerService { false, ) ]; - - match manager_clone.start_container(&payload.image, &container_task_id, Some(env_vars), Some(cmd), has_gpu, Some(volumes)).await { + let shm_size = match system_memory_mb { + Some(mem_mb) => (mem_mb as u64) * 1024 * 1024 / 2, // Convert MB to bytes and divide by 2 + None => { + Console::warning("System memory not available, using default shm size"); + 67108864 // Default to 64MB in bytes + } + }; + match manager_clone.start_container(&payload.image, &container_task_id, Some(env_vars), Some(cmd), has_gpu, Some(volumes), Some(shm_size)).await { Ok(container_id) => { Console::info("DockerService", &format!("Container started with id: {}", container_id)); }, @@ -276,6 +286,7 @@ mod tests { let docker_service = DockerService::new( cancellation_token.clone(), false, + Some(1024), "/tmp/com.prime.miner/metrics.sock".to_string(), None, ); @@ -319,6 +330,7 @@ mod tests { let docker_service = DockerService::new( cancellation_token.clone(), false, + Some(1024), "/tmp/com.prime.miner/metrics.sock".to_string(), None, ); From f1f753801cf735c95042e05abe9583f5cac9d364 Mon Sep 17 00:00:00 2001 From: Jannik Straube Date: Thu, 6 Feb 2025 01:53:32 +0100 Subject: [PATCH 02/85] clippy --- miner/src/docker/docker_manager.rs | 1 + miner/src/docker/service.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/miner/src/docker/docker_manager.rs b/miner/src/docker/docker_manager.rs index 
998378d8..a4963e17 100644 --- a/miner/src/docker/docker_manager.rs +++ b/miner/src/docker/docker_manager.rs @@ -95,6 +95,7 @@ impl DockerManager { Ok(()) } + #[allow(clippy::too_many_arguments)] /// Start a new container with the given image and configuration pub async fn start_container( &self, diff --git a/miner/src/docker/service.rs b/miner/src/docker/service.rs index d6595bff..d1797f21 100644 --- a/miner/src/docker/service.rs +++ b/miner/src/docker/service.rs @@ -145,7 +145,7 @@ impl DockerService { let manager_clone = manager_clone.clone(); let state_clone = state.clone(); let has_gpu = self.has_gpu; - let system_memory_mb = self.system_memory_mb.clone(); + let system_memory_mb = self.system_memory_mb; let task_bridge_socket_path = self.task_bridge_socket_path.clone(); let handle = tokio::spawn(async move { let payload = task_clone.unwrap(); From ca57e744d639012cb0fdf601c2988b00d643bf55 Mon Sep 17 00:00:00 2001 From: Jannik Straube Date: Thu, 6 Feb 2025 11:30:06 +0100 Subject: [PATCH 03/85] bump version --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f9bdaf10..7e647f59 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2211,7 +2211,7 @@ dependencies = [ [[package]] name = "discovery" -version = "0.1.2" +version = "0.1.3" dependencies = [ "actix-web", "alloy", @@ -3596,7 +3596,7 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "miner" -version = "0.1.2" +version = "0.1.3" dependencies = [ "actix-web", "alloy", @@ -4020,7 +4020,7 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "orchestrator" -version = "0.1.2" +version = "0.1.3" dependencies = [ "actix-web", "alloy", @@ -5868,7 +5868,7 @@ dependencies = [ [[package]] name = "validator" -version = "0.1.2" +version = "0.1.3" dependencies = [ "actix-web", "alloy", diff --git a/Cargo.toml b/Cargo.toml index a270743e..41eb450f 100644 
--- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ members = ["discovery", "miner", "validator", "shared", "orchestrator", "dev-uti resolver = "2" [workspace.package] -version = "0.1.2" +version = "0.1.3" edition = "2021" [workspace.features] From ad9042753b180f9e0cf2445ef1ce2c34be377341 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Thu, 6 Feb 2025 16:43:30 +0100 Subject: [PATCH 04/85] Feature/disable ejection (#111) * feature: ability to disable node ejection --- orchestrator/Dockerfile | 2 ++ orchestrator/src/main.rs | 5 ++++ orchestrator/src/node/status_update.rs | 39 ++++++++++++++++++-------- 3 files changed, 35 insertions(+), 11 deletions(-) diff --git a/orchestrator/Dockerfile b/orchestrator/Dockerfile index d8bf6283..528ef6eb 100644 --- a/orchestrator/Dockerfile +++ b/orchestrator/Dockerfile @@ -16,6 +16,7 @@ ENV DISCOVERY_REFRESH_INTERVAL="10" ENV REDIS_STORE_URL="redis://localhost:6380" ENV DISCOVERY_URL="http://localhost:8089" ENV ADMIN_API_KEY="admin" +ENV DISABLE_EJECTION="false" RUN echo '#!/bin/sh\n\ exec /usr/local/bin/orchestrator \ @@ -30,6 +31,7 @@ $([ ! 
-z "$HOST" ] && echo "--host $HOST") \ --redis-store-url "$REDIS_STORE_URL" \ --discovery-url "$DISCOVERY_URL" \ --admin-api-key "$ADMIN_API_KEY" \ +$([ "$DISABLE_EJECTION" = "true" ] && echo "--disable-ejection") \ "$@"' > /entrypoint.sh && \ chmod +x /entrypoint.sh diff --git a/orchestrator/src/main.rs b/orchestrator/src/main.rs index 1702688d..67393f9e 100644 --- a/orchestrator/src/main.rs +++ b/orchestrator/src/main.rs @@ -64,6 +64,10 @@ struct Args { /// Admin api key #[arg(short = 'a', long, default_value = "admin")] admin_api_key: String, + + /// Disable instance ejection from chain + #[arg(long)] + disable_ejection: bool, } #[tokio::main] @@ -138,6 +142,7 @@ async fn main() -> Result<()> { None, contracts.clone(), compute_pool_id, + args.disable_ejection, ); status_updater.run().await }); diff --git a/orchestrator/src/node/status_update.rs b/orchestrator/src/node/status_update.rs index 386e9682..873d2600 100644 --- a/orchestrator/src/node/status_update.rs +++ b/orchestrator/src/node/status_update.rs @@ -14,6 +14,7 @@ pub struct NodeStatusUpdater { missing_heartbeat_threshold: u32, contracts: Arc, pool_id: u32, + disable_ejection: bool, } impl NodeStatusUpdater { @@ -23,6 +24,7 @@ impl NodeStatusUpdater { missing_heartbeat_threshold: Option, contracts: Arc, pool_id: u32, + disable_ejection: bool, ) -> Self { Self { store_context, @@ -30,6 +32,7 @@ impl NodeStatusUpdater { missing_heartbeat_threshold: missing_heartbeat_threshold.unwrap_or(3), contracts, pool_id, + disable_ejection, } } @@ -65,18 +68,25 @@ impl NodeStatusUpdater { } }; if node_in_pool { - let tx = self - .contracts - .compute_pool - .eject_node(self.pool_id, node.address) - .await; - match tx { - Result::Ok(_) => { - println!("Ejected node: {:?}", node.address); - } - Result::Err(e) => { - println!("Error ejecting node: {}", e); + if !self.disable_ejection { + let tx = self + .contracts + .compute_pool + .eject_node(self.pool_id, node.address) + .await; + match tx { + Result::Ok(_) => { + 
println!("Ejected node: {:?}", node.address); + } + Result::Err(e) => { + println!("Error ejecting node: {}", e); + } } + } else { + println!( + "Ejection is disabled, skipping ejection of node: {:?}", + node.address + ); } } } @@ -181,6 +191,7 @@ mod tests { None, Arc::new(contracts), 0, + false, ); let node = OrchestratorNode { address: Address::from_str("0x0000000000000000000000000000000000000000").unwrap(), @@ -248,6 +259,7 @@ mod tests { None, Arc::new(contracts), 0, + false, ); tokio::spawn(async move { updater @@ -288,6 +300,7 @@ mod tests { None, Arc::new(contracts), 0, + false, ); tokio::spawn(async move { updater @@ -338,6 +351,7 @@ mod tests { None, Arc::new(contracts), 0, + false, ); tokio::spawn(async move { updater @@ -391,6 +405,7 @@ mod tests { None, Arc::new(contracts), 0, + false, ); tokio::spawn(async move { updater @@ -453,6 +468,7 @@ mod tests { None, Arc::new(contracts), 0, + false, ); tokio::spawn(async move { updater @@ -515,6 +531,7 @@ mod tests { None, Arc::new(contracts), 0, + false, ); tokio::spawn(async move { updater From bf52611b893018babbaf964bacd69f3691eef7f2 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Thu, 6 Feb 2025 16:43:52 +0100 Subject: [PATCH 05/85] support ability to restore metrics via orchestrator (#112) --- orchestrator/src/api/routes/metrics.rs | 53 ++++++++++++- .../src/store/domains/metrics_store.rs | 79 +++++++++++++++++-- 2 files changed, 124 insertions(+), 8 deletions(-) diff --git a/orchestrator/src/api/routes/metrics.rs b/orchestrator/src/api/routes/metrics.rs index ce692c99..384b5ccc 100644 --- a/orchestrator/src/api/routes/metrics.rs +++ b/orchestrator/src/api/routes/metrics.rs @@ -1,10 +1,22 @@ use crate::api::server::AppState; use actix_web::{ - web::{self, get, Data}, + web::{self, delete, get, post, Data, Path}, HttpResponse, Scope, }; +use serde::Deserialize; use serde_json::json; +#[derive(Deserialize)] +struct ManualMetricEntry { + label: String, + value: f64, +} + +#[derive(Deserialize)] +struct 
DeleteMetricRequest { + label: String, + address: String, +} async fn get_metrics(app_state: Data) -> HttpResponse { let metrics = app_state .store_context @@ -13,6 +25,43 @@ async fn get_metrics(app_state: Data) -> HttpResponse { HttpResponse::Ok().json(json!({"success": true, "metrics": metrics})) } +async fn get_all_metrics(app_state: Data) -> HttpResponse { + let metrics = app_state.store_context.metrics_store.get_all_metrics(); + HttpResponse::Ok().json(json!({"success": true, "metrics": metrics})) +} + +// for potential backup restore purposes +async fn create_metric( + app_state: Data, + metric: web::Json, +) -> HttpResponse { + app_state + .store_context + .metrics_store + .store_manual_metrics(metric.label.clone(), metric.value); + HttpResponse::Ok().json(json!({"success": true})) +} + +async fn delete_metric( + app_state: Data, + task_id: Path, + body: web::Json, +) -> HttpResponse { + let success = + app_state + .store_context + .metrics_store + .delete_metric(&task_id, &body.label, &body.address); + + HttpResponse::Ok().json(json!({ + "success": success + })) +} + pub fn metrics_routes() -> Scope { - web::scope("/metrics").route("", get().to(get_metrics)) + web::scope("/metrics") + .route("", get().to(get_metrics)) + .route("/all", get().to(get_all_metrics)) + .route("", post().to(create_metric)) + .route("/{task_id}", delete().to(delete_metric)) } diff --git a/orchestrator/src/store/domains/metrics_store.rs b/orchestrator/src/store/domains/metrics_store.rs index ad55d4b0..53f0f658 100644 --- a/orchestrator/src/store/domains/metrics_store.rs +++ b/orchestrator/src/store/domains/metrics_store.rs @@ -32,23 +32,63 @@ impl MetricsStore { } for entry in metrics { + let task_id = if entry.key.task_id.is_empty() { + "manual".to_string() + } else { + entry.key.task_id + }; + let cleaned_label = self.clean_label(&entry.key.label); let redis_key = format!( "{}:{}:{}", - ORCHESTRATOR_METRICS_STORE, entry.key.task_id, cleaned_label + ORCHESTRATOR_METRICS_STORE, 
task_id, cleaned_label ); let mut con = self.redis.client.get_connection().unwrap(); - if let Err(err) = con.hset( - redis_key, - sender_address.to_string(), - entry.value.to_string(), - ) as RedisResult<()> + + let address = if task_id == "manual" { + Address::ZERO.to_string() + } else { + sender_address.to_string() + }; + + if let Err(err) = + con.hset(redis_key, address, entry.value.to_string()) as RedisResult<()> { error!("Could not update metric value in redis: {}", err); } } } + pub fn store_manual_metrics(&self, label: String, value: f64) { + self.store_metrics( + Some(vec![MetricEntry { + key: shared::models::metric::MetricKey { + task_id: "".to_string(), + label, + }, + value, + }]), + Address::ZERO, + ); + } + + pub fn delete_metric(&self, task_id: &str, label: &str, address: &str) -> bool { + let mut con = self.redis.client.get_connection().unwrap(); + let cleaned_label = self.clean_label(label); + let redis_key = format!( + "{}:{}:{}", + ORCHESTRATOR_METRICS_STORE, task_id, cleaned_label + ); + + match con.hdel::<_, _, i64>(redis_key, address.to_string()) { + Ok(deleted) => deleted == 1, + Err(err) => { + error!("Could not delete metric from redis: {}", err); + false + } + } + } + pub fn get_aggregate_metrics_for_task(&self, task_id: &str) -> HashMap { let mut con = self.redis.client.get_connection().unwrap(); let all_keys: Vec = con @@ -125,6 +165,33 @@ impl MetricsStore { result } + + pub fn get_all_metrics(&self) -> HashMap>> { + let mut con = self.redis.client.get_connection().unwrap(); + let all_keys: Vec = con + .keys(format!("{}:*:*", ORCHESTRATOR_METRICS_STORE)) + .unwrap(); + let mut result: HashMap>> = HashMap::new(); + + for key in all_keys { + if let [_, _, task_id, metric_name] = key.split(":").collect::>()[..] 
{ + let values: HashMap = con.hgetall(&key).unwrap(); + + for (node_addr, value) in values { + if let Ok(val) = value.parse::() { + result + .entry(task_id.to_string()) + .or_default() + .entry(metric_name.to_string()) + .or_default() + .insert(node_addr, val); + } + } + } + } + + result + } } #[cfg(test)] From 8e8808a3cc38b4a5f9216868078cbe436836bd92 Mon Sep 17 00:00:00 2001 From: Jannik Straube Date: Thu, 6 Feb 2025 16:45:58 +0100 Subject: [PATCH 06/85] bump version --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7e647f59..e1f721c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2211,7 +2211,7 @@ dependencies = [ [[package]] name = "discovery" -version = "0.1.3" +version = "0.1.4" dependencies = [ "actix-web", "alloy", @@ -3596,7 +3596,7 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "miner" -version = "0.1.3" +version = "0.1.4" dependencies = [ "actix-web", "alloy", @@ -4020,7 +4020,7 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "orchestrator" -version = "0.1.3" +version = "0.1.4" dependencies = [ "actix-web", "alloy", @@ -5868,7 +5868,7 @@ dependencies = [ [[package]] name = "validator" -version = "0.1.3" +version = "0.1.4" dependencies = [ "actix-web", "alloy", diff --git a/Cargo.toml b/Cargo.toml index 41eb450f..120ba93d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ members = ["discovery", "miner", "validator", "shared", "orchestrator", "dev-uti resolver = "2" [workspace.package] -version = "0.1.3" +version = "0.1.4" edition = "2021" [workspace.features] From b698e3227c57b112945b502c9e9f6dd6707b260b Mon Sep 17 00:00:00 2001 From: JannikSt Date: Thu, 6 Feb 2025 18:22:57 +0100 Subject: [PATCH 07/85] improve resiliency of validator (#114) * improve the resiliency of validator --- Cargo.lock | 9 ++-- Cargo.toml | 2 +- validator/Cargo.toml | 1 + 
validator/src/main.rs | 119 +++++++++++++++++++++++++++--------------- 4 files changed, 83 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e1f721c4..e06c835b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2211,7 +2211,7 @@ dependencies = [ [[package]] name = "discovery" -version = "0.1.4" +version = "0.1.5" dependencies = [ "actix-web", "alloy", @@ -3596,7 +3596,7 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "miner" -version = "0.1.4" +version = "0.1.5" dependencies = [ "actix-web", "alloy", @@ -4020,7 +4020,7 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "orchestrator" -version = "0.1.4" +version = "0.1.5" dependencies = [ "actix-web", "alloy", @@ -5868,10 +5868,11 @@ dependencies = [ [[package]] name = "validator" -version = "0.1.4" +version = "0.1.5" dependencies = [ "actix-web", "alloy", + "anyhow", "clap 4.5.27", "env_logger 0.11.6", "log", diff --git a/Cargo.toml b/Cargo.toml index 120ba93d..d84cb45e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ members = ["discovery", "miner", "validator", "shared", "orchestrator", "dev-uti resolver = "2" [workspace.package] -version = "0.1.4" +version = "0.1.5" edition = "2021" [workspace.features] diff --git a/validator/Cargo.toml b/validator/Cargo.toml index b7f697d1..87500640 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -6,6 +6,7 @@ edition.workspace = true [dependencies] actix-web = "4.9.0" alloy = { version = "0.9.2", features = ["full"] } +anyhow = "1.0.95" clap = { version = "4.5.26", features = ["derive"] } env_logger = "0.11.6" log = "0.4.25" diff --git a/validator/src/main.rs b/validator/src/main.rs index 04dd7747..80f78fa2 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1,6 +1,7 @@ use actix_web::{web, App, HttpResponse, HttpServer, Responder}; use alloy::primitives::{hex, Address}; use alloy::signers::Signer; +use anyhow::{Context, 
Result}; use clap::Parser; use log::LevelFilter; use log::{error, info}; @@ -64,57 +65,74 @@ fn main() { .with_compute_pool() .build() .unwrap(); - loop { - async fn _generate_signature( - wallet: &Wallet, - message: &str, - ) -> Result> { + async fn _generate_signature(wallet: &Wallet, message: &str) -> Result { let signature = wallet .signer .sign_message(message.as_bytes()) - .await? + .await + .context("Failed to sign message")? .as_bytes(); Ok(format!("0x{}", hex::encode(signature))) } - let nodes: Result, Box> = - runtime.block_on(async { - let discovery_route = "/api/validator"; - let address = validator_wallet - .wallet - .default_signer() - .address() - .to_string(); - let signature = _generate_signature(&validator_wallet, discovery_route) - .await - .unwrap(); - - let mut headers = reqwest::header::HeaderMap::new(); - headers.insert("x-address", address.parse().unwrap()); - headers.insert("x-signature", signature.parse().unwrap()); - - info!("Fetching nodes from: {}{}", discovery_url, discovery_route); - let response = reqwest::Client::new() - .get(format!("{}{}", discovery_url, discovery_route)) - .headers(headers) - .send() - .await?; - - let response_text = response.text().await?; - let parsed_response: ApiResponse> = - serde_json::from_str(&response_text)?; - - if !parsed_response.success { - error!("Failed to fetch nodes: {:?}", parsed_response); - return Ok(vec![]); - } + let nodes = match runtime.block_on(async { + let discovery_route = "/api/validator"; + let address = validator_wallet + .wallet + .default_signer() + .address() + .to_string(); + + let signature = _generate_signature(&validator_wallet, discovery_route) + .await + .context("Failed to generate signature")?; + + let mut headers = reqwest::header::HeaderMap::new(); + headers.insert( + "x-address", + reqwest::header::HeaderValue::from_str(&address) + .context("Failed to create address header")?, + ); + headers.insert( + "x-signature", + reqwest::header::HeaderValue::from_str(&signature) 
+ .context("Failed to create signature header")?, + ); + + info!("Fetching nodes from: {}{}", discovery_url, discovery_route); + let response = reqwest::Client::new() + .get(format!("{}{}", discovery_url, discovery_route)) + .headers(headers) + .send() + .await + .context("Failed to fetch nodes")?; + + let response_text = response + .text() + .await + .context("Failed to get response text")?; + + let parsed_response: ApiResponse> = + serde_json::from_str(&response_text).context("Failed to parse response")?; + + if !parsed_response.success { + error!("Failed to fetch nodes: {:?}", parsed_response); + return Ok::, anyhow::Error>(vec![]); + } + + Ok(parsed_response.data) + }) { + Ok(n) => n, + Err(e) => { + error!("Error in node fetching loop: {:#}", e); + std::thread::sleep(std::time::Duration::from_secs(10)); + continue; + } + }; - Ok(parsed_response.data) - }); let non_validated_nodes: Vec = nodes .iter() - .flatten() .filter(|node| !node.is_validated) .cloned() .collect(); @@ -122,13 +140,28 @@ fn main() { info!("Non validated nodes: {:?}", non_validated_nodes); for node in non_validated_nodes { - let node_address = node.id.trim_start_matches("0x").parse::
().unwrap(); + let node_address = match node.id.trim_start_matches("0x").parse::
() { + Ok(addr) => addr, + Err(e) => { + error!("Failed to parse node address {}: {}", node.id, e); + continue; + } + }; - let provider_address = node + let provider_address = match node .provider_address .trim_start_matches("0x") .parse::
() - .unwrap(); + { + Ok(addr) => addr, + Err(e) => { + error!( + "Failed to parse provider address {}: {}", + node.provider_address, e + ); + continue; + } + }; if let Err(e) = runtime.block_on( contracts From bbabc26ddebd4a2cf637519a98c10ecf3cac3f0c Mon Sep 17 00:00:00 2001 From: Jannik Straube Date: Thu, 6 Feb 2025 18:28:34 +0100 Subject: [PATCH 08/85] resolve conflicts --- Cargo.lock | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 29b07adf..e06c835b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2211,11 +2211,7 @@ dependencies = [ [[package]] name = "discovery" -<<<<<<< HEAD version = "0.1.5" -======= -version = "0.1.4" ->>>>>>> main dependencies = [ "actix-web", "alloy", @@ -3600,11 +3596,7 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "miner" -<<<<<<< HEAD version = "0.1.5" -======= -version = "0.1.4" ->>>>>>> main dependencies = [ "actix-web", "alloy", @@ -4028,11 +4020,7 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "orchestrator" -<<<<<<< HEAD version = "0.1.5" -======= -version = "0.1.4" ->>>>>>> main dependencies = [ "actix-web", "alloy", @@ -5880,11 +5868,7 @@ dependencies = [ [[package]] name = "validator" -<<<<<<< HEAD version = "0.1.5" -======= -version = "0.1.4" ->>>>>>> main dependencies = [ "actix-web", "alloy", From b7adeebe31fba49174295e9092e342a575e80848 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Wed, 12 Feb 2025 02:07:14 +0200 Subject: [PATCH 09/85] add improvement to metrics reporting (#119) * add improvement to metrics reporting --- Cargo.lock | 8 +- Cargo.toml | 2 +- .../src/store/domains/metrics_store.rs | 125 +++++++++++++++++- 3 files changed, 125 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e06c835b..3e445a15 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2211,7 +2211,7 @@ dependencies = [ [[package]] name = "discovery" -version = "0.1.5" +version = 
"0.1.6" dependencies = [ "actix-web", "alloy", @@ -3596,7 +3596,7 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "miner" -version = "0.1.5" +version = "0.1.6" dependencies = [ "actix-web", "alloy", @@ -4020,7 +4020,7 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "orchestrator" -version = "0.1.5" +version = "0.1.6" dependencies = [ "actix-web", "alloy", @@ -5868,7 +5868,7 @@ dependencies = [ [[package]] name = "validator" -version = "0.1.5" +version = "0.1.6" dependencies = [ "actix-web", "alloy", diff --git a/Cargo.toml b/Cargo.toml index d84cb45e..96e55269 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ members = ["discovery", "miner", "validator", "shared", "orchestrator", "dev-uti resolver = "2" [workspace.package] -version = "0.1.5" +version = "0.1.6" edition = "2021" [workspace.features] diff --git a/orchestrator/src/store/domains/metrics_store.rs b/orchestrator/src/store/domains/metrics_store.rs index 53f0f658..7bede9f2 100644 --- a/orchestrator/src/store/domains/metrics_store.rs +++ b/orchestrator/src/store/domains/metrics_store.rs @@ -37,7 +37,6 @@ impl MetricsStore { } else { entry.key.task_id }; - let cleaned_label = self.clean_label(&entry.key.label); let redis_key = format!( "{}:{}:{}", @@ -51,10 +50,28 @@ impl MetricsStore { sender_address.to_string() }; - if let Err(err) = - con.hset(redis_key, address, entry.value.to_string()) as RedisResult<()> - { - error!("Could not update metric value in redis: {}", err); + let existing_value: Option = con.hget(&redis_key, &address).unwrap_or(None); + let should_update = match existing_value { + Some(val) => { + // Only check for max value on dashboard-progress metrics to maintain dashboard integrity + if entry.key.label.contains("dashboard-progress") { + match (val.parse::(), entry.value) { + (Ok(old_val), new_val) => new_val > old_val, + _ => true, + } + } else { + true + } + } + None => true, + 
}; + + if should_update { + if let Err(err) = + con.hset(redis_key, address, entry.value.to_string()) as RedisResult<()> + { + error!("Could not update metric value in redis: {}", err); + } } } } @@ -289,4 +306,102 @@ mod tests { Some(&1.0) ); } + #[tokio::test] + async fn test_store_metrics_value_overwrite() { + let app_state = create_test_app_state().await; + let metrics_store = app_state.store_context.metrics_store.clone(); + let node_addr = Address::ZERO; + + // Test dashboard-progress metric maintains max value + let metric_key = MetricKey { + task_id: "task_1".to_string(), + label: "dashboard-progress/test/value".to_string(), + }; + let metric = MetricEntry { + key: metric_key, + value: 2.0, + }; + metrics_store.store_metrics(Some(vec![metric]), node_addr); + + let metric_key = MetricKey { + task_id: "task_1".to_string(), + label: "dashboard-progress/test/value".to_string(), + }; + let metric = MetricEntry { + key: metric_key, + value: 1.0, + }; + metrics_store.store_metrics(Some(vec![metric]), node_addr); + + let metrics = metrics_store.get_metrics_for_node(node_addr); + assert_eq!( + metrics + .get("task_1") + .unwrap() + .get("dashboard-progress/test/value"), + Some(&2.0) + ); + + let metric_key = MetricKey { + task_id: "task_1".to_string(), + label: "dashboard-progress/test/value".to_string(), + }; + let metric = MetricEntry { + key: metric_key, + value: 3.0, + }; + metrics_store.store_metrics(Some(vec![metric]), node_addr); + + let metrics = metrics_store.get_metrics_for_node(node_addr); + assert_eq!( + metrics + .get("task_1") + .unwrap() + .get("dashboard-progress/test/value"), + Some(&3.0) + ); + + // Test non-dashboard metric gets overwritten regardless of value + let metric_key = MetricKey { + task_id: "task_1".to_string(), + label: "cpu_usage".to_string(), + }; + let metric = MetricEntry { + key: metric_key.clone(), + value: 2.0, + }; + metrics_store.store_metrics(Some(vec![metric]), node_addr); + + let metric = MetricEntry { + key: metric_key, + 
value: 1.0, + }; + metrics_store.store_metrics(Some(vec![metric]), node_addr); + + let metrics = metrics_store.get_metrics_for_node(node_addr); + assert_eq!(metrics.get("task_1").unwrap().get("cpu_usage"), Some(&1.0)); + + // Test another non-dashboard metric gets overwritten with larger value + let metric_key = MetricKey { + task_id: "task_1".to_string(), + label: "memory_usage".to_string(), + }; + let metric = MetricEntry { + key: metric_key.clone(), + value: 2.0, + }; + metrics_store.store_metrics(Some(vec![metric]), node_addr); + + let metric = MetricEntry { + key: metric_key, + value: 1.0, + }; + metrics_store.store_metrics(Some(vec![metric]), node_addr); + + let metrics = metrics_store.get_metrics_for_node(node_addr); + assert_eq!( + metrics.get("task_1").unwrap().get("memory_usage"), + Some(&1.0) + ); + } } From 0d3d5cdff7cb2780354c940b97be4138732e64e7 Mon Sep 17 00:00:00 2001 From: Jannik Straube Date: Wed, 12 Feb 2025 02:10:38 +0200 Subject: [PATCH 10/85] fmt --- Cargo.lock | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0ad1f02e..3e445a15 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2211,11 +2211,7 @@ dependencies = [ [[package]] name = "discovery" -<<<<<<< HEAD version = "0.1.6" -======= -version = "0.1.5" ->>>>>>> main dependencies = [ "actix-web", "alloy", @@ -3600,11 +3596,7 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "miner" -<<<<<<< HEAD version = "0.1.6" -======= -version = "0.1.5" ->>>>>>> main dependencies = [ "actix-web", "alloy", @@ -4028,11 +4020,7 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "orchestrator" -<<<<<<< HEAD version = "0.1.6" -======= -version = "0.1.5" ->>>>>>> main dependencies = [ "actix-web", "alloy", @@ -5880,11 +5868,7 @@ dependencies = [ [[package]] name = "validator" -<<<<<<< HEAD version = "0.1.6" -======= -version = "0.1.5" ->>>>>>> main dependencies = [ 
"actix-web", "alloy", From 8cb71b4072eac47ca1c27e670dafa6e74c2b1ead Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Tue, 4 Feb 2025 13:25:33 +0100 Subject: [PATCH 11/85] very basic validator functionality --- Cargo.lock | 261 ++++++++++++++++++++++++++---- miner/Cargo.toml | 1 + miner/src/api/routes/challenge.rs | 20 +++ miner/src/api/routes/mod.rs | 1 + miner/src/api/server.rs | 2 + shared/Cargo.toml | 1 + shared/src/models/challenge.rs | 32 ++++ shared/src/models/mod.rs | 1 + validator/Cargo.toml | 2 + validator/src/main.rs | 136 ++++++++++++++++ 10 files changed, 424 insertions(+), 33 deletions(-) create mode 100644 miner/src/api/routes/challenge.rs create mode 100644 shared/src/models/challenge.rs diff --git a/Cargo.lock b/Cargo.lock index 3e445a15..b73a7e57 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -49,7 +49,7 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite", - "rand", + "rand 0.8.5", "sha1", "smallvec", "tokio", @@ -207,10 +207,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if 1.0.0", - "getrandom", + "getrandom 0.2.15", "once_cell", "version_check", - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -499,7 +499,7 @@ dependencies = [ "alloy-genesis", "alloy-primitives", "k256", - "rand", + "rand 0.8.5", "serde_json", "tempfile", "thiserror 2.0.11", @@ -526,7 +526,7 @@ dependencies = [ "keccak-asm", "paste", "proptest", - "rand", + "rand 0.8.5", "ruint", "rustc-hash", "serde", @@ -738,7 +738,7 @@ dependencies = [ "alloy-signer", "async-trait", "k256", - "rand", + "rand 0.8.5", "thiserror 2.0.11", ] @@ -982,6 +982,15 @@ version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" +[[package]] +name = "approx" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6" +dependencies = [ + "num-traits", +] + [[package]] name = "arc-swap" version = "1.7.1" @@ -1099,7 +1108,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" dependencies = [ "num-traits", - "rand", + "rand 0.8.5", ] [[package]] @@ -1109,7 +1118,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" dependencies = [ "num-traits", - "rand", + "rand 0.8.5", ] [[package]] @@ -1535,6 +1544,12 @@ version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" +[[package]] +name = "bytemuck" +version = "1.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef657dfab802224e671f5818e9a4935f9b1957ed18e58292690cc39e7a4092a3" + [[package]] name = "byteorder" version = "1.5.0" @@ -1901,7 +1916,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array", - "rand_core", + "rand_core 0.6.4", "subtle", "zeroize", ] @@ -2291,7 +2306,7 @@ dependencies = [ "generic-array", "group", "pkcs8", - "rand_core", + "rand_core 0.6.4", "sec1", "subtle", "zeroize", @@ -2456,7 +2471,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -2479,7 +2494,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ "byteorder", - "rand", + "rand 0.8.5", "rustc-hex", "static_assertions", ] @@ -2698,7 +2713,19 @@ checksum = 
"c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi 0.13.3+wasi-0.2.2", + "windows-targets 0.52.6", ] [[package]] @@ -2733,7 +2760,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -3564,6 +3591,16 @@ dependencies = [ "libc", ] +[[package]] +name = "matrixmultiply" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9380b911e3e96d10c1f415da0876389aaf1b56759054eeb0de7df940c456ba1a" +dependencies = [ + "autocfg", + "rawpointer", +] + [[package]] name = "memchr" version = "2.7.4" @@ -3619,6 +3656,7 @@ dependencies = [ "lazy_static", "libc", "log", + "nalgebra", "nvml-wrapper", "regex", "reqwest", @@ -3674,7 +3712,7 @@ checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ "libc", "log", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.52.0", ] @@ -3702,6 +3740,33 @@ dependencies = [ "ws2_32-sys", ] +[[package]] +name = "nalgebra" +version = "0.33.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26aecdf64b707efd1310e3544d709c5c0ac61c13756046aaaba41be5c4f66a3b" +dependencies = [ + "approx", + "matrixmultiply", + "nalgebra-macros", + "num-complex", + "num-rational", + "num-traits", + "simba", + "typenum", +] + +[[package]] +name = "nalgebra-macros" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "254a5372af8fc138e36684761d3c0cdb758a4410e938babcff1c860ce14ddbfc" 
+dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.92", +] + [[package]] name = "native-tls" version = "0.2.12" @@ -3827,6 +3892,15 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-complex" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +dependencies = [ + "num-traits", +] + [[package]] name = "num-conv" version = "0.1.0" @@ -3842,6 +3916,17 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-rational" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + [[package]] name = "num-traits" version = "0.2.19" @@ -4190,7 +4275,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ "phf_shared", - "rand", + "rand 0.8.5", ] [[package]] @@ -4294,7 +4379,7 @@ version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" dependencies = [ - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -4383,8 +4468,8 @@ dependencies = [ "bitflags 2.6.0", "lazy_static", "num-traits", - "rand", - "rand_chacha", + "rand 0.8.5", + "rand_chacha 0.3.1", "rand_xorshift", "regex-syntax", "rusty-fork", @@ -4429,11 +4514,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", "serde", ] +[[package]] +name = "rand" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" 
+dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.0", + "zerocopy 0.8.15", +] + [[package]] name = "rand_chacha" version = "0.3.1" @@ -4441,7 +4537,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.0", ] [[package]] @@ -4450,7 +4556,17 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.15", +] + +[[package]] +name = "rand_core" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b08f3c9802962f7e1b25113931d94f43ed9725bebc59db9d0c3e9a23b67e15ff" +dependencies = [ + "getrandom 0.3.1", + "zerocopy 0.8.15", ] [[package]] @@ -4459,9 +4575,15 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core", + "rand_core 0.6.4", ] +[[package]] +name = "rawpointer" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" + [[package]] name = "rayon" version = "1.10.0" @@ -4511,7 +4633,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f9edfc3524eee83ea9043734d59b139f24b6974742e2e23b16faa2b8a33fe36" dependencies = [ - "rand", + "rand 0.8.5", "redis", "socket2", "tempfile", @@ -4541,7 +4663,7 @@ version = "0.4.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ - "getrandom", + "getrandom 0.2.15", "libredox", "thiserror 1.0.69", ] @@ -4552,7 +4674,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd6f9d3d47bdd2ad6945c5015a226ec6155d0bcdfd8f7cd29f86b71f8de99d2b" dependencies = [ - "getrandom", + "getrandom 0.2.15", "libredox", "thiserror 2.0.11", ] @@ -4654,7 +4776,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if 1.0.0", - "getrandom", + "getrandom 0.2.15", "libc", "spin", "untrusted", @@ -4689,7 +4811,7 @@ dependencies = [ "parity-scale-codec", "primitive-types", "proptest", - "rand", + "rand 0.8.5", "rlp", "ruint-macro", "serde", @@ -4816,6 +4938,15 @@ version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" +[[package]] +name = "safe_arch" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96b02de82ddbe1b636e6170c21be622223aea188ef2e139be0a5b219ec215323" +dependencies = [ + "bytemuck", +] + [[package]] name = "same-file" version = "1.0.6" @@ -5082,6 +5213,7 @@ dependencies = [ "dashmap", "futures-util", "hex", + "nalgebra", "redis", "serde", "serde_json", @@ -5118,7 +5250,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest 0.10.7", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "simba" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3a386a501cd104797982c15ae17aafe8b9261315b5d07e3ec803f2ea26be0fa" +dependencies = [ + "approx", + "num-complex", + "num-traits", + "paste", + "wide", ] [[package]] @@ -5727,7 +5872,7 @@ 
dependencies = [ "http 1.2.0", "httparse", "log", - "rand", + "rand 0.8.5", "rustls", "rustls-pki-types", "sha1", @@ -5862,7 +6007,7 @@ version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3758f5e68192bb96cc8f9b7e2c2cfdabb435499a28499a42f8f984092adad4b" dependencies = [ - "getrandom", + "getrandom 0.2.15", "serde", ] @@ -5876,6 +6021,8 @@ dependencies = [ "clap 4.5.27", "env_logger 0.11.6", "log", + "nalgebra", + "rand 0.9.0", "reqwest", "serde", "serde_json", @@ -5993,6 +6140,15 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasi" +version = "0.13.3+wasi-0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +dependencies = [ + "wit-bindgen-rt", +] + [[package]] name = "wasm-bindgen" version = "0.2.99" @@ -6134,6 +6290,16 @@ dependencies = [ "rustix", ] +[[package]] +name = "wide" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41b5576b9a81633f3e8df296ce0063042a73507636cbe956c61133dd7034ab22" +dependencies = [ + "bytemuck", + "safe_arch", +] + [[package]] name = "widestring" version = "1.1.0" @@ -6449,6 +6615,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "wit-bindgen-rt" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +dependencies = [ + "bitflags 2.6.0", +] + [[package]] name = "wrapcenum-derive" version = "0.4.1" @@ -6615,7 +6790,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "byteorder", - "zerocopy-derive", + "zerocopy-derive 0.7.35", +] + +[[package]] +name = "zerocopy" 
+version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1e101d4bc320b6f9abb68846837b70e25e380ca2f467ab494bf29fcc435fcc3" +dependencies = [ + "zerocopy-derive 0.8.15", ] [[package]] @@ -6629,6 +6813,17 @@ dependencies = [ "syn 2.0.92", ] +[[package]] +name = "zerocopy-derive" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03a73df1008145cd135b3c780d275c57c3e6ba8324a41bd5e0008fe167c3bc7c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.92", +] + [[package]] name = "zerofrom" version = "0.1.5" diff --git a/miner/Cargo.toml b/miner/Cargo.toml index 3ca01510..fab3f33d 100644 --- a/miner/Cargo.toml +++ b/miner/Cargo.toml @@ -41,6 +41,7 @@ chrono = "0.4" serial_test = "0.5.1" directories = "6.0.0" strip-ansi-escapes = "0.2.1" +nalgebra = "0.33.2" [dev-dependencies] tempfile = "=3.14.0" diff --git a/miner/src/api/routes/challenge.rs b/miner/src/api/routes/challenge.rs new file mode 100644 index 00000000..647f91b5 --- /dev/null +++ b/miner/src/api/routes/challenge.rs @@ -0,0 +1,20 @@ +use shared::models::challenge::calc_matrix; +use actix_web::{ + web::{self, post}, + HttpResponse, Scope, +}; +use shared::models::challenge::ChallengeRequest; + +pub async fn handle_challenge( + challenge: web::Json, + //app_state: Data, +) -> HttpResponse { + let result = calc_matrix(&challenge); + HttpResponse::Ok().json(result) +} + +pub fn challenge_routes() -> Scope { + web::scope("/challenge") + .route("", post().to(handle_challenge)) + .route("/", post().to(handle_challenge)) +} diff --git a/miner/src/api/routes/mod.rs b/miner/src/api/routes/mod.rs index 942b4f0f..0a2c83c3 100644 --- a/miner/src/api/routes/mod.rs +++ b/miner/src/api/routes/mod.rs @@ -1,3 +1,4 @@ pub mod invite; pub mod task; pub mod types; +pub mod challenge; \ No newline at end of file diff --git a/miner/src/api/server.rs b/miner/src/api/server.rs index 1077fc36..7fecba18 100644 --- a/miner/src/api/server.rs +++ 
b/miner/src/api/server.rs @@ -1,6 +1,7 @@ use crate::api::routes::invite::invite_routes; use crate::api::routes::task::task_routes; use crate::docker::DockerService; +use crate::api::routes::challenge::challenge_routes; use crate::operations::heartbeat::service::HeartbeatService; use actix_web::{middleware, web::Data, App, HttpServer}; use shared::security::auth_signature_middleware::{ValidateSignature, ValidatorState}; @@ -49,6 +50,7 @@ pub async fn start_server( .wrap(ValidateSignature::new(validator_state.clone())) .service(invite_routes()) .service(task_routes()) + .service(challenge_routes()) }) .bind((host, port))? .run() diff --git a/shared/Cargo.toml b/shared/Cargo.toml index 69def215..2b7e80ce 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -24,3 +24,4 @@ uuid = { version = "1.12.1", features = ["v4", "serde"] } redis = "0.28.1" dashmap = "6.1.0" anyhow = "1.0.95" +nalgebra = "0.33.2" diff --git a/shared/src/models/challenge.rs b/shared/src/models/challenge.rs new file mode 100644 index 00000000..7767e085 --- /dev/null +++ b/shared/src/models/challenge.rs @@ -0,0 +1,32 @@ +use serde::{Deserialize, Serialize}; +use nalgebra::DMatrix; + +#[derive(Deserialize, Serialize, Debug, Clone)] +pub struct ChallengeRequest { + pub rows_a: usize, + pub cols_a: usize, + pub data_a: Vec, + pub rows_b: usize, + pub cols_b: usize, + pub data_b: Vec, +} + +#[derive(Deserialize, Serialize, PartialEq, Debug)] +pub struct ChallengeResponse { + pub result: Vec, + pub rows: usize, + pub cols: usize, +} + + +pub fn calc_matrix(req: &ChallengeRequest) -> ChallengeResponse { + let a = DMatrix::from_vec(req.rows_a, req.cols_a, req.data_a.clone()); + let b = DMatrix::from_vec(req.rows_b, req.cols_b, req.data_b.clone()); + let c = a * b; + + ChallengeResponse { + rows: c.nrows(), + cols: c.ncols(), + result: c.data.as_vec().clone(), + } +} diff --git a/shared/src/models/mod.rs b/shared/src/models/mod.rs index 1f74f3bf..d2ebd023 100644 --- a/shared/src/models/mod.rs +++ 
b/shared/src/models/mod.rs @@ -4,3 +4,4 @@ pub mod invite; pub mod metric; pub mod node; pub mod task; +pub mod challenge; \ No newline at end of file diff --git a/validator/Cargo.toml b/validator/Cargo.toml index 87500640..5b22a7c1 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -10,6 +10,8 @@ anyhow = "1.0.95" clap = { version = "4.5.26", features = ["derive"] } env_logger = "0.11.6" log = "0.4.25" +nalgebra = "0.33.2" +rand = "0.9.0" reqwest = "0.12.12" serde = "1.0.217" serde_json = "1.0.135" diff --git a/validator/src/main.rs b/validator/src/main.rs index 80f78fa2..172c4b82 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -6,7 +6,11 @@ use clap::Parser; use log::LevelFilter; use log::{error, info}; use serde_json::json; +use nalgebra::DMatrix; +use rand::rng; +use rand::Rng; use shared::models::api::ApiResponse; +use shared::models::challenge::{ChallengeRequest, ChallengeResponse}; use shared::models::node::DiscoveryNode; use shared::web3::contracts::core::builder::ContractBuilder; use shared::web3::wallet::Wallet; @@ -163,6 +167,12 @@ fn main() { } }; + let challenge_result = runtime.block_on(challenge_node(&node)); + if challenge_result.is_err() { + error!("Failed to challenge node {}: {:?}", node.id, challenge_result); + continue; + } + if let Err(e) = runtime.block_on( contracts .prime_network @@ -176,3 +186,129 @@ fn main() { std::thread::sleep(std::time::Duration::from_secs(10)); } } + +fn random_challenge( + rows_a: usize, + cols_a: usize, + rows_b: usize, + cols_b: usize, +) -> ChallengeRequest { + let mut rng = rng(); + + let data_a: Vec = (0..(rows_a * cols_a)) + .map(|_| rng.random_range(0.0..1.0)) + .collect(); + + let data_b: Vec = (0..(rows_b * cols_b)) + .map(|_| rng.random_range(0.0..1.0)) + .collect(); + + ChallengeRequest { + rows_a, + cols_a, + data_a, + rows_b, + cols_b, + data_b, + } +} + +pub async fn challenge_node(node: &DiscoveryNode) -> Result> { + let node_url = format!("http://{}:{}", 
node.node.ip_address, node.node.port); + let challenge_route = "/challenge"; + + let mut headers = reqwest::header::HeaderMap::new(); + headers.insert("x-compute", "pytorch".parse().unwrap()); + //headers.insert("x-address", address.parse().unwrap()); + //headers.insert("x-signature", signature.parse().unwrap()); + + // create random challenge matrix + let challenge_matrix = random_challenge(3, 3, 3, 3); + + let a = DMatrix::from_vec( + challenge_matrix.rows_a, + challenge_matrix.cols_a, + challenge_matrix.data_a.clone(), + ); + let b = DMatrix::from_vec( + challenge_matrix.rows_b, + challenge_matrix.cols_b, + challenge_matrix.data_b.clone(), + ); + let c = a * b; + + let response = reqwest::Client::new() + .post(format!("{}{}", node_url, challenge_route)) + .json(&challenge_matrix) + .headers(headers) + .send() + .await?; + + let response_text = response.text().await?; + let parsed_response: ApiResponse> = + serde_json::from_str(&response_text)?; + + if !parsed_response.success { + Err("Error fetching challenge from node".into()) + } else if c.data.as_vec().clone() == parsed_response.data[0].result { + info!("Challenge successful"); + Ok(0) + } else { + error!("Challenge failed"); + Err("Node failed challenge".into()) + } +} + + +#[cfg(test)] +mod tests { + use super::*; + use actix_web::{test, App}; + use actix_web::{ + web::{self, post}, + HttpResponse, Scope, + }; + use shared::models::challenge::calc_matrix; + + pub async fn handle_challenge( + challenge: web::Json, + //app_state: Data, + ) -> HttpResponse { + let result = calc_matrix(&challenge); + HttpResponse::Ok().json(result) + } + + pub fn challenge_routes() -> Scope { + web::scope("/challenge") + .route("", post().to(handle_challenge)) + .route("/", post().to(handle_challenge)) + } + + #[actix_web::test] + async fn test_challenge_route() { + + let app = test::init_service( + App::new().service(challenge_routes()) + ).await; + + let challenge_request = ChallengeRequest { + rows_a: 3, + cols_a: 3, + 
data_a: vec![1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0], + rows_b: 3, + cols_b: 3, + data_b: vec![9.0,8.0,7.0,6.0,5.0,4.0,3.0,2.0,1.0], + }; + + let req = test::TestRequest::post() + .uri("/challenge") + .set_json(&challenge_request) + .to_request(); + + let resp: ChallengeResponse = test::call_and_read_body_json(&app, req).await; + let expected_response = calc_matrix(&challenge_request); + + assert_eq!(resp, expected_response); + } +} + From bf88c804631379b9d9abc7c56326f2f2d60617cb Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Tue, 4 Feb 2025 13:28:12 +0100 Subject: [PATCH 12/85] fmt --- miner/src/api/routes/challenge.rs | 2 +- miner/src/api/routes/mod.rs | 2 +- miner/src/api/server.rs | 2 +- shared/src/models/challenge.rs | 3 +-- shared/src/models/mod.rs | 2 +- validator/src/main.rs | 19 ++++++++++--------- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/miner/src/api/routes/challenge.rs b/miner/src/api/routes/challenge.rs index 647f91b5..78ff3738 100644 --- a/miner/src/api/routes/challenge.rs +++ b/miner/src/api/routes/challenge.rs @@ -1,8 +1,8 @@ -use shared::models::challenge::calc_matrix; use actix_web::{ web::{self, post}, HttpResponse, Scope, }; +use shared::models::challenge::calc_matrix; use shared::models::challenge::ChallengeRequest; pub async fn handle_challenge( diff --git a/miner/src/api/routes/mod.rs b/miner/src/api/routes/mod.rs index 0a2c83c3..d1e00d34 100644 --- a/miner/src/api/routes/mod.rs +++ b/miner/src/api/routes/mod.rs @@ -1,4 +1,4 @@ +pub mod challenge; pub mod invite; pub mod task; pub mod types; -pub mod challenge; \ No newline at end of file diff --git a/miner/src/api/server.rs b/miner/src/api/server.rs index 7fecba18..12cddf9d 100644 --- a/miner/src/api/server.rs +++ b/miner/src/api/server.rs @@ -1,7 +1,7 @@ +use crate::api::routes::challenge::challenge_routes; use crate::api::routes::invite::invite_routes; use crate::api::routes::task::task_routes; use crate::docker::DockerService; -use 
crate::api::routes::challenge::challenge_routes; use crate::operations::heartbeat::service::HeartbeatService; use actix_web::{middleware, web::Data, App, HttpServer}; use shared::security::auth_signature_middleware::{ValidateSignature, ValidatorState}; diff --git a/shared/src/models/challenge.rs b/shared/src/models/challenge.rs index 7767e085..ed6b6599 100644 --- a/shared/src/models/challenge.rs +++ b/shared/src/models/challenge.rs @@ -1,5 +1,5 @@ -use serde::{Deserialize, Serialize}; use nalgebra::DMatrix; +use serde::{Deserialize, Serialize}; #[derive(Deserialize, Serialize, Debug, Clone)] pub struct ChallengeRequest { @@ -18,7 +18,6 @@ pub struct ChallengeResponse { pub cols: usize, } - pub fn calc_matrix(req: &ChallengeRequest) -> ChallengeResponse { let a = DMatrix::from_vec(req.rows_a, req.cols_a, req.data_a.clone()); let b = DMatrix::from_vec(req.rows_b, req.cols_b, req.data_b.clone()); diff --git a/shared/src/models/mod.rs b/shared/src/models/mod.rs index d2ebd023..bbb59159 100644 --- a/shared/src/models/mod.rs +++ b/shared/src/models/mod.rs @@ -1,7 +1,7 @@ pub mod api; +pub mod challenge; pub mod heartbeat; pub mod invite; pub mod metric; pub mod node; pub mod task; -pub mod challenge; \ No newline at end of file diff --git a/validator/src/main.rs b/validator/src/main.rs index 172c4b82..9488a0a0 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -16,10 +16,13 @@ use shared::web3::contracts::core::builder::ContractBuilder; use shared::web3::wallet::Wallet; use url::Url; +<<<<<<< HEAD async fn health_check() -> impl Responder { HttpResponse::Ok().json(json!({ "status": "ok" })) } +======= +>>>>>>> 21b6ce5 (fmt) #[derive(Parser)] struct Args { /// RPC URL @@ -169,7 +172,10 @@ fn main() { let challenge_result = runtime.block_on(challenge_node(&node)); if challenge_result.is_err() { - error!("Failed to challenge node {}: {:?}", node.id, challenge_result); + error!( + "Failed to challenge node {}: {:?}", + node.id, challenge_result + ); continue; 
} @@ -259,7 +265,6 @@ pub async fn challenge_node(node: &DiscoveryNode) -> Result Date: Tue, 4 Feb 2025 14:16:31 +0100 Subject: [PATCH 13/85] add signature ... --- miner/src/api/routes/challenge.rs | 14 ++++++++----- validator/src/main.rs | 35 +++++++++++++++++++++++++------ 2 files changed, 38 insertions(+), 11 deletions(-) diff --git a/miner/src/api/routes/challenge.rs b/miner/src/api/routes/challenge.rs index 78ff3738..d6555676 100644 --- a/miner/src/api/routes/challenge.rs +++ b/miner/src/api/routes/challenge.rs @@ -1,5 +1,6 @@ +use crate::api::server::AppState; use actix_web::{ - web::{self, post}, + web::{self, post, Data}, HttpResponse, Scope, }; use shared::models::challenge::calc_matrix; @@ -7,14 +8,17 @@ use shared::models::challenge::ChallengeRequest; pub async fn handle_challenge( challenge: web::Json, - //app_state: Data, + app_state: Data, ) -> HttpResponse { + println!("Challenge request: {:?}", challenge); + println!( + "Wallet: {:?}", + app_state.node_wallet.wallet.default_signer().address() + ); let result = calc_matrix(&challenge); HttpResponse::Ok().json(result) } pub fn challenge_routes() -> Scope { - web::scope("/challenge") - .route("", post().to(handle_challenge)) - .route("/", post().to(handle_challenge)) + web::scope("/challenge").route("/submit", post().to(handle_challenge)) } diff --git a/validator/src/main.rs b/validator/src/main.rs index 9488a0a0..a5b8c734 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -170,7 +170,17 @@ fn main() { } }; - let challenge_result = runtime.block_on(challenge_node(&node)); + let challenge_route = "/challenge/submit"; + let address = validator_wallet + .wallet + .default_signer() + .address() + .to_string(); + let signature = runtime + .block_on(_generate_signature(&validator_wallet, challenge_route)) + .unwrap(); + + let challenge_result = runtime.block_on(challenge_node(&node, address, signature)); if challenge_result.is_err() { error!( "Failed to challenge node {}: {:?}", @@ -219,14 
+229,18 @@ fn random_challenge( } } -pub async fn challenge_node(node: &DiscoveryNode) -> Result> { +pub async fn challenge_node( + node: &DiscoveryNode, + address: String, + signature: String, +) -> Result> { let node_url = format!("http://{}:{}", node.node.ip_address, node.node.port); - let challenge_route = "/challenge"; + let challenge_route = "/challenge/submit"; let mut headers = reqwest::header::HeaderMap::new(); headers.insert("x-compute", "pytorch".parse().unwrap()); - //headers.insert("x-address", address.parse().unwrap()); - //headers.insert("x-signature", signature.parse().unwrap()); + headers.insert("x-address", address.parse().unwrap()); + headers.insert("x-signature", signature.parse().unwrap()); // create random challenge matrix let challenge_matrix = random_challenge(3, 3, 3, 3); @@ -243,17 +257,26 @@ pub async fn challenge_node(node: &DiscoveryNode) -> Result> = serde_json::from_str(&response_text)?; + println!("Challenge response: {:?}", parsed_response); + if !parsed_response.success { Err("Error fetching challenge from node".into()) } else if c.data.as_vec().clone() == parsed_response.data[0].result { From 92629cd16ae288931d3deed60220b98690f4e4c1 Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Tue, 4 Feb 2025 14:57:26 +0100 Subject: [PATCH 14/85] use sign_request --- validator/src/main.rs | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/validator/src/main.rs b/validator/src/main.rs index a5b8c734..59f4b2fe 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -12,6 +12,7 @@ use rand::Rng; use shared::models::api::ApiResponse; use shared::models::challenge::{ChallengeRequest, ChallengeResponse}; use shared::models::node::DiscoveryNode; +use shared::security::request_signer::sign_request; use shared::web3::contracts::core::builder::ContractBuilder; use shared::web3::wallet::Wallet; use url::Url; @@ -171,16 +172,8 @@ fn main() { }; let challenge_route = "/challenge/submit"; - let 
address = validator_wallet - .wallet - .default_signer() - .address() - .to_string(); - let signature = runtime - .block_on(_generate_signature(&validator_wallet, challenge_route)) - .unwrap(); - - let challenge_result = runtime.block_on(challenge_node(&node, address, signature)); + let challenge_result = + runtime.block_on(challenge_node(&node, &validator_wallet, &challenge_route)); if challenge_result.is_err() { error!( "Failed to challenge node {}: {:?}", @@ -231,16 +224,13 @@ fn random_challenge( pub async fn challenge_node( node: &DiscoveryNode, - address: String, - signature: String, + wallet: &Wallet, + challenge_route: &str, ) -> Result> { let node_url = format!("http://{}:{}", node.node.ip_address, node.node.port); - let challenge_route = "/challenge/submit"; let mut headers = reqwest::header::HeaderMap::new(); headers.insert("x-compute", "pytorch".parse().unwrap()); - headers.insert("x-address", address.parse().unwrap()); - headers.insert("x-signature", signature.parse().unwrap()); // create random challenge matrix let challenge_matrix = random_challenge(3, 3, 3, 3); @@ -262,6 +252,13 @@ pub async fn challenge_node( let post_url = format!("{}{}", node_url, challenge_route); println!("Challenge post url: {}", post_url); + let address = wallet.wallet.default_signer().address().to_string(); + let challenge_matrix_value = serde_json::to_value(&challenge_matrix)?; + let signature = sign_request(challenge_route, &wallet, Some(&challenge_matrix_value)).await?; + + headers.insert("x-address", address.parse().unwrap()); + headers.insert("x-signature", signature.parse().unwrap()); + let response = reqwest::Client::new() .post(post_url) .json(&challenge_matrix) From 75e055d78670a6ff30e58cf5667aa064a48fefdd Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Wed, 5 Feb 2025 09:59:00 +0100 Subject: [PATCH 15/85] use custom serializer for consistency --- miner/src/api/routes/challenge.rs | 4 +-- shared/src/models/challenge.rs | 60 ++++++++++++++++++++++++++++--- 
validator/src/main.rs | 32 ++++++++++++----- 3 files changed, 81 insertions(+), 15 deletions(-) diff --git a/miner/src/api/routes/challenge.rs b/miner/src/api/routes/challenge.rs index d6555676..6a175d9b 100644 --- a/miner/src/api/routes/challenge.rs +++ b/miner/src/api/routes/challenge.rs @@ -1,3 +1,4 @@ +use log::{debug, error, info, warn}; use crate::api::server::AppState; use actix_web::{ web::{self, post, Data}, @@ -10,8 +11,7 @@ pub async fn handle_challenge( challenge: web::Json, app_state: Data, ) -> HttpResponse { - println!("Challenge request: {:?}", challenge); - println!( + info!( "Wallet: {:?}", app_state.node_wallet.wallet.default_signer().address() ); diff --git a/shared/src/models/challenge.rs b/shared/src/models/challenge.rs index ed6b6599..12812ec3 100644 --- a/shared/src/models/challenge.rs +++ b/shared/src/models/challenge.rs @@ -1,14 +1,61 @@ use nalgebra::DMatrix; -use serde::{Deserialize, Serialize}; +use serde::{ + de::{self, Visitor}, + Deserialize, Deserializer, Serialize, Serializer, +}; +use std::fmt; + +#[derive(Debug, Clone)] +pub struct FixedF64(pub f64); + +impl Serialize for FixedF64 { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + // adjust precision as needed + serializer.serialize_str(&format!("{:.12}", self.0)) + } +} + + +impl<'de> Deserialize<'de> for FixedF64 { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct FixedF64Visitor; + + impl<'de> Visitor<'de> for FixedF64Visitor { + type Value = FixedF64; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a string representing a fixed precision float") + } + + fn visit_str(self, value: &str) -> Result + where + E: de::Error, + { + value + .parse::() + .map(FixedF64) + .map_err(|_| E::custom(format!("invalid f64: {}", value))) + } + } + + deserializer.deserialize_str(FixedF64Visitor) + } +} #[derive(Deserialize, Serialize, Debug, Clone)] pub struct ChallengeRequest { 
pub rows_a: usize, pub cols_a: usize, - pub data_a: Vec, + pub data_a: Vec, pub rows_b: usize, pub cols_b: usize, - pub data_b: Vec, + pub data_b: Vec, } #[derive(Deserialize, Serialize, PartialEq, Debug)] @@ -19,8 +66,11 @@ pub struct ChallengeResponse { } pub fn calc_matrix(req: &ChallengeRequest) -> ChallengeResponse { - let a = DMatrix::from_vec(req.rows_a, req.cols_a, req.data_a.clone()); - let b = DMatrix::from_vec(req.rows_b, req.cols_b, req.data_b.clone()); + // convert FixedF64 to f64 + let data_a: Vec = req.data_a.iter().map(|x| x.0).collect(); + let data_b: Vec = req.data_b.iter().map(|x| x.0).collect(); + let a = DMatrix::from_vec(req.rows_a, req.cols_a, data_a); + let b = DMatrix::from_vec(req.rows_b, req.cols_b, data_b); let c = a * b; ChallengeResponse { diff --git a/validator/src/main.rs b/validator/src/main.rs index 59f4b2fe..7ee6bc2e 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -16,6 +16,7 @@ use shared::security::request_signer::sign_request; use shared::web3::contracts::core::builder::ContractBuilder; use shared::web3::wallet::Wallet; use url::Url; +use shared::models::challenge::FixedF64; <<<<<<< HEAD async fn health_check() -> impl Responder { @@ -204,14 +205,18 @@ fn random_challenge( ) -> ChallengeRequest { let mut rng = rng(); - let data_a: Vec = (0..(rows_a * cols_a)) + let data_a_vec: Vec = (0..(rows_a * cols_a)) .map(|_| rng.random_range(0.0..1.0)) .collect(); - let data_b: Vec = (0..(rows_b * cols_b)) + let data_b_vec: Vec = (0..(rows_b * cols_b)) .map(|_| rng.random_range(0.0..1.0)) .collect(); + // convert to FixedF64 + let data_a: Vec = data_a_vec.iter().map(|x| FixedF64(*x)).collect(); + let data_b: Vec = data_b_vec.iter().map(|x| FixedF64(*x)).collect(); + ChallengeRequest { rows_a, cols_a, @@ -230,20 +235,23 @@ pub async fn challenge_node( let node_url = format!("http://{}:{}", node.node.ip_address, node.node.port); let mut headers = reqwest::header::HeaderMap::new(); - headers.insert("x-compute", 
"pytorch".parse().unwrap()); // create random challenge matrix let challenge_matrix = random_challenge(3, 3, 3, 3); + // convert to f64 + let data_a: Vec = challenge_matrix.data_a.iter().map(|x| x.0).collect(); + let data_b: Vec = challenge_matrix.data_b.iter().map(|x| x.0).collect(); + let a = DMatrix::from_vec( challenge_matrix.rows_a, challenge_matrix.cols_a, - challenge_matrix.data_a.clone(), + data_a, ); let b = DMatrix::from_vec( challenge_matrix.rows_b, challenge_matrix.cols_b, - challenge_matrix.data_b.clone(), + data_b, ); let c = a * b; @@ -254,6 +262,7 @@ pub async fn challenge_node( let address = wallet.wallet.default_signer().address().to_string(); let challenge_matrix_value = serde_json::to_value(&challenge_matrix)?; + println!("Challenge matrix value: {:?}", challenge_matrix_value); let signature = sign_request(challenge_route, &wallet, Some(&challenge_matrix_value)).await?; headers.insert("x-address", address.parse().unwrap()); @@ -261,8 +270,8 @@ pub async fn challenge_node( let response = reqwest::Client::new() .post(post_url) - .json(&challenge_matrix) .headers(headers) + .json(&challenge_matrix_value) .send() .await?; @@ -313,13 +322,20 @@ mod tests { async fn test_challenge_route() { let app = test::init_service(App::new().service(challenge_routes())).await; + let vec_a = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]; + let vec_b = vec![9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0]; + + // convert vectors to FixedF64 + let data_a: Vec = vec_a.iter().map(|x| FixedF64(*x)).collect(); + let data_b: Vec = vec_b.iter().map(|x| FixedF64(*x)).collect(); + let challenge_request = ChallengeRequest { rows_a: 3, cols_a: 3, - data_a: vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0], + data_a: data_a, rows_b: 3, cols_b: 3, - data_b: vec![9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0], + data_b: data_b, }; let req = test::TestRequest::post() From 61a482b8f9030794a983103794afb3407c11ac27 Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Wed, 5 Feb 
2025 12:19:49 +0100 Subject: [PATCH 16/85] add validator arg to miner, implement rounding robust partial eq --- Makefile | 5 +++-- miner/src/api/routes/challenge.rs | 8 +++++-- miner/src/api/server.rs | 8 ++++++- miner/src/cli/command.rs | 5 +++++ shared/src/models/challenge.rs | 16 ++++++++++---- validator/src/main.rs | 36 +++++-------------------------- 6 files changed, 38 insertions(+), 40 deletions(-) diff --git a/Makefile b/Makefile index 06c43b91..3e855f66 100644 --- a/Makefile +++ b/Makefile @@ -73,7 +73,7 @@ watch-discovery: watch-miner: set -a; source ${ENV_FILE}; set +a; \ - cargo watch -w miner/src -x "run --bin miner -- run --private-key-provider $$PROVIDER_PRIVATE_KEY --private-key-node $$NODE_PRIVATE_KEY --port 8091 --external-ip 0.0.0.0 --compute-pool-id 0" + cargo watch -w miner/src -x "run --bin miner -- run --private-key-provider $$PROVIDER_PRIVATE_KEY --private-key-node $$NODE_PRIVATE_KEY --port 8091 --external-ip 0.0.0.0 --compute-pool-id 0 --validator-address $$VALIDATOR_ADDRESS" watch-validator: set -a; source ${ENV_FILE}; set +a; \ @@ -140,7 +140,8 @@ watch-miner-remote: setup-remote setup-tunnel sync-remote --private-key-node \$$NODE_PRIVATE_KEY \ --port $(PORT) \ --external-ip \$$EXTERNAL_IP \ - --compute-pool-id 0 2>&1 | tee miner.log\"" + --compute-pool-id 0 2>&1 \ + --validator-address \$$VALIDATOR_ADDRESS | tee miner.log\"" # Kill SSH tunnel .PHONY: kill-tunnel kill-tunnel: diff --git a/miner/src/api/routes/challenge.rs b/miner/src/api/routes/challenge.rs index 6a175d9b..da3f2f0e 100644 --- a/miner/src/api/routes/challenge.rs +++ b/miner/src/api/routes/challenge.rs @@ -1,4 +1,4 @@ -use log::{debug, error, info, warn}; +use log::info; use crate::api::server::AppState; use actix_web::{ web::{self, post, Data}, @@ -6,6 +6,7 @@ use actix_web::{ }; use shared::models::challenge::calc_matrix; use shared::models::challenge::ChallengeRequest; +use shared::models::api::ApiResponse; pub async fn handle_challenge( challenge: web::Json, @@ -16,7 
+17,10 @@ pub async fn handle_challenge( app_state.node_wallet.wallet.default_signer().address() ); let result = calc_matrix(&challenge); - HttpResponse::Ok().json(result) + + let response = ApiResponse::new(true, result); + + HttpResponse::Ok().json(response) } pub fn challenge_routes() -> Scope { diff --git a/miner/src/api/server.rs b/miner/src/api/server.rs index 12cddf9d..36d82edc 100644 --- a/miner/src/api/server.rs +++ b/miner/src/api/server.rs @@ -9,6 +9,9 @@ use shared::web3::contracts::core::builder::Contracts; use shared::web3::contracts::structs::compute_pool::PoolInfo; use shared::web3::wallet::Wallet; use std::sync::Arc; +use alloy::primitives::Address; +use std::str::FromStr; + #[derive(Clone)] pub struct AppState { @@ -29,6 +32,7 @@ pub async fn start_server( heartbeat_service: Arc, docker_service: Arc, pool_info: Arc, + validator_address: String, ) -> std::io::Result<()> { println!("Starting server at http://{}:{}", host, port); @@ -40,7 +44,9 @@ pub async fn start_server( docker_service, }); - let allowed_addresses = vec![pool_info.creator, pool_info.compute_manager_key]; + let validator = Address::from_str(&validator_address.as_str()).unwrap(); + + let allowed_addresses = vec![pool_info.creator, pool_info.compute_manager_key, validator]; let validator_state = Arc::new(ValidatorState::new(allowed_addresses)); HttpServer::new(move || { diff --git a/miner/src/cli/command.rs b/miner/src/cli/command.rs index f06f5124..d2abbcc8 100644 --- a/miner/src/cli/command.rs +++ b/miner/src/cli/command.rs @@ -80,6 +80,9 @@ pub enum Commands { // Amount of stake to use when provider is newly registered #[arg(long, default_value = "10")] provider_stake: i32, + + #[arg(long, default_value = "0x0000000000000000000000000000000000000000")] + validator_address: Option, }, /// Run system checks to verify hardware and software compatibility Check {}, @@ -104,6 +107,7 @@ pub async fn execute_command( state_dir_overwrite, disable_state_storing, auto_recover, + 
validator_address, } => { if *disable_state_storing && *auto_recover { Console::error( @@ -345,6 +349,7 @@ pub async fn execute_command( heartbeat_clone.clone(), docker_service.clone(), pool_info, + validator_address.clone().unwrap_or_default(), ) .await } { diff --git a/shared/src/models/challenge.rs b/shared/src/models/challenge.rs index 12812ec3..70ee25fb 100644 --- a/shared/src/models/challenge.rs +++ b/shared/src/models/challenge.rs @@ -18,7 +18,6 @@ impl Serialize for FixedF64 { } } - impl<'de> Deserialize<'de> for FixedF64 { fn deserialize(deserializer: D) -> Result where @@ -48,6 +47,13 @@ impl<'de> Deserialize<'de> for FixedF64 { } } + +impl PartialEq for FixedF64 { + fn eq(&self, other: &Self) -> bool { + format!("{:.10}", self.0) == format!("{:.10}", other.0) + } +} + #[derive(Deserialize, Serialize, Debug, Clone)] pub struct ChallengeRequest { pub rows_a: usize, @@ -58,9 +64,9 @@ pub struct ChallengeRequest { pub data_b: Vec, } -#[derive(Deserialize, Serialize, PartialEq, Debug)] +#[derive(Deserialize, Serialize, Debug)] pub struct ChallengeResponse { - pub result: Vec, + pub result: Vec, pub rows: usize, pub cols: usize, } @@ -73,9 +79,11 @@ pub fn calc_matrix(req: &ChallengeRequest) -> ChallengeResponse { let b = DMatrix::from_vec(req.rows_b, req.cols_b, data_b); let c = a * b; + let data_c: Vec = c.iter().map(|x| FixedF64(*x)).collect(); + ChallengeResponse { rows: c.nrows(), cols: c.ncols(), - result: c.data.as_vec().clone(), + result: data_c, } } diff --git a/validator/src/main.rs b/validator/src/main.rs index 7ee6bc2e..a17fbb4b 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -17,14 +17,12 @@ use shared::web3::contracts::core::builder::ContractBuilder; use shared::web3::wallet::Wallet; use url::Url; use shared::models::challenge::FixedF64; +use shared::models::challenge::calc_matrix; -<<<<<<< HEAD async fn health_check() -> impl Responder { HttpResponse::Ok().json(json!({ "status": "ok" })) } -======= ->>>>>>> 21b6ce5 (fmt) 
#[derive(Parser)] struct Args { /// RPC URL @@ -238,31 +236,12 @@ pub async fn challenge_node( // create random challenge matrix let challenge_matrix = random_challenge(3, 3, 3, 3); - - // convert to f64 - let data_a: Vec = challenge_matrix.data_a.iter().map(|x| x.0).collect(); - let data_b: Vec = challenge_matrix.data_b.iter().map(|x| x.0).collect(); - - let a = DMatrix::from_vec( - challenge_matrix.rows_a, - challenge_matrix.cols_a, - data_a, - ); - let b = DMatrix::from_vec( - challenge_matrix.rows_b, - challenge_matrix.cols_b, - data_b, - ); - let c = a * b; - - println!("Challenge request: {:?}", challenge_matrix); + let challenge_expected = calc_matrix(&challenge_matrix); let post_url = format!("{}{}", node_url, challenge_route); - println!("Challenge post url: {}", post_url); let address = wallet.wallet.default_signer().address().to_string(); let challenge_matrix_value = serde_json::to_value(&challenge_matrix)?; - println!("Challenge matrix value: {:?}", challenge_matrix_value); let signature = sign_request(challenge_route, &wallet, Some(&challenge_matrix_value)).await?; headers.insert("x-address", address.parse().unwrap()); @@ -275,17 +254,13 @@ pub async fn challenge_node( .send() .await?; - println!("Challenge response: {:?}", response); - let response_text = response.text().await?; - let parsed_response: ApiResponse> = + let parsed_response: ApiResponse = serde_json::from_str(&response_text)?; - println!("Challenge response: {:?}", parsed_response); - if !parsed_response.success { Err("Error fetching challenge from node".into()) - } else if c.data.as_vec().clone() == parsed_response.data[0].result { + } else if challenge_expected.result == parsed_response.data.result { info!("Challenge successful"); Ok(0) } else { @@ -302,7 +277,6 @@ mod tests { web::{self, post}, HttpResponse, Scope, }; - use shared::models::challenge::calc_matrix; pub async fn handle_challenge( challenge: web::Json, @@ -346,6 +320,6 @@ mod tests { let resp: ChallengeResponse = 
test::call_and_read_body_json(&app, req).await; let expected_response = calc_matrix(&challenge_request); - assert_eq!(resp, expected_response); + assert_eq!(resp.result, expected_response.result); } } From 36349d6ae47b989ce05765228a8b33f95364364a Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Wed, 5 Feb 2025 12:21:38 +0100 Subject: [PATCH 17/85] fmt, clippy fix --- miner/src/api/routes/challenge.rs | 4 ++-- miner/src/api/server.rs | 7 +++---- shared/src/models/challenge.rs | 3 +-- validator/src/main.rs | 19 +++++++++---------- 4 files changed, 15 insertions(+), 18 deletions(-) diff --git a/miner/src/api/routes/challenge.rs b/miner/src/api/routes/challenge.rs index da3f2f0e..7406faf7 100644 --- a/miner/src/api/routes/challenge.rs +++ b/miner/src/api/routes/challenge.rs @@ -1,12 +1,12 @@ -use log::info; use crate::api::server::AppState; use actix_web::{ web::{self, post, Data}, HttpResponse, Scope, }; +use log::info; +use shared::models::api::ApiResponse; use shared::models::challenge::calc_matrix; use shared::models::challenge::ChallengeRequest; -use shared::models::api::ApiResponse; pub async fn handle_challenge( challenge: web::Json, diff --git a/miner/src/api/server.rs b/miner/src/api/server.rs index 36d82edc..0262e4df 100644 --- a/miner/src/api/server.rs +++ b/miner/src/api/server.rs @@ -4,14 +4,13 @@ use crate::api::routes::task::task_routes; use crate::docker::DockerService; use crate::operations::heartbeat::service::HeartbeatService; use actix_web::{middleware, web::Data, App, HttpServer}; +use alloy::primitives::Address; use shared::security::auth_signature_middleware::{ValidateSignature, ValidatorState}; use shared::web3::contracts::core::builder::Contracts; use shared::web3::contracts::structs::compute_pool::PoolInfo; use shared::web3::wallet::Wallet; -use std::sync::Arc; -use alloy::primitives::Address; use std::str::FromStr; - +use std::sync::Arc; #[derive(Clone)] pub struct AppState { @@ -44,7 +43,7 @@ pub async fn start_server( 
docker_service, }); - let validator = Address::from_str(&validator_address.as_str()).unwrap(); + let validator = Address::from_str(validator_address.as_str()).unwrap(); let allowed_addresses = vec![pool_info.creator, pool_info.compute_manager_key, validator]; let validator_state = Arc::new(ValidatorState::new(allowed_addresses)); diff --git a/shared/src/models/challenge.rs b/shared/src/models/challenge.rs index 70ee25fb..c461264f 100644 --- a/shared/src/models/challenge.rs +++ b/shared/src/models/challenge.rs @@ -25,7 +25,7 @@ impl<'de> Deserialize<'de> for FixedF64 { { struct FixedF64Visitor; - impl<'de> Visitor<'de> for FixedF64Visitor { + impl Visitor<'_> for FixedF64Visitor { type Value = FixedF64; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { @@ -47,7 +47,6 @@ impl<'de> Deserialize<'de> for FixedF64 { } } - impl PartialEq for FixedF64 { fn eq(&self, other: &Self) -> bool { format!("{:.10}", self.0) == format!("{:.10}", other.0) diff --git a/validator/src/main.rs b/validator/src/main.rs index a17fbb4b..4c7add1b 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -10,14 +10,14 @@ use nalgebra::DMatrix; use rand::rng; use rand::Rng; use shared::models::api::ApiResponse; +use shared::models::challenge::calc_matrix; +use shared::models::challenge::FixedF64; use shared::models::challenge::{ChallengeRequest, ChallengeResponse}; use shared::models::node::DiscoveryNode; use shared::security::request_signer::sign_request; use shared::web3::contracts::core::builder::ContractBuilder; use shared::web3::wallet::Wallet; use url::Url; -use shared::models::challenge::FixedF64; -use shared::models::challenge::calc_matrix; async fn health_check() -> impl Responder { HttpResponse::Ok().json(json!({ "status": "ok" })) @@ -172,7 +172,7 @@ fn main() { let challenge_route = "/challenge/submit"; let challenge_result = - runtime.block_on(challenge_node(&node, &validator_wallet, &challenge_route)); + runtime.block_on(challenge_node(&node, 
&validator_wallet, challenge_route)); if challenge_result.is_err() { error!( "Failed to challenge node {}: {:?}", @@ -242,7 +242,7 @@ pub async fn challenge_node( let address = wallet.wallet.default_signer().address().to_string(); let challenge_matrix_value = serde_json::to_value(&challenge_matrix)?; - let signature = sign_request(challenge_route, &wallet, Some(&challenge_matrix_value)).await?; + let signature = sign_request(challenge_route, wallet, Some(&challenge_matrix_value)).await?; headers.insert("x-address", address.parse().unwrap()); headers.insert("x-signature", signature.parse().unwrap()); @@ -255,8 +255,7 @@ pub async fn challenge_node( .await?; let response_text = response.text().await?; - let parsed_response: ApiResponse = - serde_json::from_str(&response_text)?; + let parsed_response: ApiResponse = serde_json::from_str(&response_text)?; if !parsed_response.success { Err("Error fetching challenge from node".into()) @@ -296,8 +295,8 @@ mod tests { async fn test_challenge_route() { let app = test::init_service(App::new().service(challenge_routes())).await; - let vec_a = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]; - let vec_b = vec![9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0]; + let vec_a = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]; + let vec_b = [9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0]; // convert vectors to FixedF64 let data_a: Vec = vec_a.iter().map(|x| FixedF64(*x)).collect(); @@ -306,10 +305,10 @@ mod tests { let challenge_request = ChallengeRequest { rows_a: 3, cols_a: 3, - data_a: data_a, + data_a, rows_b: 3, cols_b: 3, - data_b: data_b, + data_b, }; let req = test::TestRequest::post() From 7a473312a1a0a4a2718776ae6ddff7c86f31a545 Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Wed, 5 Feb 2025 12:31:28 +0100 Subject: [PATCH 18/85] fix remote makefile entry --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 3e855f66..db5bf99e 100644 --- a/Makefile +++ b/Makefile @@ 
-140,8 +140,8 @@ watch-miner-remote: setup-remote setup-tunnel sync-remote --private-key-node \$$NODE_PRIVATE_KEY \ --port $(PORT) \ --external-ip \$$EXTERNAL_IP \ - --compute-pool-id 0 2>&1 \ - --validator-address \$$VALIDATOR_ADDRESS | tee miner.log\"" + --compute-pool-id 0 \ + --validator-address \$$VALIDATOR_ADDRESS 2>&1 | tee miner.log\"" # Kill SSH tunnel .PHONY: kill-tunnel kill-tunnel: From 9a85c577564b3fc57dd5dfb767b81652a3abe952 Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Wed, 5 Feb 2025 12:34:47 +0100 Subject: [PATCH 19/85] misc makefile fixes --- Makefile | 2 +- miner/makefile | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index db5bf99e..cd89a59d 100644 --- a/Makefile +++ b/Makefile @@ -88,7 +88,7 @@ build-miner: run-miner-bin: set -a; source .env; set +a; \ - ./target/release/miner run --private-key-provider $$PROVIDER_PRIVATE_KEY --private-key-node $$NODE_PRIVATE_KEY --port 8091 --external-ip 0.0.0.0 --compute-pool-id 0 + ./target/release/miner run --private-key-provider $$PROVIDER_PRIVATE_KEY --private-key-node $$NODE_PRIVATE_KEY --port 8091 --external-ip 0.0.0.0 --compute-pool-id 0 --validator-address $$VALIDATOR_ADDRESS SSH_CONNECTION ?= your-ssh-conn string EXTERNAL_IP ?= 0.0.0.0 diff --git a/miner/makefile b/miner/makefile index e007a910..59ade05e 100644 --- a/miner/makefile +++ b/miner/makefile @@ -11,11 +11,11 @@ run-check: run: @echo "🔨 Building locally..." - cargo run -- run --subnet-id 1 --private-key-provider $$PRIVATE_KEY_PROVIDER --private-key-node $$NODE_KEY --port 8090 --external-ip 0.0.0.0 --compute-pool-id 0 + cargo run -- run --subnet-id 1 --private-key-provider $$PRIVATE_KEY_PROVIDER --private-key-node $$NODE_KEY --port 8090 --external-ip 0.0.0.0 --compute-pool-id 0 --validator-address $$VALIDATOR_ADDRESS watch: @echo "👀 Watching for changes..." 
- cargo watch -x "run -- run --subnet-id 1 --private-key-provider $$PRIVATE_KEY_PROVIDER --private-key-node $$NODE_KEY --port 8090 --external-ip 0.0.0.0 --compute-pool-id 0" + cargo watch -x "run -- run --subnet-id 1 --private-key-provider $$PRIVATE_KEY_PROVIDER --private-key-node $$NODE_KEY --port 8090 --external-ip 0.0.0.0 --compute-pool-id 0 --validator-address $$VALIDATOR_ADDRESS" # Setup GPU server with required dependencies gpu-setup: @@ -44,4 +44,4 @@ gpu-run: # Watch mode - automatically deploy and run on file changes gpu-watch: - cargo watch -x "make gpu-run" \ No newline at end of file + cargo watch -x "make gpu-run" From 58f97baeb2987fec12b8991e8a5ce8dd631517a9 Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Wed, 12 Feb 2025 09:48:32 +0100 Subject: [PATCH 20/85] fix clippy --- validator/src/main.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/validator/src/main.rs b/validator/src/main.rs index 4c7add1b..4e02a382 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -6,7 +6,6 @@ use clap::Parser; use log::LevelFilter; use log::{error, info}; use serde_json::json; -use nalgebra::DMatrix; use rand::rng; use rand::Rng; use shared::models::api::ApiResponse; From 9e801d312cc3f35cd768c8d0d7265b05e45d8648 Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Wed, 12 Feb 2025 09:49:30 +0100 Subject: [PATCH 21/85] fix fmt... 
--- validator/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/validator/src/main.rs b/validator/src/main.rs index 4e02a382..6a39d0b4 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -5,9 +5,9 @@ use anyhow::{Context, Result}; use clap::Parser; use log::LevelFilter; use log::{error, info}; -use serde_json::json; use rand::rng; use rand::Rng; +use serde_json::json; use shared::models::api::ApiResponse; use shared::models::challenge::calc_matrix; use shared::models::challenge::FixedF64; From 7255a74b190cb539bec482490b85cf97347ea450 Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Wed, 12 Feb 2025 12:18:09 +0100 Subject: [PATCH 22/85] make app_state unused --- miner/src/api/routes/challenge.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/miner/src/api/routes/challenge.rs b/miner/src/api/routes/challenge.rs index 7406faf7..15ad1d55 100644 --- a/miner/src/api/routes/challenge.rs +++ b/miner/src/api/routes/challenge.rs @@ -3,19 +3,14 @@ use actix_web::{ web::{self, post, Data}, HttpResponse, Scope, }; -use log::info; use shared::models::api::ApiResponse; use shared::models::challenge::calc_matrix; use shared::models::challenge::ChallengeRequest; pub async fn handle_challenge( challenge: web::Json, - app_state: Data, + _app_state: Data, ) -> HttpResponse { - info!( - "Wallet: {:?}", - app_state.node_wallet.wallet.default_signer().address() - ); let result = calc_matrix(&challenge); let response = ApiResponse::new(true, result); From 7350607a38733a9f3a4b76f79e23b132eccc9e4d Mon Sep 17 00:00:00 2001 From: Jannik Straube Date: Thu, 13 Feb 2025 03:21:10 +0200 Subject: [PATCH 23/85] minor readme adjustment --- README.md | 144 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 140 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 12bb252d..bff222a5 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,8 @@ -# Prime Miner / Validator / Master -The current setup 
is aimed to support intellect-2 with a limited number of validators and a central master that coordinates the workload on the miners. -## Clone the repository with submodules +# Protocol +Prime Protocol is a peer-to-peer compute and intelligence network that enables decentralized AI development at scale. This repository contains the core infrastructure for contributing compute resources to the network, including miners, validators, and the coordination layer. + +## Setup: +### Clone the repository with submodules ``` git clone --recurse-submodules https://github.com/prime-ai/prime-miner-validator.git ``` @@ -8,7 +10,7 @@ git clone --recurse-submodules https://github.com/prime-ai/prime-miner-validator ``` git submodule update --init --recursive ``` -## Setup: +### Installation - Foundry: `curl -L https://foundry.paradigm.xyz | bash` - do not forget `foundryup` - Docker - tmuxinator: Install via `gem install tmuxinator` - do not use brew, apparently their brew build is broken @@ -167,3 +169,137 @@ sequenceDiagram MA-->>B: Return logs end ``` +# Prime Protocol + +The current setup supports INTELLECT-2 with a limited number of validators and a central coordinator that manages workload distribution across miners. + +## Quick Start + +### Prerequisites +- [Foundry](https://book.getfoundry.sh/getting-started/installation) +- [Docker](https://docs.docker.com/get-docker/) +- [Rust](https://www.rust-lang.org/tools/install) +- [Redis](https://redis.io/docs/getting-started/) +- tmuxinator (via `gem install tmuxinator`) + +### Installation + +1. Clone the repository with submodules: +```bash +git clone --recurse-submodules https://github.com/prime-ai/prime-miner-validator.git +cd prime-miner-validator +``` + +2. Update submodules if needed: +```bash +git submodule update --init --recursive +``` + +3. 
Install dependencies: +```bash +# Install Foundry +curl -L https://foundry.paradigm.xyz | bash +foundryup + +# Install cargo-watch +cargo install cargo-watch + +# Set up environment files +cp .env.example .env +cp discovery/.env.example discovery/.env +``` + +### Docker Configuration +- Enable "Allow the default Docker socket to be used" in Docker Desktop settings +- This setting requires password authentication + +## Development Setup + +### First-time Setup +```bash +# Start core services +docker compose up + +# Build whitelist provider +make whitelist-provider +``` + +### Local Development +```bash +# Start tmux environment +make up + +# Start miner in tmux +make watch-miner + +# Stop environment +make down +``` + +### Remote Deployment +```bash +# Set required environment variables +export EXTERNAL_IP="" +export SSH_CONNECTION="ssh ubuntu@ -i " + +# Deploy miner +make remote-miner +``` + +## Development Testing + +### Starting a Development Runner + +1. Start core services and miner: +```bash +make up +make watch-miner +``` + +2. Verify miner registration: +```bash +curl -X GET http://localhost:8090/nodes +``` +Expected response: +```json +{ + "nodes": [{ + "address": "0x66295e2b4a78d1cb57db16ac0260024900a5ba9b", + "ip_address": "0.0.0.0", + "port": 8091, + "status": "Healthy", + "task_id": null, + "task_state": null + }], + "success": true +} +``` + +3. Create a test task: +```bash +curl -X POST http://localhost:8090/tasks \ + -H "Content-Type: application/json" \ + -d '{"name":"sample","image":"ubuntu:latest"}' +``` + +4. 
Verify task creation: +```bash +curl -X GET http://localhost:8090/nodes +docker ps # Check running containers +``` + +## System Architecture + +The system implements a distributed compute network with the following key components: + +- **Buyer**: Initiates compute tasks via CLI/API +- **Compute Coordinator**: Master node managing task distribution +- **Compute Provider**: Nodes providing computational resources +- **Validator**: Verifies node legitimacy and performance +- **Discovery Service**: Handles node registration and discovery + +See the sequence diagram below for detailed interaction flow. + +[System Architecture Diagram] + +Note: The current architecture is optimized for the INTELLECT-2 run and will be expanded with additional components (e.g., termination handling) in future releases. \ No newline at end of file From c3480c9f6442d0f3d286680452151a8cea770e54 Mon Sep 17 00:00:00 2001 From: Jannik Straube Date: Thu, 13 Feb 2025 03:22:21 +0200 Subject: [PATCH 24/85] remove redundant files --- README.md | 136 +----------------------------------------------------- 1 file changed, 1 insertion(+), 135 deletions(-) diff --git a/README.md b/README.md index bff222a5..ef137f52 100644 --- a/README.md +++ b/README.md @@ -168,138 +168,4 @@ sequenceDiagram B->>MA: Access Logs MA-->>B: Return logs end -``` -# Prime Protocol - -The current setup supports INTELLECT-2 with a limited number of validators and a central coordinator that manages workload distribution across miners. - -## Quick Start - -### Prerequisites -- [Foundry](https://book.getfoundry.sh/getting-started/installation) -- [Docker](https://docs.docker.com/get-docker/) -- [Rust](https://www.rust-lang.org/tools/install) -- [Redis](https://redis.io/docs/getting-started/) -- tmuxinator (via `gem install tmuxinator`) - -### Installation - -1. Clone the repository with submodules: -```bash -git clone --recurse-submodules https://github.com/prime-ai/prime-miner-validator.git -cd prime-miner-validator -``` - -2. 
Update submodules if needed: -```bash -git submodule update --init --recursive -``` - -3. Install dependencies: -```bash -# Install Foundry -curl -L https://foundry.paradigm.xyz | bash -foundryup - -# Install cargo-watch -cargo install cargo-watch - -# Set up environment files -cp .env.example .env -cp discovery/.env.example discovery/.env -``` - -### Docker Configuration -- Enable "Allow the default Docker socket to be used" in Docker Desktop settings -- This setting requires password authentication - -## Development Setup - -### First-time Setup -```bash -# Start core services -docker compose up - -# Build whitelist provider -make whitelist-provider -``` - -### Local Development -```bash -# Start tmux environment -make up - -# Start miner in tmux -make watch-miner - -# Stop environment -make down -``` - -### Remote Deployment -```bash -# Set required environment variables -export EXTERNAL_IP="" -export SSH_CONNECTION="ssh ubuntu@ -i " - -# Deploy miner -make remote-miner -``` - -## Development Testing - -### Starting a Development Runner - -1. Start core services and miner: -```bash -make up -make watch-miner -``` - -2. Verify miner registration: -```bash -curl -X GET http://localhost:8090/nodes -``` -Expected response: -```json -{ - "nodes": [{ - "address": "0x66295e2b4a78d1cb57db16ac0260024900a5ba9b", - "ip_address": "0.0.0.0", - "port": 8091, - "status": "Healthy", - "task_id": null, - "task_state": null - }], - "success": true -} -``` - -3. Create a test task: -```bash -curl -X POST http://localhost:8090/tasks \ - -H "Content-Type: application/json" \ - -d '{"name":"sample","image":"ubuntu:latest"}' -``` - -4. 
Verify task creation: -```bash -curl -X GET http://localhost:8090/nodes -docker ps # Check running containers -``` - -## System Architecture - -The system implements a distributed compute network with the following key components: - -- **Buyer**: Initiates compute tasks via CLI/API -- **Compute Coordinator**: Master node managing task distribution -- **Compute Provider**: Nodes providing computational resources -- **Validator**: Verifies node legitimacy and performance -- **Discovery Service**: Handles node registration and discovery - -See the sequence diagram below for detailed interaction flow. - -[System Architecture Diagram] - -Note: The current architecture is optimized for the INTELLECT-2 run and will be expanded with additional components (e.g., termination handling) in future releases. \ No newline at end of file +``` \ No newline at end of file From 07b440b190218d8d5856105596e26f919530b5a6 Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Thu, 13 Feb 2025 18:40:27 +0100 Subject: [PATCH 25/85] add license file (#124) --- LICENSE | 202 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 202 insertions(+) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. From 463ec6c4ef33414c0b32b260f15016880a88ed05 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Thu, 13 Feb 2025 19:41:58 +0200 Subject: [PATCH 26/85] basic adjustment of readme, add security, add contributing and docs (#123) * improve README * add docs --- .github/workflows/dev-release.yml | 24 +- .github/workflows/prod-release.yml | 26 +- .tmuxinator.yml | 2 +- CONTRIBUTING.md | 68 +++++ Cargo.lock | 96 +++---- Cargo.toml | 4 +- Makefile | 30 +- README.md | 258 +++++++++--------- SECURITY.md | 66 +++++ e2e/tasks/discovery_setup.yml | 2 +- e2e/tasks/orchestrator_setup.yml | 2 +- e2e/tasks/validator_setup.yml | 2 +- examples/python/taskbridge_basic.py | 4 +- miner/README.md | 47 ---- miner/makefile | 47 ---- orchestrator/src/main.rs | 4 +- validator/src/main.rs | 1 + {miner => worker}/.gitignore | 0 {miner => worker}/Cargo.toml | 2 +- worker/README.md | 1 + {miner => worker}/src/api/mod.rs | 0 {miner => worker}/src/api/routes/challenge.rs | 0 {miner => worker}/src/api/routes/invite.rs | 0 {miner => worker}/src/api/routes/mod.rs | 0 {miner => worker}/src/api/routes/task.rs | 0 {miner => worker}/src/api/routes/types.rs | 0 {miner => worker}/src/api/server.rs | 0 {miner => worker}/src/checks/hardware/gpu.rs | 0 .../src/checks/hardware/hardware_check.rs | 0 .../src/checks/hardware/memory.rs | 0 {miner => worker}/src/checks/hardware/mod.rs | 0 .../src/checks/hardware/storage.rs | 0 {miner => 
worker}/src/checks/mod.rs | 0 .../src/checks/software/docker.rs | 0 {miner => worker}/src/checks/software/mod.rs | 0 .../src/checks/software/software_check.rs | 0 .../src/checks/software/types.rs | 0 {miner => worker}/src/cli/command.rs | 6 +- {miner => worker}/src/cli/mod.rs | 0 {miner => worker}/src/console/logger.rs | 0 {miner => worker}/src/console/mod.rs | 0 .../src/docker/docker_manager.rs | 0 {miner => worker}/src/docker/mod.rs | 0 {miner => worker}/src/docker/service.rs | 4 +- {miner => worker}/src/docker/state.rs | 0 .../src/docker/taskbridge/bridge.rs | 4 +- .../src/docker/taskbridge/mod.rs | 0 {miner => worker}/src/main.rs | 0 {miner => worker}/src/metrics/mod.rs | 0 {miner => worker}/src/metrics/store.rs | 0 .../src/operations/compute_node.rs | 0 .../src/operations/heartbeat/mod.rs | 0 .../src/operations/heartbeat/service.rs | 0 .../src/operations/heartbeat/state.rs | 2 +- {miner => worker}/src/operations/mod.rs | 0 {miner => worker}/src/operations/provider.rs | 0 {miner => worker}/src/services/discovery.rs | 0 {miner => worker}/src/services/mod.rs | 0 58 files changed, 375 insertions(+), 327 deletions(-) create mode 100644 CONTRIBUTING.md create mode 100644 SECURITY.md delete mode 100644 miner/README.md delete mode 100644 miner/makefile rename {miner => worker}/.gitignore (100%) rename {miner => worker}/Cargo.toml (98%) create mode 100644 worker/README.md rename {miner => worker}/src/api/mod.rs (100%) rename {miner => worker}/src/api/routes/challenge.rs (100%) rename {miner => worker}/src/api/routes/invite.rs (100%) rename {miner => worker}/src/api/routes/mod.rs (100%) rename {miner => worker}/src/api/routes/task.rs (100%) rename {miner => worker}/src/api/routes/types.rs (100%) rename {miner => worker}/src/api/server.rs (100%) rename {miner => worker}/src/checks/hardware/gpu.rs (100%) rename {miner => worker}/src/checks/hardware/hardware_check.rs (100%) rename {miner => worker}/src/checks/hardware/memory.rs (100%) rename {miner => 
worker}/src/checks/hardware/mod.rs (100%) rename {miner => worker}/src/checks/hardware/storage.rs (100%) rename {miner => worker}/src/checks/mod.rs (100%) rename {miner => worker}/src/checks/software/docker.rs (100%) rename {miner => worker}/src/checks/software/mod.rs (100%) rename {miner => worker}/src/checks/software/software_check.rs (100%) rename {miner => worker}/src/checks/software/types.rs (100%) rename {miner => worker}/src/cli/command.rs (98%) rename {miner => worker}/src/cli/mod.rs (100%) rename {miner => worker}/src/console/logger.rs (100%) rename {miner => worker}/src/console/mod.rs (100%) rename {miner => worker}/src/docker/docker_manager.rs (100%) rename {miner => worker}/src/docker/mod.rs (100%) rename {miner => worker}/src/docker/service.rs (99%) rename {miner => worker}/src/docker/state.rs (100%) rename {miner => worker}/src/docker/taskbridge/bridge.rs (98%) rename {miner => worker}/src/docker/taskbridge/mod.rs (100%) rename {miner => worker}/src/main.rs (100%) rename {miner => worker}/src/metrics/mod.rs (100%) rename {miner => worker}/src/metrics/store.rs (100%) rename {miner => worker}/src/operations/compute_node.rs (100%) rename {miner => worker}/src/operations/heartbeat/mod.rs (100%) rename {miner => worker}/src/operations/heartbeat/service.rs (100%) rename {miner => worker}/src/operations/heartbeat/state.rs (99%) rename {miner => worker}/src/operations/mod.rs (100%) rename {miner => worker}/src/operations/provider.rs (100%) rename {miner => worker}/src/services/discovery.rs (100%) rename {miner => worker}/src/services/mod.rs (100%) diff --git a/.github/workflows/dev-release.yml b/.github/workflows/dev-release.yml index 616f7a30..8730c010 100644 --- a/.github/workflows/dev-release.yml +++ b/.github/workflows/dev-release.yml @@ -52,8 +52,8 @@ jobs: - name: Prepare binaries run: | mkdir -p release-artifacts - if [ -f target/release/miner ]; then - cp target/release/miner release-artifacts/miner-linux-x86_64 + if [ -f target/release/worker ]; then 
+ cp target/release/worker release-artifacts/worker-linux-x86_64 fi if [ -f target/release/validator ]; then cp target/release/validator release-artifacts/validator-linux-x86_64 @@ -96,7 +96,7 @@ jobs: - Linux x86_64 Components: - - Miner + - worker - Validator - Orchestrator - Discovery svc @@ -120,9 +120,9 @@ jobs: # Upload to Google Cloud Storage - name: Upload to GCS run: | - gsutil -m cp -r release-artifacts/* gs://prime-miner/${{ steps.tag.outputs.tag_name }}/ - gsutil -m cp -r gs://prime-miner/${{ steps.tag.outputs.tag_name }}/* gs://prime-miner/latest/ - gsutil -m setmeta -h "Cache-Control:no-cache, max-age=0" gs://prime-miner/latest/**/* + gsutil -m cp -r release-artifacts/* gs://prime-protocol/${{ steps.tag.outputs.tag_name }}/ + gsutil -m cp -r gs://prime-protocol/${{ steps.tag.outputs.tag_name }}/* gs://prime-protocol/latest/ + gsutil -m setmeta -h "Cache-Control:no-cache, max-age=0" gs://prime-protocol/latest/**/* - name: Generate Docker metadata id: meta @@ -153,8 +153,8 @@ jobs: tags: | ghcr.io/${{ steps.meta.outputs.repo_lower }}/discovery:dev ghcr.io/${{ steps.meta.outputs.repo_lower }}/discovery:${{ steps.tag.outputs.tag_name }} - us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-miner/discovery:dev - us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-miner/discovery:${{ steps.tag.outputs.tag_name }} + us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-protocol/discovery:dev + us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-protocol/discovery:${{ steps.tag.outputs.tag_name }} - name: Build and push Validator image uses: docker/build-push-action@v4 @@ -165,8 +165,8 @@ jobs: tags: | ghcr.io/${{ steps.meta.outputs.repo_lower }}/validator:dev ghcr.io/${{ steps.meta.outputs.repo_lower }}/validator:${{ steps.tag.outputs.tag_name }} - us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-miner/validator:dev - us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-miner/validator:${{ 
steps.tag.outputs.tag_name }} + us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-protocol/validator:dev + us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-protocol/validator:${{ steps.tag.outputs.tag_name }} - name: Build and push Orchestrator image uses: docker/build-push-action@v4 @@ -177,5 +177,5 @@ jobs: tags: | ghcr.io/${{ steps.meta.outputs.repo_lower }}/orchestrator:dev ghcr.io/${{ steps.meta.outputs.repo_lower }}/orchestrator:${{ steps.tag.outputs.tag_name }} - us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-miner/orchestrator:dev - us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-miner/orchestrator:${{ steps.tag.outputs.tag_name }} + us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-protocol/orchestrator:dev + us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-protocol/orchestrator:${{ steps.tag.outputs.tag_name }} diff --git a/.github/workflows/prod-release.yml b/.github/workflows/prod-release.yml index 2099373b..f298a656 100644 --- a/.github/workflows/prod-release.yml +++ b/.github/workflows/prod-release.yml @@ -63,7 +63,7 @@ jobs: if: steps.check_tag.outputs.exists == 'false' run: | mkdir -p release-artifacts - for binary in miner validator orchestrator discovery; do + for binary in worker validator orchestrator discovery; do if [ -f "target/release/$binary" ]; then cp "target/release/$binary" "release-artifacts/$binary-linux-x86_64" fi @@ -95,7 +95,7 @@ jobs: - Linux x86_64 Components: - - Miner + - worker - Validator - Orchestrator - Discovery service @@ -106,8 +106,8 @@ jobs: Download the appropriate binary for your system and verify the checksum. 
```bash - # Verify checksum (example for miner) - sha256sum -c miner-linux-x86_64.checksum + # Verify checksum (example for worker) + sha256sum -c worker-linux-x86_64.checksum ``` ## Changes @@ -131,9 +131,9 @@ jobs: - name: Upload to GCS if: steps.check_tag.outputs.exists == 'false' run: | - gsutil -m cp -r release-artifacts/* gs://prime-miner/v${{ steps.get_version.outputs.version }}/ - gsutil -m cp -r release-artifacts/* gs://prime-miner/stable/ - gsutil -m setmeta -h "Cache-Control:no-cache, max-age=0" gs://prime-miner/stable/**/* + gsutil -m cp -r release-artifacts/* gs://prime-protocol/v${{ steps.get_version.outputs.version }}/ + gsutil -m cp -r release-artifacts/* gs://prime-protocol/stable/ + gsutil -m setmeta -h "Cache-Control:no-cache, max-age=0" gs://prime-protocol/stable/**/* - name: Configure Docker for Artifact Registry if: steps.check_tag.outputs.exists == 'false' @@ -169,8 +169,8 @@ jobs: tags: | ghcr.io/${{ steps.meta.outputs.repo_lower }}/discovery:latest ghcr.io/${{ steps.meta.outputs.repo_lower }}/discovery:v${{ steps.get_version.outputs.version }} - us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-miner/discovery:latest - us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-miner/discovery:v${{ steps.get_version.outputs.version }} + us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-protocol/discovery:latest + us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-protocol/discovery:v${{ steps.get_version.outputs.version }} - name: Build and push Validator image if: steps.check_tag.outputs.exists == 'false' @@ -182,8 +182,8 @@ jobs: tags: | ghcr.io/${{ steps.meta.outputs.repo_lower }}/validator:latest ghcr.io/${{ steps.meta.outputs.repo_lower }}/validator:v${{ steps.get_version.outputs.version }} - us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-miner/validator:latest - us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-miner/validator:v${{ steps.get_version.outputs.version }} + 
us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-protocol/validator:latest + us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-protocol/validator:v${{ steps.get_version.outputs.version }} - name: Build and push Orchestrator image if: steps.check_tag.outputs.exists == 'false' @@ -195,5 +195,5 @@ jobs: tags: | ghcr.io/${{ steps.meta.outputs.repo_lower }}/orchestrator:latest ghcr.io/${{ steps.meta.outputs.repo_lower }}/orchestrator:v${{ steps.get_version.outputs.version }} - us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-miner/orchestrator:latest - us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-miner/orchestrator:v${{ steps.get_version.outputs.version }} \ No newline at end of file + us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-protocol/orchestrator:latest + us-east1-docker.pkg.dev/${{ secrets.GCP_PROJECT_ID }}/prime-protocol/orchestrator:v${{ steps.get_version.outputs.version }} \ No newline at end of file diff --git a/.tmuxinator.yml b/.tmuxinator.yml index cd927412..25ea8143 100644 --- a/.tmuxinator.yml +++ b/.tmuxinator.yml @@ -4,7 +4,7 @@ windows: - services: layout: even-horizontal panes: - - bash -c 'tmux select-pane -T "Miner" && sleep 5 && cd smart-contracts && sh deploy.sh && cd .. && make setup && clear' + - bash -c 'tmux select-pane -T "Worker" && sleep 5 && cd smart-contracts && sh deploy.sh && cd .. && make setup && clear' - bash -c 'tmux select-pane -T "Discovery" && sleep 10 && make watch-discovery' - bash -c 'tmux select-pane -T "Validator" && sleep 15 && make watch-validator' - bash -c 'tmux select-pane -T "Orchestrator" && sleep 20 && make watch-orchestrator' diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..7982f8fc --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,68 @@ +# Contributing Guidelines + +We love your input! 
We want to make contributing to this project as easy and transparent as possible, whether it's: + +- Reporting a bug +- Discussing the current state of the code +- Submitting a fix +- Proposing new features +- Becoming a maintainer + +## We Develop with Github +We use GitHub to host code, to track issues and feature requests, as well as accept pull requests. + +## Branch Strategy +We use a two-branch strategy for development: + +1. `develop` - This is our main development branch where all feature branches are merged for nightly builds and testing +2. `main` - This is our stable production branch that contains reviewed and tested code + +### Development Process + +1. Create a new feature branch from `develop` +2. Make your changes and commit them +3. Submit a pull request to merge into `develop` +4. After review and testing in `develop`, changes will be merged into `main` for production releases + +## Pull Request Process + +1. Fork the repo and create your feature branch from `develop` +2. If you've added code that should be tested, add tests +3. If you've changed APIs, update the documentation +4. Ensure the test suite passes +5. Make sure your code lints +6. Submit a pull request to merge into `develop` + +## Report bugs using Github's issue tracker +We use GitHub issues to track public bugs. Report a bug by opening a new issue; it's that easy! + +## Write bug reports with detail, background, and sample code + +**Great Bug Reports** tend to have: + +- A quick summary and/or background +- Steps to reproduce + - Be specific! + - Give sample code if you can. +- What you expected would happen +- What actually happens +- Notes (possibly including why you think this might be happening, or stuff you tried that didn't work) + +## Development Process + +1. Create a new branch from `develop` for your work +2. Make your changes +3. Write or update tests as needed +4. Update documentation as needed +5. Submit a pull request to `develop` +6. 
Address any review feedback + +### Commit Messages + +- Use the present tense ("Add feature" not "Added feature") +- Use the imperative mood ("Move cursor to..." not "Moves cursor to...") +- Limit the first line to 72 characters or less +- Reference issues and pull requests liberally after the first line + +## References +This document was adapted from the open-source contribution guidelines for [Facebook's Draft](https://github.com/facebook/draft-js/blob/a9316a723f9e918afde44dea68b5f9f39b7d9b00/CONTRIBUTING.md). diff --git a/Cargo.lock b/Cargo.lock index b73a7e57..4fe7113f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2226,7 +2226,7 @@ dependencies = [ [[package]] name = "discovery" -version = "0.1.6" +version = "0.1.7" dependencies = [ "actix-web", "alloy", @@ -3631,51 +3631,6 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" -[[package]] -name = "miner" -version = "0.1.6" -dependencies = [ - "actix-web", - "alloy", - "anyhow", - "bollard", - "bytes", - "cargo-watch", - "chrono", - "clap 4.5.27", - "colored", - "console", - "ctrlc", - "directories", - "env_logger 0.10.2", - "futures", - "futures-core", - "futures-util", - "hex", - "indicatif", - "lazy_static", - "libc", - "log", - "nalgebra", - "nvml-wrapper", - "regex", - "reqwest", - "serde", - "serde_json", - "serial_test", - "shared", - "strip-ansi-escapes", - "sysinfo", - "tempfile", - "thiserror 2.0.11", - "tokio", - "tokio-util", - "toml", - "url", - "uuid", - "validator 0.16.1", -] - [[package]] name = "miniz_oxide" version = "0.8.2" @@ -4105,7 +4060,7 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "orchestrator" -version = "0.1.6" +version = "0.1.7" dependencies = [ "actix-web", "alloy", @@ -6013,7 +5968,7 @@ dependencies = [ [[package]] name = "validator" -version = "0.1.6" +version = "0.1.7" dependencies = [ "actix-web", "alloy", @@ 
-6624,6 +6579,51 @@ dependencies = [ "bitflags 2.6.0", ] +[[package]] +name = "worker" +version = "0.1.7" +dependencies = [ + "actix-web", + "alloy", + "anyhow", + "bollard", + "bytes", + "cargo-watch", + "chrono", + "clap 4.5.27", + "colored", + "console", + "ctrlc", + "directories", + "env_logger 0.10.2", + "futures", + "futures-core", + "futures-util", + "hex", + "indicatif", + "lazy_static", + "libc", + "log", + "nalgebra", + "nvml-wrapper", + "regex", + "reqwest", + "serde", + "serde_json", + "serial_test", + "shared", + "strip-ansi-escapes", + "sysinfo", + "tempfile", + "thiserror 2.0.11", + "tokio", + "tokio-util", + "toml", + "url", + "uuid", + "validator 0.16.1", +] + [[package]] name = "wrapcenum-derive" version = "0.4.1" diff --git a/Cargo.toml b/Cargo.toml index 96e55269..e47f9598 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,9 +1,9 @@ [workspace] -members = ["discovery", "miner", "validator", "shared", "orchestrator", "dev-utils"] +members = ["discovery", "worker", "validator", "shared", "orchestrator", "dev-utils"] resolver = "2" [workspace.package] -version = "0.1.6" +version = "0.1.7" edition = "2021" [workspace.features] diff --git a/Makefile b/Makefile index cd89a59d..704dc22d 100644 --- a/Makefile +++ b/Makefile @@ -58,7 +58,7 @@ up: down: docker-compose down tmuxinator stop prime-dev - pkill -f "target/debug/miner" 2>/dev/null || true + pkill -f "target/debug/worker" 2>/dev/null || true pkill -f "target/debug/orchestrator" 2>/dev/null || true pkill -f "target/debug/validator" 2>/dev/null || true pkill -f "target/debug/discovery" 2>/dev/null || true @@ -71,9 +71,9 @@ watch-discovery: set -a; source .env; set +a; \ cargo watch -w discovery/src -x "run --bin discovery -- --validator-address $${VALIDATOR_ADDRESS} --rpc-url $${RPC_URL}" -watch-miner: +watch-worker: set -a; source ${ENV_FILE}; set +a; \ - cargo watch -w miner/src -x "run --bin miner -- run --private-key-provider $$PROVIDER_PRIVATE_KEY --private-key-node $$NODE_PRIVATE_KEY --port 8091 
--external-ip 0.0.0.0 --compute-pool-id 0 --validator-address $$VALIDATOR_ADDRESS" + cargo watch -w worker/src -x "run --bin worker -- run --private-key-provider $$PROVIDER_PRIVATE_KEY --private-key-node $$NODE_PRIVATE_KEY --port 8091 --external-ip 0.0.0.0 --compute-pool-id 0 --validator-address $$VALIDATOR_ADDRESS" watch-validator: set -a; source ${ENV_FILE}; set +a; \ @@ -83,12 +83,12 @@ watch-orchestrator: set -a; source ${ENV_FILE}; set +a; \ cargo watch -w orchestrator/src -x "run --bin orchestrator -- -r $$RPC_URL -k $$POOL_OWNER_PRIVATE_KEY -d 0 -p 8090 -i 10 -u http://localhost:8090" -build-miner: - cargo build --release --bin miner +build-worker: + cargo build --release --bin worker -run-miner-bin: +run-worker-bin: set -a; source .env; set +a; \ - ./target/release/miner run --private-key-provider $$PROVIDER_PRIVATE_KEY --private-key-node $$NODE_PRIVATE_KEY --port 8091 --external-ip 0.0.0.0 --compute-pool-id 0 --validator-address $$VALIDATOR_ADDRESS + ./target/release/worker run --private-key-provider $$PROVIDER_PRIVATE_KEY --private-key-node $$NODE_PRIVATE_KEY --port 8091 --external-ip 0.0.0.0 --compute-pool-id 0 --validator-address $$VALIDATOR_ADDRESS SSH_CONNECTION ?= your-ssh-conn string EXTERNAL_IP ?= 0.0.0.0 @@ -127,21 +127,21 @@ sync-remote: --exclude 'node_modules/' \ . :~/$(notdir $(CURDIR)) -# Run miner on remote GPU -.PHONY: watch-miner-remote -watch-miner-remote: setup-remote setup-tunnel sync-remote +# Run worker on remote GPU +.PHONY: watch-worker-remote +watch-worker-remote: setup-remote setup-tunnel sync-remote $(SSH_CONNECTION) -t "cd ~/$(notdir $(CURDIR)) && \ export PATH=\"\$$HOME/.cargo/bin:\$$PATH\" && \ . 
\"\$$HOME/.cargo/env\" && \ set -a && source .env && set +a && \ export EXTERNAL_IP=$(EXTERNAL_IP) && \ - RUST_BACKTRACE=1 RUST_LOG=debug cargo watch -w miner/src -x \"run --bin miner -- run \ + RUST_BACKTRACE=1 RUST_LOG=debug cargo watch -w worker/src -x \"run --bin worker -- run \ --private-key-provider \$$PROVIDER_PRIVATE_KEY \ --private-key-node \$$NODE_PRIVATE_KEY \ --port $(PORT) \ --external-ip \$$EXTERNAL_IP \ --compute-pool-id 0 \ - --validator-address \$$VALIDATOR_ADDRESS 2>&1 | tee miner.log\"" + --validator-address \$$VALIDATOR_ADDRESS 2>&1 | tee worker.log\"" # Kill SSH tunnel .PHONY: kill-tunnel kill-tunnel: @@ -149,7 +149,7 @@ kill-tunnel: $(SSH_CONNECTION) "pkill -f \"sshd.*:8091\"" || true # Full remote execution with cleanup -.PHONY: remote-miner -remote-miner: +.PHONY: remote-worker +remote-worker: @trap 'make kill-tunnel' EXIT; \ - make watch-miner-remote + make watch-worker-remote diff --git a/README.md b/README.md index ef137f52..0bab741e 100644 --- a/README.md +++ b/README.md @@ -1,52 +1,122 @@ -# Protocol -Prime Protocol is a peer-to-peer compute and intelligence network that enables decentralized AI development at scale. This repository contains the core infrastructure for contributing compute resources to the network, including miners, validators, and the coordination layer. +# Protocol -## Setup: -### Clone the repository with submodules -``` -git clone --recurse-submodules https://github.com/prime-ai/prime-miner-validator.git -``` -- Update submodules: -``` +
+ + + + + +

Decentralized Compute Infrastructure for AI

+
+ +Prime Network is a peer-to-peer compute and intelligence network that enables decentralized AI development at scale. This repository contains the core infrastructure for contributing compute resources to the network, including workers, validators, and the coordination layer. + +## 📚 Table of Contents +- [System Architecture](#system-architecture) +- [Getting Started](#getting-started) +- [Installation](#installation) +- [Usage](#usage) +- [Development](#development) +- [Community](#community) +- [Contributing](#contributing) +- [Security](#security) +- [License](#license) + +## System Architecture + +The system follows a modular architecture with the following key components: + +### Component Overview + +- **Smart Contracts**: Ethereum-based contracts manage the protocol's economic layer +- **Discovery Service**: Enables secure peer discovery and metadata sharing +- **Orchestrator**: Coordinates compute jobs across worker nodes +- **Validator Network**: Ensures quality through random challenges +- **Worker Nodes**: Execute AI workloads in secure containers + +## Getting Started + +### Prerequisites + +Before running Prime Protocol, ensure you have the following requirements: + +#### Hardware +- Linux or macOS operating system +- CUDA-capable GPU(s) for mining operations + +#### Software +- [Docker Desktop](https://www.docker.com/products/docker-desktop/) - Container runtime +- [Git](https://git-scm.com/) - Version control +- [Rust](https://www.rust-lang.org/) - Programming language and toolchain +- [Redis](https://redis.io/) - In-memory data store +- [Foundry](https://book.getfoundry.sh/) - Smart contract development toolkit +- [tmuxinator](https://github.com/tmuxinator/tmuxinator) - Terminal session manager + +## Installation + +### 1. 
Clone Repository +```bash +git clone --recurse-submodules https://github.com/prime-ai/protocol.git +cd protocol git submodule update --init --recursive ``` -### Installation -- Foundry: `curl -L https://foundry.paradigm.xyz | bash` - do not forget `foundryup` -- Docker -- tmuxinator: Install via `gem install tmuxinator` - do not use brew, apparently their brew build is broken -- Rust: Install via `curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh` -- Install cargo watch: `cargo install cargo-watch` -- Install redis-server: `brew install redis`(mac) or `sudo apt-get install redis-server`(ubuntu) -- Adjust docker desktop setting: `Allow the default Docker socket to be used (requires password)` must be enabled -- .env in base folder and .env in discovery folder (will be replaced shortly) - -## Run locally: -### On very first run we have to pre-build some components: -- `docker compose up` - to pull anvil, redis and start discovery service -- `make whitelist-provider` - to build the whitelist provider function which runs in the background - -Once both commands complete you can terminate and proceed to the official commands: -### Commands: -``` -# Start tmux env -make up -# Start miner in tmux env -make watch-miner +### 2. Install Dependencies +```bash +# Install Foundry +curl -L https://foundry.paradigm.xyz | bash +foundryup -# Stop tmux env -make down +# Install Rust +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh + +# Install cargo-watch +cargo install cargo-watch + +# Install Redis (MacOS) +brew install redis + +# Install Redis (Ubuntu) +# sudo apt-get install redis-server + +# Install tmuxinator (do not use brew) +gem install tmuxinator ``` -## Run on remote machine: -Run initial setup once: -Run miner: + +### 3. 
Configure Environment +- Enable "Allow the default Docker socket to be used" in Docker Desktop settings +- Create `.env` files in base folder and discovery folder + +## Development + +### Starting the Development Environment + +To start all core services: +```bash +make up ``` -export EXTERNAL_IP=machine ip -export SSH_CONNECTION="ssh ubuntu@ip -i private_key.pem" -make remote-miner + +This will launch: +- Local blockchain node +- Discovery service +- Validator node +- Orchestrator service +- Redis instance +- Supporting infrastructure + +### Running a Worker Node + +Once the core services are running, you can start a worker node in a new terminal: +```bash +make watch-worker ``` -## Start a runner for dev +The worker will automatically connect to the discovery service and begin processing tasks. +It takes a couple of seconds until the worker is whitelisted. This is done using a simple loop on the second page of tmux. + +You can find more details on the APIs in the orchestrator and discovery service directory. + +### Deploying a task First, you need to create a local miner (after you have all other services running using e.g. 
`make up`) @@ -54,10 +124,10 @@ First, you need to create a local miner (after you have all other services runni make watch-miner ``` -check that the miner as been registered +check that the miner as been registered on the orchestrator: ```bash -curl -X GET http://localhost:8090/nodes +curl -X GET http://localhost:8090/nodes -H "Authorization: Bearer admin" >>> {"nodes":[{"address":"0x66295e2b4a78d1cb57db16ac0260024900a5ba9b","ip_address":"0.0.0.0","port":8091,"status":"Healthy","task_id":null,"task_state":null}],"success":true} ``` @@ -65,14 +135,14 @@ curl -X GET http://localhost:8090/nodes then lets create a task ```bash -curl -X POST http://localhost:8090/tasks -H "Content-Type: application/json" -d '{"name":"sample","image":"ubuntu:latest"}' +curl -X POST http://localhost:8090/tasks -H "Content-Type: application/json" -H "Authorization: Bearer admin" -d '{"name":"sample","image":"ubuntu:latest"}' >>> {"success":true,"task":"updated_task"}% ``` and check that the task is created ```bash -curl -X GET http://localhost:8090/nodes +curl -X GET http://localhost:8090/nodes -H "Authorization: Bearer admin" >>> {"nodes":[{"address":"0x66295e2b4a78d1cb57db16ac0260024900a5ba9b","ip_address":"0.0.0.0","port":8091,"status":"Healthy","task_id":"29edd356-5c48-4ba6-ab96-73d002daddff","task_state":"RUNNING"}],"success":true}% ``` @@ -87,85 +157,21 @@ ef02d23b5c74 redis:alpine "docker-entrypoint.s…" 27 ``` -## System architecture (WIP) -The following system architecture still misses crucial components (e.g. terminations) and is simplified for the MVP / intellect-2 run. - -```mermaid -sequenceDiagram - participant B as Buyer
(via CLI / API) - participant MA as Compute Coordinator
(Master Node) - participant M as Compute Provider - participant V as Validator - participant A as Arbitrum - participant D as Discovery Service - - rect rgb(0, 0, 0) - Note over B,A: 0. PREPARATION PHASE - B->>MA: Setup Master Node(s) - B->>A: Create training run with node requirements
and discovery service URI - end - - rect rgb(0, 0, 100) - Note over M,A: 1. REGISTRATION PHASE - Note over M: Compute Provider registers with capabilities (GPU / CPU / RAM) - M->>A: Register for specific training run - M->>D: Register Node IP with discovery service - activate D - D->>A: Check if node is registered - D-->>M: Confirm registration - deactivate D - end - - rect rgb(0, 100, 0) - Note over V,M: 2. VALIDATION PHASE - A -->> V: Listen for new miner registrations - opt New Miner is registered - V ->> D: Request Miner IP - D -->> V: Return Miner IP - V-->>M: Send challenge - activate M - M->>V: Send Solution - deactivate M - V->>A: Report Challenge Status
(approve / reject) - end - M->>A: Monitor chain for acceptance status - A-->>M: Return acceptance status - end - - rect rgb(100, 0, 0) - Note over MA,M: 3. ONBOARDING PHASE - A -->> MA: Listen for new miner registrations - opt New Miner is registered - MA ->> D: Request Miner IP with signature - activate D - D ->> A: Check if MA owns training run - D -->> MA: Return Miner IP - deactivate D - MA->>M: Send signed invite
(signed with Master Node's private key) - M->>A: Verify Master Node's signature - A-->>M: Confirm Master Node status - M-->>MA: Accept invite with signed acknowledgment - end - loop Continuous heartbeat - M->>MA: Send heartbeat with status - Note over M,MA: Heartbeat every 30s - end - end - - rect rgb(150, 100, 0) - Note over B,M: 4. EXECUTION PHASE - B->>MA: Create Task - MA->>M: Send Task with parameters - loop Execute Container - M->>M: Execute Container - activate M - MA->>M: Check Status / Logs - M-->>MA: Return status/logs - MA->>M: Sync persistent storage - M-->>MA: Sync acknowledgment - deactivate M - end - B->>MA: Access Logs - MA-->>B: Return logs - end -``` \ No newline at end of file +### Stopping Services + +To gracefully shutdown all services: +```bash +make down +``` + +## Community + +- [Discord](https://discord.gg/primeintellect) +- [X](https://x.com/PrimeIntellect) +- [Blog](https://www.primeintellect.ai/blog) + +## Contributing +We welcome contributions! Please see our [Contributing Guidelines](CONTRIBUTING.md). + +## Security +See [SECURITY.md](SECURITY.md) for security policies and reporting vulnerabilities. diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 00000000..4963a1d7 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,66 @@ +# Security Policy + +## Reporting a Vulnerability + +We take security vulnerabilities very seriously and appreciate efforts to responsibly disclose findings. + +Please report security vulnerabilities by emailing contact@primeintellect.ai. + +**Please do not report security vulnerabilities through public GitHub issues, discussions, or pull requests.** + +Instead, please send reports privately via email. This helps ensure that any vulnerabilities are handled securely and fixed before they become public knowledge. 
+ +Please include the following information in your report: + +- Description of the vulnerability +- Steps to reproduce the issue +- Potential impact of the vulnerability +- Any suggested fixes or mitigations (if applicable) + +## Response Timeline + +We aim to respond to security reports within 1 business day and will keep you informed throughout the process of fixing and disclosing the vulnerability. + +The general process is: + +1. Security report received and acknowledged +2. Issue is investigated and severity assessed +3. Fix is developed and tested +4. Security patches are deployed +5. Public disclosure (if appropriate) + +## Scope + +The following are in scope for security reports: + +- Prime Protocol smart contracts +- Worker node software +- Validator node software +- Discovery service +- Orchestrator service +- Protocol APIs and interfaces + +## Bug Bounty Program + +There is currently no formal bug bounty program, but significant vulnerabilities that are responsibly disclosed may be rewarded at our discretion. + +## Security Best Practices + +When running Prime Protocol nodes: + +- Keep all software components up to date +- Use strong passwords and key management practices +- Follow security hardening guides for your operating system +- Monitor system logs for suspicious activity +- Maintain regular backups +- Use firewalls and access controls +- Keep private keys secure and encrypted + +## Public Keys + +PGP public keys for encrypted communications will be published here once available. + +## Security Advisories + +Security advisories and updates will be published in our GitHub Security Advisories section when public disclosure is appropriate. 
+ diff --git a/e2e/tasks/discovery_setup.yml b/e2e/tasks/discovery_setup.yml index ef2344c8..76f89b4d 100644 --- a/e2e/tasks/discovery_setup.yml +++ b/e2e/tasks/discovery_setup.yml @@ -14,7 +14,7 @@ group: "{{ ansible_user }}" - name: Download discovery binary get_url: - url: "https://storage.googleapis.com/prime-miner/{{ version }}/discovery-linux-x86_64" + url: "https://storage.googleapis.com/prime-protocol/{{ version }}/discovery-linux-x86_64" dest: "/opt/prime/discovery/discovery" mode: '0755' owner: "{{ ansible_user }}" diff --git a/e2e/tasks/orchestrator_setup.yml b/e2e/tasks/orchestrator_setup.yml index 36ba0354..4677ba60 100644 --- a/e2e/tasks/orchestrator_setup.yml +++ b/e2e/tasks/orchestrator_setup.yml @@ -8,7 +8,7 @@ group: "{{ ansible_user }}" - name: Download latest orchestrator binary get_url: - url: "https://storage.googleapis.com/prime-miner/{{ version }}/orchestrator-linux-x86_64" + url: "https://storage.googleapis.com/prime-protocol/{{ version }}/orchestrator-linux-x86_64" dest: "/opt/prime/orchestrator/orchestrator" mode: '0755' owner: "{{ ansible_user }}" diff --git a/e2e/tasks/validator_setup.yml b/e2e/tasks/validator_setup.yml index 6d69428a..9713a4c1 100644 --- a/e2e/tasks/validator_setup.yml +++ b/e2e/tasks/validator_setup.yml @@ -8,7 +8,7 @@ group: "{{ ansible_user }}" - name: Download latest validator binary get_url: - url: "https://storage.googleapis.com/prime-miner/{{ version }}/validator-linux-x86_64" + url: "https://storage.googleapis.com/prime-protocol/{{ version }}/validator-linux-x86_64" dest: "/opt/prime/validator/validator" mode: '0755' owner: "{{ ansible_user }}" diff --git a/examples/python/taskbridge_basic.py b/examples/python/taskbridge_basic.py index a81f5410..2463a522 100644 --- a/examples/python/taskbridge_basic.py +++ b/examples/python/taskbridge_basic.py @@ -6,7 +6,7 @@ def get_default_socket_path() -> str: """Returns the default socket path based on the operating system.""" - return "/tmp/com.prime.miner/metrics.sock" if 
platform.system() == "Darwin" else "/var/run/com.prime.miner/metrics.sock" + return "/tmp/com.prime.worker/metrics.sock" if platform.system() == "Darwin" else "/var/run/com.prime.worker/metrics.sock" def send_message(metric: dict, socket_path: str = None) -> bool: """Sends a message to the specified socket path or uses the default if none is provided.""" @@ -26,7 +26,7 @@ def send_message(metric: dict, socket_path: str = None) -> bool: if __name__ == "__main__": """ You can get the task_id directly from the docker env. - The miner reports the metrics using the heartbeat api but only for the currently running task. + The worker reports the metrics using the heartbeat api but only for the currently running task. """ task_id = "0725637c-ad20-4c30-b4e2-90cdf63b9974" for i in range(5): diff --git a/miner/README.md b/miner/README.md deleted file mode 100644 index 6e765b02..00000000 --- a/miner/README.md +++ /dev/null @@ -1,47 +0,0 @@ - - -## Miner Node - -### Quick Start -``` bash -# Run the miner -cargo run -- run \ - --subnet-id \ - --wallet-address 0x... \ # Your Ethereum wallet address (42 characters starting with 0x) - --private-key ./keys/eth-private-key.json \ # Path to your Ethereum keystore file - --port 8080 \ - --external-ip \ # Your node's public IP address -``` - -### Run Hardware check -``` -# Run all checks -cargo run -- check - -# Hardware checks only -cargo run -- check --hardware-only - -# Software checks only -cargo run -- check --software-only - -``` -### Develop on GPU Server - -The miner can be deployed and run on a remote GPU server using the provided Makefile commands. - -1. Set up SSH connection details: -``` -export SSH_CONN="root@78.130.201.2 -p 10100 -i private_key.pem" -``` - -2. Install required dependencies on the GPU server: -``` -make gpu-setup -``` - -3. Deploy and run the miner: -``` -make gpu-run -``` - -You can also use `make gpu-watch` to automatically redeploy and run when files change. 
diff --git a/miner/makefile b/miner/makefile deleted file mode 100644 index 59ade05e..00000000 --- a/miner/makefile +++ /dev/null @@ -1,47 +0,0 @@ -SSH_CONN ?= - -.PHONY: gpu-setup gpu-run gpu-watch run - -# Run locally -run-check: - @echo "🔨 Building locally..." - cargo build --release - @echo "▶️ Running locally..." - ./target/release/miner check - -run: - @echo "🔨 Building locally..." - cargo run -- run --subnet-id 1 --private-key-provider $$PRIVATE_KEY_PROVIDER --private-key-node $$NODE_KEY --port 8090 --external-ip 0.0.0.0 --compute-pool-id 0 --validator-address $$VALIDATOR_ADDRESS - -watch: - @echo "👀 Watching for changes..." - cargo watch -x "run -- run --subnet-id 1 --private-key-provider $$PRIVATE_KEY_PROVIDER --private-key-node $$NODE_KEY --port 8090 --external-ip 0.0.0.0 --compute-pool-id 0 --validator-address $$VALIDATOR_ADDRESS" - -# Setup GPU server with required dependencies -gpu-setup: - @if [ -z "$(SSH_CONN)" ]; then \ - echo "Error: SSH_CONN not set. Use: make gpu-setup SSH_CONN='user@ip -p port -i key.pem'"; \ - exit 1; \ - fi - @echo "🔧 Setting up GPU server..." - ssh $(SSH_CONN) 'command -v sudo >/dev/null 2>&1 || (apt-get update && apt-get install -y sudo)' - ssh $(SSH_CONN) 'sudo apt-get update && sudo apt-get install -y rsync' - ssh $(SSH_CONN) 'which cargo || curl -sSf https://sh.rustup.rs | sh -s -- -y' - ssh $(SSH_CONN) 'mkdir -p ~/miner' - -# Run on GPU server and stream output locally -gpu-run: - @if [ -z "$(SSH_CONN)" ]; then \ - echo "Error: SSH_CONN not set. Use: make gpu-run SSH_CONN='user@ip -p port -i key.pem'"; \ - exit 1; \ - fi - @echo "🚀 Deploying to GPU server..." - rsync -avz --exclude 'target/' --exclude '.git/' -e "ssh $(SSH_CONN)" ./ $(SSH_CONN | cut -d@ -f2 | cut -d' ' -f1):~/miner/ - @echo "🔨 Building on GPU server..." - ssh $(SSH_CONN) 'cd ~/miner && . ~/.cargo/env && LD_LIBRARY_PATH=/usr/lib cargo build --release' - @echo "▶️ Running on GPU..." 
- ssh -t $(SSH_CONN) 'cd ~/miner && LD_LIBRARY_PATH=/usr/lib ./target/release/miner $(MINER_CMD)' - -# Watch mode - automatically deploy and run on file changes -gpu-watch: - cargo watch -x "make gpu-run" diff --git a/orchestrator/src/main.rs b/orchestrator/src/main.rs index 67393f9e..65e8f348 100644 --- a/orchestrator/src/main.rs +++ b/orchestrator/src/main.rs @@ -37,7 +37,7 @@ struct Args { #[arg(short = 'd', long, default_value = "0")] domain_id: u32, - /// External ip - advertised to miners + /// External ip - advertised to workers #[arg(short = 'e', long)] host: Option, @@ -45,7 +45,7 @@ struct Args { #[arg(short = 'p', long, default_value = "8090")] port: u16, - /// External url - advertised to miners + /// External url - advertised to workers #[arg(short = 'u', long)] url: Option, diff --git a/validator/src/main.rs b/validator/src/main.rs index 6a39d0b4..012122c7 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -254,6 +254,7 @@ pub async fn challenge_node( .await?; let response_text = response.text().await?; + println!("Response text: {}", response_text); let parsed_response: ApiResponse = serde_json::from_str(&response_text)?; if !parsed_response.success { diff --git a/miner/.gitignore b/worker/.gitignore similarity index 100% rename from miner/.gitignore rename to worker/.gitignore diff --git a/miner/Cargo.toml b/worker/Cargo.toml similarity index 98% rename from miner/Cargo.toml rename to worker/Cargo.toml index fab3f33d..b2d36a17 100644 --- a/miner/Cargo.toml +++ b/worker/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "miner" +name = "worker" version.workspace = true edition.workspace = true diff --git a/worker/README.md b/worker/README.md new file mode 100644 index 00000000..d01bfba5 --- /dev/null +++ b/worker/README.md @@ -0,0 +1 @@ +# Prime Worker \ No newline at end of file diff --git a/miner/src/api/mod.rs b/worker/src/api/mod.rs similarity index 100% rename from miner/src/api/mod.rs rename to worker/src/api/mod.rs diff --git 
a/miner/src/api/routes/challenge.rs b/worker/src/api/routes/challenge.rs similarity index 100% rename from miner/src/api/routes/challenge.rs rename to worker/src/api/routes/challenge.rs diff --git a/miner/src/api/routes/invite.rs b/worker/src/api/routes/invite.rs similarity index 100% rename from miner/src/api/routes/invite.rs rename to worker/src/api/routes/invite.rs diff --git a/miner/src/api/routes/mod.rs b/worker/src/api/routes/mod.rs similarity index 100% rename from miner/src/api/routes/mod.rs rename to worker/src/api/routes/mod.rs diff --git a/miner/src/api/routes/task.rs b/worker/src/api/routes/task.rs similarity index 100% rename from miner/src/api/routes/task.rs rename to worker/src/api/routes/task.rs diff --git a/miner/src/api/routes/types.rs b/worker/src/api/routes/types.rs similarity index 100% rename from miner/src/api/routes/types.rs rename to worker/src/api/routes/types.rs diff --git a/miner/src/api/server.rs b/worker/src/api/server.rs similarity index 100% rename from miner/src/api/server.rs rename to worker/src/api/server.rs diff --git a/miner/src/checks/hardware/gpu.rs b/worker/src/checks/hardware/gpu.rs similarity index 100% rename from miner/src/checks/hardware/gpu.rs rename to worker/src/checks/hardware/gpu.rs diff --git a/miner/src/checks/hardware/hardware_check.rs b/worker/src/checks/hardware/hardware_check.rs similarity index 100% rename from miner/src/checks/hardware/hardware_check.rs rename to worker/src/checks/hardware/hardware_check.rs diff --git a/miner/src/checks/hardware/memory.rs b/worker/src/checks/hardware/memory.rs similarity index 100% rename from miner/src/checks/hardware/memory.rs rename to worker/src/checks/hardware/memory.rs diff --git a/miner/src/checks/hardware/mod.rs b/worker/src/checks/hardware/mod.rs similarity index 100% rename from miner/src/checks/hardware/mod.rs rename to worker/src/checks/hardware/mod.rs diff --git a/miner/src/checks/hardware/storage.rs b/worker/src/checks/hardware/storage.rs similarity index 100% 
rename from miner/src/checks/hardware/storage.rs rename to worker/src/checks/hardware/storage.rs diff --git a/miner/src/checks/mod.rs b/worker/src/checks/mod.rs similarity index 100% rename from miner/src/checks/mod.rs rename to worker/src/checks/mod.rs diff --git a/miner/src/checks/software/docker.rs b/worker/src/checks/software/docker.rs similarity index 100% rename from miner/src/checks/software/docker.rs rename to worker/src/checks/software/docker.rs diff --git a/miner/src/checks/software/mod.rs b/worker/src/checks/software/mod.rs similarity index 100% rename from miner/src/checks/software/mod.rs rename to worker/src/checks/software/mod.rs diff --git a/miner/src/checks/software/software_check.rs b/worker/src/checks/software/software_check.rs similarity index 100% rename from miner/src/checks/software/software_check.rs rename to worker/src/checks/software/software_check.rs diff --git a/miner/src/checks/software/types.rs b/worker/src/checks/software/types.rs similarity index 100% rename from miner/src/checks/software/types.rs rename to worker/src/checks/software/types.rs diff --git a/miner/src/cli/command.rs b/worker/src/cli/command.rs similarity index 98% rename from miner/src/cli/command.rs rename to worker/src/cli/command.rs index d2abbcc8..7cd5480a 100644 --- a/miner/src/cli/command.rs +++ b/worker/src/cli/command.rs @@ -45,11 +45,11 @@ pub enum Commands { #[arg(long, default_value = "http://localhost:8545")] rpc_url: String, - /// Port number for the miner to listen on + /// Port number for the worker to listen on #[arg(long, default_value = "8080")] port: u16, - /// External IP address for the miner to advertise + /// External IP address for the worker to advertise #[arg(long)] external_ip: String, @@ -57,7 +57,7 @@ pub enum Commands { #[arg(long)] compute_pool_id: u64, - /// Dry run the command without starting the miner + /// Dry run the command without starting the worker #[arg(long, default_value = "false")] dry_run: bool, diff --git 
a/miner/src/cli/mod.rs b/worker/src/cli/mod.rs similarity index 100% rename from miner/src/cli/mod.rs rename to worker/src/cli/mod.rs diff --git a/miner/src/console/logger.rs b/worker/src/console/logger.rs similarity index 100% rename from miner/src/console/logger.rs rename to worker/src/console/logger.rs diff --git a/miner/src/console/mod.rs b/worker/src/console/mod.rs similarity index 100% rename from miner/src/console/mod.rs rename to worker/src/console/mod.rs diff --git a/miner/src/docker/docker_manager.rs b/worker/src/docker/docker_manager.rs similarity index 100% rename from miner/src/docker/docker_manager.rs rename to worker/src/docker/docker_manager.rs diff --git a/miner/src/docker/mod.rs b/worker/src/docker/mod.rs similarity index 100% rename from miner/src/docker/mod.rs rename to worker/src/docker/mod.rs diff --git a/miner/src/docker/service.rs b/worker/src/docker/service.rs similarity index 99% rename from miner/src/docker/service.rs rename to worker/src/docker/service.rs index d1797f21..88ad27c5 100644 --- a/miner/src/docker/service.rs +++ b/worker/src/docker/service.rs @@ -287,7 +287,7 @@ mod tests { cancellation_token.clone(), false, Some(1024), - "/tmp/com.prime.miner/metrics.sock".to_string(), + "/tmp/com.prime.worker/metrics.sock".to_string(), None, ); let task = Task { @@ -331,7 +331,7 @@ mod tests { cancellation_token.clone(), false, Some(1024), - "/tmp/com.prime.miner/metrics.sock".to_string(), + "/tmp/com.prime.worker/metrics.sock".to_string(), None, ); let state = docker_service.state.clone(); diff --git a/miner/src/docker/state.rs b/worker/src/docker/state.rs similarity index 100% rename from miner/src/docker/state.rs rename to worker/src/docker/state.rs diff --git a/miner/src/docker/taskbridge/bridge.rs b/worker/src/docker/taskbridge/bridge.rs similarity index 98% rename from miner/src/docker/taskbridge/bridge.rs rename to worker/src/docker/taskbridge/bridge.rs index 782e03e9..44883ea0 100644 --- a/miner/src/docker/taskbridge/bridge.rs +++ 
b/worker/src/docker/taskbridge/bridge.rs @@ -12,8 +12,8 @@ use tokio::{ }; pub const SOCKET_NAME: &str = "metrics.sock"; -const DEFAULT_MACOS_SOCKET: &str = "/tmp/com.prime.miner/"; -const DEFAULT_LINUX_SOCKET: &str = "/tmp/com.prime.miner/"; +const DEFAULT_MACOS_SOCKET: &str = "/tmp/com.prime.worker/"; +const DEFAULT_LINUX_SOCKET: &str = "/tmp/com.prime.worker/"; pub struct TaskBridge { pub socket_path: String, diff --git a/miner/src/docker/taskbridge/mod.rs b/worker/src/docker/taskbridge/mod.rs similarity index 100% rename from miner/src/docker/taskbridge/mod.rs rename to worker/src/docker/taskbridge/mod.rs diff --git a/miner/src/main.rs b/worker/src/main.rs similarity index 100% rename from miner/src/main.rs rename to worker/src/main.rs diff --git a/miner/src/metrics/mod.rs b/worker/src/metrics/mod.rs similarity index 100% rename from miner/src/metrics/mod.rs rename to worker/src/metrics/mod.rs diff --git a/miner/src/metrics/store.rs b/worker/src/metrics/store.rs similarity index 100% rename from miner/src/metrics/store.rs rename to worker/src/metrics/store.rs diff --git a/miner/src/operations/compute_node.rs b/worker/src/operations/compute_node.rs similarity index 100% rename from miner/src/operations/compute_node.rs rename to worker/src/operations/compute_node.rs diff --git a/miner/src/operations/heartbeat/mod.rs b/worker/src/operations/heartbeat/mod.rs similarity index 100% rename from miner/src/operations/heartbeat/mod.rs rename to worker/src/operations/heartbeat/mod.rs diff --git a/miner/src/operations/heartbeat/service.rs b/worker/src/operations/heartbeat/service.rs similarity index 100% rename from miner/src/operations/heartbeat/service.rs rename to worker/src/operations/heartbeat/service.rs diff --git a/miner/src/operations/heartbeat/state.rs b/worker/src/operations/heartbeat/state.rs similarity index 99% rename from miner/src/operations/heartbeat/state.rs rename to worker/src/operations/heartbeat/state.rs index 9d441d2d..0cc3d13a 100644 --- 
a/miner/src/operations/heartbeat/state.rs +++ b/worker/src/operations/heartbeat/state.rs @@ -11,7 +11,7 @@ use tokio::sync::RwLock; const STATE_FILENAME: &str = "heartbeat_state.toml"; fn get_default_state_dir() -> Option { - ProjectDirs::from("com", "prime", "miner") + ProjectDirs::from("com", "prime", "worker") .map(|proj_dirs| proj_dirs.data_local_dir().to_string_lossy().into_owned()) } diff --git a/miner/src/operations/mod.rs b/worker/src/operations/mod.rs similarity index 100% rename from miner/src/operations/mod.rs rename to worker/src/operations/mod.rs diff --git a/miner/src/operations/provider.rs b/worker/src/operations/provider.rs similarity index 100% rename from miner/src/operations/provider.rs rename to worker/src/operations/provider.rs diff --git a/miner/src/services/discovery.rs b/worker/src/services/discovery.rs similarity index 100% rename from miner/src/services/discovery.rs rename to worker/src/services/discovery.rs diff --git a/miner/src/services/mod.rs b/worker/src/services/mod.rs similarity index 100% rename from miner/src/services/mod.rs rename to worker/src/services/mod.rs From 888810fc3dc144e54d4ca5fb905199e2040f7384 Mon Sep 17 00:00:00 2001 From: Jannik Straube Date: Thu, 13 Feb 2025 19:44:05 +0200 Subject: [PATCH 27/85] fix worker naming in readme --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 0bab741e..29041186 100644 --- a/README.md +++ b/README.md @@ -118,13 +118,13 @@ You can find more details on the APIs in the orchestrator and discovery service ### Deploying a task -First, you need to create a local miner (after you have all other services running using e.g. `make up`) +First, you need to create a local worker (after you have all other services running using e.g. 
`make up`) ```bash -make watch-miner +make watch-worker ``` -check that the miner as been registered on the orchestrator: +check that the worker has been registered on the orchestrator: ```bash curl -X GET http://localhost:8090/nodes -H "Authorization: Bearer admin" @@ -152,8 +152,8 @@ you can also check docker ps to see that the docker is running locally docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES e860c44a9989 ubuntu:latest "sleep infinity" 3 minutes ago Up 3 minutes prime-task-29edd356-5c48-4ba6-ab96-73d002daddff -ef02d23b5c74 redis:alpine "docker-entrypoint.s…" 27 minutes ago Up 27 minutes 0.0.0.0:6380->6379/tcp, [::]:6380->6379/tcp prime-miner-validator-redis-1 -7761ee7b6dcf ghcr.io/foundry-rs/foundry:latest "anvil --host 0.0.0.…" 27 minutes ago Up 27 minutes 0.0.0.0:8545->8545/tcp, :::8545->8545/tcp prime-miner-validator-anvil-1 +ef02d23b5c74 redis:alpine "docker-entrypoint.s…" 27 minutes ago Up 27 minutes 0.0.0.0:6380->6379/tcp, [::]:6380->6379/tcp prime-worker-validator-redis-1 +7761ee7b6dcf ghcr.io/foundry-rs/foundry:latest "anvil --host 0.0.0.…" 27 minutes ago Up 27 minutes 0.0.0.0:8545->8545/tcp, :::8545->8545/tcp prime-worker-validator-anvil-1 ``` From 593520fd8af033bddfbad487375ff5093dfc5135 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Fri, 14 Feb 2025 14:47:40 +0200 Subject: [PATCH 28/85] add readme images (#125) --- README.md | 15 +++++++-------- docs/assets/logo.svg | 5 +++++ docs/assets/overview.png | Bin 0 -> 212657 bytes 3 files changed, 12 insertions(+), 8 deletions(-) create mode 100644 docs/assets/logo.svg create mode 100644 docs/assets/overview.png diff --git a/README.md b/README.md index 29041186..6546d1bc 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,7 @@ # Protocol
- - - - - +Prime Protocol Logo

Decentralized Compute Infrastructure for AI

@@ -21,15 +17,18 @@ Prime Network is a peer-to-peer compute and intelligence network that enables de - [Contributing](#contributing) - [Security](#security) - [License](#license) +## System Architecture -## System Architecture +The Prime Protocol follows a modular architecture designed for decentralized AI compute: -The system follows a modular architecture with the following key components: +
+ Prime Protocol System Architecture +
### Component Overview - **Smart Contracts**: Ethereum-based contracts manage the protocol's economic layer -- **Discovery Service**: Enables secure peer discovery and metadata sharing +- **Discovery Service**: Enables secure peer discovery and metadata sharing - **Orchestrator**: Coordinates compute jobs across worker nodes - **Validator Network**: Ensures quality through random challenges - **Worker Nodes**: Execute AI workloads in secure containers diff --git a/docs/assets/logo.svg b/docs/assets/logo.svg new file mode 100644 index 00000000..7d8fe3f8 --- /dev/null +++ b/docs/assets/logo.svg @@ -0,0 +1,5 @@ + + + + + \ No newline at end of file diff --git a/docs/assets/overview.png b/docs/assets/overview.png new file mode 100644 index 0000000000000000000000000000000000000000..5070ca232fd672ea132f5f4337fd038468e3f99d GIT binary patch literal 212657 zcmeFZcT|(v_b(h3WnuvbMiFVEfJzYr1nDY+f>evAP{;8D#DB+AWCl*inM@q zLKQ|qLI5cV5CS4XAR!=z5FnKI#F;_g-@5l*cinaWxc9!_M_sNYPoA8!&px|+_Gh0z zO^mJ_*n4y@1Ohprr~A7p1hO{)0^!vEWe<3#;Zm(5_z&u%Yvl)lh@NNv+o6+m)&ab^ z!_V}}B?!7(a2CAaaJguB5dwJ=w{QLCZpbdp%X+_GGz;1>N75>`bqZJhI*B}OukVTgqj6mG-BftgnS8kelM-db%>|0;e|@SHp6yC_Kp9u zfld1wne!wZyoQAT@oZ1!dpWzr&WEfm%}x&#TrU#s8@aoS zGws!txazmSZvAsb&~CL?1yu1rDY7KnkYt-yUIs6Hyh0?p@f_DcZLW{r?E5d{p9np0 zeNeRTig0kwfkzTCX-TYt%SXk1Ml9d__U|(x4pKZ~nV(k{C&7Pm`w0$?klM_>&eT&?k2|uq$8iX~41%LL*MlM@bxb$Q#Z?b2_l};4( zhjVX##q;nXy_2;*6Rqy&`oM&Y1kczFi>L>E#WeFu@7}&^h{B{))@NP zCr)#1=D^#JA-3DMM3h{O_rJ%(Wyr^PdM~Hock3wq!M?ssWWntp+qWzcnqU-vhBDd2 znx`T+!L)Gc-c7?}#(yjgy>TpRql^e|KW~T&{N$;NeWp&8NeA8Ty;_{B=s}My2przN z!=VVs=DN^vQjPbxyZO%xaXH?fE8jTV{=k#@vBr8UAg%2$L8_%L_Cc>IGhkthI-xAQ zjrX}|ZV|7A@xXB;E>*$BYo>D(XhFT#&oeUsGmF$YG11$j$tqUy`P4$0B==;)kWEPw zlsi9+dbK=wv~cj3C(B9~`>u!}D;u@Mem^?9{9{rMMZ98}4g|Da5tCQKFp%-|QqCQq zRf8;Ut#zLt-lA-fEvJV$2&a#UiHU!T_OZ38&S&CMs8-uE_q-pIu}M8P`&%@ZkaZ(! 
zePyv)-px`tbZs`DDCZ`q5mNbMWJ6p5wjP%0zt)8yng$JZmq>x*@P+)5nTfV&%lgJ< z|5;N%%0R*3!PzDxD^0;O`wX_2N_(`V~3UpQT%}yCU;yqGm2n_nHJgmlP45xqhQa7UiBqlzg$Tp)d$v~P)t%F4s1SR z*y1}rg?8+Der5Uf2mAOWVlAVvOIobC3W2QVg(lX>;b6@+L_4itsJcGv$y8c37ekKW zsO?X187aq?XF9_tMV)09a%laDQ)uSLc*o~mTP=5Y(crh$x+@x$P3^U~)xWk-Jk(2H zd1f~ef}asar5V&3&FKASY3fhmiS(-)alZ50x^bggh%hqfd|NnV=Ed}ZT#u~nLN)^Q|km_rKzkN6f=D1vuT~!0kYFht>gj-j?wN6m5QV5#lS@r165r?#5vVslVxj|yvM?T z7x6wK+fS_VhJ|YNBtO5YF)!xbwCY@6O!_*zN+%QLzI11*g|Pa0jse&Y*(qQNg6F3KF)WUMog}h;vDI5ec~eT6_6J z`4>bzYHM@MVTMyAtT#g~a6T#`AQ*=;&6eUZ)A0SGt6(-Ax;!^b6%L(y(>_Ai8!wj# z3iWbo`3;@({xc>5axsxgT5B_} zlw4CT+}xQ5{YApTI$G?++qTFZblXCeCgxZuEr@Ln1vm^>zA~G)HYk<#JQ7{CBojyI z6QifS&QIKKTa`By{EkLFU`Wr!ulk|9P#93z&A2#u?UKE52& z98RvI?Fqs*b;0p6Ecu$LBOOg${%bqmA^uoen(e0rrFLI8)~He?&Bcihnk37-?Q+Qq z&TGtu1leapALseH+LPn7HiKAelzp*RmIo^(TNqRg@*@s?k5psp@wyiRdyS81i1sx( zHEwOPhz`_!vJJiK-;$eulh`&+N%NGU5p8Y_f5v0wu^VtsZsCmcU9c?%48~jm-t=Uw zLWxsGGAul}*T?PAjUE;VLnt-l;i2ciat5@~TC^XZWj&^)*Q4TvlUx#25BGZbT|u_vL`U%C3X6@TgQc zKE#=rM0?73ooxfOScq9o^rG64n;f5qdx6+TS=kY`uj0H%BqIte&gWRqgxuDg{T!aAV|hX&WXaXWP2OCLg|Rdq#F++Hpnpp% z%*a(^HAyC>TJV>aV(Di22Ks}Y)~ofzLu&_9VUo9!rY1uQOxJ)vuVP+5SKSeAAWqQD z?em)IHFJ`goC+$)wRYprv#~nIDl>b7Tk^|*=bg+7dWqclQq@npqa(Bp3tSo9~dA%w6ncdz`VL1S#VRH1;wyQR2e+(@R#Z(WEk0vEr0ZH!((~ zyP~YJ)SHryxjkD9pp6Xryy97&BPK&X8;#5l_gq^j9Zj0d=Lo8o$I17o{?WkS>G4|j zck5?P8z1As2R!q9xl-^>$V;Wrv^;3S&VPr8Riqqq%Q^0FDJxE6dB}8^4QhQ-gh-T+ z3!SgaubYfj8%bg@{2s+0bAbMqM0tS(wrFr}=*=Vsyy0$-@pt*4L&3@@v#q%fF>p#n|+C?j^jC)(cWv~P;hJ55~|RX+vn7p&5kd#LMR1F z{weLT!Wtu83R{r^yb02x?La-0ZQcPBep0i>kKCObX@3%)t)4~8lB{?dl_`d`;wIJ@ zvz|>-Iki?URt)1=7v%}Zi1jG!@`#>VVb(GwhD8-vim28ucPNUqflS%wlVcbl^AhADP52Jws z?sz1^zBM-Y>a29KENmXB{^A|*QsMgMVx$=-y&=C7)jhq(W-XHAoh8HZY#~(dy+b&@ zYqUblGs}U(Ypnip&j;Y8=Lb!KtuN)NaAyJ~*p97kNL=jOVr;S&V3vvgNoM=!EQB)i zBK7s-6~Pu9vJi`jZ13E7yuXq$YmuLE+hVB9Ohm>G-3+_}?jV0@Qp9bHhw(v#)C1i~ zppvAoPHB}$(?2V1?gqH(jFih_ziJcQq&SSQ9v-d-)h2LF$K33D9u=yp{%%i(R$W$+ z%}I1v1tynxlSg&iJl{ABhGf3erocX>Fz3;{1;#-3W(|*+p-$hoAYrMywK`q(96NY_ 
zF&y&FIx~+g>u=3LBrih;m-RlK?U%Ic3)~g|R4hZUd5#cVZM-vYc(dZ(+o{#^g=oQD z40<~M*And!H-yfoNNs8hQ^337qo%^N;|)NYi{iw?&U-X` zv(?Df1euRlbTHV7Q$9=c9lB^EgU6VKuofW1<`lQM_(I#nAPu75B%!LB}_s%9!> zc>mP7-wNjH6P;F#GR}V#RPnt{@nNe>sLdtv_IAJ?*R|F=CUa;9@3Mh;pN_E_&k@6# z!EDs#D#foZyJhHQeYLFJhy2i0J+`t>Ke;08768o3+6_#6oJ%$`;MN~hTy ztB}twxVDCiOW)8x_?7JpH&hR%v7mMmK|_3(4CN|fk{`ZGKaI6&O+aGP5bPi&6s1t) z?3FZ!E%&CL8GE#LB#2s4gAPTnXVM6ScgTWUHnGXLC7{GLHBi?)wH0)v5;nvu&+Pb_ z(uEBc(^wGAaf?dl_;+wE@}qq|nH)8J&gQZPGkze0z8<~$={~o`uAHfAGtuH*x1h<1 zt#>BnQddXlJzC=#Dt>t-ms91O`DN{LlJ_R220-t4 zlMYN!>7E$Bxi5bg8mo}W-KnmyB)1HFytOceBIeJof1Yx1ChCTx<9DxYYu#7u)%;bU#3=-M3!?v^uPG z+hRi;p8X$|08^5prtfkJy8`=~HO(5|3TH7vx-)1C_Cny$zsYE5HjA_nPFP>AX@=+F zT>c2oqZh$8+RLe=#VV~H9QsBdk!w#Z{LGVLI`dP z*)!=HrylPywV(L;@YT-USJ((YM#83Tp6(?*+ zGHui@U+3xt1)^)8ChpEtI85X|!z0I=DE`$dwlwk@EO;t$%#C~`93^}(^5EF%e|MD6b$)n!C5H^}aRa%zeq${-J4;?(0QqKFBaYs@ zJ|!D^KMtBVxT9`A>bvKw13|UH0g-?C4r}ImT(=_drkX)~gsxMfpqN zBqFUO?VVFRd*+BCtKXQ4TB{$7Ef!pa6dIkP4k&D>4y7Vs7V%3D`0Q^WAAh;gp(!({ zr7#{TSAjhFz@dad4?2_W zLka@NNQ={NJP7DzrJJ=Jr+DpY`w}F+;xmo!1wj|ZZ>#$Ao5Fo%4*kyCS|pX|Oce7; z-5U>G8ob!g(3MjpzVN@RSt7vjT2ZDr+^RN)Cg8D}hR}kf`6-1`(Yx)8Q`D!3o(*Yj zJnC0sMG_-r>FA$b3Wv0@s>}!Xc{}H$Y;P6OtTk?)f}zeK57iL70X{;>|^Qqik}@N1@zFds9fy$1E%*k_X!r!Onam`3T ztn;d}4!Q=z^Fl-a)p*Ee`>Fdm;xS56T>u!`D2CJhY4*`q_(+36(1UsU^jZMEhSq0F zO5<|u!kB9X12fNCRrUI-H0}M%X6~DRZT4}nSaaFfoI6Zy5vZjCe zdh9z(`{3LfwN+`yq!O;WBu9kB>lzM!tk0ugy11b4x7Q;E2gZsyZ4L6wZ$pAOg|lP}zcK3m>>icg%L8EEBl`eKugr5J6W5B`wcNm^W0VUhD;@eJF0 z@sBL>Y?hJu0>So2SuUwp@V1O&wsPZ{FKNeSijmhWhiSr8Xcbyqjy2>M zSJiG4+bA!yEsnHl02y9&Y7Lfr93UQJ-RcFcc1nZRO}*im0zOXp&M14shn+cO!diN8 zSpQrnPlJvBJKTBSFCBl8jk471@`Ar-T1R9PFH{`pqeS==IIJ$Vizq+BTiPjL7eSbE zrg+5N(keS7t1g?-tQlk*6u%UQT6YXhHyO3QACBOoS4Glz0_p*pbkag= zeYFQQ*7d5iXyLeMYvejllUnGqXN9X(v<~M2zVFnh>=q`d_RK`xqCPC=@`+$I2&{$^JWB$cY095*UrR z-Y)>3Afu`Fp^X%;K3LGigRz@D&-7n=tC`t!RmWK++K{gWBD3Jxy%X|9Pm+)c2$egZ z|3dU6KKG1D{VmA$QMo46bbh3&C@>3p^^B^nH8#KqyKH!5=^Y=;lbk8%SHM9uFMQ`* 
z%LW%IOIV*pOBBPhvCW`pIRvBwr>O$~E>!xdyVyQP*rhnIO)?p?^Km?p1`czvnhPrv9@{%*477m4qBQY3@gbP!yp#hx=n03{QfS*2s z081D_AQyC|BI{#TpG`_!93sF_je#lbP@v7QFhix3N5be7eYDQ6u`S1%R_UHemmIV^ ziIcs0s$!v5`rFznTU+*Di)B1LXt+VaSV3(7de(9OOp6}u30Bq_mvs0 z>&?d+tv;TW7@ip0Qd2j@Ll;is=FNwOrze z*J7QgXXTs9ZI5JwEh7sHi!?r1*qZ~TjcnY#?Cg%}$b;8EmPalo{Td{Fvr~`fh|I+% zp3hQm|I&ow#e4`K8 zjD5CPqKp2@@>u8s+}pd6fu~}OdNyCzT!cm&QS%zRY9d1kyqAq!%cXlNTb%8lFlaU7 zUicIA51O3;2D^K^nj_C+H@n_kE5vqF%ESNomX=-}n~GY#lH{Uh_UXq`Q!WF9W<`eN zq6LyExKmr?6lN@#oSAD@X`sB+FVN7JXQdQ_8XDUP?Sk5zHl z^9?p4W4HXRvK^muk@4kQyq)%ue>tgTRWg>=u73#N?UJQWWSiH0KUZR@Ib*S@V>xbk-xvgQINq^d zkgsC-ii*VHH{;t%QjK9(aklXSNW{lg`|Vg9(sJr2{1g)N>_@-~Y0>@P@uLBm1Grt> z>b;{Na>ucYB+Cn+)>c>-wD@^+a)V;o>~53&Z>D3JzZI>ue)F?jGst+-#y6L1lw*{C z*PuyTCY2gTeJIl&=uZu7U{~O%2d(mZw6?+!B=_MYdP-&Hw%d!C_+CJMa`gh8jS`5y zk5qw$(yGRKZ|_WCF&Dhq#W8^_{gy?X@_2)&bn5D}i5`H)WB_<8>JCVyv1Vfx!`G_ z0%L!mZw!R-IFLmT8JKc9SA_F7mlnD{iKk7*X^o8;@9r}tottzX=9RD?fir%+n7FUIm- z22`yHAl^k2101JMKG5M!=<;_q-}yLFPXAyg~d>7AqE8@r?~cH)u_y5rAb zhwXY^s%JgS6YsU&nT6~OFpQ{_J!R9QaQ6pY!hs$muWQ}<1bPo|*F@-?gsSJYx{5`l zotbbnJ@*mJG@-zCBoRHPU?Aj7lD6t*14teVOn}QoYFyt?lqug>i;^AGp9Jm+C+4Ke z|ARd-q|T1weQsaM>|dXA(ekhck4K^Ye3{L)@bCbqa|n~58mDBSFBI!He5q(W(N$7>Cm&~IGIK53jaCsb;7Vq;}8 z&Qhd4Xz^ABD8MS2r5|*Mrd*z#4=*7e+J5fP4KRJLXIiUIwNh7XYb0mo+j5{tVHMvT zW}Tb4)dDD(EGr$-d(;Qt5DFaqXcRasdSm;`4#Ag)t2RWoR@y-l*-@2Z;Z~_sB=1s~ zaJewy%=Ol6c1&_W2Af>?O19xEAOn3oK7sel<}j?M-kI&oT@L8U(rBa$V36AnjKH1i ze3%2hJceYF{5Xa6pZi)s&Lyf&g4)9MX#I0N1{G%Fc1aw1mKGLXLFyA9`}2)!YDiv` zeg<+B)C_Q-BJOoWRygeI+iBo0?U4kQr&dTLQIBK2fVc6ESF{p#}$3$ zdFaCcsUSR$X-Rap^Q*#y<%0XK>~d{b#V+_}FDH_p4W|QkO(G1e@@=~cJN+9!2~Y4< zv2I$6V{1$m>WFe9?(5U+^_)hWQm5)t70*kW1wv+` zw_P~2I1qXD55;o$wn!py3P9v<%FFv2Rz5$1i}!+>aa~DP>cg}Gr8NNVR3|FEvN2cK z(W#y-cN^m(JDID-wa3z-PbK8iBwryuEdzYjyGDO!E;Kgj ztLqb=S%5x~wlxZpPTjKgXis~x#zbu~m0D82l0gA{q?tE%y&bjQJ`G%p=jLkmR$X9& zlqGsp&i+zz!KYPa6kVA%X~#d$E~pddNBxM2tDE2ffvm8+LFLCI-XK}N*p}#oU1Hs? 
z>r~&OHg6V{JWOL?l~NRqo&@JR)rMrsXU1gDI$K+n?x)LceUrtFZ;-~%fia-QyAq}f zYkm-r%7WC;RYeDW4EYtk@u!&L+W#mc>3@#L^q+724-&rqmkWx`AgiUK>(kU08Y;H; zBP3-S&==jBHrJ@m`_2CQV>@a{Sgtyom|<61KaVM@1C#L`Qcv2SyN(q}Z&n5s2#-B^0}uPoqO0cugz*SeZ-l{ukLP;q`o@l06#v zdZ!fIyx;%y`QD3tyLHWbK}KA>i3HqVHn9uG4LEDgpgc)~Z*G2mBx@1<;VZxQiTodf zFyR%m%a2qB#mR;>=Hg^~I-BGfOvfpw18avyO4qE00`4u1uJc}>xg>DbaC`I-XD;?3 zRQ+dqDH|MW{qNQlgEs;9s0byZ5#q1?(=2pRui#b7eIGk>Tb^_32etbBAA;!j}v3v#dANuz>*2yEussRC&}i z1vtxL6&AxNc0N#XEy^>xN|3Wk@D=p_0wAK{+C8#;_q(iOsp{A?wxs^d?IzVW%Y+;z)j*5ol`yxEp^x|I z(A)zVq9{$G8s`}|v%m2hZ*pR*Na}=@v$V9c{n#x7kGl6VgaqnpOn#FvX{P6UkA_!( zjVQ6q3LX2xj`-G+O{S@lVe*~Dd59dhmq8;{GzqnAHn0$LgcGHoIc-7o(k51Mo0i(- zyV)Zdg9ZJa%PE$5sZK{QIKr(g+8y9(1>R>@g*~$FcEi>STcIDT=i$qR05wE4XpK$g zT~RDWUOVerCku3)OKk}eKcVbP^spYmealK97@u9*SRUcKTQq6QYW4f-%WOmemY63R z0mFuZf)BPXk2=CvtdRU_Y?F@Gu5ntwmBmc(fK}Q90;abMI&^ulN*2LssL(dp{+Adi zYmsN?{i6h>7@IWW;YYQ5WjNlf7qMzI>oUzf1ji1K zE8l5d@S+;r3e3CXT#!s#4?Wlbz1!u()?q+)5mv|95|)uogqq?CbUI;8>q(wl+V)?i zdbo4-O@(wcjqn0JujnoQ0-%wvQ6HOP+`O(>ZF-0lS2wShCSKi1+Pkx0sXD7O1u2h{ z)-KpzmRhc{R`_S`@txR2z$uKqZ+2e|K^Xu{oXINhH0{D&&duc7`6e!T+`K>5!}-@l zmkH;JLu1$Rg{7Y-0MIb%dCsFC2qVl0(0i99S{_p{*RPRntd<`Tmx}hynzs*R2wA3D zjF#Y9;;~Kd`I-U3SI)9?%9S(r<4oTw|kIi)={!{8%7C^rQt$f&c!cw24MYC16NyL z`YSYmG}w?-T{b99*KkVhTs=kcLqU5FAT!Cf1dVa0P+QO_z*u1Obc%?J($_~S(Ji19 zaGIq+bm(9?jLA~z{_oWQfm+RLI5szQO_0;chMbWG@2@GrCf6e@pQ)gIy9UU!T{R6o z(+drJ`|HrB%GCkQ(G&0fD*>V%TP5u{e)bu&w;?~tvCh^8N4HO0q&lTCRbS6El}-oY z?wE+wSdB&Qp!i)}zKLA_eOhfcNVfk}9?hFHvI|lSL$|RsVIL5VXnwk5gRfAE$EPr1 zg;`XiEy^Y3Gu_`==r+#w^rp7RoJecw_vgu`gmjoL}6<1tG3l zGHBQ8ugR%g@Vl^(68n%L`4@#lq{&j z8|`i$7&fxW#Z@$~Vcup{mQd#h`gDeKCcXuc9t>A*DECm05s@8bm}-JjJxO}uw7_5$ zvT=?--@ZB+V?5_ULbsAPh-;b#}Bi7@ zzi%ygd-bp#v1hHhn3%hsL#>}b=Y@vm8&=pyxe@xW#0fWgVfvG5LP@H#C@njp$B*rp zCsR5*l2wN&;yXaIS7<&}DrJCA{3Gcsj+QsiB{`UdIrJPuk+jS0YYEBSYiB23RO&JD zI00!>cq_NhvieHFN+M%0an<)(XBFd)iDk}fks_(9{_UwsIMWSuk9z5SQ=wOYQPVq0 z+GxeOOEu&do7+8D1V-G$X525mB%}YTd7qP{dB{Ui(nw%We*u?D$(nn2L4m8;lHD%< 
zO^$X>;q&$d)!^iwmi7dElEZl4L8(%l6LFPmj&2e%aOb z%_%@LLU8S^y@u^2PK1dAfB&cv=4PC#pt4>g?$)nwJ2^Nh+;FPXh${)AGjQ|x+o-7z zcTBvg097K%83;vwh~B8M4-Ib8tX^w%K-A#QenFT7MoWC%x#d{Io-}hE7C3mmC(aO* z>Fmz1^t(gS{h5Pe0LXqdhF6Kg5O!F_V)R*}q#{E5cziEVIqxKqUj~Ths0z0;DZTKq zrxiU^X1!?X$A**Lp=UbODLy`ziXaeAnD!>2sPrR!7+1U{gwgV+uyzbtqjjhv8dGV3 z!&*aZj5FdO2wT?Ln!|3Km%8wTOohE5h~5QTpnk>zCUEqRjhR<*765?Rz~3q|L(uk| zddYRm)46Kk?CZFK3i!|`3OLv72xQCF$P{4Ob&Qj(uGwA*Qw z`f$Kz3j3F8N7Gzk4&s!52Va?ohH>DoLJ}Q?^qk7j{4&(3NS$!reT*tnbsIIl-&~K} zSWOWoPi<6;#c7A12VB?r@RU4FzSj_LNZH258ll;~aqGUw`X>z8x6u;DJS~%JFYSs$-rH<)Kg4R9A`M7IOfbRiwrd%)xw_mld;S~H0uLB?X*#@QTsdokVX%P@Z? zY~!7_(5=*5bQUEgqJ)tpU%G~UzxL=u$xtJ*+eP47q9UteEML33j#1~6MfZQ^c&tPq zqBz+xl{~w4biw)cRcn}y6KY`L0g?hY^R5p@nOt8e4NB>iD5S`BWn$dX80C((r6tepy+ z4E&0)(Ito$z}T;L8tTg&cq#cO{%vINPTfZX2TTU;?tAg}@io+q@dLjRV0*g!MiV>l z>A-UPZ)Npf*YM1{qu_skzem;J*!Af_=81c+$K$F^&C?!(VOyRMvc4HmLY_U?K*NccIi2x7^)^?+7K3gdp-eDW$?kke)YV1O;32MC`Kh`!gQ zi`0W;X_APp;l6}!KSL|-B2`WHdI?+vk-4dmzb;?Vs>9hq90fh2?3-p|rEE}>3LDB# zv$o3e3N#gPGmSV-aq4T_@cnY+^?hp+(uDIIxx`vcQCGFd+NMOU_}FR3>O?T`L=#)9 zP3b^n(@^1|f_ig3YA^Za09CbeZ7q9aIJ;yln~eSBhWGOvWXCxY1q6her8sU4Pbh?n z^q4kt5(pZ@vDzq;icGfUkTycgrdrkZV{Xf{fa2z$1wcVXHL3*RH9cB$mBCsw_LZRFYX6A0Rk!E;66qJu%8@6q>N2`% zl5gp+@8@f+yyXtb=chMtVOS_BLEbVeC5nbPc>|zHLDN3kQKx|`hYh~xnCWC0Mv;m| z)cmJHayW!4Y#*s}ibveoYH@VU{knGk#=zOK4*Jr;+~dw;$-w4fu&k0Z*o?_4`{{KP zcl;e2yt2G!AaUx=H`N)VX0@#moDXh4q$SK3zDg-7MQ@UruNE2W3%#_r&lkc|7p(KF z@}{iWt#}bmJYsfbVaHOAy^ELBPglW}c%_QOOy4Ka$vsR>drOIq0Ii^}pFTx&zbx9- zwb$gfK%#@5SY^d(b9C&B+f4{~C{#b=R;b zl%B}E-FnFPYB7p`L#@|4AP#;Yew-h^nHH%Vz3d}rM(W?sRx?k_aSw-mBYVwdc&GJ# zkr>pEmNvd8qZTlCWFRWi9gODX=BIkJ1IDV)ZX=#CUgzghA#}UmB~#)7f-*l#rQ}dW zGxk2J>GE@wjLM@-7b_{wsC*gCrvEO~YIE6qpi0tQ?~#s~J}&gmN6QzXYO@x4X)bmY z2pmfFQm(7+iQy>9t!HzWeRZ;Qp>BOe}S21_SoQ~gVstin7t zPS|4c;`r>nsrzkVbuf%!=eYRSC$jat>*>mnHz2InAt6pt>s(W(L2Xw9k>W{aHy<@- za|iQ=%uwxvtSAtjBxQJCG){%M$C$&H(?Llx-DnQq3aOxSD5vw5wP7N+dYE*8_A z1&4V*tr-g`O1**%w=?_Hyny{wmElb$N$^z%nfGzz<-?;r^KESsWrr(ZvIf8C69#j9 
zGZkn@U!Nl8*4yz_H3O}|eBILFk&O#BM!8n~VDQWm5#e z!d2Jfu$e$DPieb?{Hc9Q`xPo}@N82}3GYqgHgG|NV(_cBDcAVV^tcd{x#7wfilxx= zjt1br)Lqp{MDFAWe8{fzjOZ2mo@aT;5An0;sUf=fu62|EBdc83>ixI2h#OW`{EA~a z5R&r;-!IW;pnK8EUxzkszH6{E!4q#^8$@AXHC*^Q@IS^E zs+zn?^8d~ukLzNLR`1nuUHO9DU_(wcqXG_f-}L6d`O#of@)m#Tg7ycXVD+!H4_U>% zc-w$!B>+WDqZMc@e2>T_S@XvvD5f6s31(nPB4S`CXK4uSL|w#cuw zveWZ6MP4l&Mad9e^5<=i-z?F(^gT)q{v`o@{ob!Smv$puUm9nXYNL>MquA5x;6x*w z3PcZC)K8>W?NO+HSY;;oTFXRu?V|FdYt741VWRP5Dsq77bb*O|Bw9Kfk`l9^zh}rE zOei>@UVHB81f?*?hPDpN#!Bp7&nxKhV8l63n_J2Yt-XT_bsPLqNfZxdz7Yjc+ll}t z?qoqtd%)(7YXoZ;ilI5b4j$kyLj#cDq<^Et(+uSimYQA(NJ0NENvH_<2q>9XkKFmaQ7m#_h}PyxhiMI z!X2GN24TPCu!wr4&FWd?M=_R`RYeFGXa7#0;Ku_bt2ddM zJU@FBnYE=)w?*2GGPizV?<(mwnFAgcoSM9AN35+RMRX-rEH!`Yfs*1LWqZDhkM_;)-t^GH$t%NnKI zY?n(V@n~WfQ&^)AmWmuE+sY4GHysF`;Ua6MMeNpPlU#haFQkX%H z=g;o5Aq4=a5gYeQXW5kc6NK&!bf^)nXwS%oe5Jr>jP3p6e0S$(Et3rk2M~zmV05%Z zFuOh8&#zhsPyaTBrM-D_%quluR-%dPiP64w@`hAfGCbW;s{eI`G z)UC7Bo}h0*8*RDhb>nL&XQd@nbd2&Bg)9RP82}TZ{J+JL#?;27cJGOLni z)y#!=ADcAorvy|qc^!yqjkuu|wT08GrlrE( ze>50jRjSpDrqc-^tX51VFn~`PVIi?if2)A{bf*1SHs$%JVn2)2&6jP9srIcoDtpS+ z>jETS3>I51zw7mL*T7|D`-M$~*%>PkbfqEuyg z_-eg@x6H!1;loZuo!~86#cRK}Vhjw(w*);_n>~f=guAx(tTw+`P{;~zQiE;_lkSj!J2kQ4$Vfno%p$;BMy9bfv`2Fa-5JF zKHzM&gNx+D%U*OQyKx^FdnPC$dUgC%nZo>iQpa|+v2hF3y%j+#2%a7VH9qvb? 
z*zIUV$1&IXGYI810+7l7T1j-=XwcW`+pQN-F`p2gz%EV&Z?2L#cWWm9T$K9!z1;rm zO25`dfAu@mCK;u)wJ5UnQQ8`lV{G0F+gc;wwlL@>b6F!#f5t>&{g53CRfnCg07TKb*;}{=>k5%6* zoqx;j)$#vJkGc9UH9Xv9Rz7Mi-(LkgNC8CEl0X$6O-gfBDtr$}_0kt~~}xCodk~b)+MCSK!W*Ef9%NNLR~;OGl3H zNZ7qU@kJDe6_3+%50CC^%P|C0wrx+7Wl150GEX7Xnfj_dS1iZP5tiT&p}ADR=+qBp zJS2km=R6!ri=u+K1ichh$$ zi4)?mmuv=sL`VO_h%cU)h!(LIl(@tVX}R_#V=v^>SGmd`PQ3;C^VyX?mwu+sXPEqa zlI6((7d{9??kQHbIV@NWoR9Yz#Mk>AG_zow`dfb!^L=dDUzd9%;8lC`&Y-y^ZDRxQ z6J@)zH0mMM7e${gX)*p={>S%>b@jQ z(FDdT^5NLdxT)0(-(W_v)0i~ru2?uBt%Bs6a#c0gLnWWy%*JpxDi-R`SYsR8F8 z+gQl4x_EgHMI7-+ZkNcuf-Q$2LWb@-8iDt)3URZd84y1P=DyRcn z!MyY8sn6^t3lK-wMd}ufu+9-|Pk{45$|TsP-1+JAL`h_sz(2C`(>v5>cH%LJEId>U zsGvo-5#VbID$Q5LO+`&15jf;OZE$ms3ZErTSmf{jxEIn=n*kFyNTU~lBE~OIc0xQa z{Bv~L@A$JWDJC%b6YuPO)Nkmwpo$B3pu&zy8gZix3J0aVej%24mn`peQFK zf+l{!nrsOIc*3_mkQV2E+=xRokFV~zKHZiHaH@nAm`f)6EP(GaUkz<~y$jMZy?v(_ zW;V~ML)2XNfgl7jz4#3)D~lQnntWW?xC`Q`_s`%0#1eI{KT!mSZk_l7wD7+2c$nXU zAhla z)Ev}3v7?Ve&{UN`Va zQsuX^&}Y^CjbLddJ$FEsI#$hJgS+Lp*=qOqb>-3nUF~yV8G>K`6E`d9zn-|yt(l@o z0~G9dnIEm&&GH~@c(Y>rwLycZr5Fvu!2UowOF$4%9N=T4|Bxh}Bp67bc|9ao|oDb*2*;J9M{C{7P|Nrx2$QW==(rE@tz8o9? 
z619Lb7?(?}rdlknir)}Xwozdl0$p~-XCer zOD)1@7sZFv|M?_|`XQcPcW1wUpy5~d0?W~xRYk_pi^*xD58Lg9^T!)dqYL6cUi|Y3 zO{B79w?-s>Keuh)Eq6W-5Kk4I6;$rP<2HSK{`a;~B1-}b=j*3exc}uxPhU}q1C9&w zcm4z~RYB8I{u#8)3}sk{qWNO41#!f=HdKfw)>|N9so?p2;l6!0!~>6zhCG@E?HZz- zcA%-k<-Yyj?!%u@S?Yt{I8k#1o?mYPtHJ3$HeC|TuqDu>%YBK#1Aq3 zdSrFNm)lx4#UcY>+xDpjY(-KTa4|W0_JM)NY7imxZW5JlkGcibcsR;E6-~&!*dQ%v+eoYZuS?ZAkfD8wt<*z3_I~@I!w=&(i>Hv zaHku5yJ>JhJ^@~5y*A#`Z}hfoZoUM@=lhYGr=JL4uo$&Mi!L6w)^{Hz8#9M{Fo;}6 zq}3oKms--1-eTgy1%s)$+)=b|I@RP6V7mu4>LgquQ*D z{*|PK3!apO$#EY7w<=5nm0l%&;%h6uPB=Mc(HH`$(0&xul=VdVuj7d=K}C*v6pM*4 zw75J#@T#cWIO71t6OY_{4q6S+Rk;9^aD@t|?uI4K9!QXP8w7@st%ivswgy~})_x?^ z8l24t7Pt38`|=HF*F?OeyZa|Wu&6wcluWLyecu#kfyU>r?#XDfdm6+MnD}WS?R^xm z|6tTrH5eTgmUJ^};fKsu-X9@$XFvwUIU`k^Tu1yAh3n?Kh_&%L%q&SNVE%&C+~c1F z`EleBO;tveb-#21{b#b+Kd)XZ>W|$MdlufV20HHY{6>IAOM(@y#UaqJETuN3@Mi ziLbE5yVE%A?gh=}=dngmr=yjl?0LKyG(Ky~1>n#!apmxGL8Guv zD_!bdea0k^4iULX&~C+-2d>?&MMjYb#Yhd#JQdZ2^_|P<4mnV_Y)wmV`{{1V{zErD z-bxdd9QIOl-P~2}3DlBl0FaKxT=H>BPN9iKYx1BS;QSS4`bLfFTZEGG|Vo}C&q`(}u!L?9T|g>*7f zjN!&gATRpph!g>#&vmU;<1q`M{TF=1vPXE{Rc}?NkYORH{)UD6RapgPXafM(t1`)z z7Ws?2eY;EEwF)B?PBYcKb;o>4RKcx`*?r3S z*xpA^&)>EhIfq0RAR|NC6&A_pI*MlSRAhuH)9B~LO*AUrOj1Vb5D3A$9<5u?$edlR0+=&0MG9!U#Wid-n?krah-OHukUuJ zH5US$RI4=?d6cAz`Q$&BCD3Uv1qM@h9PjF`G?hO6jNO%Ob65AutpORSQg=IkycS==Yy7+2^rk3@j$Ksyo`3E3s`jTXWT z@wbczeE*P^XtECRXXlJxnG@A-pHRo^=mg5SF?cL#!x4^KMbV-_-D_SG+vqe@;i;9_ z2t^vs28aMQRzxA33hFnYf}cvTq48?;$!*hhLA1~sdX(UGKe*u2)CiOE-jz;prZWv} zS^nBS#sSQJd9yMz^XQB;pYC@y0XSiv>`-9n}&7N_NWt5wetPGv5~!Od^Otg}cEPEpLFGV@)A+DKU{s9RwO- zJF$=^7LJ4=nXb=9rhr)zaY4kQiC80ApcKnbi9vl&h!a1-mQADkL8au6Ky_b!3e|lu zR-KKSFTLZ4g}k`i=z>XxIoX7jLs)&$7C7cUR*5jv=PXU5h#I)1stYD!P=XTH#gi7J zF}9COSmNL$B-LlJ3LD)244;Szwa2YL;VMv*CfNLC;>w%|U}wl#^Kg?U>Um)+Q%83E z*t*{(YZ^nJf&1Ib+OY6So-*mlm3<9pwFf;7uzufwEj;w^`3rN37da;aFd>e5mGCOp z%@`Khq1c*KF^>`DEjq&Xg@jm9s{5X$u2rG0VjM`74LsM>{x9gNBFH#ZT3yBmr zn$cCi%AG$V7f@fAmo&oZY`6@A+50Bl>(gPRPO!DvU|B~-3{|&_`1HZ8%Ei{t-75lI 
zEbrUx>qxxk@a?|CB^KtIv*nv|v|lJP$oYy?13G)4F%e0m@GN7U?O{43-I_KYrPKTJ zAXgm$OE9w`8mJHfwW{Cp`b@vg(6zk!i(?avb5Ad8vS_ojN2nKLd& z-bV2%(k3k}|5E&^`BX_#T69OArRv?GALV!b2-#d^<76^cZ(FJA8tMNA-PC|-}#p4^gb^Ag!HCkb_s?3jU40cphHjqofOwbm z>T0)fXGz_Xo+&~i8)@0bzA~_nKj0rz;ofaD=S-w6|FqA;RB%cviWQI3ndaIMio7V&{zoWV9z{pYq zWdTkxXV?6%3K||S?0QrwgfBmh>d48n+qzBmke;}7h?#^D^@MX5N<#48<_y}K4OQzV z@msgN)!g!^4squ1J>!^@l&H*6xe9G%S^5s$v-3 zrfV}#XE&l#Y%2mBDGN?8Jrl%SUnE5`g!-y8DZiz1)6k+x=+RZ@?iu5;HK{GYS74Do z2LDL%aMTGolgyd(T%EZ#vllJwR0z?4@2HbKggs&(l>m48t^Ngh2`r9xo< zzgIU=h}z7>4@o@%=`wt()%c07GpW)nb6^*e2HZL#YO7>iRVH%}&u5x?LT+uH(9`k( zDYsnt)LZJxNK8iwqQbX7k_Wq}A$`x^nK55t6eTF$v*#e1V)KOeRanD`NaaJ!v{)Z?IvH> z)4>PfTw(*zeZw(cFRCQ%RX6g!oKYK zvlK1PXVbVT&TJp&+YBahz5ReHC(!{)e7X-{n?Lm$0=;pz!Q*8acR zcnq{nKG*77y~GnL`fwoZoS7;**`I6I(E*t#nJqcR6WbX zTkiCgn6hmT=iVtDa{)OBYu_P;n_?&Lh{rd*uW#v7%5jVA`ShyCzq9#E(FqUDD>W|c zts{AXh|{fYFQ?jWBwhXd%f^s?kCYIah1FKeIQJbqsjsUT-X%DJYHx| zI%$TM@s+q|)D<-tt8cX3Nr03)Zy!--ww+vCm3#-2{#Gv}o^V5(TX>YI^RtbOpV#r$ zmc>GE`R*BlsK=C9=rdoaGy<;^kLsgUpI2aulR^h0`1~d`d5*wyJI+9m&iOE`rZ@$K zKg=np2`=((*$(Jo?k$}D>AcSM1c{qaf0wiQos5#oeBKxDj=PqYLt&qQiG;goQst$K zoGR5TO0`hsXdb;p?YJ|^7POD;TIu&dT{~*4Jn^WvZ=y?jX89MZd@O?bSa=NPN`?vE z7sC9!b0p7C{K-o}TTdOrhQ0Yd^jp#=$JQ^JJmD8cs#nbhV$cbW%6;ZW{+N5^&dUmS%2SLC%(nY0 zT6WL4!=BN~P-7FQAc{lq1N5h2E?7>sBL?lpJC=Etyk~_+q6Z7`seEpegUev%lJr59 zl)Oy8DYs{ zePyULQj0W()zyig$Gh-u{p$VMVJ+WyBUFt@!=YZF>Ixez6ti!h*mQ2TP_?_mSmgLo ztNXEnQzWs@@%1aS?HRC*&<9e1BL+yJefNwyc|6XBZ_wSA(zJLIGeBP)Z>xq{RWpRT z=#zYVQ|?Ha@UgGEzj(nh-xj~{EI-!ua)T-oi`GV|W?~mxpYxuTy8TY$y-%|D{I+jV zaI$ewF`M(3>lbfq`m? 
z?1)_M-u;oSu(wfj9WiFr6eDRR0(RGB&n9p8#u~bgK=SJF9hk4$a68g_XseR5<(Kq* zn7^{&+1?=n#m7qC-IRe7t8$L#k1J;l?fU)V-fQxMCF&?zT^DGsS`0q36sVFn;}mWq z&@o%Spnr4j{1`yxZEuQQ)8FC^4-sP& zO#ENYf&!aRAaXwE38VDkd9=b3F0!Ir(Vo!(R?p{^F?6?Y-kmc9ONN2;7CR#{(>}xK zKI1HO>dwYhdUo8}VYWJ?ZIchuyIwb+r$$Wp=4j|&eoF!8E*4SdIdX14p)FgFndD%z zP~t9mKDJb%6-unw6aI6e-)56bwB=pDd>=DDJ^J%rX^Cq7Mw99bi&Y6UCp&=9{iM9dt$X^|7jO z(=MW^V)j;K=`%%x=gSzSd9r)iN%-;F)aJoR6`T0U{)11dc<4__qjC1UJ2E}kk?eeP zv^6S0Et=5U6Iq4X_D=D7rtap667dEj4Zs9(;rS`Q2Jd5Rad5nO(!1+q>|{iDolFpH zyt3P&=>$oYlaPRs$wq7oLxG_yeXw%vYINVKXSSYYckBbR>g5CbcO|(mlmsCvQK}*F zqN6oa_P{81G9#!~T6T1(W#6Zm$KHs|UhSl>wFk${RX3a& zMpM>Lj0Wh21+K}3pnBu8XTxQzU$i6+DD!-ZyU!jt+Mkq(sH99UfCctpYKQ9Ke3@D# zqa(0>AOoBX=NuVmXWk!`TM9S%NTeF&2uU?bdWKVlmiLeSZ0LKJ3)K>%eoPIaWvx97 zC7=>ZzMsLxTEIwiL3=3gBSQgZu=_9D1A%JZA4*H9HK2=y&h`tcx>cIz_E9d=V|GIJNA?nYtbFHYVZi<){DfDqe-5RTt3c^yW<-GbxhRI!Mhchj^vI zxGZj8_zS%drrYTrnV+N7yw0MFd;y$8+otI7&47=->1VWi{(keB8lT}~`&j}^{1Q=x_AqpKsH-$= zd?++55JP$Yg@?~Z*7EQvt~;(sG(7a0xpaF-l8+8!?A;DBDS+!6ay_AgBv%&VYsE{k z0v?CxM}D9hpNgVP)|0bWQ>VxuJ-A3sIVw=ZJyHR!biPfF=8q9)uDf!4sIS%k*nzXd}i7$%Y^DpgxbV5G0rlidiR$Q%%kc@ z)=qiVns;mVj}Z<|*B>+r9`P+w1!F+lU0YZMVS*+IeQ2(RonI^~2zBXuG3O4)$-P_b z2wP_2w-nwyr8(y}0cE0Wyg9(>YV=8=v*HgiOiMB_j#5( zXZlMw=b1Y78c8&gq*RV{yN=I6+qkYZo*t$cIKo6W3DMK03?pZ}hJ|>2Pz#q(bcU5N5lEI#&)mD)wt1UClG+0|VF=^0^Q0W7rOV zDh|r8a+I!$4iPj8njFh|uzMd9h`KOeU7?Qt&xzS)Bh~0L`0CY8G@;5D{=zy)&?KYj zl-X3XqORch&kQO2-74&P5dG6vx6(V3@-3%tZWMXjUQqp7iFjiyUsNu6zJ5Er5d&j7 zyh*K&an@ls|BxaVpr8}=wVfT2r;jxD|Jb5sV8oW^Csg0i8+vtWI`uW#424+|S ztS{>aMA+LDc4Rs^8ff6y!eXy!V15?Sq&J~w(_buKjWt_sXB)0_}{}^LrApLO+ zt03!Mxp-vLT!g}41qG2x*fcYQRQ61=I{fRc59pm@FwxP-NU+SP@cA?(jNV`?3Miap zCAq_u-jv`2qei2uV0@=tjn}}{5=FmGVe1DS#ZlzOwwfY;(HdX_RBzSw_n4yR*VXJO zTI0cq4LJ*Cg?3@Tz>Yh?!j@#|bQCn5u5-EGaaY>R@%4!}Ba-}>lMOd0kQ-XgOdqi5 z&|sM84^HNAiAW!9_l`X0I}W%$8stdjn5BUMHky^@_g2@hia$p{eGq_3ovU5C>jvt^ z^!pim;@n(2qXYP%;`*}PY~oOpjf=*?eiBXhGE%X=eVf-Y%a8M8eY7($Z#(KP<8mn z728hUFJYhA4m*ZL%K*U&@dWil@Ds^=7Frl!eYA?ccmKyAB;d7DIOtTkB2=|(8^vP! 
z|KWST0OQ*=kr*sJSr(fhd^YgktUKuCgg;7**{qWQj7sDo|H`vs(1)Yi*91^jkx4&L zW8=Sd$P->|y7^c|KK?t_lv>jFouSZoTF_mXe(c(nO* z3C>Pl;U&a(_zD5PC=`S!pbqJmBpD+OCj1V9vG*UHfE&Th8gY;8QI_45o?Qb%z|v|G z>7eh1Z16Nh-?}X{o5rDr_wA**4)!HEB^JB-zkM^}py|Jvq=ki}gLujFJ2r#l4nQLf zPi$@i;)L*x8-LSxBO(*P7{U0A_{Z@~g{0 zvn;Cnj&!Ywp}A?>NJjsTo8n=@kFi29WxWbH;a{~m$<=GmT0p1^L8?{an+?Zro)j{} zN&wwUc|^gA0@Jmf@>ACsl9bf(LFwPz31SWhY7FTCc1X`ryG%9!-`pV3OnyTQF`QC2 zL;hvchrJt3Ai<2MIpL`KH!w_BN7+#grlLm9X7rq0vw0ksG<$&76yRL%29*k}c(*Yn z4l#b_ZS+=!qN(aCMs}NM-@&S5LQw zbmsx}u+OcPVa-AL3bS=f)cwX?*u@)E9Go*==}${~LMKLILjlA~UxEDipWUizC=lw5 zT&KW4w>}@60R)lB<0*Mby>+-cjZ_{vru_iLQO_2OSTi*h*ex>$bhHOSlOBkD?TV+_ ze&BhxrQSDV3;2X770i8Bfj!;KX#gkS#|Ol*!L5}QmAsxA3ihKB6fu1Lnb%9SDQ>W! z9IwEN^vi1*d}!+lLitkrgz?VE$1DM`zS(gxC%UgCCI^=lU9EK%^6dIPfjsHA6df$z zeY>V@>pJ~i~XD!aS7ySAbi?@7!07U!*vo9ZBw%`H=x ziQ?d9?F^zOK4e|zO+}@@=a!HIax1+pZNPx!dy^wjW#*a#&cx^^;MjU#t6o_~%)i2m z?4??Nq1Gioa2_n&4XhKBq+_D1(787jUgT(^iXraL5M^iJn6ki^R|F1d@#{Ls6IhMi zg#&Rz#+%0|s9y#Lm+%Yq-%oIZlKab3u;uf?N2ppHCPDvQA?cD2DapJ$ZW5pm+JP-t zwC|dDLIu`lKy<5dHDXkf8%oI!dE8@*dioilV@DAtQDYU~@{o1F1(o(T%g945*tw?Z z(>>oqpSjwD{RGBp%h;5PkKi=F?z55ho}+M2TwV1;oBs`i3o z&ztEJix%q5B2kJX=9+6H+LU$kx><(ZQ{AkKcvxTtTILkvmjuoDIrgve=D{z4l<;2_ zy(AAe05l&G&Yw$=zt`PyaFcp=&4(%Sw9G|ihsQgoE}bZ6>!AWD4bHbPtue6~+gPRu z%_mRYoX+I=keI30g)LcI>OG{dKaf&1c$oAu(@AhUb@ejN^A7r+UyE(}pMqb#yXGB# z>x;21aSSk|R;Is|^T0fv&qV)xW2l=QLX>$J^EL?rb*8a>NxBXT+Dfl#;Zp{KGD`m9 zKt`VaFkAXa7+gn!2a=Vb!WP*>@wc9bm|89LrFQvEo&%~@0<=C)_A`FajE zC*VE(1fxtV9=CK;VyqdJLuw`WiuBIH&{gD|QpWqTnyf%vSOFM#984fRr!rQ{D2oj~ zso3*9h*GSrhYm+D9?TeGI(}Z)TZ{ia`-OWkD5mRrddG|4npW|x|6wu~$e_$Y4wbx= z4mcy`SDS2n!=fYUPrN6O$Y~y#A%Va_=jqCGi3}5(^NEbrU|61QQ$5MCl=b5!YBwoL z85rBgEXi)ni6m2Z8y2;o6#V28E4-baX)dN$q+u|r$ilz2Enko0r{0$Ga`U*LcGWrS zUjhf^CWlWKK9;5bfJ9%5MmzDOu2B}_?b8oZd$mVvRZB~o=bXChQXaJ^l~($sHfE$` z@%!RzCbL3_DJv9ipzjpso?n7O^`)^H))wWz_+R&A|0qj+#WH3T!o75AYVEPU*)KZ2#VsW{ zP7NYje&4h={()!rsBaBi`{(&eEuGJ8j>#n_8xr9bYi*%t@x{m^whAfncJ?*r6zu*@ 
zSFqbPj7hVx%+j)t)44@Ru?nU%JIw3A&X>PD%pA{Lmd+c~)+s^v~e$|&*r}r42C^I1*NG9C2G2(KW3$lDP?`>l5mc5<9k>8e9|0kj?6{` zB#eoMik;S^&U1XL9bQ*j-?|Kw*-`gPARd{jy4g<%g^|+PNP!)k82U8E%B;L@+R~E1 zAS`m;f)7+*?|wXCErmO?m4>IG0jc5Ey0&L$-u;&wNU>I) z3VKxKDO$-PmLQx`ZQh^Y6uD%rLiAnHl4ECxuew;=~W$d=g=`flt9_n4{SW9*l`Effn0h5kCXrx-os|xe@qdr~F z4LPdMOwgE5%2OH~u`zI{!WA*DZZQ!S_zYdjO=yQ5Mpk|@I9Kuydur*i45tb`?W{Bc z&6Q5#_VW{g%LzI@Z)gEz?$uU0Ovs0uYMMi%9C8iYk>tE~;dU%_D1a-xy2VI!liLW6 z8loO>nWe~uPdYB6Wf$ebugdMe=T=#j){cNFSIJKKyw(08ytLKh*z4S<0pL;ILWaE| z8h63;7E3pw+6EYeJBslE6QY)yL1{VIH%tO`^}-Jj$6Hi5d-XDW12q1JimD}SEjc!x zigW1aa>ozI#$!vXLiH#zcVO=ucxj6nE#kv>ur--9^deV9`28~%!>=j!L2jO-?C8RtFJsGMs;|hYFdHMRR3DaEtR&{{ z|D{OKsNJ1ns!2R?PH!}RdNk&ug_GX&OLB7U;=3s=orY32;a|T~KJ2=})s?%HVSR%0 z3e|j*1+gz*mqq|Wu~yi=>{4^xVUeh@J(v$pyJS%WT>P>gnPEU(1DwU#DRZi`oTR$v z#wU4s#0A4u&_+*Utc@L~wAYkxuSf1pC|W9cVROMfC(CVXlZisOvtv01I^=w zvYpK*3S~Qd|KliYQZ=aRSP2ts&c*vKj0ep;N_pm-(GPI=OZWbVna zxjpZ|W)b#mg`Id1)1qKG?8wKln|Xkh#)8H6$HVsOA@ku6*G}H~YG%1cl$s_hSU}bK z&bw^}ldqZ<(HNd639y*ufjOXi-&q0BU4Xd+#iAN;MZqw`PoStSM72`=#SS~eLjlEm zRa+sSdNorQ5{J;w5;poW8St!u6#d=it>CP*2V^mo&-TSI zGc)CefppFCnz4dDY`xqS$pzyESC5?oXTQMY%@A)4M#eIqP*Z;raV*4$&9D8s&RhUv zYvGFjnjQ*v_t={%1zEyc8(X9N!cB~-3j-KdbUd|$UD`diUnUN||9XK=1MZ?;2W;3$ z`XTFFc);;Z1~^I#%=eWwls?&f-J%BBhPRnWf@R_C(AjeYn*w@Y@aHEd7@+aU z>mJIHotq>F;hjpHGFJiP(Ul-?Wu@DlSxQdk5x24;>CIW@=-%(PZGz>ETA%J{nQ@}XhEsQyL05=T|R=f9S~aTr$_ zVo>G^e9pe<0}6}CcO9{OYSZh8N0+V`Jb5%^k9H^v3g~zk2M=w8y>!K#F@1R0?%?-W7eXez!ljgZ`?bV~{GJb0P^yuiN$yoxfybn`& z00lGG9tuJQsfGIGec#AwaohG|w%~&b?oV0hDkOG1ikl*Gywnx)*Us-_eFh1?E}FrL zBS_n2%4%a|h%=(Pq$JM5O(d!H=S zr%UJgdA?6V=0;&2#iJ0d&=^TUBF8<(nJw1YN>`ANZ$!OXZRa)C2xlpx%YV-V0a?8p zkHp&FBEU|+{(k7GPriw`aJUxz2nu(R-&$f$ua!2M%l_!C8Z_shTvp){KO_PSW1ka4 z@q5)Gp$Lk{KQ!r0p$|v0JoS+H=?OZ@M!aV!hp7afrH)#Wel!7 zhz_;D9)bYi0VACZg4Q{@-U{$RepVxIBwzhHDar1VMNQ8>i6nUjF0PuV5j>{F_F-d`g2zN4JUo59el&DgVqclydU1v~_CV+041>aqJUKPOzOyB~dj zk|!Ihp!xR%HdS*DI)0W4r*Qw{E>L zwx#^Qqf^&ZZ+4->)v1`9{;F@%7q+vCS#C2QYVTFTig-|)rT~-HebO>do$h_I(*~ye 
z!0qUiLqEpkv+|}9hs^qIbLRvu=Dhrp-e~30!W3j`s*NU>Ps7#+Hx?oiOP@(Rmj3+# z?0DtjYiQmQl*tGt;kGwWa&dBAASp>d$}qw}28>9SwDUmB~-MZgW$4wE9wKhZK}Qew!p z637a}@o7(s-~@%J+eCEsf$!ol-MI89%W%N2e2%1QE4`thy3Sm~s7^=L#@kj9FmP9r z1Ll{&wZ?MvEo_ayJ2$`S&@X9Mi?q9r5XmXv3D7>G*BykoM}$Y*5>7^peSs z^lYra;|_;{SFQA*uD|*b>m)U7ln)8A!mU7dPE4G)f#vPb9qdWVi$Opmf`X8P?nY=f z4#7%5qmmITsC1OS%|DZ^_dHPoDZLbZXP*V2^R4)w`Ir42Qp3}-#S(^gohe)G=`XYsg&Co(JB!5BF)=zcsI^2)^u(1!{RNR0{QkWW?6At4%+gafn!OCix8v0^cN}2DZin3s)E}l1;@mXs-9x0j z%;eI0g9F<_ac`O>EBO*7dDw0CbK^rleqPz{?)|b_Mt=F6@sdMiflH+al8|VZ<&q%2ZI8ovi%IVVZY;dO9E$a= z%UE%7VB0OCq*EDsmF(K+t@{%^5kqw)2g_zRbN+?{BTHj*V95>t(_XMA;a01o>O-vD zS%IpFN^9O1-?BKMp~>FdpR%?R3+7652V_DWsjTo-R^-vEhK(||2W%8tw1`i@GpcE7!2&0aMUE2Lya5AsZ`Sfa-tZSdj-@1bSzw*dIyQf%Ncs?N+vW$f+q zd>(!UB2D{BQ5sBiZO>>&mqy6TH(~A#jdzaiU(_l6bLnkGHP*VUr*jfUx*2)0>EM1{ z(uc*9&Eu#cx&BSt@r2$O=`W*VN@*5#vB+oU#n$fWZC{U`%K7N-f#Znxml^H4R7JPK zTY5?~@fR^p+YKf)!_JR00l>ZB-md;OO^eW!dtpqi`!9tANS8%lIr`)YeV`?5$~_& zk1zc`p#X53?p7IcmOJGdi1{qxgYK|_!OrhKzGNMKA`+FHDyJW!{IqFcV|)D}(U&_! 
zYG%meKej)*IDv{p(L^JmgBu>G958nc)7@FvUG=EDMy6od+NAY!uG^CPF)NBAeZPIy zA1=+`3e&m-mp^R(`0@Zc^s4t89=DBP`UT9rwYpYs-&kz4s9{PEI_PyfN_arMuh+K| zoEqP~+CLv9c+`r*b-%Pb}&o358GKA8pYz&dC@&9*HH@SZ2UqnJ!wBxWgTB*{B;zJ zoW9C64frgwYZ;E`X#;U???uD7k`jPxu-8J~MoTQ*VG*T5psfhH^oI`aZ*@33R4O>4 z)DFp~h+cl^(<<-s#J;)s+QS~Ih1L6)2?9NFtgj_vq~nI1{?c1{sn zJx6Z@3AA}c`aah|BL@KnUFk@>Z@bnZtECQgU}xLQ+3!L6?kZi_&2OSUzMtp z6|N083qAhL6f0h}wK#e@-F^EX`Du*iDs+cKR&R@#c>80i^#JSap zDAT4IhnJIuK-jx*_FWJ2)`0 zq@pp&pNF)uVs_HZ5jc$2*O)H-yxLyah==pBzGXctfKTAUE7fZF90AiHgyq9Hm8z+D zV_F3{l>nd4*LLS3sdmhe$IXPFM+qu`-OePGu9JevAi@WAT5q-%*;YJa2@>E-YYvsI z+7&r5vKbyy=M9immF9b8&#u9V@P^GZ%i8630<06a{|VB9UEkt)_$9-!*l8j?T8t($ z;5N=<%jUz&KOl*Z#oVex=g7+9R%FY^Q}hF$xxfx`@8kB(ADh+z#?kVv*NITKA%q}| zPax!U767<6pYFgkY?P!}%XZTZ>HOl+wZB!5h0y+(Nm1`f1|C9ax+?}|hkL{7}c zL0qI@i)9E<|JVSHW3t=DbS=~Z9A8wH>Jub3=!9@cdSpn;R?Cb6iUo~7hTCKRKqHva zQ|C8bo2G=O@X7-ip3FS@V0;u8PFxl{?F)k5@%L-H2>`7H0Q7CT0oeH2?LhUzNrU7r zfM~cs3<&Gn#I%143r}_`=eX5v7qHUWD&O+xDB!jy z551(H$}zqd!dJjy@RQV<@;fKu2>1Pw40T1&jZBMapcVOpa{*P>j=weBr$+o2DQt>v zb5)zT0;%_*0@@~kvIEZPno;{aZDf zG6BmnBAzn^)~T!VCzctHlCGupYin`V46%=BI0I=Elu^zFIz z2MTR6PiELz)l$2z766?jcbW}*fKY##%YqcM3F`^GhYdyD*QXbtyh)!3V7%UzBmgPQ zKj!Fh7MA^;MUH&4nE|WzGBG6LUK&}XQW$ev9z$`=X<#51#b%>hU6@p!?HK$bQux8( zKjB&mXOo|Ys4I0=koeZHvBGObmja|raQnsx$V6OkfD`NhYoO-L^QpxW>2#If^35+O z4^7f=Nz%oBEz7OD`fLI;GvI1%={>DiV0xS$pH#kz_n^Z;ck(4eJ@|{3pt0uU2CDU` zx--u!K2H3&+x|~or5>~~ALvV$Z}WHE2ZBCyR@oW99=04|1BALqomTFxPPq&?bC{06 z*a8q3< zc3T%*(TF;bXGh!lwW=BY5DTwFTe)0uZ+IRQpb?xJ!?r%vpn5~)WAlJEEP-X(<-i@W zdK#Th$Wvuzv$9!NPPfC6>;#&t$R?n+ro}foOn8Quf~C2X`Nf4YJBuG*-E@B?2xzl(tTs5!5&DC%?+I^4-o;*K`+;U zv;R+N(G8aGYaaR5EdD9|w0i%egwP4UDHirYiYbKD(%F`l&yT&z9rA4`?Ub=4@F>B5 zWL%IMvFaSGcK9n%$R+0h_T`}P(&kBr9aS=aS(Z+sJ@)$uAA7T&l@(dWHVJ@DG@Arj z3H*I+KX&?x{v&VV?meww^~JhmH5{6h z@$a!z-IsU}z*<7@}qi|*#Vy~h6a@T5Sh$V4zd7XgjR4Gg0O;}v$fJ8o#p5-%^YkSsyu)ic;e8o1(@>k zuDR;jAhV+@DFG7w0GqiHmIb_b{=Pnj?>y>z1eT?H>rFHu@$zMSW5SScdP8@9HKDB! 
zOmD=(9_qqzZ~9Ztshw~+J>1e>m3*s`scJ)!bRA9OHr*K?;JfqNh?73)a{ z688-k>h&y0C%)cP$y6X+QhRB*=h4#ROrL$Sj~rqBF?`1}k6~F2shPAFGO5@ZqRg>1 zn62$=-7!`P{JBN&I#5O*(ttQ0)0xbOM?1d`{paybuN&0#vaE>7yZ5{qc6%76y^*kO zGc7CgGpO7^z5sUGb`6xuXs*|_j%O8%c~7t1wpLpLJ=h`5xlHc7=CTP-_}om#Ii5r#A$asztiG{}^ODE42EyzoWOdl;hRL|hHAH;X#{ z5>MOg0Zf?x8;^~Ef8HZ_yCn_03}dR3fU?Ga<+;*q`}x4`l(_ww)jK%znhLl@( zmSqK-011pjT{KCxGs;=GsKae)T2u%|!@7rm?QUE)A{lqz<6U*>UN4OsOLQzcaPW6# z;UOoB^eTT^9C-*k2}4y0M5XTO#&-K$vH5-@h(aTB!blvisJuokW3c(EyL~m|{ZKG* zJeWLczA&JZaY6I?GN-*Z$-S|%fZ!O)7aORA!2c~LRRz9@1lXuVW?=; zYW!!#*9aevL6dO6ll3^nyI?Nc!|DA&RvPq zTyQGld4v#k15Jp9-PzGte^2VdtSJExN29cBO5u^*Qx?c*9>Dh99s|*>iEQ?- zNa~sqWnrXz&HRu4x>fg-sz3^ScmM}~wv{`Gbu_d4rKB9k>8l&%gx_0EI?=G>Nuquc zV-EcRw<#U{YfW1fFFI;z?=DtEJ#x~RVK_w=+Ey9+zpo<&CzX`?T>I=6F&|;{6R|mH zz!@L8od-3(e{nIstl}LCQqjx%CXP`!*lNm8sRNtu4B&xB#eiL%*&=^IVa%xe0k6orw9_y0Z%Ys1CmxI+)W(`QwZ%MWBljet@4|qK2)iG9S@34O zj#tIdugNNC6;H+#*Y*U{(L&7{Wh5tvT+42|=n-45ev+5eHXYnGo-jPl6Ovlk7EHpg zgBP0b&7RJDQgzfO7yW$SuxV*$b4MXu@bC*AGMU&gyVb%+T^v>oF($WSw1Z$r`!}>R zWNxS&clI8_Uh{YDiLx?zj`Z|e1!vF%^G|8q@0TVRr>?f8+x@aE)Y;+yXlv`&Hvh{0}*Q#Ka!wx(qBm zW=U_1zFtat8CLPxb{Qla*KVD5^c{W+tb~Vfx3^o;PC`tz$~E!6u84Hm7vD?m-&OZm z)juraShBhJMcaudX?g4S7l6+k@kE&UU?1a3vlzrBPT_Cn4jyljpI@190=&G0QvW3c zX1}t^;)Pz!b0NPDZjR{HV2$SFQvWZy-aVe_|NsBjLA{htyrgofS0rZ zq&bVs$YGRhB*(zQW%Dn*_O@h_gt^{>;3)wKHuMOf4N+^ zFnc_nkH`IRyI*hnuWUZ4q2IVsfe8XlV?8!Q&Z5N4IhD$sGRWli@qLLQzM~(xECxD0 zA!nNqJJ0CCIYb4?SJ{x+$DJrxsY3q|^Pvs1wKyPo(U?q(;0pq60yWr-+(5^zNj5YB zr2v#{tB|%U)fz@GEh^01`fM)WmIJr4h!qFOzGi8{1-ZydoxB=a!DPOk)}X zN={EhgZcgErx7O+Uf}E@Knd8DF6<+e*3jV`m2Th!xk7yn48*PlCr!hWtEmEziQ&FJz>F(#!X%omR@N_AkEjq~|dy>47^lv1uXL@;;NLvAn~Hs-tXuP8R8D*J#8Sx#Wcoos09;%7uRLVSlVQ>7JC=zV<7+dvyAYs>HBA?DT8V`$k znUjEq{|Jp?eHi#SAb?sm{m@KAU}ct#Hsh7=wR%3mEbB6nHk6Fp ztMMMQ0@qXkQ`G=L?NnT{nO-ACQx>k{qR21b=;m#AR)ir@VV}HQ1-J z4jr`cZ2v3xY}6WFf#&^&sO8t$(7XC^>a&0PQlY2Ns<*UiYuI#*U)QTBaw>cxQIN;) z$M#6S+>XrCpn0IuYu#CB$7{-;k;ISf+~JH9tAWe@4_D2QKFr}DpOhJ6Im55a$xvT_otw#|B8` 
zRrzjaGah)7rsRP{?5T-KJJr7MM1j4qWpw8u>eQVsb~>+!IuO@}z2k@tguF%o3T+L* z%7z-T8dp3S^^7evecgK!&6olsZI{ub6gzs7v-)#qg$FBrMw*nAutJFVZJ)#_ZiYZn;#2RgSa_EnK5KBJAZWU7U%= zWIfca_5@3>kJlfD8JE>q9U*_VG+)RO|1HN56cSN-{=<3fdUT7`1E4Sc-%!+k&wt{x zV2Qb2s+yUkljDRIyCp}C>WIpj|HBP&a7^#!((jwSmADJV#Aged<%!GMqZ`Dw(_(#U zs75ffAZon3?XDEe`9Uvdj1^>-Sq?~PQDL$Yn^Ul2B6wI2{!Cl)9+y*zC5>2_q&R?W z39vHy&TuKmVyz;1p(`#@1);>H3SpLbRDCM z)8H}&8)j$I#+h7VJqaI*g+lc?6-zTDbt2Gm;FjytQA3J4kKd25U68~PHfkoiC`36^ zEA`qRwyw8B7MZq9qZeNuxPInZ!6^z^83lV4r5r!8M^u~N+bFY)>(oe>LGLV>ubxdu zNI%thk%j)iv zc@bjrQ;ze=VyqKi+d}cH+U{xbk9a>oG673OR*(F==9b1`ja;qTjQs#QUu3v~2gZ%^ zBriMtr66fXm2GhNCzlYcCBLjWapa$rBE<_$O!<@?hMA?%8ZjX7Xl4@%+Cx3p`lYFV zV77)c6;PWxJ!<0n!OofgY>cri;dCF%-4@R4$M0wQ4 z*7OE{)F%>H&GmGqJljQK5RIEuC%S4jRuFWM!)@fY0EF^4LKy=7jLK$fotmZm80Ume=+w%emc>6onwYaF>} zK+6$-Q1l=TfJog)yN?J>R|Y{Csi7R!YkW^Ub2g(Wyzj^mRJQeM8$sFN|0~4lG+wbn z9L*qFthe?Ke9ZR})p2`wmm?N-i<~UP!M*By+NSviome6$Q2@Lun`Y!bF@Wiz5ceU| z8J^oZ2WvmJFY3>QI6);n?f!{XCCS;!-O6^w`psJ)mRszBn8a+T6m|0%F@PL{R&q_=7CN*M8r>@X zq%YK>!esGe;M}hH_`8L>{N87KflB3+_2fX~3qq4>tHpA7rrR&MYE|$*Y zT)5|ZA{*mO~ zNHYfca*cmvUfc>U6O9rHIiu;W$Q88awQlNf#*5&K@n~^HzETopL{NVyUqv=gQq%5FCdCIA*cBea!sW1p4Ht1a=g@h&t zL_0zJq3E}3;FF&gw4rUK%-T@Bhsy%2cDU97M5`X^W4xezlnQEpb8`G(s`sSwSzm$j zxij^Fgzo2umDOg(!$?NzJ3!$ULeF~^J#F#>DAL7$$AQ7l>l2hN9vLW_5hLuDvf_<( z&lBTINHu38r7sE7@YFE)Z2YojT{q$a+YpGgM?QX4zJApw$*?Z{LmqQSlp0m4&^vE( zp~y!Z0Wt2`^hmOjI2)w$$=1mK)yY6%(lQn8yL4M*d*gtEcaFS^L>oE~^PE^AvF!m* zYlnPzP?w1a34lO!n(hdXa^~CtBkNb5I)BGLHVd^8XRckc+?fz|Va*J{RYXT79t?>LC4*62sl+NqQ6DEwY zW5J^d4v?r7GM#K1suqleZs&z&@@`omE~>UnwybxdHzq4Jgdh-Q51oc9HM9|QnG@H) z_gCZvobRd{5t!tga%fLCGfLQtunR0Ouvy-xn=z!@>vA&Y^>eO>%lCy=cj7h=^13i} zD6y#NztN$f=5*cWB~4@ax{0}6hO^9EMZ%rDT|9Icyt*+2GnQEBlU7k3O`4LFtiX%( zXG4p%rUzo}C!&BwecR&S+op-Io_bcmc{7hikAIB&+SslR0wtm+uw~ydoz3K~Ac%4;tIj+D_r_x@Q-1)qS zs3^W|ghn5}B>axCW&7%6%Yz&1MSYCLlbCBeq@zObMtG5nv_o#J5EMb`e%YTu-i+?| z*4+-p{@7Df`X>Fja|+V#fXV=`zYHdFLpbIY^^7Sm;b%0@fX^deEx10ao_fC<_kwO; 
zmg|f0o{5{|d9j9zxbwNBa>HB0v~4pq&zLzAC784ErJMM!bdI4NZg3;^k>iil@NT$Z zl#|nduvNG_xdtm;B zBNs|Op>{?xBjTb#&c%p&Xzj8z0&IO_S-28NK96Wh{vA<|QNMd;TsNyIcJyn;2`ly8p3jAkWhgHqK3-hbiR{&-iWYtjj_H>I8uaVzhj%MV02|yUCo@Rl zp729iMbZ9%%lrjk__Lh=o3d$CmYxJEbA7EJrAPS-*|_7JueR^Qe8C>D5*#IGkN8_D zl?dG{UD$hX>hE6XN-q_j;r0N|J{lZIe@r*4^@MQqf&rt@aVay23!M=qg}x61kunEUfj6#GB96<4s_TG}N^!q=7|SRg~+*wcT+50x=hnl?sEUc>{;eCW?SU zl5SVu(Wy|6bRI1-mMG}i9`{~cNz2F%zxoi9p1Km(I{y{st=Dj9At`L9CW7cCv<)$> z&^X3eszj+DEQEK(Yt0vx)_iuSs*K8E`d5Q~Tj169>R0Z&cxFx~D#37`;0W+#LX}J8 zJ|)QPOPBeifu4(LpgEjtoN;;&XG)`&+Sj*($n4m_mdmn;)n-yI^-WAa74f+UvTkj9 zyGv!WE|w39Qt#?F&d?0Q&Z9~AA@+o_nGAW_-&8(1=AAwMkcol+>a#XYy}OH&$1<75j@ z`;4TeZ{chJHG96cRMIK%jHIN>mx9zH7&H6Mw~#Z@BK-BKXh&UI)|6MgAw z>r=KrwFH;R$J;v5cVqCuP$La839nRPS9mfUp%gL->ylZ=aoa|5#NbLP$Ct)WI=j$p zlrIkmkrfs13%Jq1>vgi{tE%t5>VJ!ZH{JmHMRo$}+aYJT{0 z>|>oOA^WjrN~xIRLkrlET4gZ>(V|(AM%k+GS}yY=o;Pjcs?v0>LCJObR&2{MU;hb$tN*TTJ0EeSUgUsf=d{v@|Nv@7OBKhSe#%cyYh3-4iO!eIC z>_48|?*db;WhB3JFRdLrb?>b|3W1F-&^M7a&Em z;!=n5OzjDD{9{AK#x6g;VE*)IuObBJ&UAMpN4V5Q2eR+&m30MqdFRnOc|Z{k8xI-C zf%gOtWY4a?lwp?Ick-oaC>$GAWT^ZPE8uMbI@T85BP!gvymP%x!aiAE`b6u=;=9)= zHOeSY1ama?ekQyo73Rxxl3YJ%CR`nYnQEvJlRfK}*Y>&a`I2WT4jq-lN>&yF-zhav zy}=}1edDuuJH^r?r>V}qaYZ8GxI3}ls^rSWhetjsmnb}HpzWU#lD@SH0?M1`-l6oV zE6Gon_iKunN2wqyJ9n#|-cjcDAX)h;lB_#w)IHKhmAYV0v9e2fvz6rrYUCSpMpzUbn6KjWb|&NKIp9&R_k~*u$@$ zMS&XnV~5#fbSlO7!`%E%2lb-S@2tIGOZiy>01>)+kx5inp98}OPr*xTax2w!{XF29 zJI?St)vyX0MmAg5xv#M%@`Qjhs}+!e9kyS3iH;(#nE<%qppw@4A0mIVh z=$mi)bdNRIjn|L8v>#LC!GH%H3iLT--HD)P$sjT_l$sNI{z^G%a&NGKA3tDMQx(?T zXTp#wY`G)Kcl3t=RRebIkEu@p&pNf!_eR>0P|b!k36NcsSt_W-;-uQW&(gsWC0XBG z)5>E!w<$a^+hF{rt<=bh6<4JGavD@HG|H91)1Shl&W>H^%Ew| zA&cqWEZ56S@b}o%^1>4GSqM|s!;s3|i)(Mbr3tWZtmNOqP!ObF8l$UM11N&hD(xB4 zRgA9B+gaS@)|mXA2wF#%{%_OlL%wx6dyRt3xWa>{m?fvV@5j%ct*K|b3?lf*C0ged zR(mX8dt*{(zWMq432Co?CowBL$72A6ORgk3;x*Pr&YDE(8pJn#qvQm+-JyyZq zHP{B%xt%MXh{9D=jB_&0JQwtp(IK@7!kkooCSaA0H(F zibB6g1g_wltJJb8D2jSaJJPc-A|dPBh%~jW@bjjn@Y<*T7>s)YSiVThCXzd!cFK&G 
zwfz~?dt1cmDcD{kMZuzgZ6h%xHN2)VA%}hpr7%>8ogfnMVXsJ;F;ZhS;Q6dl_zXlQ zTcG_UXKOtA1WePkh~Duts@PaXvt6dys7=+E&C7_nY*LaJ_>CX`Kg^J~dm`@9foT9w z?{Q0J0s70c{Ti)d%jVf_XV2?yKp(1CdYW1d)YMcQbIbL7r4Lv`+z9ZhxHeg}R8j4s zJ~xjI<=f;$RXQgGaerjl8@nC<*|!czfL~vdI{Run|Bq8CwHx+WOZCrS*9(ZusBU!Y zO}EB_F*ab5dkciLRHrn|>eH4;L@T?XJYQ9(@({yihlZ{Q#-(P=3F!D-CJ~pzi>>7W zhpBIJiCGPp=G8DYQP)til!CsI%cs`6Mdw)D%L)Lhujr-aV{)tYFL-=q*a2H(rf0B4 zQb?6IARI)5(TmXN@-!KhAw4+Goq7V(e|!wob=(7`pZLnBDB33o<<}vZ#o;HMaL)x-6n)6SVcUsyO4J)VMBDz+poG=d zEp;NlqJy&1*dGjyM(QE>t^J1FkahRgpe_ulVGY(kc6%kpkv5Um?{HeJI(FO7NXyHB z5=wy_=;Zgd9g-0ri%~oLhY{yLY@cO;BZDDeR6?KtUYUdQ6ru2`cmSmB1wdvXZrj_H zCcMBO-6q?&fz#{l;6f*0RH%7I!a-c!O2JU*!af12=RfB>Z~Ja$OYYnN zEIDrd=fMKL*L%hcHW(^l=T6iN)rZHAuAq$*6*u8YVZ|?;!ds81sZIXo7L zTmW1Fy06EL_79v7JRSN`Fl77@P=8QAW<*V%6X0MZ0X%awB)Xs;I(==jbD<#ksNiSv z68&Wo0e;KV+F;7JPuTA4gpkUrg#5`$1w_{T+2*HQQ6?OvM1Ezz03l>R3BKfcm8B0F zh$ZXPs)g}GlkkEY0i9$jGiE~%W#PsD_22!{N2C=3@)28+ z@o50m-dXzgftWyDllneJfJIAY{Axi9c}@y`OO&oiPsrauv z@J0L6PqKxfB}iT;Uh64|}14nQ)(x`dsE_8O8AYrt^F_Dq8A$8lo= z@T!v&oJ}sJLRnNC{K2m> zX%iroi558K%u@s@z`Fk$e}b5R%~F0OU<9!f?3sP%kcm9>qLczK9%8`D?F(J3y zQld%_gyPT)Xdfc5Qs;^(LonVc5S>VF)xEC{O+2tJh9+Mx3Z5xwzS?ZFpd|J~00>{mHod-3{Q9P`D3R{F z z{A_Gvdfr;Dye)7c2FOCz9!6`2MsH8FXnS$^0F%cXXs1a+Iu;X~YZ1IrJWDFp8%xVa zluG@X{O#uAjYS|IlmvQv%KvM<1)=4F4MhO(<_1`^j|BiXm)h&fYBq>KsC4v-!^6LZ<;=jI!4SLVFRiG&@bQ1f zqR!`FV6%~Hr=w$f<}{7vwnmf44N02UN?6owJ<1P}DqC`YaZEp7_J zK49xGJD)V)GSlL(X*JExoRY|vMy~^;=dn6;rtHo9rb`WGG$-!$OU+DeLebqoi0gT7 zpKIK+@Ktrz1TeV51UmI6ip~&orV;^+sA{F8{QMi>Gxc(w82s<%{&(SjVcxoX{Euo5 z0*cSJEznUsIcFaMeD^gEk!RMRSd@R*`@l2Yeu)yBjL59+n%EF8)qu({C@*M{vD@m9 zRc@Vf+ClK)8g!F{U+umkktb0pE%tHDuKVNuYy$dws9Up~V7-356tf;w}PebzntjB<|t<=+aBMBCdkV=pPPJ%RYl!!*zulfhzC%(C%$=W+=s< zZd|8?3T2vL4tOOM5Q0@jZ@XcqP=^DjQA|7GQFO)Rnp{mNmwGn8Ep7U(+P{B0yg?+h zp-$|Ehu-Jh=RN=rVze)6oViu?iZfY;t*?uhPc z) zrdt2ZSfp<^2bAUmErw;U8Pa~9kCMg&wh{E<5Bim;@`V8{J!n{eefNQ4y)mFe?&V9} 
z>C4j|Q}m_!PX}`PQ!5s0i32fsba!ab@|G1@5>sMeM$L7iL`RZqH*Jkb0r{lTyz*yqd?!@rsEW3417zf>kJ)C2C;p;_GrtHiP2LPn!%xrnR#fR%nNwHdz* z$fdovONcG9fThmh5(61fA=h@em`Kx#wMWILMFWl*8wqE!V+8gpos#Rx#`i;~f%OjV zV*rW;IsiH~S>Ftz1lVt?YN1vYPObxuAUe;4kI~3YUt+-wh`2z;#G2qP&$nL4V-l%V zz9=aYm4e`!WBCFFq~sr=<$X9}kPFk^SQ`O&BJE}0E9GgESwO_M6yes;x2!$MVsHig zg_p!*!}go;o$ay!8q$T;xpKcbA*Oe{BN-S+*By!*6M;w&I^jnVUU~L`0P$j#5n$z1vi>U{T^)9!o z30e`UYhgs;HU!XbXsDlPS}&X5)XOC+pyArrc0km3o)=>s#RQhpFR%gX?@2J1AiZLdo&{>BPy zA*0!cdZy(G!!2n}Glk4fR<--gMlh$+Lp5X({<>~8lv6qF|KIJAEBWQxp6N5+^LK$M z@2G$_OD5i?^E%c1r@-$=m_3@3+b)`U`JpPTFwS^X?)VHs2J}4}m~I}uvbuo~2(w~L zr2wIVVm7m{X|1Jrc{f|pWYaRN(2dBty~>fLo(!y^qN&hnjbGC;uccNWR(f9T{rI_5 zSsp&CkBXUR6bJ&E%@{ZE`5tPq$n-pYO?WOoDsTVNv;D(5cz}4O;al1FU`HaW5#{y$ zxMxDxu4BjVRXyHQR(#Hv(HrYx8h-X?+me9UcOeY1-lN#buTwUCX534ibR)HWHXuE+ zX*R~7Nur3$Wbd2yU1R1W$|V`bb;MBaHRfa{yw^CR+h6YUM-p6l_S*Ai_1OOM1JC@t z8Z`Wj1(OZy{+urYp4qgCR%jr`=xGnm3E_>%;^`m?_sRa8;d@qTnCakGpxSH9j(GfT zA(!Z}iofNq=}@#3PL8qcpJuyv^9>V5H$Pni@dOX(N|0jkKvEh#2%n3k zGz1;Tj+SJl+d8dp)r5|=9Qw2KXc>yixp9@tapJDD!l=1~tMocfzl%0+99QBoDa+k? 
zZN}erdTfcXwtcP61VL^9wjc#E3W~@DxfNCWvf*XLVGH1UYb@rS5E;%);~dY8bEb5# z{_UbZhvPqu=%m9t-~)6MACq4_IZfC}cZtdBL%2^kTODxR5Lht|5wuw8RlEq$5c;cl z&g^mhn==6T?e|fU0K}JEU|o7pS;b&rUK(S!9Obk4r+(O<_;Eb|Mi7_zL*RIFXvmiv zf@nX>D>-y1y+Hrjam?ovs{_BT$X<;49g=WaJT8}rPBu>M5gypL!UVVUe1i=tnK0b0 zbC2KfGPDh7m%Y%r%|XGiPVX;R$;sSQ!E%6uuu z3!uY@_4`RRy(-+0)hrldY-Z2XcF76|ppfe1=@9grY8us%70E5VFf{@PIMLGk4R~XA zPTYuGB?RSw$FA`32=!;*p#9c@w)=j7&Q$x1^qq6b8m0VO6XCjN+Qjs74ESs%{f%Ku z=?$ay3|}bLa!ilJBPZ0LGbHkZKV+@O+|s*~w=@KqhE3H1o2FNT6z{S`L$S8*)V{vW zE-Ft-N3aj{;M(LMLB~%>oQ&!;VHRhl_ZVmKUi6dOd0C-u1Y_!N7>PgnTK(c=i=b5q zBa?QneeUf=BQI=Nr*0(bbWQz0hzn%q)GSez3ncJ#-Y9UWtkS$IYECYVP>0ZLRoa8V zZlJNfssS|HhDkq4*{CHVh?4j|;b-s8Gib>%to}_G??u{my>83c;_!Lfi&kOm>7drh z*R$#)d+k7)b~5S-kBeMKQv7{SjdEDFDsm6Zaf2phYlUK>pDLY^tRh0!C#(?%^PVqcoG3mk zS;bMGS-9leJnXDrLo{W?1X3P0!~=#Hyk;9%eAS{n{OlZD?v+*8gq>3|5^`l# ze2IVSb=x#Vd)oEXU!RRH%x~(*GXS{GGD(o5?0EYQLnAyREv@M+usnSiW51v2w%JZ( zZR;)IFty@c(Sd=b#a5NY&XY~ej*e!D7r#(J%dzmE0rgdXKD&Rx0r;LmTUtFI4!e|tAW>i(uyc33Alsp;W%>F?E@U39at zdCuAztbc~O0_!)Xc6j&n!;5M9{>2E^jDJltiBsLYVBP9Ta+;*dCu$(@t8SZwZ&9Y< zbA~xWZ+EA|Ese^iTRdvh!CwzG)Xf`lRlWLd<#3!&vUgV4hkR$*1+IaaZk z8>5wS80QzC9ImTqV47@_v=-`Iw02ny>h z)GB4^1sE-+dLobzta6`Jd3eg9<#zfJi-FNSC6U_NSN+tLsTfs7cN9IM?`GmcF>z?E zB}C^1R{uK8%_C~^&~WXsrCu5BM@_ftilcdRj69mZclg!KEoO*?S-NFSq&t-tb$m+n zZA;Ep*T$|Wr5JWc4;;^rHVU7=nhk4sRpPiT1*b%HUi#XB+@$qj+WNt{H|%+3b}il| zu@cOcZ5xljdLG4G#QXMNc3;j;O>(m4HR4E0q_EDe(DH2qjSG!w&~bRkWgbF}tzPK7 zR*q#Rv}>|>iXrz+9rq}`eCwF;>tX6qMONAP#d2duJ|43=J^$h|F(#*P$O4RA@4BY7 z+;ynFYW_F;waJNYo7*MH?G1?{J5>+F1cukBXfl7xS6#}eDN1kn+sv{nY}a)om8IWD z8bQ5T2z8PL!j8dqeAZikw!3ooOfFP3LfWPCM}AVnim7s4;B$2f;e0O%b=$$$M6fIv z6`2g^UBuF(LXz9YQ($1bngq8@nGf6g6jszM4P6Q$(2fdQ&cP?fWL6-1V!B_jT2&oN zkgC*XlbkO5Q*T~IZq$lDa?L(T(LY;ivoFKfMZb`6u9|oSnG@yEesem|Xzoo8uZOD^ zB&lxQd1kfgW!iLtt8pT)c+s)8rq*OuJ2f-$(!uc zJ^c2a4OSo@=b_uc$^>KKJwsy+h@Rzq8wsS9!v;0%RCxjSV zRZqq-{xDbhZbf3K0-iGLJvyqJhpiT1i^lfklX9V5dCio$m_RB%SnK(PT@=Z_pG~zx#`&yrb8aqQ zV>!Gx*t$K)7)IH_MHc&ccP9>cmUm*LG3WfsmSg!%*3r!_SB%><lrzs;{lxjb 
zPVyPq?;MeFz#O=tO0U&x|KkpZVR(8&>WKk-?=uC4TB9W^@V8iE+zcc8i5|Be z-?RYjc@^{Wier8r@!UedllU2dPo|>w*n4Q1RH-!iTb8CLSzR7o?Vk37@*I+6gZ zJk#)fJarH{kXsX zB-Ezbx<+*bfo=iyWnRmKEqK%WAn%FbsT6h5ub{bQ4#{Sv)ejvVy;W!#2i~5MmG<5< zT3Lg^WHmLBp(k(aYY^^!9WpK?L8UOe8cHT2yvldou>eA>)&;fbl5bfOP07l1gRzqw zu@SNP_(~%~RAZA{-ZxiN9&X18g|p2n0TsPtZbK^82@8nf+}2l4SWlEgE?K($TO0kH zI<;)K*l1gBLtnI7w;n+%+7_odXViCf&+9nioln|+CfqT-g&Ym2$ShzynVpmsWb#AY zWU%9_MBU;Rr6Hm!SgibyEc}ym6HYSUu|d<%WetbtfmjoPj~jJdt2~$i^CE6_sb)0E z1QfQ}c|(@Pmo!QqlR6f3VEnxC6s1)fP=x2Vx|#QP*YPIn;M*GTFR{90tp3{uPsP++ z6ok|ejIE(xFN3mWSD#OS*TRxN*N0F{<_t2s<_&V=`s#CtW4Xa6vud6`3wx?S^2Dk; zwela>id*3EnGP(OAN3)ZdvBJk;^V7U*w2z*vLabZcbr=~^a1V5GHyt;kJ_lQ&(i zgzx;aUKE*j=Bd_wD7NEF<9qp&&!^oAu&9nTfWv|;N4Z7ftJR@?c}*%cTAvdh?bd2C zU@(yQ^(UdsAbZ4pg9vFVeAzDaOJwIr?BoT=_asTf`JYcZiC!w2=4tTB5Nh9zf`i|R z?rRN))VA){o{N$L~7y3FZ-e2xE>bdZO&9o@a^bfsNU z?0SQQO{)1lY^!d?NJ?ACo*5-K|1W(V#0h^*u*mCP`0KwWEGlmlHfHuqy=qz6_HkP0 zl{k7Opw>)$oOb`G2fq(*leTmxIQnOfTh~(k&x1qxysiTxWe0*D_YhA^D zsR_@`K^~(CjYOLIC5kU1fs0ADTGP%vXF^}?D;U)M%%Y#Q`}gc zF#w&Yeo4p{BFr5S5v7ba(7iZ&;A~`T)4_ti>vdrNDf-J!{g6TSwvWW-Hg*>`>x%zL zk2Mcig5EjtW#tIjPqGiLpK)Y8HT}nL)b%2=Poa2z6)I(&;vkajai%zSVra0qAG4cX z{_Xb6lL~Vr@4Sb;2f?Ov>5*rPpuIlah?+=et(=GN_1*r(zO)_=)&RBdSGeF?4X#|a zn~Ur?bGNA|OKj0~q?n?RasJ&02CH1vh?0Q%qf@x03MR%{j*jT+usAv7xGk?;1vKFUX|CM)4O71{Ch!tcVX`gGtX@7u!3y=m?qY z;M-1(KNeu~v3)wAxydCDV&~$ER6movQIPe(ih%3zw7;T=4knp)0y;cOVu&N_G&`#` z1?XU|ubRnxnaABnFq-xV&PSnR)seRwn0Uiv z0wMc5J8lhT7CZRu_VO3O$C_BB%(xamhF-4HY9GMnyq&2xSl3FmPm8f^+5KbT&Ac?$ z;4)zo*fa!CHPK9tP*S+DeB6f-r{CX*5;-el4cen{IHqLxm&@Fux%^Kr=q_oLfnX!9 zHr;5)I8N*&_LYq)yej)PVSP<2Tw#;l5UQ#q*8+s0Boq)!yw4)1i25CQEMXo!oh$74ItRK8#bD zs#;7mGN#kMfH|}%>a=q*Vyms+vL9Rp+h#)e%pRau=CbTs7tus6JNEVd6b1OGqaq|qW`wyF`}UfP7Y;YZRR=?eR&6y33(-{v^K14_kG|{lVjn6u ze8nhKLeouP#kMh!(kSm;Lg6QG0X&(7Rw81p=NkY~&;cy!{k4z&uMci0&)8hV+he5O zGs%Ro&Qf?)x?8u*SE<+ve$~g%MX6B_yGvI5YJM7i%j912(p?Ri0w^}CtgvbFUCQdg zo}oZeiFxJpyPWQ&0h=onL34ZpW*vTg$OHPR2=`9Lva44XWG@o=%7}p^8q(kjnk~Je 
zoRq|$DSfWX(352CV5n~K`f-7#pt7UZ7!`v^3vS(m-=>a-1@Ebr&J+7m#F&(*&O&(U zGVBp{>qtBtNi&jJ0_{~0D)+3T1I{NztCy<5U9Udq1X6$I9Dgy%WhO~>e7TuiAK^*Z z{9NDCA^$e=rr%1oI2?IQ>5x@tK)^}6b&lD7c`xUKq+Q4{2a=PcqNS!wRb53$U&36N z(lBAA+2Ml(mzTQp#L&C!w9i8h zo+Vs^E`~v4+NKu4pJant$`0F0%ITkrRQHo>^Xsy_GAf{5R7TE~n>F@6at!X`(}3ix z(y+33D*s?7i$OEJe~Pm^`X(x`?~{|?5p-Xn!z=0GbGx)@$jjt=zk?`HY%Rcy{i0OY zx2ejVgv*pU!3!Jxa_#!X∾}BUY5UvGFD5K#io^kmRj@U;K$DfiVNfFi>zie8d|< z?hZBWi}RZz)wTYArf${rN_%#lLEWb8(iS>syUy6{Gs=AFp469oa5w00H_N=74^^p8 zB%ZfcmuS`XBO46v0gVuGlXIAE1=G6D^i}<}+Bwc=T=qSs)nEl?vS)PRv5{dgzi$5L zS{$OBh!;lWAt+^;UA^sN?aP>F2qFdcv~q0{H^yjT6V(1lx0Jsb<6KilSHoREx z>(m!H7uwdM@jptU`B!Jc#NREZvYL-Y@R7kPo3jn~?r19fI;p||L1fgnku@l7&_Vfs zA5}w@x&4^PGc06ry^)zU^4_`Pt4ptx2`DN(=9YfZg7B*|om*YIi8&LdSqa`weglC7 zJ_#wJAQK-$>F{Ui(H;DK3*b+~ znKL2`eK$Au-K2CdkOxV-O=p0CWW|e(o6@PZ_afpKcwI#_7PXkob{BCyt3AG8B&KRG zHyK!oLLda0745D%DMV!F>hhzf<|I3{Vxove)v!gu>hqb1PIpJK>upvtR4zb!w%-|p z7cFG0(;bcbRLG8`v-kJuN_=+07ek>oG2Y7u8QIVpGV?iZ{`>fj^zxESUspdNV%!M) ztvV=uvj~o89_gNBmcJnStHPhN&Ur@3s5;ixm7?l(%SycZe`pno8q(rAa*e`fsIn1; zM-g&2W}O}mv(~1|LZCdJU4Dv^pPC(^1S}?7?~(& zMKyTFLj2M>|I(XZ*+Xv-aaz44&ux;gM zFS!y&GUaw`@T3VFoqj3t8f|GGf6v7WU)M3CsSrxqpSE43L{t(P3!_hU>mR@%P`}>N zM&E-qb*#-NjAc`rEDCJm%j4j_ea86t`*t=yVrtnRPb7$fr zV>0H`tE{>R;~+%(H)BEYU{te1=z`GF**>kO&4%8(?)Lxlru=m*{8BbaH^TZu>~&X0 zLR+TCI+!(K#XP{ewS_3V>uqT(052~Pfc{xrpxs6m$b0(E2q+beQ0d{@ZGS09{paHS z_mlyyWBv^fcB~+bdwrmLaz-H|awm{igo{S111vdd*?+zzoe8)cG&C6re(cxl?K83e zNq#Hq559hvzF9&?a=QSPSoxcrNd6K0jY4q!9qQnEhP;Zj0z;o&f{C@AXopufiz6mb$K zI|edtF5>Igbl78SVMFW_i<6XyVzsu~_G(7Szi&>6d?q9NV#2Isg3m0U&*pDa`sB7} z<%|_BtWmbfiR6y;oVpOsooV2R%@+|ty46+lMc1ZeA2e!SI5pQO#)ReA^8 zTJt)K4UptEownIoCYxD}#LaiWbYnHUmm(=VsUAz|a7i1~0t_1x8L2=eOffwV@yU=+ zr;TL2j#7r_$gjgvLbR>_$6vcA1qz7b!cTJiA9BQg%Nn}QzzpzR#QuF7yh4?wfb_)NyoL@TmVlK_%dJyu+d3qKs z17UwRbkO*y7f{tCsv8(9yw*l361n5u#<~Vw6DdV0fSRpzvtEu{Kk;q1R!eibeO|r) zp6QQq|23EsRmjNB+4>0C`slP%R(v22sVp9XL>g+F-SqHg00yk>$NvbtObV$K(cA*? 
z>hlwEiuvanvyO_mSF=yOOBVUl=5QbhC!ugmm!9=yMAgKb5X>lvp>tph`AtBDf;Xx9 zrk-{O?l%AP&z&~y-g#f}y-@|I%_-l$1h>R7={hlsw#7G5GAJ>N0>)(XdBigwv6T=v z@~+*y-^|ajkEs8>B|kb|H&Bu;^6yG$jtrkTW|_psi!CF|BSb~ctL}KPdUEJ9McV*+ z^V8rt`LkML8RG~4P@}sU`zJY7s@^MRhM>91+U=KjYPb9SsNMx9Z2oG}2!7*r(7m!J z=w9WRL32qR-^Ip_7?G150LZKGoFNc%74dpSOBgp7hPM)D92riIxmg<4gY=gT^!dif zo+r&9#Z*7tU$aAkU-cy<7wdmb;R_w8=!AZ{>Ni-OR~=@3GBEc!N-{wInuv3+XeAKA zBu;M@^Zxn*eX`%#Ku(tq&%K z+)h>AOzwnn<26Ly?)_u$o{LDYXp2LETTVg7Ea`~&>z_sxhEbo!)_aX!W9yw)T8=yd z&HdbPFusyTd2!siQl0uG2k3;vW#V+#~hQ+y4w|37aF5TOBW8oX7)x?*AjgD!&g{QY(Q^H1HfaUpAb z4o9%L-Jbiay-Nnd;>7VMrwEn)Zrl$0oyW8bp(6RV&;5=4XM$Hu--6TaSt^Z{QI-S2 zI}^pjTkC*%MIdk@?zDhy!R*AEiikPD=pv{Gw&0bR>eoE|U|qZ(p)o-+VbVrWJoTu(fKfga^R5H$sExNFDP>PK{ z3yhq%LxL*aON|(lxt}8u>g$u(6@M0pT;b<1UD1m#P!ltCH-_xg6}RCEjw#?c*vzTy zP^Yd-CjO8t48?=oB8}Ey*ERnAM8YrZ&TPos{a2J41a)#~H*?gQQm1_H&V<G#hHBX*qLOW=m7k z+@h8PP0<`^h-l-Koyx7?rs2p1=D>}5JI94eP83ucDheV60s*1FhtB=p@9+NZ{f$3= z{!tq7`0zZR=lyw)*D&T-?+G<|suh6lB%puxp$kL>17U*3L{VcMb#Fmmpz3*jn*Xpt zTIaCjCgh0meGl&Z&ru#&9Z$=;rb%c3FA!mS28>-)f&-OrsC=pI95+j4_6`&otLFws zr%NR3c%#j~_#2Y9F50VTWFkwt_8!Xd9d^aDvkQst`tZe5tb;FRb=KjMh$=#Cw|RG@ zCGus|0Bz9F16iM{`{MMywj%K|*hnY-^NxG~*npdo{?C$4a{PN&fxYs@o^Po6uLlK^ zEhE!imBRr%UOs8L>0@nFZU<_d?kdSG*Wd6vY`Q30Kb4cf{o2c*Pn~A?ek|so0F{A3 zx%F>ymh)nu-~wbjkxe=e%lzcy;$u`r?kV7zj}!Fkk%ADoi`}aM%@#X%l|Q_ZfR`O8ghALz}B4?`%e1AngG2H zAQk#s}eJ%4#}2O4GyCt>@cCEeho0l&a%oPi&1I z4*}0GcDc>84SfXayjSV}QE#TAEPmZjx1tFk?|TrP6`0%Yp$rJOw#YyJpLWf^vc_L< z%5#@!1ye<#ljXpGzZ%Byq?(8T)!A^gxy%ErsBvdk1Boi(GGD?Vkw8uJZZrY=8<=pe z^lS898^i1Wk-e|~g@(q^pXaAgU$Ya>D&n*M-2#uOHTFI z*MsA#{}j8H{P^kWw!b@Yzg|1%2$XfdzLWfslJ(C%ujGgS-@a|E8H}SS#LyS=!;2u_ z(^KP%o4=-*DoCBvKj;(xJ@f_OC|nE!)FAV3+y}5=GiLl|B4h|4hb;h291lIi|A;wj z9g`ChUWb%a=!f^X*g_I;bD!mhwFFOG^C&QZRy1>lO!MFWXDRgygl**qTb6V_KM)2#}W*{@;!` zax)GP(yLoT_DHt+Zk7Ki%>O>_urU~?t{6kdQNluI0d?o01VA^G0#4oV>7y5P+<8!f ztQCm?cOVEre<%vvivdg-r*u=6&J`N%Nd1omo_LtjLXv#02at0UJne_YdsWi*J@9)K z^&7CrzBT=N=~LR_5ICxY`Tuc=6_RT91CAM-PzL+4<&Vc;aRLR>2(@6Pn6BhNBsb{q 
zFTdSaTU29bpspNXNM7DNmtk|jBK&6O5-<`i`n%mO`G3AzNE|goI%91jTVlwmdV&D?Ejv<+hvs{WIN5A z6)a)o%@Wtg03AvAzjRBbIwz$wRyxzu9Nwqk5=+RRU5#DG+^S&n2N}2iFsLRo-UsvIIx_ z_g84f`4OeK9@E)jd9Az-z;+Dse>x$xLek+sDlsh;&Fp^7{2PM&4ncn4yMgOM09cL_ zd`q&7pP-W5M-oQSdmnr^2e7t-RPhYeKqAKYuk)is59KoZ7Pi`+O{>4EvNUK!H4}+9 z9rsu1j|xv)mxX`|IY=Rg=H2evWPXsx(+v#I2fg>bqy|*RZ73!SmUL%h^A)h5D9+seNy?V z_A*kw`D2z3-}l-}`C5+e)ltse32v@7-uQQ%l3tYX&IYpp(*bG_UO0|5bkYM(``%WG zEXtJcBMA_>t|#}k$%ayUi{|?y-+=wF${28-&U>NVvN#8o$fo8=thZ&Q81i{N{h1pK zuN3W+>NG4*+~}8%mg-C}nYiCvW+2r$gjnOC!zIM0DFA0RClMy+w!8$iP;>?vTJVi$ zQeMNV%;bHe=K6pbm?>00b3m|7szwKBa&$`j_hm?D9+00IiLF?5e(bQpEi6b`6S!oD z|Lu}3Z?d7(r2*I6e4Cm%tsu&dJ@eCcHorqBqx*LteXMVm6jP}>O`jf35!((t;O-p! zb%~Du+a>Ba9MV>B-tj=>v=mNL5XznzIeD*76{Rl+IKlVzj>24IFK+g2{%_~Hoa&d1 zssoxYwzfzu4cQIAUK++JpqZ7C7?My{cj}Bgy;!<Q0QCSpIH(5TqY+@;)FNaR9#p z*cleglTd*A?(+9h^xEnIU>P1FxwSI?Hy>Y^V&HA3qOrp`OsbW$_%?M2d9v?iar)06 z0BN*fbB=cH-QB082(tg}%8`M$4vp}n&fEqz4Zgb#a{Ar}**Y{qvhc%JI@Z@RdnA_e zS||5E;QqG*JSdlxQhSlVl>+=fnqA;gb3nZxoV*E9rSSXq&PXec8%(!zp*M4b)TA2M z-dBsgSIYOgWO@8qa#JOF(_aB7(1cgSU;Ax*8ts6SeDPm@heqGGEK1(-djL#r46zdt$XeNIFH7&YYO$5~(}EwGKdqJ*on>2TR=*fO5h zC*`E}Z>uft*X3T?zNa^;Yx6-{!)NuOQyp&VuWqSmbVf^fdUa19{{0AUQ~RakETfB2 z_`StOz6L-8n)&hE%e*JlUg^`$pV#>HD=ZfJq(m40?E$QpEa(#Emh0zjN}t(3AdCeZ zsl%#%+WJ7u7XWJgRjM|ajy0h-wiQqe$;T#&8S1H=7U_$2H@6)-t(Ra2O7WA)SthJ) z{0c0>6{n(|HR1pMgoYebmbvOnu#gErPEAWqCQ6un;I3lp%Uxc8IB&am>)qXST*)*! 
zX@g>e?)^>YysbEsh`!7Xy!3wyp)q|UyiLXI25eojYG=u(uvNz5NcNK#2)^=f~O1bMQQ4rmNow^DvaK%`X0IQ+LlR_ zDznwUV|@v|RxW=XxW4+TmUe4i3Y)co=(-@`M4JTpj;oA8B+-o*cYv7)><2}V&IpCP z9y@a&*nrvoQc}2U;=PAn{FDYTTy9_IwIG(Ac$xHW3zn(WOs)U5;H_7`%|{h+3V_QA zJpMx!i~i-A z`=DfD7xgc{-KKW>SIh@`uuEL>|0Vc2#Z2)#%9}s0bDu1r^C%w+c!91qq2Cf%kb~MK5g$f)op@k5*csT%oRenzm(rgfNeJN- zTCy2~#x(P<8tbO~TvWk;t&pn|Ukq%IN&gJz%R}$G3ENI6`7WOyvC(cWZk|Va+To7; zcz7fT&xdz3!hn)^Djtwu4gs)q;rf0+^ZH(n#A9P>k7{nZenz;2DO)EsY7Io;nc^GL z8Kt2&j785~+wQnb1lq{|1fGev0-*XCdwESo!vTozWjX5hH`%Nsg7hQCQxa0?!V{qK z*Ro#+Oj%5X=KJm)n(MP26s!wd7V_4AJOt9B4F0wg;)%zxz>c-%@N0WF&c|Jl!p#G> zdnjbG;#Sz~k8YzP=~^xJl6_(e3~<(-`Z?|+cIN!0lknbHMlx>B1H`PkbKlp6KI@e* zASZhN&A_-carH`KH2Vk8y@E)jbPBlzw~k5dO$;lA)l^~if+Uv##~l&$UdfYe`SM?k z5;;cTY;qHj9_Itr6mO@pgg@q*=6>wFiO%E$o}z=2RPzaoD*Ek;GV8nANKrio%Nj)cGM^Mf3@ zGg6i(&^nBh(8pcUjU~bNVUE%CJu42t*^LQ4vn1*Hxl7T3IBN)xA2c8hoVqAU(dvkf z-D?BHp95O=>*uUNY;)nqB#W`1053d&12|9AZ#Xpi(TwN7nS+i)2SQ)O)1A_z?h26XW=V}0wSAV|vB~3Z z?gb`@6-Ux~roTZu98RI+SpiursnB_YZJPI^wcy)ulFpITt+-g_T&+r zz0sY2-l#|=o)USu_P3^3iQA;BVn9^mwQW}ik)5C77p!T~*_>?O*?^u}igPjp^XzSwi$BH^}8=OWF5-@MSR7+$bB0|>vs9);&u%DlF_ zx$$b^5-K5h$v62J^|N$AAfYo^;&r2Vq-|JPe>fo9S0FKVsrwnVAJ*a8PItCJqrPT~ z?Oy>-Kq~YNoASw2yvUu`5OU9|_^gH}d(c`$R)5!#in*Yjf6fQ7WoWJrYqHyKzvZfx zh1~6Qfveo{c_Fhqj2@2w{CbK+d2tTz zL!)m*nrlzYa?6{3bRS`@Pp>lufc*dV<3}O+y?iosYU~DT-inKGpue?T=U7q&9Y>*S zZ^BaP3RxC0#tmWOro{?%oqiE2_z3CFFycl^tcHl=9y>LK4}*EE%A0x5R^nX;B6`ny z8Kxy<8bakfj!=&B*9mIjleZ9%y~2ejB`Mi${@L*1!;Fyw!{dRq{WPTz_EVvdJP%C> zO{rdUNBL=FW?*^g3{w(Vjr)#q2!_D0=9mb=yZ4mCl=%d7-))z^*q)Ix?e{AuR9_5{ zDmKJ($c1&-YdV5RbbQFo!YHcJSMl-~MJ7bxJx3-GdHgr_K9|MNc1cea6gdb^L z#@}Cr(QDtyHOy^BFji_p?3F*n08JNj0fNd$)Sd3YW^@BtL(>Sa4XEh)^l}F?AzhB2`?Xn?cs3JwI@kGAJjv5s*8#nza4lq9JUJerSX-zuiDox*8m+O$P7eyXKh44{ zX|_+rb0%z|NxPo>df)ocbe8N|0s7~h`oR^}q~~`a_TBy#Lfki|jXT%TXK98V9 z5JcucRJ9ktE!N-gy}0qJGkTlZ)6r3wg~Cw`cE_aPXJb1PqRF!2B~F2tPcaT$vmn!2K{R(OuEr5Zndtc?Q1GCp&cqBAKkt^} zibf>s8FfVwQk*$oa2VF8+7Ri~JT3R~A5Q|nxR#$@)~6ZE6 
z_wN$Yi4(M+klJ0%L6kA`AVjEW)|DR|MU^al$|>0AH2ClwAAzkN(-f}6rbM85$<`1> z5Q$`%+RkjEqZ=@~%^`&L0bPM|al*KIyJWpnnHGpgm2<86=e8g$;Z#2gKb{LYqV0*{ zThOZQ%>!;0a(3hV`t27QWJ`6|kB8$pGZ!~X$_nx{M;mPOlml&S3gu2Rs@=--cxj`2aU$ju(w@zV;mqkUD zW0a^_6oP1k@cAj}kHmKPf3d6AS?R$s?>Co+2A-Iy88{_WPh_iut@FW4)3q;G+J=R& z1mxGjs=I)36>~Y9$^}u!;{j-WNW$Ee1b^JJ_1<4MlNeHA_Ur4QVzeVrP*<9!PUj8S*ugHf3%%ATWjxL&M zlQpvlz>|St`@i0H7)UdEcw$pH)9cjiOQb+ z58GlrN%+e}`RqB#sQBj40+QYA<=I+*2ujzbbGIaZ+X99rAt*9mYIf&&7u*LUetRc$P~)J&6K>0G7V!?C3S5>JV|8|?2b?bw`HpVo4d6QZ}_pmQ)+42sv_9QYIBjV zc$yXKMbhf)%b1552ZMS7o&)_k2h(Tzxgp51L?F{^UTx4gw?``T^aky1CvT`)P|#~j zXl3U!!MPjbsyds_Dr7TuBC^kqwA4%n0y4a`3f7c!T8VEJYINz;H%DLE;ROU?m0hm2 z>V7O}Sa6+&*gmgsn3YfZ>BU#W7kz1uFU9wS?oHlO{!YtOhMRnSsPB!+j$J+qGHc@< zLf5(nBTaw|3jDM?P=(Vm4Uezt`F;f_^1$%ZH_^5F8{T#ZD?7+PS6FLdoLW?SD31@= zmOs8mJB@XE^g8mZ_ZC9gO{~2y99{DB?hE#dxpJV|Y3)_&Tc>x^BI9j(qHppVcIh(|o~04bg!dI6wYhb=^|!;E_;KQr z>F&4jYZkI@`EF~N5T923CgVl{V!JG)5-56BLWQi<* zv#L_oRD$?5?6U@MU@4+AV^OhETIpi9%>yFwEZmJ9C!1*ZFWTFL%{4eDk;r`E^4 z*ojo~>i2K!jfv(Fvk9TP)E9d`;^})H1zh{?XiC$&^r&rr30-`7i6c2v0!gdN0`sEf)8Fk^B$p%xmlvjIa3a%Iuh+2-SAiRjoGx2 zxin;i#R)pvemty)5L?y03IQx+Vne6+XAOrJCL36y-B(Sp+QZ{3xX_G@_yv<^^+dsS zJZhb&nNPV;41$4N9ek-V16I&Q!L?RMjV*7KZ;GPPJgP)?M&pJ6*5O90JLYqokBt&$ znlT6NXMyat-rC|jaaLMDNjvulQolv}Zg`>N?xj+-ZiQCXJEf0Jwg@VPZ-(0fNA?Z5 zBvW%-!-)|C5vN(MGG|wt6Z2zz3L@WYGlaIEoZ0!uM0uQvl=0q#3Fc<*+CTQ0!*)_| zC|tM>7(E|Faz|QVs{y0kfLswssqv_1``meRkoZl>5sYF}z1WR#HLJXw6SWx9Gm8rF zGzU;+o67nyL@T)P2k*TPZp_}B3U2=Knx?rl;q-)8M5=)#}&jbPUz5`V|{R2a5u1E_jjN zFVD8IVhcM`ns?rBHS@leGM85=3)&t~SmH?z7-`cDoE6Cqnv$wLL%+n!&XnQN9e9}@ zk2Q>T;{j$!|BoyN!^_o)0(2c(2QnMmK_rjbyE7$a-FDcJN-Pl%yU;p2;Y^(#)4gwn zCxUvy7r$d=&3XA1U?OFr4+UtG-F?yjt}PK^SM${SYB=B3;G47N$;%{%Zc*v(2!sehY@+ceOty&wps+%SZZ5hjH9)72zmER71e( zX=3Q;WC}D`RAvx*3HjGxhI$0R1)19U1`&8UUmbfx;hJVfOrWo+zkIsGW!0?TK07%Q z|6zE^e8kW5ErtX`TiZ}jH^0rBBL}^2Ufz2kz<;zQBg{N=^y9F;b>RN;D=&8z3T~sw zHgD{L_Z?_WSGBPK+)dGDfGy&Se2FXn zkD)+PWI|*Sol#LV<$DBZaPn2{m8}qAw83c0OQy-ZDht1NSAJI)TaW?^N&eJQ52}`) 
z#Xdo(eD2`toJCd9U#|ux{}IXd7hYc@dE?IUKbJ=ZKF!WD>eJ)Q5RgK`8{OsM^{6YH z)Wqia(jFZ&Y^YJWtunW^1rb_ULZ=&+)c*cimq|O^zo)L%k!}Z_@n3Ek#f(96nkNoq zX9hfNIL2@AtM{q(Awxgc8%K|G86ld~LTi%Cnh%f%sIyvdWW-U};2T|4Sh%>on*Ybr z&GnzS80yHIm^KXSD@JHZkNHyr_f~e?0>`2IGC2(=u?#rAh23J!JPsqaU}CrSB!ZH1 z&|{Ft&B_O=54i{8t6V*T2q6q1GY9h6ijEpy!!T=g-gw8Ei5bu%a8m7DYkiPFh@kHv zHspl(9u(i?!ZA@kqh*Eoq5L`p7X)Oy@{gAq)b=*l_hB9W7T~~b)t0O_AylONns}-a zU|2moKOzfssD3=GLr#kA*Ox4-_h0fjkzDNDLDYWiiF+O9mg}*p_svD4!W|Ni`q0X< za8}%kp?^Q~4JdagYbwB}u7QvBHhEQze_gf7>_BO zm;v2*`kbwt<qpmOgR6W6 z&SOSr`Z`2&dQb;v_u4y(boL2?4u6Vtm?D&kax$uQa`EwW+O<1mi(l2=;+l}Z8?Pj1 zWQ@jv)2=3emYNofEb7&7s2K_=RsJ@z7QgXYS*7OUlli&6)nRXsvw}O1ltwN%=(HM- zq1M0P766E58&1z*72WZDd;u>fj_0I;i}aw(hyIl~`24wv`8#OchLyQcxtah2rmB3D zWg~0T3b_ytg&Et6XJ(;SEL%r0I~rNX`}Y)Dqc>DDRkd5yi0@djBHP*BT&VfPoNVY* zA+2$oT3A1LTJr5*pygSEnNQ9LyJEz~=E)uoS- zg1DG%MX;yix-i=epN}+pq1Sm|J<6%b`tGI}GIia8ONcKO!rv1NaY7ClO}8jqChepD z#?!Z$+F3=ZOc%Hd<*clH1GQ+pE`fPV$#1p^ zOTM7fGh04^GRw8j2nS1rnfDs%IlVOawpy9UDOzByQ>io4VmLN$&>@eBStrUFphZW3 ze_AJfZR;r=hr}|5t_uMe#M~U;LhtjKVA>+9Ty%^i$kA(A_COS_GN(fteU!hxvcgy6 zy~+Gn%SN}iT<(NXxM#ov#nz~0E^&G<2@#0cxYcj_<1R|&icpa?Jl$4Dgqn5$yBFf?k(ssEY%ZuFg`Jqd+!S$B@ z&SHawIcKK4xroOn@lAE^ckRdn^?olGeaOUSuchZ)hdY{otkEQ5iBSlzvao{)1XSt0 zW-3w;B_VMW`jso?zZoC08O#fn<1F! 
zQp+6=4Ta`y3kwGSND8c|s~D>rn0RuDluT&*G;lVsg6Ze2vIJEkgbz_jI4DxCGW>ea zsY8tGqL2L-!_LqHZZUpS6TMN&Qc275xSShUA6x)mX&(H0l;CNw6{<+j?a&MITmGrP zB$l*=`o2m^kFc0>ZAOv|0b4QScIE`y{zI6;Ly8_G)KP3<)sNJ_6=*zCdt&Lmyvdtr zl#BaHbU%=+I1Ocz#*6yOh$q(qmP)=i;qi?}dG~LW?^?pN@pO^DOi-Rx;lsH(&{4F{W2MO}7RNTsT<( z;urN-U8<}wElP^kC2cq(1O8Zo$33kd*UC$-LSQNUgx4UseO?BL9_cnpqks59$ z#*!e4UL{p2@tA3+K!#qXKy6IC1bwi!57nq68Z*cYC{JQi`u8zxUp7=&_ms`VOzyg; zJfiKj<&O!0&=6A{V@YyDfVi>6m4yv6I#YbSaZl~4P2lLl(Ad@!N>iHN-=AkvEmvR5 zK~6ZF$^H@7HzPEcy9wHmLwDQ^eeH1YSjo3{npxba#{D&C!{?$&$uXb{?{>^!$RL3Obn@rU&BcdSI0cQeji2KQ_EBb%L3kZftNGhM030Hwxs(** z3OwfZ5+tK~$NCWKr(NAPjh@!o@PlO87>WbxH!ITsy}M^G2&QnrE%JV}TfR%FKZ&gr?M#}x z#*8M5i(d$j94zPGcGWYn8^+Vrp{5|QxFS2@qi&~8m#cLGvH!PukGxn*$Flajlust4 z@Z2sl3TDNJY?FErDq6TDqec?lqBU?|eL#5T-YNX!y-P#T9%PI~%c63;yO+((E_ZY* zL@V5fAC+uu1raMPwdjPc`*fU0x(Ga-^r@?sKf2@$^D_Wpq*;zbvhhs3?hjiC73?|o z4dl9$aq;cGCaBah@SK7MemWl!VwChwTk9}@M|fp`YM)3jIc z8BwRlY3G)zJ&jl3kmQD)-rF)xie|ivHawS1PX{IT!0aDDKQu@JJ}JR^s&9olNv2zT z|15xsTn0Bf7kGdr=?1kwzPL`uuF+4h2qI?1?Yo7(0_fPhGyH^b-+W-bwvVHs|7t}0 ztsP)O=uK`zgIwLYaVp;`NJ|-*yqyL5$ZeNPU$+Dvs~+zf@!MsmxTn~ogEa;|eq+{M zXD_llhA?eUZ4=!>W~z_6G+LJI!iRy%vz%V^S0G#(t)ov1CP=la=+-r`;mG%FRqYoD z(E7>xn(&;hBnS52!_NLNl}qX0Z8I>YsnFujp-}M zeuh^^s|)c@a?$N3HU2V)j!i$d!D$G-(E3Em)lrq`GuH5Q)#=rzkm?Wv7fIRP9SoGVT9r*`N|D16Y6E?7tnt2+FuqlbrSbb!A(r2btQJr`_4^Mn-M#cW*Jvt z)1V1w8cZHwLngZG!;!dkv)Jv3lT&45ggtv5R%8hl$ZU;Li^b|n%fNhqtJgmyEHdG8 z*cJk~KslSHLx7_;^GgS`mS|*!f?4JDFW-P#L}MkxwEO`NY0xr;p1m>=6TcwfTpS(CUETS|Vk8XBBk+ z3RETV)NQ8fL&|pB(Q1ly{zCpISp(tA<4L=f=D2t$BK$=8vO_(89kkiYh(ld_cjx zcAVn^*W5LKlH@EapLG@rR23TYeSYt-eJ1G2$6@R!?|YE|>0olMYCp5YzICH4-2?(3 zTi5m5`Fn`XOmpS;_ChSG4N6M(0>Q{;eWVz7_1QlK^vV#aS1GvLDdzSI6#6b)eeQr} z;+?73s>*`9jmBA1rll61T-|a{6dSX1OsQgJI=N+UCHLw(1XrKl48=QI~u)S10!AWNl8y4SwCpl=Pz z#j0W}!D?D98}mz%=q4H{)Q6K9r}hEXud)&<-{#nOwA4}sdn0kU7M>P~J$sq*2|s?C z;cniV;L)Jy;ZyDX3>M>h z53G@R=O&`j-q^V)(~gLcJE7#~GLmHurTMz51kY?Rd7`_2(vlg#pj|7fXU52;WD3ES zP71dQ;&0RIGI&bo(%Q5&1D8kl%nZc76I>%tMW&|fw#}3}%SqcFiN%12kmsHV@%0Rm 
zH?mRZLR@9{otNdaWjtTBMz!sG9Rz~QFRh1X#V|*3pf8t%GBwDMlEu5EcSOH!7$zLT zU842lX`C!~%f{+IkCaT`fLp@@YCVda?l|#o*C_NX16up92w z68Pk_?3C(By(gE=o&3eqlP;TIC!ixiNO3th?BwPUfe&?R@oCEM?)zSmT@)AxAkd=} zOqG`7W$~r$jFvm;!>66(gLCZwUf(NBRtfv|m@#P2B@YGZW3ti`K-HFZ{2>7MrGdb3 zwd|?$dT4=dom{_ckiv1%P&d+-Esf((EtFsRfi)4qC2t`)z`S%HXYOo0@L^0Ll&*7DkFGYpR)&DK zG+jc=G{D#}VFIgrrT$ca1HHR-Ww&P6H3SZnaO7zL_J%YK_xSPrUzO?VQ8O7O`!jWr zLj;Vw`gH|Df4OeRwHfz17Tqu}&L!h0UAI9q*_kkga9RP!M4&(7uzIk(hpXk0wvloz zp`NNzey-4o8@&;1nNFg!h(?=F&YwDX< zzV`2Hr88c{m|n`-Hq!rTfL-^qea6nx9#I%Qc-dp@VTT{TCwqLWrJ&D#Wff?<xc^MgG0)c=m!RRfx&6QS7zs#OG80-9+ z*>VkXuqpkz?RE^WHl2d+gV>Mzn4F|P(ya5zaz9RXDrw|5CV*gJRN}3&)@9R?vST^x zWo)-C`?W?5^2}t5OTHlWOLZUV+vo$MAi-g1Nl35Y(>Z@w27sfc&qW}$e&L)%T!zf+ zl=5Bomi2;;KP+Ye*zO^ZS&LLj#;c8h%;dKI(K$L-LNEbm=%Bjxk{}WLf~aQ2A>~99 z-@DtDzNpr`(_aZ(r_Yxi>ykHX#lMwGe;*@yMu&g&Hunt;C@*1qbHU|`HMOZuL5sG>Cu&*o>}QeF z?5P-cqai^+b2+qX<*gqvQvg`nUhaP#GgTk-?ZcbMyH`ECEl%n^m^*}NdE?Mu=4fAy zI`cA4t&p7%dKtd08!@K6gOe*vKw8P((%6te0MrRtpO0Rby6y@pALzcN_S~%)aYXOW zHWN>=Xx}5JsT9A;L`T~tW zp#=omrsx23@+n9oLMp&zB!}F;4^we3}a)1c7bEMhEsK;9v*V) zVX<|&H14st4J~%fXG>56zfpX@ci$ZJuP9fW0RN{idEnkP8lbwOV(D~W^VMYRL9crk zq5v^{w3(e%Cs4i2{Jp5EcP-kwQ+gz`Zud_?{X}WxP<}~0Q-T@oW96R=7s(Jv$H|T3 zU0yWAi_71j2EAFkzLxic39_F3uUa0*jyZmmgeG*BtUkQOlZXc6_P*`4IYm1T&{Ent90=-5%XA znq*gG>%-|U-LpDt^la6^bjJuTW5bbjTx77BT6}#{eguv84v&G4q0dgYK*N;m*ZXn2v{I+)G>+kDHIk zixb)W;dipXovb_Wwj#tEJmwQDP*J80*De?uoSkqZ?}g7ypS6O(4avgqZ6U9Att9U( zUDSJZ=W3!CTD0axRmJ-ad?jNVu}&*<4aSB7Gbr`4@Ko8Y*vSTyNkeEA!fo)&Zg+Bm zzr@emJ^>51P^4%A)W9LWnM~hRMlK>||GN?8<_J!K#%Ictu<}pKcP@{dJnr>8&WAxo zF+*A}j>T?g#{@uKm6kXuiQC_@JL1>y*}*xr`rir?*8~7j^O6uyv%&B~NI)av_klk_ zVR7s!Z(c2m8#j|=vHE(!HIeHI-;%DjSg)TlFOD%5ebx>eW3U-5MQP>mRb1S*qT+|kKuXX3Cu42UFt8(t$WU8K`Q0qEb6*CLzpJrJQv_1_Jd~4_fnCh9@|Z#5ArE3jag;5OjQr z-7$hW$0FyDEG_;7Iukt83yxDbd1}oNJisTmTJvhusZCtIHDa(-+oPWvcVJD(pKCzK zgjYpDk_AeWw-jUG-!38!vu93|Cubf?@p@SvyF~Q$QZn-gD`UZQ26Rb3T`U+PH8v|E^~_0cj2W6XQE_^g?mc;dd^L 
z)m0-gB!fxV@cFRrac^NHnk!#FR2PC#d0mCl*JqV+GVe9~F)y}H$BiX`a>804Ir*rUd}Q2pIJMbz=7#|` z(!suN23#C43w`NtjFcaF@rRHM|I0hh-u$^9NiTcMe=0JgXAXOVno zl6GaC?MNl~03ZsnE?$_d2N%jmIvM#JK0I4xvSX|0xM0d+?C8_KQRUvQrf2Tow7R>k zzmJnN@NEL0VCZ}htN7Yxi%qK)Nedl%9Z`7XdZ!rIn^~qV0Fw`0{}hxExH&xr++Avt znk4-_dh<)(wqwa>kuC)l!0v)Qsb60-tF1@T%eOoIU@onG%clWPlW3nY!6oX0k{G3- z<;Li~24`&Oc-f|AqEw?W&NBe;N3f}^g-aT{)XG&^d`@Jm1Kec<_VAwQ9Y~#F%J4w! zY{>7kETU$stpZGpBfNqH&dZ2swx`^H zKV7h}(Q7cZ#=SQ8OqVy^WiJC=wo!VM3MAxXKteu7(+6gytO)9-f;TUBt;Av9Lr;hM zY#FLoEb!G@DyB?86N2XYM$^7}1cI-93JT2X8{xh%sWy~9sTVEtp(Rwo(QbTGirjl` zk4AZJ^;;4>Qm=>pF_pBhc&M{8?fQJEMxGgYk~CPmYtH0M^lT^E zWOrhApQ0CbH1-{rcvN#=1H*%`@fuyKCg)2#6d`9g-!-uVyHb`$5yLOM^+i6e#*^BbB#2(HPa?O$ zn1~S`BeSMx@X=i=jghpq{KiNK{9YgeGyx%lNH^NIyHXz(fgm)eEaJF!6q!23yPIT% zY$WOQjcq$I^B|_PkF~bKO3`)l7?O=U?hfM@evGZ#`$d@~T%UhEa4aK0hW_;TTO(!hnJ9$fo{tVY+i^na zfUZ+vO`o6EA=S!J0=K{!fFDx6#mWEGUv6~|zzeKm*S0k-Hax{yv~V){^oy`%a$yR^ zk+^XW^VJ9KfCp9)s9cI#xt9|cYQ#19Q}>?}*zuuoVSK9Ss=~F(t7Xfj9Sr8Kv$9s9 zj{SA|p%Y3`&FlK6R+j9!85_6_dB+~Jyq_kiqMr!80q|p=OdnZ=Q;(U5xKE`Q!k5OAQ3*rVWwCtu{?-+kXA?9o3@`BX*quj z-IN;Bk#xb=b{HnB_`S|jR5vIYb{R@=p;}U__G@$Yc}K*Zma+HhZaf^pv_@01O<2HK9osndqrCI1R49(V*_#@wCpMBp0Iwa?bwV7 zKT&qdsD~Rg>19MXbs9GoX(g8Z|Z<&GK-Kh0xt76%(uflxccYkvAN71FDmm4!2z!G7Q}%dDyL|s9?SlNA z6T$XF^JOyWJUv0=kmTkcq}-|lV;{<(W?jzPbIHtqVHlTj*<3eHk;Dwx`#m#*sr396?>~>=dvRKS{`BkTW`w+-GYaUuPk%aZqNC^B z0A4(M?rldo(Azx6WQ<~s|_~m%)Gksm4Mu37_CzJ9x@~pfGc^a_r2}^)Tw*cS={~%HqX@sad{Tx0H}CIeou`H zwGz;|VL{MIh;|}b-H)rpP2SsQJQRgxyzgJB*ksPYadhv+Qn&;R%ay*Jt5x~e8_}1a zI`g;FBXwz7J#UA$T%Yk9Z!ul|M`lkA2q3}RCji_4YAAtKSsd~C$-!M@Z*~UC45k)! 
zzqXgPE@|X{Sha{S7A^N5cfR&S4WG^7=hn_AHnhdK3yoL6VI~KZdk#O=GZH`M z83gkL@mx5OLKQ}F1-4U>MR%Fv@b$!ZFf_mgV9gmL@H>px8ZfMf%yT$ROw*SWRBu%l zpj%Q&j1vR>se$U1bqV9NYn}p(t67CmapLORBM3GO5DkB&&;HqlK}IL7*ht|2RfH64 zSVTLw?<;b?i=cH1H8g$8yyS4cI!WV`fAny0iF_u8SJ2^X~&XhVrV#6A)qM3Ze(_Xw%D(iLYKv$Tk0&)tW{%7do$J=%Dz#EbsmX0@-Y!eZ^1T{q1>l@^(1mMes(CS zJG5uYpUi~p6?lVCARG$>iAAs;*FwjwiP3j=naJdyEL(t7PP4`-EiL^ITawFDBkhf% zsYcDo!z%{**gmL5NhGuoa3l7n&~mU8?Uw#ne-!EC8%QsJh;A4!Yj4LbC-?<%m5NT{8e7h2GFq`Cu8t>C%d_A{XH%`0lp7`f(&WeF> zRWn)80&Rr+saN%e;RS8jF#uoZL32XdmVOi+XtsN*+$ua*Pg!#+>|r%@ zzKTe1wMprN3!3084C|T2Dd`wx556o`T$M250IHn;$f%0trwW;gKU3F-0p?bC&fq=X zYdJ(vSTy3zNTRR|AkG2SrmzWii-bTP7_7#$x|OQJ&|_SXZ)gg=%Pejf+WHoDd;|IT z?1AJ51QF@2iczz)Cu-Rfu@g|IyZnD}_nuKrZQJ{B?22LoEL1C~RHYiasPx_h3`GQ_ z1T2IqMG+AZ5Ropug91vE&{Px-9qAB?inN3xH9&y;=LS7T?>%>n-`vJ=vrSkpWsIm0i>Pl>oalN-BYGlPJCkgb13Idye+G}}|^UX>3!kDukW z><_&=f&lhEn587t-EVi)X?|_C*<}7`;$g~@X1Fx)kZI)xq10BB`4uq)zlXS7(@}K- zPnT2b#6p8g4!?>b9l<4CBCDnQn77)AA44~)MiO3nr1o{im^7Xfv{8uIQTxPlr8+n@ z`FqQp(%IT1*22-drCgdiHP5*u3mTNVE)mn^GY?r?eoEJe*F#y)m@dT#{NeMu=@r)x zB+9&CI2p@J&R@J+Ykr|YFx2(ZxbbsShI;+$~*LA*4yUR8HL8mmb zz%<<>w4k7DP_DnAq{7urXNAd6JVTqsntAQ%jUVQK z0q8RKS|XP`Na={#aaR-TmFylta@1onJ~%j+QQIx6weHPkoa0KxQ7*n2rNS9R~G1kY=Nu4NM2G4GZ)8_#U%W4Gn=8Jo=7 z>19?s*t*0FoB7ZCU^8E2OH?_w1+-nV+;culW|#Q3YMFVrbffmQ&ETQz=K+po5tu7b z{dZ_=LjS&mrbK&fGe?eq!Rl%R)Zth}wr6$qYnGYcl)W!UGV_1DYu<@K=xeu8h?bql zYK_xz8A)0&)Xtmz<#}(pbaP=~QMsF4cg-_9%N{@_8QyF^7v2?nm9E;f<8-#p;aN4p z$3=a!SfccSnE=_97uxoiO|@IEJwK@x%I#gii{x8kII=T2ok}QPu`6ndukGv=2x#(RQgV7Lv%mCfsV-q_~zVO2^1=9;Z2MtrvMRwr75> zbP{|B0fBIP1C}du&5zBh+i*1@&n}r^=H0v6}NATCcUqjG1Tn-mZDE)y@SGSRJ*M_R3@bOBb4&#^#p^k+22|($_Jaf zJJ(-&!7@rYCZynz{i85z&dCt;!kG@80QCufqELV~huxLXgR@89x(HGXsbe!3qg1YH z!55_5yNG3+8I+b_&xD- zD#01kJxAF(`$YYr>LM%U%7jI6_trc9*8!~`qiL6!?R!97B|5WrS}$7ex>_}Rg()nCR$;j>@~`6a{FTu9*rk2cCe2hmrg_Ka74~DN_xzTWY>~{8w$i%Mpa|6nF@TL5!XnF z*R`uQGG1cob6~@LIsU&)wUK?t=1fv&Au^{CBeuot@?(kIT^$btn70N%{B5bZDW=Oj z6t~Lp?v6q3kCfwabr0z>8(4$7gfz0+Mr~XxZBWL=YnybfkxjZ^oLc$CQpd3!bm=VC 
zB|R;S1?=xnN2m+rrp)O@N^Ibc*f)-Bl`Yjdxy_R&{oV5;&2pDg?Dkq-b(?GB@ZVv1 zaiM2eqOj0E_%LQMQF19UcUq{nC9I1$R85^J5t9T2%_I7_TG&JUbo%?*r1&G!LOTu- z>23t?tFScwYgdg~wV>yP7%?{>l_~GC-kOeWIB+FYhs#W1o|h8g*-`f>_l5_Qs?T^X z)oO1{b8L&}Kmu%sAG|U{hxJ0`V1S_K&E~h)6`K?o19Km&kliQA4WXU>oAbU?fzmtm zF)d2btYFN=@)^*v)z%}N=zZBn5U9!6zc-TGOIV}9T|=40XY-O@>BqYImD zQ@nEK5$dWgZ+oRsJ#4okCsIx|7rRf9bK|?I-*&%uaw!rv0UZICu$kqv%|Ygt&&t{n z9+a}bnM_(b02Cd1WK4h6t%K!ZsLp280rj8cI0axDpmKa590^s1?@o*OZ;%OP*W=jE z;Q0w8HGSQ)K8Qn=P;#U>VRLXhf3S+l&|!E#Y9?)+K?Y%owfE$Rz5538*LNb_p1W=P z!k2XCjbY?>SfE*2&llP5C9;pZIwzNGLDEzWKrIn1=G+~+CFlT-&(b|s-#8_iVT0#| zpTrHFTka)o5<^IaoQKDUk5Yz#=V7a-9VdU~MfA!Nm#hebUgURRTR88${MYe70nq-c zlO5Si5Ac?!t;rG72LcF9Vh%*0B9TCXppfr|W+P&wMZKf{NqU1ShTOfU+ioBdC@gD3 z{lM-xR&OH|q-WZEw21d-IAIaEbekyokiN5(F`rf98)}J}&ol2&iU9(vfDU*C8%PFV zH8BP8(n*)l7bJh&+arKNY5t;e7;9d=WI3j`@#6g1Bm5MuoGr#w3^JdjlWqYGm$YK= z8g3l+=P&ee?~7d#ys>Gm;YP?)PpHOZX?w|SK&2@C63R&s%1OBsAvtc_Z&pi~8*M2- zjg^C0pRfo}#D?H1kO{&!yRwAy0$+Hr11WYigm~qVZcpDc?r=w*kF+`#dH!+L7#`^) z;PLy#kK|C_JkKd-tXRNx#{*yp0-Dc_Lv1KRiA*TfkYssCXl4Xg0XHu-E?FMNRj;=T z-$!7~5v`sUqFkvl_~L|EN@syNJTKd>8o(!7J&$3L6%oR2ndzdv{2xXV& zGduBi8p^s(tTok<`+a1MfiW^AVa42}6d~q}2Zk$mmAycMcJn7Kl4tr}gm^}WS9$s) zovHxBJ_D6{t_Z^jUHol?o&=7UX$q)XR^;3IL<^RNfxAhV@GYx&A`<&fq^EHsN^=@& zZGKv-y7#()6(z!6s-Xl&6~`)Jv)3p?=-t;Z)GZU%!cTM{*}h z$rl*#!sCDh=*P=s`Lg9fUSWiU&jp#>TBxCaCj&YT^m^<=|0o2RFm}B^zhDz8N0}J= z?#g!#7^%R&=jpqcHMx`(oGicpVq~)X5=5VW|KQ&**FrAeQQdq3A&sgY5ZJm)FYL{k>|X%a1r=6pmqR^_Kke~O=3DgBXj-_I+gHq_mWV28~%Oj z(Y(9+uk1g>HF79~X2^nDc^Y=N$FC)?e%DLCb4h{mxba=Uf6Wq_ch~;)>1;r2=*fbI z8uF3ILM-O|B3T}B2iDa8|B_UhNcQ;)2%(~l>7gpx@cYQ3WSR1vQZ`A6eBI+gUGW0l z_rC=#alQuW`^YEkrHDG0q^@-+%#Ykv_AhM4{GnTxYJ)cv$i|d-5KUwItC*i`L*-Z! 
zV~Kax=(6fk8_v@LrHdzv>o;GHNT^M5CpXsZ1*~KBw8A}22&SAC6_wA`WCR@`{znIN zZj2DRFNV=Jq%c?EE&4%7CQ@H5O4kfwwLg zkK(Gw3lJ91^KMJ_e{w!f9<1^x{7J|t4TOv+*RLs2wpKBq zdSb*Ra73*;!_kgHroYpFuD%_$iySB?%&t*BR+u0nW1sA{&KA$46Pp&v<&^)8snf&d zkBg7NeDtg?o^4hXrAp|?zWtp?u}ayag_+u$vQAom0Hz^Isn3nv+p$RgPX^M}+2($M ze;ST3Nuq+o0)9aGljylZF{p}Q&%gC|p2)zc)s*wA#mehEq+Dxq*@nP!^{O00N0q7; zO7ud1=YW;;pB1j3>c=i2q=N6hB5L_vpPcLhyM_Ail{Z%1Z!J4Ut@EHBLU>Sr9XFrD z6#w-lBXsX%xV(Z^c68ofEzSI`nZ!{Cx zA%KuEF+qhUMPgXG#R1$(@c7@n{qHn^Q*iP|AEgi36gFN!*c`kO#rL?CQ?r~)V^V1) zxEB%A+93t_mXqPo)n{>Ag20q|~kTFS&S|tOr6lm5t+TC_6ULN$mqWeTFSK+%0G`m zo@f`mHf%iyP9Sa5-f=8i2+T>;?9K7~j+nL`OwKR5+~(h>kbfJ$aN5tlKGLe{&yk_H z-^oYzOGrtUE5v3+-tlEu^tDMKDl#Vul~S93Z~3YaGT>rYbRBzU-5L;@NWuYxn$?_; zV845?6tOkTBR~%hzpPqgKs5ON)rTzx+gIzVJYE`CUk4**7TzU^aN4H1RXN_3MVQ$H z*0?DB>PR+jz=ay=mhxx9CD715ru-1m|Y!cE{z%aKE0Q;kOB+WV&$V5>mAMCa`4pM{tR-xnr@ z+)$n$5Si2+e`H@C9z^e%7_gQmQv}50Sl2iS;pwaN&!jngZ|5Y?>s5m};7z^0@ za^yf*Yy!Gv_K3HZP*)~nR~|?-?<;Q1U9fSX{x%enc~`Kv*Cw3QQKSbi>>YJ?(cgrU z<=T*RAFN0?jKxEXwxOipll~dE-N-;?=9NyOD8tONe>+vl6a^dF;?|i=m*|veP&e;x z4w?W8dsNdjkixu0=)fY8S%C-$dFqk(&>dAU+)R)$9s*ezymPgp;W^66bS>{ecmQ&6 z?nR!ousMizH#|vg5vhMPL>Ig8vDy^59gZAERU16r{U2;M2d{+gHexfc4iSmFM-4 zsRc()HSNw0v?HuQ@ZP`#FJ0LD$Y;VQ!Hcu&=RI(VHEHAezz4h3lrLKnGy7??!3 z0#n=ZN3%Z=V~e3=;S;Z|J2BNgqv*^1os4T2#YwK+2=cgvjGSHINr#nuKbhNav_jMs zp7fwnqN>z4_nah<^2vZpp?71UQD4IE0;!1&?4D>8$kYdr)tg!#YcOx|=9q4Ap+@lI_?i$eY?^|Q!tG=iL0h;sV@I@Eu3c0aB`xiNIR}Kpz&Pyl5XCakf zII~JFFLxe^_hH=K50TliuK9PH_6Q&h#PjukYn1YJ@?y!>zgHL2?#oE0`9=87Iw9f~ zJP|$%Wkr%AKVohUS%9G{4 zEBNGcGz1w}&i8sCc}8lkUYijFlQUAg|tirx?_ zDo#>532-O$D2MM*)YQ@tChku5wGtto_YmoE!Ar{!KV|ocN3#$$v*9UUzK!$`961}2 zlbIdI%(mZyh{`6|EB>Q;@|Ug@L{4H9_MVnO$iofDQ-L0L=MhrEsWFS+g%1yUm(l~J zi0aiGdu~-JG@frI@T^KSMP&>zPh(Zka^-{Sexvo~wb@4$YS~egW32A-FpUSswJBT14;)q83nq*!gf5o8&=HMFh?t<**J5F|u!3}{uHCz)E8m^nd$9&cwt+(Uq|R>cmucI`Fn$ZJN{(m%eccfWmq z4I>FTuK2^rm#5p0nb9?f=k~Uv-t8-zz>`$Ys3X*bdj7JS(?}r*V8(pWf`mMCs}5r9 z)n_2oP`6qZh`{x_4y-Xb|ESm_9Q6YXvtnnc!bCt&ne`n|g2>$F4-2PkmQuVp6lGy> 
z_xo|SH4bodC1fTCVtYx-pI*BeqC`%xExg+niNtV}J6vd!W~Sg`7Zzr=NaK zOuqGrX-urUSi`Q@&5B~@d7sDzERgGc9KP7rKq8Glg16qGL$i_ z2xcuvhp{d!4@>svbV9N1?nUxzw89?-iQA~E&$=%(9}Utsjs@v6t=&cmm|O^0i-X*V z1Q$AcW^#|%52@q-@fF2?yGOh4A@{Sv{RYVWFo8xvc@n$W!)FOB$>7+aNp{Y6fj7i_ z*MIavYQ?&z;&TC(5!sJdQQK4Nr*|! zv4TIo3RN5=Grzj-`j6f}0HsG=*EvXU5nrX3Z$ynfKGNKqU%_lmS+n>Wj&6QdsR*Eg zAw)e?52SAkCfaO5c@E@`N@+N#9!YmKK#9=9tcA$f#U<=U`NCS@b<*QS78N4 z=|RNggTs^I3MjVljnYYDYfG5vxN$Re1}ae}g%z$}kiH)#)7Y~k8V;i+TTz|XkhNH1 zL2gAIO4)4v+-yRT&j&QFr}F-bHCf1Vyy=K1qFGR=O2pvoWS34-y7xDMf84bQS5K;> zE0yiKcJ#3GDjU7B_kmLnw!hvRZ;U(Ypg)qu+Vp0#>@49d=9qhzor%`iWS39}bmqBt*q zCgFy=kQzQKE-wBz)&B`u_zo1k@7~kI2kB6WU_^~3h{vFyj*?N^{>q^K%a@;4F?@10 zUp?4_r#2qdXjSgmBEO>g4xL7TWd~hV7r|eON>_>z>)IKze_eyY^+#Afz^d@>CGC2moKAGab??s5*oIHcir@L*+j!lJ9S<8!L?6W zP2l|L7HBZQ^{{(TQo? z(wT%MVf~O}7pY%KQ%~Fl<8#4-_sGS^_~UYsYDPw}T_w)B&cLwpuvfz5!{%K3Q4L;c zByzC{C8e3RGyOsmP4MF^x^?l!&5Yz)y9Jcq{Ca`kaiYg0){Mk&C#c$)YZ2I0=0?f6 zOoJ147`wu6RzuJ4Jbj(tec6fMye_<*8ZGMBaJ9tg^9u>LZYbYTbM@`b_X1mRf|)!q zM$A4qDNI|3naoj_7)o(~_t&X}EOZ-rQj%^v*hED|oVtx<`urm@TdCoB~Ug|$6(QC=^GsD${qUXOIImaX>s?DKh*}w{NbfJ5+O!Q z)o;aUd_kk#wG@9tzjSHW#m4sn{MMtn`^Ams^0VxZ+02i1nKviN`%c~&sylpV;fKj& zZXiA(ixjDLurZq?vO7}-)1j`5OCzC)8M|>9Sxj4e7AY+g9j&9Q9a~{f7{(lsZWdZ* zyUEFgcbT)A1D(Unit|M9CGz?}niTgd*P4ap8}@Sd}1Yjd_p!0fWu*B)x2j%)U2fiW!~;PbJ9hGX*Lv^lj3E zP{GeUgYKF%jW%hsUNkyOO*aqS{%MAjvC70bjgt`<;)736*ZutzozHVWX%PSZ$TJV( z1~CM@6NNVPF;Amz)QI~BI~>SLNV``7p80c9QuxT?6qHyJ)s}g5 zOkKAG-v;eutui^E#>t8c@xp&o*Zutz+W9>2h~+M7>oMYsxF0tTt;D+Rhke1WU^=I~ zHqpS?m-@1+yg1}ioJp7bhulDnMr>nLt14NyuBQgA7G0KITX$;v^v*68tC*)(Jl@He zx|^dajWYt@3N1JsP;pIN_gt$s7g6CM{_!PDwVLkhG|t1sGdij5-d1~0_}?07if_k> zM!OvncOIilQ)zh$YU1I>BxTy58p!AEccFCUTF?&gOZ@Q{ChGpVliC&#=<+6Bpo7Cz zAg1=mRsH!B?ckSPzuN4?=s4E-I)$b+M5PTkwXEPXkaMUtWH*k%3*Q`{bxlnd-KxEd zsNjQt{5c&LkKQbA`tr)_53ZYHzgOIAl%Uj66plgS#cJf@D2yMU-jqqz7a}HCjsKhj z7RyV5xhR-C91 zi0}FBl+=n>i~hVd=$Q&fb4N!9%`+YGEjv)CEM?H%l#{a@l8e5FnVAJ}1&))3lONMk zZngThr16OTSAQ%oTryp2NpfLf;g5iaj2`uog66$=TnbmNMWcg|xDma-q;)$NGWj$Q 
z5Nom?rY-m27@;pxJ$mSJ1;&t1e;981qw@w9KMJpRYt?;v^mor~q2(tg6;L)i+ z>wC87?&6!p;&O`Ur1j@C&hm4lZ;K&pRVMDOA#AuoSJC@3&ud;m2nuhn9y`ZO4ZcF9i zG>gkxu6gMwE$SOkO}BQlirv`FEYh?T^E22p zm(Z9}n+FSn=EFCtTBH1d=hVWO%wTHP(gsxAO{=9P*7)Fi;*WXXZRzWFkCr0KuHlAg>zbt9{`z)7ty9;R7p78_g4va) z+YL?-Z>K(iZZ4|070!YylEHMThl#d^{C0|gZgx2$7pZaOQ6Au{;rZ3eCe5%YMVeLx zz_JO4+N(D*wxJ(vFP%Zl9ZVkK;cOk$`3w{4w%-6B6Y;hm{!KhjVH)Rer%@`ecFys2 zA33XRIvLv#v-dPAF6xw025J-n$m`x&nxVFIArekL7SfCP#K>>{dSlk-{j*O87sqlNl^7lyP7ck4 zo05do6cBR{Opn{0jajwx(Ag2kFklX7eeEf_lOp$j7Iky5szldPoH62lw5F+>uPn`B zCktVv1`5(!C8{M z$~QU?C~4~su{}rvAn#yyd-y7ckZ{RQNssf7Fe6~8 zsAUPL7)z^VA=U)od*ZWxKZP053Ue8Jj~;q<@ta>f9Wl{z)=-0;BTibp!Qur?IY0Wp_&lRbY5i3EDB}M z#Khi~H?N)Fy5^L`5n$$}=bauBxTCAr;zqitt;eFuV^}hRvp>BFH>hh4gLZmAq%IIY zotU+p#wkgx`Rz0t2JIYT4WlgR*-ru@0S+l=5JIM(iHAq|K?hDRmB%4%bKfiWND3U} zxytu{Ry%*2)r?9QI=|g-0ghsi+_`u07B@x#f<-#*$;{U z5rRHqO-p?>Z;ZjNj!Lein$M1WUUx{|CTIB~YTrcjCSj^Jg@+R=o~W2;&Rh0C1l1zN zrZvQE`3wuai7Y|oo&Y; zeuAP&-aBdrP4*Udy#4UgSe#>Kn-Xz96gzde zJY&kQay8rGZfgrf%7?w13$O(96bKOw!zuySv{h{D6l>*vb zyXf0Z&YdPIr0^hS$iso0E3F~Ba14IW*D_WS1{TmN6o?lovsyH~m{NJuHkfn>_bJl=2JN?7%%NZu;;b=#2e{9feVckLiy4f|a%$Dy zO?<3dh!?Ql8K|3wZ}Xn^7s4PK1;)g4MU&UrEu3Mdavl+g`AegV^`y|f{0hajtUalT`P+@j=O_#;npv-jdnaeb5$g;L#|a@ zg2?ELuYup%UT1x2RAkL2!3{NSIBAX2BcNZWEg3(7_tV>h7$Y57m!p-U%tqYji7Zf!UjD@z(Mp7%t77m(4 zGc67U2Aw(8{`)S3h0-opx z_(<MP)eGeD z!v6UzQrJf*ebpJ~wB(n1S~JC|V=hdY&13oX=?{69EhU!BxNmunPBTFRkiYVnf#a-I zdk{liSFlVXt&k&sPP&xTbsw?RdCf{YGW~2b4}$mOMJ3x(+!&CE0PzIZ#TupddX2VW zzTTz9J<=K|sg9#^`u+~(HuVwwRm`Feb-Q-&UQ2C7$B!I2ogw@5ImGo*Y1@#9##Pxz zdHj1%aCgwc0FApgGu$W~D6y2LS6~w{Iy!n5(YVNQq{#TDz#%S(ycmGh@a8C@ht3EB zf+81Y>*NxIA$I(XCWG^_0oo;1c>E&Gby--~P+COzdFb7}O|K?@yNGt}qGK0nzVmm( z3$$#Ejo(o+PETAiX@=r=b>Mwez}n~i*@LgUM`ZHyodI!OUTT~F(8JRm)ZH_VsqVv= zGW0^o;<(4sE64R@XWCAt$k!V*D5a9dV61W+g=eodp!L3_lFRC{Gt3QESk5CZ_X^eY zEcZq!H%_xoy3l(o;M6SoFwZx_jeNMn_C8YoAlr5cTTj_^X;f7*~@lx97eSD;? 
zGoOZdLE*w7)wUgkjAl5h@s1n(bvQh;lJ<^vvIWfDj}QHjU{to6eE2Rlhk==Pi74qY zh~X)}(?wYwk}T@lnb<95*{CUQKFby$bTFoL@A>sLN=k3-k+ks0(%C!nAV)1@h-D1{ z!%56~|G(Y4E69DZJ8WpiEH;XFb=nWwu56awG%`rNS%izKY+InoTn{p7DSu`3xT)1o zFow~keBlxOsdb^bf?snb=N&S|xeN-PfE9S8(4f8N9M?tkLYUW3bBbbw=%dw%*2%h0 z)TxBS%bD-nv5e)zP>yUT@{Oq0O~_Db*>`NqV;N*Twf3c{cN7=xgW%EkLFG&3%S(;> zyyeOlGI^iuB$^ZV1Y1Q?q7L+3U%MGa^n@42r9&@#T3bD>>|mfVi`O3 zGF%l#{i*$F zy^zP#nZj)z)>qXY&4x;re(IU#wLQF#U0b)gmil#iZm#h-GR@&2`XDm21sM(wa2){m z^AJR$&Q=Ama0=XTsbX<-3WKQ?Nynjbr~_H)5ltonL-;jrrO_YWDXTv9m+InVyTFtnG3~zzH>R^ugdz-fD_aEHH9xZa~+Q@j8 z%Exm*Etk1v%@$d2`IZNAgEe+p;b;D1)kWHGgS6iaX}?|G=!M8>hYt^S_=b5vG`7wn z)=g-vY?W~T0JpxtH}_Qn;k^6uTgPfs@O}C&kvbeMs&i*lKd`auJU3pMOW{zYb%+{x z;LkMYR4IPhWO?9YukiR(t}A(>iGUO9e4WR`eK6l{B~V(exay<#S<^}1fWiPmepzKDb}$tUB?eK$6ypX*f319NyPNi5q`7%C! z(f?J`061j^_jIinrw7fct1Cl5*~zr;fZI@nMaiJBO{Z}mpk$k{+AA~)R);0Gh~i#e z^ycn&A#rC{rg>U1I+U{yX3Nf5Gp&pKC8oIj#6DpmHFl5i+2zF<@OH#28KNJFyUdQ{ zj2@j^0TcGg&Yxj5qY}>4th|&$wmH}1UP=2I*tK(;xsEn7O%{PdfQ8$lrt&|W5@*Df zNl}XUw(@y!VQRp~tsT+;=OzPj9bIQ84C%U~?u9nuP%tzTCvN}zSBBD@34GBd=>jBQ7WxHbr3v@ z((z^?3D^dzNB8AUL%%s;|0WoFBW{e7^@!MnA@a9o^kBa>ZQ>;b2pvp z0o9aCsf8@+b0XkrcNWK(N2_ZEe#mi$O%{QLt2rx!$4ZE)0|K^nZ>x5miu#b`<>fWh z`P$SlyfqngsSAgppmLH*-DTY3FK6U-l`1ilx*T#?+-wB0wrrWeD--6oE7&kN zg>IDAv8M^2b;O$CKRi7FFO@^}G2*Tt*$gS851#R$-_KIYsgyp8t3G?!=2cF$F|2hC z^wPZs-gJZOS2xi|XE%KkivL_B6s`DJbXZ3)pu(lCji;_+;1UM%Ij!2;T0_dS&TUE( zGpx_pIjF`NYSnPPtq$DJy#z~v(c4Rs+{cM>R-1fQ3AI*~>~{%$A`M<2o!f&qkHadz>7ior^P4x{TBVeQjHqu3>WvjHD~lo~9DjZ{lW#>icxLhbK=F z8bUZ_RF6|lSP- zl6|R7je+vwnL~8w*9Jf)j;@?Lvs9b19w=60-*N#fGF#hGCJ-isQ79=1aX?efdPr3E z%yJ0R_L8%=8XVO|jT~5QaO^f}+$CojK z@mV)owOI+qC+c^(NL!lEaqg@#5g?w~gS(WNHLa%m_o>$0c_rBQX3IBnrJyzqjd}RG z_tMETV%GhhL~5HazikQrGn3Jlarz9cXqsD>Uq|afRwln&B`a&O_~q9AaKh;5O6|$y zU_K$S$YGgHk_2BZ3}3|uAAIlDoxw`7Mkl;a%vGqxHyA(CYUS44peg_6l~8yFTON(k zR+*&fS9Wf-lnghzT|tlM1ILf(&#i9Yy9x{T29$kj|7vP~K^AAvd1AqWq`u~e=2ult z>?1APWiKX~4p@rS=-mnyn`HJ3qMc8`fZ^|7oi8k%B6|Ll`;i&WwlNJ86HttazLdRN 
zWcR?INg*Uv6tB7O#JI5#bB6FU6V=-uastF`TI>z2qZ*>gF0D?uq<{os+!^(+O5RFy z>|G9+{0&!e=Wx|*J^sY@3_^Z=ce$k`O2mHTDY>o9)KpSryGrVW}NK);<#JH zoBMr#3$rjC%O8%KZWh6955CRJE`#}$kaayR^Pg}GkNvUxNi0em)I|kdubleq@HH>` zve#^aam8~7E+!Qm%Nwp|AMOghI6Qy3lPzt|1M~Wt_pRzI(F{uF4NyjH&W8J%>B$5?~a%YHQjF;louq1Lh)s0TJr7hDPC*1aYUVWB& z8;~3qdG3IFH3NuM9|!p@Y7*XZNS>WtHAV>$L1$RDM;G254C}&ig)=n4DYQXnq`GHP zJ3QJKM5Dlep?@DURs=xnCyI=!0|6|e?OOMK%XBd<*}G{N4HWH4P5E>y{9n>wO_tlA z=}6gqOyJ3yBa>Z?yreZes%lTPbQ7d9XruAF^$y4rY(=vkdh=ts9gMnMIIfS-Fjh&F zsrMSaLSlrS)ADJAi~?;_Lc?q4PKShA&LU-h%nO%e^|61g*={XRNMn`z$bq z$ExM^769}>VwSq{RR>#+u0lG9rtaczs&srcJLLua`Zobhqv|ph={_uv=)?JCKh`@fu~(@#y=}oQkGlch5YKYp3&GWwdbxF9D}0po(l|wl zw|($$5_ue5TwQlg9?F=)@!VQVFf#l6S7M0;vC7G z*)Gx*t3rOO$c2>Gf)Gfv=o~M|Ozl-Qd%SbF+fs4tl4cFTGnO?qBZbBHSzhLirx)TQ z(Gp|Ng!vdhJ(LS%KL5=P16CyAW^&3)-A5Yt)V*adv1km}M-AgnufVdUmKhLzE839Y z>mp{qA@ZV0^QLXP522(Si6#0@Yy%OG!e!-%2DN^^k(z0IZ zwf9bZ>Uk*L7%h4beYvxB4j;(62Lr&q?DHggYtdlyc&U{8Stg;pMyyO1bvV>KIzEdC zy&L_;%WYHGl5gGN!lD30gC`?lTiMBZ3F1e~Vklup88-(<&k##g%a{~w2hFqcVPlTv zjq;3Ot8fHb=|}sV_&SMn3hh0vwB&Dh*9!(f(&11FZYWMTfTF50y?T@^bIKp!xRE0I9&Huqa*^tJVTu!eG zNoKf{ga4K#*%|E_kaYNTny!xS=Mws5dgXgOG)1Gd7B|t3os`U86R- zV>dGC&q_+T@W1?$Yy;8fEB@Q|u-b52gjLsO8AvyZ2;E?G;2J%vnOgg05O!A@^XNu56iAuft4ik4@$3&6i}s(@aN7u_WyA3rUQy zb1CiyI$o1KI_JzLT#+2D-*V~X&=;h-4a6L zJ@oDVb0R)HAj(01;Y{bvq>tFlRaHX97wjP%DX-PJ#XWn_%cB~qfVhmF%ypW&Mt6zQ zB=OLBQxWPhgmYkj$EcmovU#TfnW*4GitV?urAXS&l=bEPNYUxgP`{~Fd&oyhz464|4)j zYSL~Rmt%1iagf$np#pwODFpVSVJ1l{8@5l`H}Kc=NqaA0Y0t820bU@M~8V=q7*Aq@(N8ppOrjh>Yhy{ z*DW|!xrW`6Wa*%Z=qp(sGTETfOZ_%;uyhK6qj{dAXPNcYum>gacltZ(jZBixXaDPf z^Z4)7^@U(CsP;R!xO#JpN0q0=C46`?Pi0`IEwcZZQjZ*kUH2IhPLw^EFeR+A1XkL`z6sR!x=c_c9~a&cxj z3J`G+gY@K?8SHnrl%P`GC61{@KDdzRI@3ESkf7=jI&;V} z_1>Ob8pn-%*P{}Xgzx7pkW%VO6W0JsW|x$CQtnwAnP>9*X= ze@X1hvkb0QG`%63Z|l8}!M1H_u9TInnAa=5E=(Y1)|0CzKYN~}8x17=Ky zT3hRFGo?1&EyRrB1?@GoLZ8a3%!~j{GbOCi7VsehjN1Mi%pi)S#ig5RIE$fk>vI_NsTAFQpO7nIv8@E z(0qG{qisX$ywVp;I8Y3}?tHk23o~NujMIq@-etv8AvZnNmgM%TA?f{i8@0;Ypf+i| 
zMPP#F+8Mbtzt{`UXl~*Ubd0t@oOBS7-qROe5Lp=uUeN?~AGyKUy^W^X$~BJ-%$UL6 zRoLd{^j^C->&wzb%CL{*a{TCKlexuoo%=~Q@Q{*-fVJ9*MIM1!*b8q=0Ku#uBL_Ci zXMcV^Ktofo^GCjcinKIAxtR?;+ux_soL<E=r*UF%|Be?JfpAKLzoCQi542i>a#zdPrM)foG2>L^jmFF&u4)kVNhWy@epH@aoWZ-byC~7%T+B367W~Ez`O%WihqQBt(5$ElCZQ%c;#p> zfC(X&B9)3Dq5!V(C%9!sNF<%)a(;Kzzky$fPv#P?f*|G5Sm)xw*~&97P0d@!%{0_< zF{zr~dxLFLFGRkNGcPtae{q6vkiGW#jAa3T^@1L&0am7YTyFa9^{zegUFkg-?#!pj zJo0v`GSYt;7dDH~+%KRj#L%P@HN2B3obstnX)907jW$MqU}G1*yTAtv=#B05p*aA7 zn#|;J&$74+otoyCBl&`-qUg@BQqXTepNz=&S!pT42dd%?T0>}YMVByLYGJ=lZ=738 z8Ev2mi~+Q4)C4O^O*QhYRPSZ1lYofgJ)>47Bx|LdO3u_xQwI?n7c0aq# zjQ{m7|2*28z4$%cW&9xHLK19q@eyX3fwZR5zV&E)CZiS1k){x~_qruxawm zt%1wDN(zti!~<0S$N;ppwBCj!`&l!MgU`;T5EFTXf0@iJJ1B4ZJI-9xuWX%X_5m!) zpIE!yxcJc#Bv0}50=W5P+|8R!Z|9*%U!-2Hd48Fs%WPU#v19fv@Ab4q#?*|4lh)B@ zu0E#$+Lmzeu3lwnHMF5J%c^-d7vsVY+fSre^}6KeY@kd6dLf+li$!v=v&`D zHW4t)WCil3cS zav5nl`rmR7R5_BaC>JEKia1LVzj_{=rPh$V5zo|0BS0&hrmHgf&#pI<#f+@M*-`r$ z4;|uLgMCb%Q_7$#eP7PpLPh`)JnN7T$fqk>RwNM$?Sy*+27-~%@OR38C5@?Cbh1RV zv({B6lyBKp@iJAJ%>NrW$BH!CE{$+|@E%lTAgeeJ_??Lr z=8g^#^adfjKCw-xG@Z}V8VLMyh1e#4qSS%~;EC0WEuEGQ0q7TE30d=LnNF>3tT+Z$ zlV&WTAm7dC#e>y_X~6%y6u4-0i@87+^C|J4s{--eiCMQ1e1g9&FM>40Tx836g@gpM z_S8U3o%XRK0bA&N5CWF&rV`ZUJ1y5aRAU4n8o&>*mCZ6qOGgISR=={VK961F)vNGMAtXTiSduJ(Wm0YzX zBUkF7vsd+N3;VLkiK?TxQ71zLt<>wTy-=hiW{4yKE@8}CEWWIE^vn3{b}pCv;iP^L zU&X)!8$iB(NRHxx>4(2o3Oow_-Sg~5S*r;ygUIH~*okEO1~Fk7eWj{3RVU*S8D=V;+ZNu=U$9Rva)ZG0I1F)Rf$izcOZ zd}YT zKu%BD!^4B*imRMyJg^CMv#Q@$Yj!EK!vxivY@J$wu)-XtPD1c^x_jYa1f*h%KnTkF zC(Eg2z6RyJTVE-Xp6DBLKb*vqc!Dk57-d?n+@Or3arft7+tcyTHhkV)jnz5xUdeH* zCz;%%^W7^wwg!?bvn@?4&%$~e-VP{^m2J1prcze|lS9ut{V*oXJ(R!wQWopuad=Ad z@~uYR@-Jlpg!Tm1gOb!+hja@&vHk`P6Wgfn0qAzZt36TDKo&x;=jG? 
zR}NHIUJTlJ+*UJ3iTAD0=8@DN)uilliDO);O?t)S{`#>Oth@Ulb{e=)4wz$C+ytb% zI?*`S<|D>8OZCI%)kSHxfcBVqHA!eo8-(PeC0N{o->A=Qv#okzTMLQuwo2n(88CFz z76)>=?8<9EqC`Rq>H0CwLx=4SbCbxxVitZblzqcre>k=6FMAKd zKCME|cZdeJW6D#Jm7D5XN38k`yvNl3Uk$sEQ zkfXAWr3@iUwn|x|tl7uTjO>jq6rCj7FwBI?5oQdDF*3&dKiAYb_c`}{zW<-cc|4kP zPMEo_&-Gc}@7L@3n%3XrXrZzF#84rvHYm&ebW~*iiZ&nX#k|%B+5vaxVpur{)Qq;4*;Jl6&Fz@)?Qein5-o#SMbp^?r>N?U!2l#gn2Ct8uNa{=7J504| zYXSh1{wwu5*Ee2043FDDn&p_>Q;ga<+3v2gtBfZ3bpNgc}hwjKy#-9#1}+q_^mW>9oj-IzFk~r*@Xva4xyLNF_}xHBYLgc3L(N^hK?Wq}jiW2E!-4>~DF(DLC-?>mgplP}M$(^)YKm;jvKGuJ7n*8e}BpA*iZ+cLY9n zGRH}2Cx-{hW3~lk;l)Tj8ZL6B9yN|N7>#?y$t?pKJFiJ`hxU=4Dalum<}HPTeP|4el5F>p}if9jE$bPHm}O95DSjaK|LK8XOssivfpTWx0_A`;S%=Dw3yx^m#Iz zdbflp%bf4(AnSs$vcqu|+cc#IX(?0%*CMrE8Cm^OlRWZ@TjNknd(Otx8%!v0e>`y_ ziB*LLu1F;MegtJd%S>}*3h8iRO8lflHf8cv^m*hrJ7wt_5B1mQU&KFLiG73|z#$Wq zsZKj{PJ@NSIWRDA7wp=OKd-0+r20E=SL0%yHwEfl4|Efh!nq?6z?s-^tlmNe#l^Oof?UgdE zJ8tTe(WvBFljmxmgN;du;dz_T?RW)vbao(FY5@n2NESKFFHFYQ6{eln+}Sw6S3W%A z=TJ|%Sc)|NHfU{Q@#5Y?2@&&6O7S}A?&K3oazG;#_UEH80F8nU9oO70(|OJ|Ah81&e|wMIASNQx%q@#dC|A@cC?RUo05l(ZWJHrWK*KVNhllN zqdgl2?IueVOo1WCAsYNm^ii+_clloBG%8jSo<@`-(eTl+R2IqU%{Gmr^CLU1w1}K2 zD+=3*V=@n+*@3`W|y#JpTeAUu2wd6MR;%IU8oU z#cM=8qaBzHVl94M&?mDrW5(po+jLnkJR6^{#nT$RzgG8Zk~l-jK11IP;NjA*4o*c0 z*gpU)=t<9d>j}yMGj5`Z@JC-gr$<_1?j2lpS3IsZ2b#>j7_1@|1=RP7s(ICW!g@}r zsk!ioYI=O_&0Yxh>Ofzq_oQv`!f-qP9F2cC>YB-1D*x6(Ou&P3FgMJXi&e9-g7N{o zp9&HfpZcYg2sC(S9526YPUiF*EqT|mAzuO(vKpMM9eH&Xth0gwd@hz@7L0Z}IOTe((IL{-Gmq1KU%WEnWvX)R7H}YFR50aZV|lT&_4Nymjx$4J;>eJmJR- zRgo{2PAhyig_*MpI$*f^D*ibkc;5pz+p`|*^&BS{9cVCFp!;ds7MaDnwzc3lnIW@j zd8HK2sHib{Oj%*J7A#c)R+NxcCRQSO*SX%BCOKEQ=22;-CBe7XSMa@Ib?ExCZDi(| zVL!JF)I)OK8bGr+li}&-hR>oMzNuMWFr)el+pYM2TE_Mc6FuLp`3=6gAVPujWPwKy zYw2dCW<^?pYI6*Sdsb0LhXiZJPF6d1L>qNITQO8n@MtV{8zXRIGGZ}<2M;I9$ykhk#;8qU7T|9U!NWUe~a&?TN;UYVP zsX$hgPQCdi?MAPfF*#kV&)m}$9})GzK0lJsvh^VGQTvO)UA`G{Nr1LmH=m^Rz@w-p zo;)IiDwjD|>V4D7%)09_ccj}cW?pV3I;5?Ss%Q2wVT#*YmULGxa#2AP*GCaxrjm;h`}rkpsLU 
za6aL~bACYgluaL^Jwytd)wY=gTk8+7B0gFq^ldZacX`a2NIZL;n1F1|$)E?%(9G$>L(e{rqzXE6n`m4FX%wi--8eaLp)lWiheAY#Ok)#tNOU-*{Y>kqb%^5o{l<;smnQTWICLldm>+H*M9*A@f{i~HOY z+{~tp`{ji(aih{Rr&WRAejxy4*~>@TuPjU~oFL7=ygnM7ui!t7gorRrlt}3?U%ml) zwgi7Lc3xf5zMql3XzqMai@N730}^56q8NFRJxAUR&X|xLsnkMCl)pkG|e9+oHBXC#1@cvo}t!{ZmbzzW;4l(~?am33&4Pj4Z# z^FpT2`s*X-gyIRRKP!lLUr(+=``+|N(5$dWg@QcLECp34Wo$5!Id5PnU_ec=9!QUvs|rRr9M!p9aXst)9cx~fh~Sz zM`TWh!*6v)(QD)?6eSxf$lF}$W$jsBEk6ok81yU+H;?zYN17&LuX&i@CnlSVdD2+sphXAk5pu~UEEJ_;l=vtHcAJjtf_8dn3 z?*)kkhbLdSvAY&Hy1wov5#CsrT@r-t09%qHQM}C09~A1iN(#PZQ$H2Mu6U;hMfKuo z3I`;H0{KR`IO7(V2TgeyEE|X5Ax7yp$C0&jmURM)Hrp?+nA)cYp7C7siCW4FN4Fts zwXYJ|j6#e0KmEQ0O&{&|Q{)grJ`SwOlCVO#5Nz{`U5SV+f3ejLE5_9>U)xs}B%uqW zE7l2k(bt{=*ZWwMANh7kTMdvl0}TqUR;o6P=e|Q60yD?p=0INB?>MMRW`OaKdMpjJ ziUh=-0eN&#?!d7~gGb}6$MNvV)twg}C<)q6Kvh;?G6RktNcire*^Y+6M?qo1*bXcn zuV;y3B{~^Z!501=4PI)5c@y3VyA)HS~sm6w% zJAjopaFbdTQK@b?mpBJcGAkbNyZG3Ner4|l)5-lYSMqseMEe8IiGs`7=Y}xIMSm~U z@2wv`y(pM$$0)jymhTY16~>94!1~ni=7ry<9#U__6gz3i^%Y~#oUdYVB)7{^*OCrl z+fc-=Q>7s%Cg#-)be&u?NU=q*s+H`}mwpWEHhW0RY(Xh zcGUt3`31%?;34>RK?LJ=yTY4VPX4rGL6#zLLHh1OYp8S&F(YboF;oKsY>mcEuvT*$ zc^h_N4Y|SZ`2_e`gWC49 z>Z0Ml_mM~x1mWSfpn*-W-SLFp4+P`_?T#0)ZuA+sB{+4U^XTAKZBlwxSi_=WLo;12 z2cO6GP9yIu1Eudaml(Fwyf-uz0F7Wd(d@OJGwG0;H`ttpozL0#>##gp$&Gk>rX0G|{n`JAE88go4B&Tk`h@~P6ed=<6Q z-sw$ZKYcUG=O6H$-3LxR?7w_=-_{N^+mYX=v?kjE6xQs zTq|@O@=@hTKEl}Vw{L#m?3XsrXq_a;1!3&{92#@fL6HKqW?El3b)`d2uZ5#s@uFUx zj|B(UT5nH(NP}cs|E_0-gv}AhhHiAA$)B7X6VLxdl>paULGL^8D7o#IyXx}wLndi$ z?&|W(m-ay(Fe^fJpYyjHlWJ%=UrlB}4?nOK30$smc_MIUPwfi!ivZQr7ocqeTgziY z#F;=i#$DAfzYgvD!=XLmQ6tWxV^^EYTLLfNrUqJcVeOSRS!HXh&KB-2F_B(|3d+i3 zmm8CI!NSCwLi}1hgRsBKtbkJ7Y?wLAoCN3g9<)Ky-U*@$P4a^asu%xF4_Lj&UO_6s{L1`mC(+e=S%xzYKOUka|`9mDY(gLWc zi4@#By`TSUR`EM1UoFSo$GDzNu?L`sMFXb11Jzt@`f(vq!%1*6Pm7fO_N zzsAGgg2V7$Ip5|nmuMaIU~oNUGN#8yz6@i3^J??mo?XVSIo(ZVDaWz)d~dYo8{_9& zr-*lEt{OQ*>Qo1)KE|uDY*spd#IX<@HlXT$&?Hc}gNperGn+GH>Fxjh(uEutd?~At z%k@B%fpT1D*_ODDJNPIQw$UWV5j<`0*+vtRf>c7(IA;qoy0EIAPi#nurid9EcaCn) 
z+h~ojzUtj5NIqLSbt&!hJ&Uo0PqmBvmmw(!44-1MGJ`fH&L1&XG7&KW*pz<*mF!0y z1!~bM@qrqIe*Q@Bcp1-60qM-St-z-O6Oue|?C7hga=yc~P>Ea~G-otMu_Gd%x{PQ( z;}H#iYIhqO+k2(w1EjeDfTX$Kv)By{^r8}`u%T=-ssQ}(9E@*q+5ZAc7?)pCDl@!m_!rh)WS$gEK-VSqhT?+_(V1>{hGEg!O!!I-llFe}A{u!MM z%0t4cp3l=j)lA!uJ+qLF2S;9N23#La($|X>z5kaU@^8|}UoQtD^??fKJ*c}IfqHG; z6f(6YCNAON{aJGX_)cw5db$1+ueuMH zpS%I!5OKIZDfX-hL%f|hTGQLW^~8>Xu~%ULY|da6%MVy@U)qQHg2vRg(Vz2~YUapG zKzK72^xa%i?kVxN#^is$Eb9Fo^m<4AP6vlzmiDd51~XQu<4Dd#a?lts_Ab^rmD@Mt zYA|Ss2qFOA#J&nVN}yp`?J;@^D|}>k&3E#RwOUG7&$s#95Y7yY!s4aWnGy;C(R|mB z3ZU0bTil)~MK#B7?)_p$AoGYTr_+&X->&hGw$H$?he z;F;gjP&bROxLT$9M!g*GQIa((jX%GRFtEw^&6T{i0AOSllcGP^cgqF9N@iv>od4cK z{#!el{qh1rXL9EDGT60$X!MN8N!Ku5ww@sMengE$8|4=}o)-jSs2n+^F_*!pWr59k z`*M8b-v#a=a5W~sIBrM_AD_o^$uf^>j|3*kQi_n3TDAk_*fBVBIfZv)c+_Bcc0&GA z7IV^p4mLpdGb@J!zpR*FS~(fF?~F;}$1dyxs>#LBd)@?ue*gg_LNKkmp}sf7t9;^g zAy(kv6^5bH?|+%N9Os+q_6rn9i|v;tLD?SWSpduAjwhP^6vqO&&i=H|WBLr82H&D?nkQR zyY-Qe7UtmU<;p1`Y_=KHBS~MI`!s+2kz{W;+c32*Pp-N(b#ahH%5c#DtG*ke{`hbC zGus~$ZLkhnw|+BcDm-lT8d*y$A2d~VVsZiguPm>nMMAQR4_}|~%_%W@UV*UW? 
zUmQse0Bq~`fbntYbY1d9y;nAHu3ea6$-K>-HmuC7o395mQ?)w*<5(|KQ%bGujs$*_ zH0aX9VC}Q9CGEa!VKPv6x9YV}8jmFZQ23*+#oYlY;-d8@=jo%hjPjL=(0U)BrZ_$R z0^^DHj$l71x6wZ${8N@Sm%y)9(fe=g%-;z#65OA{xj(kgRMR0`k)NQ}l;xKy;{brc z&_lRXVRj`F&xL+`RI$;UrN&K>ED>OpXC7nTGiC^I*`;us5mg8qs2^JjIo*ISCKMRO zj_qW@JGOSD!42jdiZ*8vRbrQb5FvW}xSIXPjhzhlog7L-xGNyVZ~RV2BZispH?|U4 z*R3%7wI}0>d=+%4_rK~gx3RI|Si9=l7+x=>AO+8gSvO3Omc-94Y?NFphL6q4^CZh& z{xY_IS5m3_jAHMZNrcUI3y$=2p9cViyz2}NZ#hl}zTJXX#o^@)`hqGfVHmW8KO;V5 zX}E)$s^e9ez`Hi=-KUCCX+owPU>mC#4=;Ds^K`|B?tQ9yIC-{l1!ee_3U+^&qCYx6 z+sT^}qZKl;;go!RneiabOTcmj$jTLc?czXHI!e>|>IoLKyntEr4@uYF=3z;)R8RJ$n}taeRA;=#SOpI^Ia>>QR@oD!sZ)Ik&GJTY}weFeIHdK zvwB-=evxy-gs+a59C+|N^NiZ*Fy|#)HaQLBldS1>acSi)JYP}a*+#4ecer(9tj+UD>LGQmL zV|5X(SvCftS|)oXXsSmo2j!JZs|EK4TMtY07_}ze9F--ntlx^leU!$@;dQ~I&%f2` zn%O-g;j6oO8Iv zQr;zDHxgk;A~Qd5&S}qFoEvK*t8bW>SkBON0nVTb3TFgS%=KdkZK5mG!FqwM)q*6YeiMie7acEv__u(hX`>#C4hji}t_-O$-sf*sBP@5|20|KKhZ6 zwGJD=ORD0SmmCj2y{}F_qopY>qFC&v>Ip3G^NwT|K>6|ue)RbMeN92rBjAKEKlFNq z_dxqO4%LjNzl}9(FxKq;KGyp2x2$(yR9A6Aw7b>o@lGSX!#F(u%G#)$J~@=RQ zn=ibKm`suKXl~1HqlTkVp-mFDmW^F9b3dj+S!P9G)U_V3;{tiTH&G!PMoa4VSLxcB zWKK=zFL7@^EeN-D)yko28O@TCch-k$<+@qxF>B>--QH2oe^bsMbJ|~jSA=|Vsp;Yx z!za-sL4}cb^`X#{9~q<{tv1Jw&U(Dn^Uoau*Dx#d$W z$l7T`;;(jD6aQ^XI;Wzau<#lD1`j2x)Vq*7-fwcFBwuynZuL-!)tK%HgiXB#hgq!= zp1Dlm1V=+2kt-}L22J+hgc%qE$(OCZ>2fnpOT}?LZ>A$zlsR)DmiLJYbFpG@Q4!cX z$IeX!-;|UPDndN4(tKnay5Afj{v;m$gzWEASpC!Y51<|D%1_({_F5+>sa-hiOd@s7?i4#e;`vC8p)gw0bD^r-EZ#OhC`+y(v}}aG~XfpI6M$y*OV|;_1p-woSo?# zt13TMXBz`AyR@r6SxH+FxA6IKB_nPcB-Iw#>jwgfL+)ThVL>a5Hz?~IMjDB(4$wdnSm zBm0bC>Tvj@u5oYtx>aaDh9YqT8zxBQDR8?K8^)HveJx-%=SlU-%k(Tf^y%`lbl^QY ze-gvKN6J(5*qVjz^zV%5XJ3B|JtYLBEa*|pY|^K#2eLoE+g+RAJ3^hNylb6NyrZt% z+yaD71oe8y45E}MUQXZ6*R#a4SKg)=;=<`@_)g(hesg$=^zQF9aTlGlA{Q}c-kh1+ zdr*ESv$D@Ybi}j7IK_`OGCN0Otwq^ej=5*&v?$Tm@gjrrjPcl9*s z2jqpDPu4my!f=(UrNhC?TU>454ne8&_%CjYy`NpwN0%Y!`8>l{2JVF`irmAds_B2--4 z#sYSE4$#QBzLr#Hg$H4ZcF6~>S3w|}GbTJ7)R9>gLXuIEMM znTrpED4tRyPBU5+27?Ll!gLP-!KjM@vbLW%3ibfe2sn8<_u+4?Ma&m#4aA7qo2)Yk 
zbWT=$CZO9}OV{yqD5f1onidi<1&+%)BDCMeFqv4Bk?4Nj=dxo)NoMzZ)i4q0%PaA9 z+Kg+!O=#b6DL!iDe#j0AB(z>&4Pza=%nliAu|S$n)JC=rT3E$HL2tAbu=9ZI&>|cS zOSCYFwTT{*dyRD8gV3y=7ezkeZg$HC$jq+mZ({hzA9`{a&>~J7bbB}C z&gX({@xrz`bb`YF< z^Jc4GRcYq`3)b*2!Xbz5GsPILFzKbd=a16SQd^wlRkq~!PAi{p^hD!KFOMF8q}E)S zIb{*(I*uBE1QhsmJnH~Cg^v)cge?x6f~b3L2wlRrl@xa=*jErRY9?c$7_lEow{Uoe&;RM zQ@o(`8>JM@1(>T)A?D*XT&n#is(~d3A2P?Pbcb{@ZA*^la)*UD5!7Da$Q6ku=D99> z2L2PL$A)!5C0epV*V*#NQ`BOalJ>$n87bJS6QK0j(_C0fP}(uWv+>aEQbz7s*FLx+ z9}hREegzXYw4~3$$FB0`r@MDQGaek1k*MHx#{D>O90E@Eje@$@+q8&te=#9}D*bN) z^}oHuKT|SpaR%~F`xJ{{2Q)$3*Y1;Hd9*weKnYJBZjhC+Eakc>CnYv4^1OO<x^j1Jn(FBj|D`AhxIm6-=5BS6X#bpVh{KO>y& z2)Aap)`jerJ|{%j2m)C7iYfJ|&ChTyxS_+6`fjYv;cmMNs} zkqL!y$TY11ui;T4%Wl{AfM^Psxe3{t#5*78&4u*~%AT%G&srABX3lf6g&$+*va@F; z;wee`D}R_b+{ zv64dD6e_8{zIg)d8Ax?cE~&c^%0-0n(jXj0d%xro{i=f-g_U8D7Vb*11F_HbnZ$1yQswoURYn24mz+y95k?w7!h-a5zVqq zfoT**2u=|63RP!kdv61@l+XlnfurTZX?APESDravG)o1bBC7I{w->VoHUJ?LYrGwx zO#TMy{Q1%mHG8I^b&ut!qs1XCV4TF6RCGrh(AMj!#hi1EVuYSp4?R?RFsbkp?xEbF zo=_KLZn95u`M!RV@4(!`0$rz~Evb=MDBG%k?6NZ-Qn!&2O_iZ`qqRoD#jh~0SvOea zj)P`o-jI~lq-(2-2jQVVO0$;oFcE#I5j)_Sjk^_-8PX!jiF6~cT@Ve%Q_e}RZOSQG zpjs_!1MPN*`)L6R9_}%1tzI`#V3=aZIP!QNJK5o>1RCMQd>-bz`7cj_6b2)xc(VpWI_sJST_fNXxSMI0a_Xqt=AttZ zI+GiM-X!yF23QW1hi9jc+Fn%dpp>}x8!&t7Eq6Ck&m1Hm1U<+8hh&~Jj(=j6X>g)B zK!5MCOfbk#`^}|lS9_cy6uqpEb7-Dx2@IGdqJ(vydsC}s$Wxk%e45-Fu5pFc4xlwv zD#%#&|4YLD)Cvj347AMAPu#d&(N8SJrP30E_LRsn_w*6v$l*T8=W+p%TK z7KoS$ID@Bv<|pDSI7|0$H7t8?CZ@!VIyBUkxwK1sK$_ibki2gqvHn?fBiFwl1-@5| z+c>|PuAW*C6D8ocp4R;#Gmmj*U=Ru7?uIE)DN`p>-XP6|UDux&kc zq}{tK0HYB42Cy$)0@5cK4Ap^$QoP>E%AS)3Yr>KoECwJH*?$A(B3B57+MskG-W@FM zT$FwVIMN8g-iZBVJOitPs3T~SnF+8vgb8+yyGx+{1Y55C^GEcZhQO6!@B@%R1^)J* zwOseLRXK^BPh#MSpq(|Sv@_nznG)Gm|GT(Fmc!!ek2n92YQf&+D3n0y1&mVgr$g&F z_<>K^jsKYg{{k9J*KFZ3&_xe`jx?a*y8-2?YkF8$eKo3mlJv)Jsb1`z_4VKWA!vi$ zI1J>qu+rDi1<5&L1gbkD1{VEva!bdmCwk`-J0O}-F@7~k8ijveMBaMvblT=jNXS3M z8)4GyyqaQ8sB6}@=dWZ5V^J-vthmLGPSqC2^c<4tgb4xEticEU{DVI}+u;}B7*L=g zplOaYP({Gb@BR0nax<6+x^gSE^AV!`>Plj9L|UlU_WBcU 
zFD1g`Uj{!O{MaRrj9poLK=SuXYj8RoE8Ar{z_6vpMWGY%hgQ~+_02xpp(n8`TIK=$ZYtcTMATQyOS?T zQtoX(qyQv<4F+Gp2lPbPjQD^)){|~&*_U8LR}I=7dss!^Y(EBf45wVr3<(Yn?g+om znOcG4oWQt%zN*;QDrWeZwgr$Y zSXhb4Nb8rfsvYG|h@!)6hU1=adR*r8m%5&d8vS%bVXp96iq_LRAXhV}+nwo18urX| z4F7zR!&=lRxqkJvS=+%fvm)-F2+IT*-6L$E;d}Q6=uI=qYJ4jwm^NvDRN|PWQB{N#bJxwVNH2KDhkenBP&3t8r?}xuqumAyF(hat5Omn}xzyTEa#wyD!5g`h?}#;PNUxi&IO&9Y`}Eu2X;$}#OPZ)`Ql zj}zq(%3q0xu8c>Y!W@gN)7ITEol>VowzUW&v98*9uZ%ipBcq}xHz(K0VB+);@j=L) z;~3hAn5(yNp2X?82$Hidy7Y(|wK*;xas-i*t2TcC)_1i(*F9jFfF7loTToBndO5db z^rHU`*@(>#R6Z6eI9dMKf^tNd@dI3dXg;~dNm8=t)pLoo4~?=!PmYuP#dkp{;)V5A zH@uT;V_dYktr^S9UGK&6dR3RxKrp6@;CHxoQ@3PY1P0zpoNV;$Dt8eD?$$gzB>iff z0Sr|=6aRQNxHA>WdUR5B9iJ-0>ZLtu2@UEV;(eDTXvgCs{ny(}1m&I9)>az85;ZqB z100Q5JangKZs=w>I?9LckaZ(74v)+hGY9QeA7-vyUAHXRKDzIVo#tcisUJm;v6c-z z(T`EP^ic6N%ETKH;Q1t2Jnh&f$-mj;B9>p}nrnQwn8;F{Ze`qk;7*~sU$lLIAGmF- zJVFqM6%R!E=zi{ZXbsYk#4TqZP-K4i+&vp4tTc2>blt&5>+BV`u=9Z1{quPfI{_^+ zuWG^Di`i_scRx?{CIybtFX1*AsaMiImR$ruGZ}2c61{u|Z@ZHh!ND2syXGPMZZL#! zV69XkTc^l%bC_Ngsx$Lr{U%%EN1mcKS|Xu=&#eVS#Jcyas7W}gr;$f?33vE0YT^S# zI(k(O3h{gF*C5!3{J62O8D?#__I8LczzUW;1OHrS<*z*08bYH#4%N-!>-lSkboLUo zsD>FLge7Vnv6bGbt&`XJP^*@eC+8B6Ru;a>9WLubbH7xHr7ENs<>!WSdM+-+H|)Ne z2G;PI;GsIb#f6CC$u_^ENA>N_Xlw&GvQ~c$WQwE42bBXA)2@Ahsx=*4ixs#LnLXID zvD#57-?ExVKT~F?APkd5iI1&fACJ}rNbYtEUU_*kgGEfW4)uKhxa;u`;*e-Y{zgV_ zcN1$k^*D>wx$LF-brp-5=-PHs`H-2-pIS-SA9}#C1Zy}&^pzqlS~yBam99`pgf%Tv zfzrGLX5L9RUYiqHLb=JudP?BTDCwAjYOPVdYin!2H3rH98#`=ak8LliDeTet8uc*Unn>PJ{I|sGf52 z_kADw4s3K8v{{UOU9c}l&EO6V=EaRdUd zlk}Qa06L5FNei=8P?6r+5|Y84;JHS(!DjOURsl=FcXWfUJ5k2vy3pEm=p16H_wnWB z_}G9u*2@RLHVBfiL0{gW&2brWn_7cETSGvgG)%wLJpz3TunOc~mE@#xClarvzDCJe z!6-mZ^Mobf3>LuLdYQKcCJ}lr1lj-y!@*XXWsU43*9CbrGD#Ag^b$Fa^&N>{i~8mb z<(v7|LgAh&9wP}-vOnGkI7E(n>V9tPGU+Wk463*Gp?q>fl#hV1)AVf(s`F1LalfGi z(1G=)V19%XCI=q$3>I^U7iVuEx-m!cd_T{+Lu+AuCvU$=D6Gp4q7|%BPedA+i~e;> z8zAl@?2n-#Z3~VnrDO~wcQHdFHArwnb^8m-jgGtEX6N|CJaP1AbS+~gHy{L zM~cm|sOiNl8?ibv23WvouauDz5nr+tBlY_281hL{j=bA_qW+FUM0|WO 
zb%$orBOa~RjWtRKPV#ucG%ol=pWo=DNoE-TmtkQeQiUMwUEb_8Q+$Ai-4&%@J-|no zVXc!1@v*ZaZm;H|Kp4~V%TV{Qi|h*OS?gaY;NJkb(E~_>Cf4RGpyOL`2nw7|Z-cOr z+n?Jx2Kk_4Tn_!tpWzakiggP=&MnXZI?xl)I-KgfCgu@doN>Cp^8Q>b5RQ=fkq7{{ z4{iiB6_-I_5&-R)ZY>b=^D=oz_L-ajmq{<{eJm%cG%2tBDtZ45T;J||m$|U!;EJ;& zy}4yu<3*xC4cy4fD?Y8E%V5m9!cQ9k3@_!MukY$_0co8nv4(-=$dung#jM6mk*lLE zQe2kzzui7Cl=rGhPThZg8Gf|v14MCzboH>af=c|&h1vDrqxG3{YCw5HoBPokJKE9> zd&XW)!=z^-`+?N!12 zLG{caPoTW%hXN6f;szT23gw(Rj!1D8OzEPP_PQv3S z6!q7~7D5ONp^t||w&Z!=wB2#;vkU4+Ik+q|azH*6Z1y&v<;TbRt`Y8I+~I39R+&^} zJ6a~0hvQ;xi!v*d@k)C4eYRoqoeGqwd95w{t=6#VMa5IIw+rsw+q6I*DK&>Q!1py7 z69CN#++o#b^=cd!Au6?9lhz+oUES!32BY?M;%c%DO$f7erK z+!H^}My;VuCuOHRbwXpDEEV+9HNUVRMJ4e($GN1w z5X;!h8>ezCU9yRlH?4Ap$L%|}yRR$`IprnPuwLLmdV>ZON zVkX!`V?!hDU{Bvn!~#+-Ffx1_o$mD5pim}A^XRjJ?3p^p10H~uI~)Lbr9u}bT|Y|y zR2xU>GZz-WS1kx4onlJ8{|!w0Gbbp@1Wyw&K;ALVRp5Z>Ax@Ia6+C`VJo=*B1@10@ z*b$+v#Q1}5x?Ntb(f54CU}%Qhk}2&(>1EMCmqvW7Ib$G8(uQDzl75D0^sB|Stmy{$ zh}*{&gq3#lT6@&%n^)O;Hq(Y38P>{cq8AW0#b)BVL;1i3UW^6&a-@jBhEP%^!T|9o zP*yoAz`V=!5^+267@|hr!^Nfxrxp2JDJLWPkRd~jbgf{_hWy8Ds4Vql@uc$nY0S6k zxXZ}KZe|c@j|MO#;;1{yse>f?eokGb5F4UK}@f*1;q@^0b7;W32{>0-**Hl)ckakf-kiH3^_iL5)bOd`kN|0?ReZ{$YvW*T zSIpyb@ecJGsdRje<4dg-ITR5eA=x0HObzA>EwW6>ixUmqZP&$w5Vxg;%<{g%xavw~ zDRbX06fRHUMnar{MqJyg8t%*mQoX$2`@`+MeoQ~%yb-aI;%(H%;z?_({H#VfP~FQB z;W%A!YI7#}BC(Yjs0wjDt-$iL@sEhzI< z6~+IdX7tY>%Kv0i_8II*O#dvRPvXE2L<%tPm4hX!mYP@msU+MTOj!r~8tbGCw|7cV z9_N`|Ze*ON$_z0Jspx5xt)5_{M>^c~w%cFj*xXZkr!kUE75U1u=OOJRk)_Vv2K<9}nJGV%l!20wW=KMYnvcwi{ zfmQUJ78ee=4!51*N^DuIGtd(^LyR}BRn-RDd}lOI2c=~&95UW{-}PHrwYdJ^DU|sH zQZ%8QQJ@O^6*r~{72wx-@exY@#lkJBuP2Q9u9z!r03_RckRoVwncXY<_uiT>XE^$% zq?o{*+5tVsGTD1F9Chm=n>!>W_Mg|r`q6uEA0%B@44$GH7sXXVQ&ktL%o!&i>pTjW zd0{XZP%}39A;&HeLqg zu)x;$c0fp>45ahQ7^UEKTqW?w7S@@%b+fL&e|H=cI#E9ccG+1VM}r4VA9I&(TO1vo zG<+h?PQwM^SL~qNO!zOb*IM7h%*KTl(o^lOr*Q?Ra8Jyjw9V}C2?%5K)Dh@*AUb`4 z@4o|HebUWj>ZKN1=-AV^ z$?EE^l*77cDAx)kT(FZ_)KPy%;hqZk{zrRbf^9lc?v4k46dO4UQbDZls9G3Wek_x5 
z1q$?~|D$^in7!dp{qKm;bG4Jpem2K1W@v^C>KR|iR?}a5S;E)l>>TfK{ecyfTLngf z7UVniC;u6=Rxq~@4tKQPM!pq+`);1ncj^SFv_HKE$shhV%@dLf=nU?G0+dAl9`8YM zZ*REX{iCRm9<&fVxZ_}UCP=guw{Qe8uF!>tDxVW2lRu-#ig{7n1hqn%qy~Xysfo7# zAv+)uFYXJ!Z9A`nx;=Tw42zr5Z#sWaGxO_~&SXx02o zPaM0TJ+Pq~JyEz&zi`o)?kHj$&w2;l0mUx)Bj=?5{SKJ>ev-Y|Kc>${_!Z6pfQy1F z$Y}c)1}Zzr6GDT6GH#zZupm=14-6^A!F_~=qKd9`VIR?Xguz)lm-1-i#mQh&(wNs2 zOM&m8wWL+sK(SnbZo7H#F>Y4&(wHg1K;C&Q)X46yq?FbU2JG z1b+Qv2fzR8*V|iLc0W65?Z97LmIdi6OX19?Cu@WG)p#hUL#+QvYWzFn=)DXuYssP6H?id(6j8GMgeIZ zFxjvh&Tmjht;1^Q4)&}NCd)0%=;u$51Hl|FrQRUq#1h2Jf)rRNg$M8`7Zgwbzpggy zsV7+6QQP@ZM``yp5CaVxp6Fl_cY=^O=nTN{Lm(IyWU9`m>a$}<8~qnltR>f)kNqnX zl8uLqI>wH93L9{BV@z*QX$ypPMW?{*lP0cbjNQHZWBD$#+e~LOUEN+n$)l%AxC{g> zY32~1Idh!#3;+d#dQKJAO{@7vumR_PlSKS_+2_mdN)%fJk^id!gf{ta8fg%6%lFOK#@CJK! znI3a;ff=#DaDF`c-3w!SA$vH|r_*EIrb8DE(wupBC2+DYkW!S%%+n#kKfb(qRDjaV zg)&E5f!_9+O4KCi78+8**cPC_$cO)M5&bd-oxP#-hS^aTecwP?gh^tNzPZkl%)@|z zOl!6E@#Upn%ml_qbYps(eiVDVo__9l{5vwwci?$Qhw=N;~on1b8xiFsFHycTcjxU>?8(Lhh zUfQPr_~%E}Kb|lTB<@z(K}7|<;oyww@$MX35l6m=1DQv|jMs-H&*D7C;Fib!&#+pc zHOP~RKm#+7=|S>SQ1#E{-@%S8dNm1}13J17Idy^F05YkN`BFOD7{t9ZiFKEGd&1$8r z&REdI;8_Av__`LGv-o+N*b3Cv(HmdLpb#{4WyvcxV1b$YtvHit?_d+6fdC#(lWayd z`0yTw=buFig#C(4*y{d;DHr*ZRKr6c>WqHYX=kR-aIg7^|%ETm<87GzT5O(*=EZmx$uzD%3ez1OFNAN6I* zbiQY>_8is=WLJ**eAfX<=>GB>WESbQyrDC2eOv#ZMsJ=yMGs((RC;^U2u3}6hCJ1U}*nrRCVqG-& zNo92w8-RL2!Z7qd28ZK^p3YJ>0UJNSKwn4Bf(m%#X9dJNFE+&LGqNvw2Z0BS_Xc4uX`?V>;p%&H64E>6HqFn^RPLU3IhN#sF7) zoB*)_hHSQ_2lbT2$MrUkM7(_D|C9s8NY>bL>WEn;QhtovV(a3c-%W} z#3FI1)}{Zj<*`H9pCPm0G(UsKnYCc%^ob`t5)?7{MVe@{Z?}NQsPia?CI4^i@$nsR z13J1#%a4?Ek?-Pw5(x*Izh;WyP^#f5$&=Qb1gIRHwQ?=!VnKYDwBG~_*~ zJ6SUCwVE6h0cpt30W=uT&91aq9`Ao9W0MX5l8}i`S}7mK7NKKN9e=B~0O^PUd7$?- z2&hr&LZSfD1oT&YK=B%z+fx7c_@X%A5-gW~3&RmGn zN7`6GEdVPg035yf^?jN>#O?(oklP?Z@irj3ZNYP}XLa8#nOg_+vIQm}jmh^vejjH` zKa%@@F9go}fbLAdFaT)CHoORwHzm2N^wGGD90G@zg%lp1z3~rpiUPA=0>f-|FvD+w zfdjsJ4E$%@*hTx45FY8X(kt#j`SMrQECQH6ro{B^?ls`+Eym?sgaZ1LCWca7M?8yF 
zUb~sTmHAX6Ac;(I7Xrl~{$m9|g$SE6H^Yk_1YV5dn28Hvl0UCEa(Q&NKmS6r;qhF| z%~?E-k7o=3+WluG4&Lw4LHzuOIval!kWPDI_+^?F$~coa<9~(GRs*4mA0(ae>64L2!+GftArE|xV1Jxm#sxcJ9D8Hi-m=i6x z#`U`WI#_%-1Y&bTA#K0{{#U#+co1O;V1NWVP}o9{(cKa~`r7Xr6mC2R$$xkyy{mM* zB$UJg!uxs{vwt*g&H_!3P)F^7Yc>NXuGE3J5&9jFD&7OaQ|u-v^5qb5=c?=4YJj9{ zsb~JgYNu)wP;M9cfK(()e}F`>t<0e{Qxv4SoNIrb!ocCvd zMZxJ*la0u3ji)usL>yvvJ*i@;2SH~sHi6m_I-vcGAcTqSas*qsbjmhBuL;N3%sQsP zEB`;j-aHWM{rexE7O7EDS-NDcWT&!aiG;|$?+Mwn#ZdN0Dj~^|k|q0|eTj*RuCipC z8f&RAwk%^C!}q*K_kQm7{{3Em++1dPzMik=^E}UaoX2^b{lbw=t#?%7*#wQ~`{VR`;y|aDb&DO6<#A@T)luE~12o-V z29N1!$=aHC_7LN@XEsIz)hp>5>Gnb;(~XdFRPc{Gggj3u908)sm3^@r^9fi{z~3gK z0~e2uvFgM3vtY;nzrKGzWhTNo9I4Arv_R`C$OQNpubeJM1e=C%puzd}C8!}C6{@wE z0hXNJu^=N|4NqDiyh`chIWm(MxB3!lun)%xSaku-CW%`ovdF{V@O0pBNT}$u^e@B$ z;mOELBPl(H_>a3-oxx+cbU~9!iD)oPa>V-$3L#RezpHJv1oc2DRbD_{YQMf&ER=>9 zVw#MEQVetq_rBIVH3G=_6D*9XUwL~`p+sB$MPMoVBr*Z*L_dH^u1x|NYV8bEM9R?c zVHEjd6A%A*=|B@s-3apVXC#jO8>)Z^x}c_c)cGjlZhY!|lel6Dcs`dvqxmudYi@3S zyuhxs>twgKik`(8hAeZWO;k>~m9B=5n=~d697fGP`yEfFo6@9i6r=J(o^05PK}tBs z8oSm~(Vr*cbg+`&(fop;T+q1wpxzz;r%z8PI>uOd1OJI19Ucf9#w9dEV^W#f78I=Q zKrrA36Lux+bB$;A@ApQ_p3c1oO7hg^|F91ZNJCjvoBV_F?b9KVXusOs-mRc*Nuc7Y z&*USYoPOR)G?LS-&`b>R9MC~78h#s@8dziO_4^nuO z*>YsZ{-TUzx?=_{m@pMvr1}{rW2H-ws0{h4=$;aC8uCoAu~NZdq* zw~u=Nk)O2>(N*?pN!V%k( zHfB;h=ohat*%_psK1@(Iwd`ro&JlAtVc6U2T93ZN9piNdW+HiC1fMqZlxCjX$~Zmn z2#1V-<6nVYXqVnal>ajvCJ3q}dVwo^>46sn7Qh0aAKQhj{cl(5Vxd7mEv6rIY}8Na zKzV?w5|Ir&fK;|W6Q=T~dDQU;$MBL>7j&ZUv}NGxonM>sfj+ayCxuf;g4U71DMLK) z56@(!y=yKfmLJNhpalWA%MOgaf9Cj6Z4ZyBJDtzygClDnv`g844hdrwb?@lw^-WIN zM9aoYs^__8Br})mgI8}!uiMnE(R&Y4-!C7EYx96Xrb;kGl!%c*?h-2L%&jTk(6&~ zr`sC~R!73CX3rsR#SIXsdlyZ)J>|qRp4^;W^z>W?KfS?^{Q>(;jAuV5GJND37XZd3 zU{2H55G{DvnujSdut-Q&)EV~?6ld2hggyx3p)V()h=fz!l#UVwBX8}z0sRuX)&?Zn z4{6C*fjv5@jI@L1cmo}8SKkT;TD7OhxzTAgno+5aA;OQvBDqt*b9}Y$FP{Lzy3lnD zQB?~5+8KjPP0a2X?(C7m+Gm+XFm0= za%p!-eFxR2Q`e(yl<_9FSRT|{oqVRgw@Xs8`Mu<*+?SvHBuA{Co`J=nMem>inXHmI 
zGcggEM<}#ToLOsIIQUD82}Ij-k1O&)L9Csx9}0Dig4m?K{`_dl;VCd&;kqcAJ&dLg*iu}Lz_Pq=$M{0MZ0ZnZ` z3Uwtq3RwZHtqK{naKvGNn|bZsF3zsz|62jqq#({Rt8g-Kx3C5=|3=)}Udeq_+G1~I zVJ$GS!byRWdce*V_xSN|P0<>iy}bq| zBv0J8SuRI>1%d!9mxL=}I&4X0EyZvp{q322EAZ?(Xgx*cCI~T5=er#=sYr z(<4PJ?<-j%d>dTWe>gw)89O8qA1e`mba+4*-F|*4pS8qt%1GKFf!sBlZhAXBJhYhM zU~l@cS*%n%!EbF)?CQq+>WsA9MpWIztm`!ia*70%<&)x#WuuwmgbhN&8m@SN$t~c( z0AK>clwRoiyd82$Hi{RvJ9>6YUk!W62ebC z-Mn#ICF1Pa9yb^A%aa>D*=LFGP37F~2RTnZCr@lAL7ngYxCI)H+Ku7Rn}WJSJ(%`*#pO6@8SAp13nAG?Om;TF}35>%APv)@GwWDjYaByHkrdaFZSo2&y}(zzm{7+Z~c=q>e52==1TOr9Mf}mya|V! z1KZvPiVa&x#2-jF!Vy{a{mJ)Kl9aa87d-XY7xO8jg%_5dFD%=P-jKV-UZW#SFY9AR z3ClguD{5Dxu1?FxC!51h4(-VBSY#GV*^KT8mFH!>xDA!;+5qN}=O%dM?X@a=Yo>!f z_q4H(HB-()Uy=xVF2S|AUgOCf;?G4Kp?}Kzi@&vR%AC(oVrM5*RR#_n4kGBKtl1(r zhpixf9O}tc#hT#`aS`|J-MbUB-mX=c5v4D;F&90FmRnhzJ%4U@uQTa|$xvm7*qV!Y+SO;X*t z%5nU0czHaV|8kz^HqKosorDSQx!z%c)Bmtkqen^UZ&qf#&BaPt`Ov(N6Z>?Neeo zQDV7TN;hF4#Nt!41iek!vR(&%k04(RC$}1zEWZaq2bCnMUWD5>PJuDydCeRA&&<#9 z8x>V^^|*!Q8x_~4^Sqzi2P)Zurr%1nzs>zFt;nY&7nC!MU%O$mo~so=NT`6h-0bVS z;8!|aSk{nJ>}zsc&)L>04ABh|Qp?M(MdZ+(sh0A^r;N_dT(b;6TNbrl(EBsI>g^3< zmL-1nDI=Fp#r}^lSb@bevwjQa@juAy3#ayF!!ahhPP-e0x_FN1Vn>Rc+~mmsS!BCD z@FBnS!eDAn@pR8sh~#F~%0YR>dj*}AY;b;g*H+Jy`}#UqMqzY%*aBqEsw&Uo_1HLd zZLGQsy`FW67MO&y-w)%M&r7*Q&SNFT)ypVk*m<-QW8K;MqR6dmBm?Osem~p6V)%V? 
zQ^t*6YE){npgY*p<0r+%8O@OtMXftCE-c$((%uCMHDv3@Puy~7pG2`tRa*V;;8+tsd>ANX6ce>huvG<_yZSw^fa5TAfKgmFDiC-E9T(p_ZzZ{j8 zH%yEbT22UBjo5)ojinfX%ygQ}8{VIPuM0F3P|WKmPU7mourGt9u1OzV;pT5$5tUYS zx(8~Z|FzCihx4g-?J_@&9LM`;X{oWV$21ymfns8}Sm<^-0-glquj-dCEF*2O%s#kC zTaLLsKDljkIHWr|?kFtH2ZS@Vy`I)h8PaY7uO;HwgPh#i>~4~sj*xmKm-0Pq2ZgC* zu%w^>oY-rhpfwALh57q+8^2?yp{3QkWi5!%P@P_Zcl7DGTz**Ye0~zvrnhb1mIJJ9 z2%YGEuV=~q*s}yl@4ay7)cAZnmGsbn^N!tREOSesm}`XExUv(c?x5?yii>fNJ*RHK za8TgR`qDFUVv@9FuL8xQC_||(oxz1*%^o;Pbj4ZlhaSI|Rc!>d)~H|7W(V(54vE@{ z3CHlIWdLWZ!`Xb_5f>M$Pi;B8SF|h?p5%hiqS*Pe?R%7OM!@%%HU^4u&t*3^1eBdm z`y!Id&L!sCaEa;p)>k(Lirpr+?jrRj!dLqQJ+uHJFBMC(P1BM$VXAIxbx)3%d;EP2WR7PJ8!DF$EU8TIo)nW!|WHJ~oLtS?OOkB;&zm7f#M&C&jTY zc6vpC2ln1~#h z5X~4gfDSMSz`>{ZE1t+kfmJ1Q=QxsV`wG}B_ZdL99wEC=wcog;9(C_ zZE0y493Rhl^7yfblH#8s&I1Vz4Y!+}IUjne;Sul-u=f^g*x}jTyL)ix z8=HYa&}9UNW)E6_@%|7gPH-vz>>XIU#kt!-+1$ zaodjy);vd<4Ec4s+#H+lki%k?x}c?{RZQc@w1JNSX z_%TWpgXV&gX(=guDedh}fAoW;rKMeJbrh9-_{z5I;kqY~6`WmZOT=$L`$Mm`3?b9D zM5lOU2OG$dCuL(6Z^gun(@jJ!uV`94z_2iI%R+71w^<8M$Ro;pKwbrgddi3lD-E|v z_O-C;s&468A=DQmZhoiE*38mzcD0u~d`xd8d!u8wA_&tlKvRkyBjVUeX`g3(;5?j7 zy18XJ-lQ9^_oAA2cA2yjdy*~A&Z@aFZY`IbP}4y)U@KJ>7XbZp{Z8s z=8a;vR4U(t2R`=v2*{X)nX%q7vY(E7?XD<#Ub_!=Jv%osh25atU?Mn)v;B4R*2w^t zxTt_6w(wP*us@BF0|v2%Qw!4Q-NpdjQz;6T+8pTEjjGM1;oULbKsom!uMvy#^WRr& zr-YxW#6`dBWwp21q`Y``$BKtG(Y6twF92z_4gp}rc5Iy`_rkb0dP>|K^zW4Ge(M-M zd*aJj6X!A}vHUklS{4|>pQb(`tExCtEeCU~u!U)JG=c*>A^8)OVW41fgDsi<@ZrAm zR{gO&N!zIF<&I~0a-yQ&l2o_t#?-XN5=cP>xV`W+KNUktX+p8<(BkP#(<++)xi9w7 z4X3{)MJ$vAP8oS`9pt=|fw}ySL))}^H{WB|-)=)8z14xKU7iNP-!2+D$W|gm&~u69 zP)dm~xToI+p6GDC=a)Zxm5>FcKW^W}#qtRo9h<9i7Grl?Ciy%YY^Reqy`_wkK|}+tNSD_Z}%}GH4zX0OsQ3b{wZGmh4>xNfT-mPth=W z#-}KGojkl*&tD6qt{Nx~d=>XQAb26rRhQrOT4@8XnyJRNE>Nl)X{gnxF*kP_}D;8WAAF<@L7q*oOuN!x9Ooqrpc z2|jl|*qh$X4Zht0L-PZIt{d~sC%zh7U&J56h18-@UVK@V5SV6`Ksai`q^e|@Ptw%6 zbv}b2eq(*b!)R;dplzH?`rHxt18^1d`1!pt*Zgyb+y5O8*jaT!Ha zKBzJQ@L4r3c3fB&#Qt^O&`PbheCdW)+=uf+qgrGyq#%*a|7YV!!$y;Nb4!cGbGQ%m zf3Ke)!Z9fkkr4PbF#^d*(J5v7p5M&1CUJ_9ls 
zjksye*@OxQUqf=^RUaQ8_aHz`;>LIc=H^=1sXO#yZy^S*LV78J>_mN{!DOcP#nrkO zivgZN&zGO>fvVPz6u88HaON+NK>;aKWE{IWiKT`%v|}y99yH>DyJtEqNz=#bal#fp zmi5bVr1`~!nTV=}-3mF-4$o{WEFD*vfK-pY#&AwC5m%vf`O8OS7bRL}sc`E4?zf(s zG6c?6NWVZ+M%(WH@c?<)!p?^SP?M=nf335oFjM_gu{GE~*d4dACat#?N{rzbR@i%) zFi7|D4{E0y!QeSj+@D9ypFSD8Tj_U85}F)8W?d~8an$)PyV0X1e1x0)Emq6CFo}Un z?1&|y%JZLa41)8*#Y2mrpQ-v@X^(`Y3uBj8^yFlLms3ju7qxG`D=S=O-+!*Ut#_kZ zx5Uw75yTDS<5LIR7D)~*M(#qT{6BF>cxiAS2V!_S711$}+orBUuR;!|ecQBRqmH0= z$}ecXL{2ged?MK2Fsy`C0On8{U&2)~Aw0S(fBw8tuv2}bCWm8TWo6~*m@!fh^T(wf=!MV;-#u>f z1)V2|Qm93rNX6i5K7r)|!vUehA6Kd;g?RYxamaJ%6qP@4Lh+s zAD&1Qmr~x)8lF21qe&WpBA!MBs@xru{ymCSnsK1WtV*cbAaOA|X#M$=A z=Q+DTR@boMSz@AlXlUsEclQ*K++U-Mk&%&zg`~E=RJgsJ-CyTs01+q|-wjjfH(M`J zO;tJ)u3`%r(Q|aP4%9w_n1ucsPkX}HDaD{=~%yI^Lvn zX^_xvH<&rZrBt^&TxAn9La60-V9eTJ=kkqJ{PB#DHwMpJJNW?^1>-#pd;-4ediO8j zZk2~s(@o3IQRV8~D{S#XJ^~v8Mf3}nUpz+J= z9u{9<HtU3@;({DHn75FSHES@I3zGMW(fMPnMh6c!XN&77oyuUa^JpP_8 zS0}L?{KmEqAF5?M0(G&;2zCj;ZLVb>a^{B?WC?TB_wTt-1ESAmUwmac}aSwRSKE9q1eP8Vr{^xq1=+9pav);~hIe?63JJ zq6o+3*qghD;$S<1+{0O#BWV63jGg3yrtO6#z{jY90`!pv1xr|vaN?Xi{M&E2{`IgZ z^x5rJCLe+3K<$?=RtfRV0}g&==zPZ~dw0fm%Ak9YjnvZcAtQ9-V6d=e<6M3_pO76ko3&p*k44=E4 z0kocbwBK=Y_MDBO%fRj!F1KHn4BCb+4?w_W5kmneJuFljpjKdVaZM^qhkkJ# za2=-=lMo#;S0|@dv3m=#|KSB9w+>fp7>DDzu-Drkay#_4BW$&cIeK5hp&tizmE$-j zkV8~sw?SiLT3VV(G%vle+JhcT`*<(&$=rbP&a_Xnj>BDj`iHfw27EhCP^~phcHtbG z>WMRcf})I|yYP%S+cN%Ao6ay|>8J*W*z^)#HD6?uTlR|uUS4{@Ms7e1U+PPAk~n*I zrsz^V;U3qF^&B9^M~D`5KOlA7`?N)F_1mF@Fqz3JK1FPgF?VWjL!R@>CJOjs6||bK9tPb?p-Fj~Q0g`|`4S=CMwXL087~%3+3${Nwb- zc4!$S^*Ga>s$-LL*-rY-+VW8;`=-sna(0~#mq*PK(Y3ajDO4UU8pu=lLYJGh!p6!f z2p{alxw^i(P{!7ci;ayHqERX?P91|CWG9=gPq6vh#ne zMapw{M8m3jI+bQQa%YoQ+AwViuOh*xb+Jx8UQCg%Pp5ZDqw%#UtH5y?*#PFi6@43f zy(ihzPamHM``Xl<=WLGHw^dV%Hp0HviKl3sk+GJIwN36^$=ETB)V}UX7Xk8UF22rt zI&aOl@U-hX+3_3Z0~9N=nVoRDIyD?E7fIeAlR|~cmu4??e8kGpd?ZZ9#KepY8#C*4 zlwB@&f_4yw7CI6x_-}MtqgT?dNe2e10F0FMB`usS=@3^NpZTY&khN$Gv(fO(rFn0n z9#v1+S19>BaqhJtI&W_fJTeGAXFw zYc)fA-cN%|OP<}_mKYDJ>->~sPat2#N?@&k%n@*FLW 
z9z7a|o55G`Uk@;}`Nlq3J{FFhf_ffe&A|;HSrHL!%##efct+v-d1Ct9p0X6bwa!h}Hh|f0Ep_b9 z%5vL5qgKYaxm=wwoNZzZm!0tXy1=Gx&$927FbxfjnoGo4QdOo(e_IpfBIzv7DAj*)wN?=MF?gO+1%I1f zlm__8g8`pGBp^6hZN`s?AN+Cg#8d6xr^g}a5`6MvPgqcxs?xTulG5-j`qHx0G2kQFV`%p2-LVE; zpt4I(l&hVy>igJ=D~B{wJxwDb@4DtEj|HM|QDGYa@YIWS&s%BA2@D8GWhuAFv|-t4 zUYiUnb1R>ra`nIebO7+Y~JfuHWS#s*-M{)U%y<${^tIuTuYQqb-#0d zR{h+L7SN4UCrbtGEhVTBQp|R8jApXV#>5nsg(&lwp|@YJcX+mf*8AaT;KEc3EE0(ZGbv|0n>kIBewG?n=iQfM2L(Qz8aXjB zu`!Z`g+)*>)Q*lo=+e>g zvJyzqaB*d489C@j5H?p@IIJSg z>hsZrT8)N{uk$yiW06Ve4a{8TRM!ygJBaIv1_9a(%)%EG^TNLyY!| zYh1-nxAo}{>670fvm5I9UQ}A&>{uYvXQx(XZi7@EdWZ7hdN-(vngP+7#06*()iMaO zl4CL(5SG`?2F&_-vPGjs`CmFtF7xhX6bq|RNYvTZP9`mP+?WFB2LqB8H9eIa4vEd@ zM-OIAYj?I~Ydh+@GdP5$hpVtdSlx=dwdD))6#d-mi`-T4HBJ>%x@1Oiz0~g6%Qet? z;Y`OKEje~3aU$<6t1qHqZ4b9cqo0a{aI$L?X(rkyaaeY~Gwpm?L$*inb?0@1^DROy zKP70sOc#Amv}3r~BhI7fWO3UT$O7sv>_^vUr6W)Ia4Rk3c;?o|C}V`Fk_yMWKMw=j zGLFQ^28|qG2&YT{Cn!P5A6O@0H|zYM>48Q4dPgNoTE`htn}aBnBf{mML9o<9Z_poF zAc68*&+dnnyie}cnQF^h<%@1>O`ls<9ZfxcWbcCXWv8cIJ6j&s+Tm@`jt=V;JYJ^F zbA@44^2dvgX0uhjsE8`HLR0gr@S>B&PpjbXxQ~kx%ollo9j!DW9nZT0!u?Ic@+3D96EUxgNybY92vMj3{lU_G&qlvkP5fd;=6dh>nqJ6l= zDsp_fe5qm=8-vUsWHKkO1TuA1bL6sjM$-6))zvd*K5?z4ab|4PB9r>k<)@)LvDN$o!UmchQ zXWI4x42MS`HTV&gs-Q7}dN-s8Hj_%{b|f>?Mut%}uRnNv)?MY0%3f-%(JV)t)V;&k zKZI($bxq&y_oXwXy}SBFuO$A__qM4e<_+yU{NkcV*G@*Fo7mm<@4(%lT6SSxa<3ZH zWZEA?Fy=Kb@>G*!+8wo>C;!xKB$EaH21ZX$4+-OcU7bKotmLG*YnpIDPCf6kt)!x9 zeDJwM40E}B_V(+HGK<+2S}e-?MtaXv@led5S6>txn!)HGt~2(S>UUy9$u?NV!}0c%^K%uhV?qEH{EP0_u6~=_HFYM zXdjQruhd@7X+fvk&=Ul6-NPR@lAY=uHwUkNDoMKr(oVxZCaKTuO6{obZ%eiCQn(|k zYGjmvn{4WfinB({DhG0I##vri9I;pZ9Bpg1yN3{0u0+1 zL+dIuo!#_sMmCcrsqQDdl@>edfexy8wuQZ4)aDAS-E;*^LEcQfw>m~BGg)(JSPs*& z6UCvH%*F)`rqd>8XKAzQ+1c3<-M?Q~@Q)|6oqhD&k?67Mq58U`+jj-JcWh7oBJ|i% zAuSO10eqKQ=n83Iu^wzwKd2s|KK6|LQzZJ5leqwW-onYYYNvG#OUo2z#!cZ2%>K;B zx|c{s&JMq!g3EWu1hR7Vy12O&Pfkp1-7n=kgMS=V!T%67xWj(sG<1OSgdSR6w)Wlq zEL5cRpI_3>hwJvvg{C^b4klc7BqTS~=XkYG965~!-GL;s$6O2lUH1K&EaM*(B)y(y 
zMU=?x@(noQok@lI_PqOi#x)|rV7WzZ^(hQ$GSs0N~)z>^Yk#!ba>c#pJ_b9k&fzs?uB)_s#??zp4&+Kv9=VsVGC^FU;rq7 z6gk8TnlTDM?<)@^`fvsURKV}26@B|%s;J(dNb&!g1tCaZDO(CCrBPKu2O%0)2JX0* zSv6ybzx{W9$RBaj{+$qmTsH4ZM!)9Nr8&&Ao+Xm$bum%jN{7a4jZ=5pGs&ZBuGG}g zB!K3IK8=j2$P322S*;pZudAD>LU&9wUBudwAs`kR^ZhifuMSBv(U~?b&wOX?;Q{c>EiOptUlTY>+bGlb)W^l zj*8yUc#GL!aLHF_Lg7=kX9V8qVbe=o1i`?3XCEN)JjKKqEmjCEU25?EEOrmbT*8q0 zsH>~94^vUH8H8F;lrZTYb4a8m_yL z6aJNf2E&dFSV^Xw?N! zOA~?^{hj|rA&UcmV?M5ew-eC6d@i&)_~!6qQ5U9GuI`O6X~V2NVSFpO+8G&-=VjF= zOK=xd*Df)8^k>*y(wfc?xfc?vYGGa>=o^qEAJN=PYn`=+czfby1E59Hpg-GeH} zi}*V!2t$O0+$`j-C&%H?`ysOds+83N-()v#{O1f)mWsB}PSySgVKnsOD(J&JyR(CB zPwf2Iwd=@hQSr#A=%(95|KbD08MlW5ubZVVunhWyX*usps}XM~Rwb~y^gcel}@g53h<-ZGs_)$bnQmp6UnnqhbKsz7x~ zz9SFji$qZb9Wg=&3rv5gl=#dc2~7bQ1bQE4zHE`(f?&a`m#&rrWQOqee?MCp1k()G z)d?==fi8>V3U1FVLUM+muC?utVWv_oBPcK*pPi%aulBH^){nRkB z{NL^5?3w~c4FoWKismbtGN3r&uSe2V3MmD3O8$`y~uwb2vX8fD8m1dLEyLA zUKH3LMUcN!9QJM&s$e8fs*7#@zQa*Sbh;fD(Wo2mH3saL0+TZvNb6Tw^aI5nlk3uH(m$1L!!gg z&aU|{bC*Y{=0+XkP&7S)j?Fv7`^yn3#emRdD;dsuASS^eYqlPOH4vrPe&uVfbev_O zeA1&sl&R&|&e?TsVk;SpOpXJSZ}b-|_EV`X@Z~6%sTJkcL}=iJCV$DjB58t7Cy+bY zJ!FYAnrbW*T=R8z@6=FNm)v@1>SH8(fcgi+SeZPJ!$P;oL1^)FZBNXpjkaQ}^;`5Z zsB-LD_Ve=#?MJxA)Xms|f8})X;NJ8D=k7wF)~qarZL2|+yZURK^U4TG=20`Wlp6%S z5kAWl=;j+48Cg@Hja?@)i28rMdtv~%)}bw>d*NFPb0MTg0#qii<8d#WGVTY}{0kVb z0LT=5cjj|h_NnCh+6NS_#X~~FP&iU4DR}ASr(e(Hh7F_*P}=CNQ$~NWqIOn0WzCTg zem(;sc>{9r_?F6VKn(?UrZFA2m{gyMrf0`WLft>1avf31FIMnq zOPxCP6)IvFWLiQpQJq5c?p3e=;p@(Br6~Vo&g#O9!PiNjP+S}E?4Z&Mw!4XNl=pONpI-aa%k&}VO0g`DmH<(pV$rD=CvCu;Xy0ZxT|ePB%nU%@8s3+wVnj` zJNm3aP=pyOq;X$sl>kW+VIbH3o#cL$wZ5aoF%{ZP8s1Rc=!&G`B4XXeia`pEg zKvt#63Uq|#Uxs#tqsZDBz%3H{(!8v#)4MA4t9*m5tu0QBoIzI8H3=jWhzwp^i1pAh zGHk<;%+IiijEHB}XcnOl_s(TNIavImawm8SGGmXR=4ThDE{M_XbKL{_+$atjq-W9h z4Io9i*SQThTuklZL7LLK`h0sA1DPFDNKSaeS3#*Z-!fF0!B@$zen1Wjd1?;Ddy;PT z#CTB6Gym=!rs}tqntgDbO0icP61@!dT65XuMydLw(zoD@wB$9@;(c(q9UE2R-GVZ<&3R!!Ah8!Ri%L$WVDt8|&mm%FX3PUN71-6#lL` zk|qCa5IF2{B*NuCR^;pL$lo8)bL#e7wCV!zZzr@mM|JYZ$v~HJ_z5m~X-f4(ur8P4 
zJzomVQr?iBt=I1$hDl@L(wkX@i$Ce*r9apC6jRpYBPNQqeFU{b-_o|Safp#jWrX4_ zR|f~4>vB-jy$4B!eHsHZlXDFVY4L6Hb)6k2Dc^RZjWS<%>9~~G=((n6XMJR4Wsw`q zc?%|te^7~p1 zfv7w)q&v_<&!G{M#O~;|G#$`og}5;PW$`ZynM5HHpwQ4&Y2F*U(~NjSC!qv*8 zcZ@Tl@NvVcs~~FqYb{fbcx_`qu&*+5L%XP47FJe7J6~Rwo_P>C-GbA#Tdw?H;Es4Z zXlOTVs$|twR3s#E`t&IL=GT=v{zDpmOuY=*{@y=J%-uq0K0l4)Id!2FC6X{Pv8VkbRQlH-hvsp_y8NRs;*Y>UX3H)$0d>dT z;uu3f;f!>T@CCB_oke_wGm!S>V(EB+nxtHKsa{I*f>ISa&Mpj&kAGUg9VjWJrvx9e zAyBFQjIfG>4;q(VQT;YrtL?iwZ+n4kjkAQ+@RV)>G!&TBKR%{H23qU>B?LD4Q($jd z{HzGOST?TS`X4j#WdKTOSy5gG+jdj-4UaJLv{UK73f;D25X}gjBn*Rs{S35C@Knfv zNc=u6u|K^5dP*3(Pr^C3x$fx$5~8@>|1}X~fcO03HfS@dyTHW!rJAC!gp(LyX35zF zC8wvW89+c1s!S{hBzkTGd)*pXb)C88I!K+=kvV(Km`F-dnPu!| z{rf}kvVYj%iuCX2L>U(kHD$Lr@E4+gDdBP?1!x{Ubm-7uw@Ifstm`Sr*^B|* zdK~y@tdut^c~IO*=!ElEcT(OAjCQHsokhs875HoXg>)k?oj40`3}yt|tm)pu>Z3oi z48#G@a`@#=BcEDGann$ON_Xg!zXD1ON%OqFBR$i>l;TU%+jP8AZO+B@KLA^Su5o$@vZ9|X%lehi-HQo$dC$#Ml|kSk2a-3~SMQ$*zj zb}T(Br|!t4$e)%9dBZfcKzgEht_yILI^St9hB@#;?JKubZW3y1YyU#55v4iT#m&sd zR>Z$I5BEmH2Nt6?J@PW-0R%3UrPTa=){rvlyY%te_8(1q7nosW-U@f{`l;gJ04ebBO^JA1bSRlIRD0T{Ymjq$WdgFJUa4Lgxo+{$Hl#o9)f>YCP&Mf;I!I z09<}In{UHA2wck?u-1RjZy{)pT%Y-%F>9c;zlOX=n+%v@{U z4nh*>ZbyyxbP@?PAhj1J?_bJiHX$|A25Q%ALLbH2f>qKARJQ+)bXD_w_Ix>N7ZAO1logoUH;wCN*Kp zhACzxG6C$Hu7%?G>}--_c}s^~Mi>|d(VKk@^4 z0eji32Yra-ch_}7ZuTkKhkPnOii2RYJuZID{sd_jUcd4+!t>d}b9VAO+4Y~VV{9@~ z@lYb&532A_alAiQ=HCl<>?u6XN*ng$(v9y7%l*R};`1a>&>AmrcR5lYc;(6)+wUM( zec1pw(w>Emk#Wl;o$^TR2C>*C z^saBfz>1T)rSC+hddE2`0WnRF$)1{@g#6-m--7s{1mTxI6>Oib+oZQF){FRQu9)LI z`mmuqd6_QvX*{{>`)+j@MMg~93{Y^RNu&(7AJOOkqy-Gq{w6Eq$h*(-XTz$5{WrpJ zx!Og|DlIBvqB9$7O--ZEaSmiDL+tu@;n2(0D;2)x9c82Gvt~FoDglPu`mav%^%&FA zsNtoi*FWOC_kTVN0CU$ntXeP5*J=8LW3sMNG44#hscE`Gak@*nP1;;f2fL1Yf2DA) zUr}%I_oq?P_80-o5zz7281dV(V5!%oPO0EsyH{o)MRLma{)~hIDeim2Ahbb6@a#r% zsYcLVRC4R)=S$okOy|qcr*!3pfVug7{}D-%6N-5eqTp-frW68WgF}&zuw5YXwkm}5 z|6JYcKNEQF+e6jVmjY-3FVH;`@5r0MgMP~EpLN;d**V?FyW8#1Z!**5lR_NPzw??q zq{&3)vmd;l|C?A*9~nZkVP+;Ty>XF_7b19CnnB)UhlgkbT&zqnzU2>Wdmb&L-#N)F 
zUhi7Xz^rY%7?fOFEXAB+x&D=D1~Aq81qZ~==H=sVRT=1aC=_kH5cQ30Dn*MDYCKqN zoYMk&q=;^MlY7I@5(8)yO-p)l<1nK|L?S-w=a~_=>@e|w+M+ACc(1(zDtSezj-ds4 zDdH+gUK@7^<43w~6q0{GD3}%}1^~Un3n~ucia}@wl1Y)5|7EM+W{o_fB7wX+n@m>GgBy>N zlF&o^rd55S^sa}fdYf)ax>RLVFwwMUrUj?B?aE3u>k^%AOO487rk&Q(KWu1|R|A+R z1O(dH7cX9nH05M@^n`U6WM9qLxcY@|A_XX>ms?#hXRQ5mw8;Kh_kXdK<0hsdwg6SK zH_5VoeZ3KLZi0WWCZ|uJdU@6KY8dHi>#U5_Za(AeqPys(%!!Lm=Ztiint7mt7%k9O zI@s6NU2B;&{U9m)iCe>&YGU?X+U{kqr{yV2>&l;ARG~Z)HFgvS&p^t-Ns4`a%*jvi zRjzj4vFYqf`J+Xesgd*N3sfh2s@tDPbl9r)7mH-J6MbtRDt|nar`bYOKW`aXdoRxp zUz9;G7w{O19v?1nDuUKolpy6E5E!3m|5O0?-{Jl*h{G~s%PnmTOOye)WcJ${_YrP` za3pT7R)*T_w|_6xwk|D-7C*iO8E*o5f1ko24})R1ua{h&TE-j{)pdJLlJHDabk3h$ zsmSzR-Ogse9jkcoe7g^RtU92gC?it$RaaBMm)wDXrPHq!LMwWr(`VxFa*m+5 z(~+e`srY9ZB_-|W1x@rclE+Ma%rEqqjuVN`CVO+@+rvaT4I;D2{pxE|V?cDzF6UE3kC-uX%K{+bo{xmD!A@6OuZ0~J6t&xS79;y1Lctd8&UZhnC_|M# zJ{BPn%-vvoQ$Ch$%Xx9~(QjGpDd-PMf*w}C`#MO|vl+Ctum{QPX6vgJuK2ku!K>ml z$}Dl#V`}|{P*rog@bvJ=$Yna?_G?F;uXHy$CrgbvTi6er(ROL|O`fdWa2oS9^?o@o zDJP(w|Kg(cWM!)B5z^;}UQv{1fj>my!y~GosnEW}7hq@bvxC3tI_R9MlJqT(Gssqc zk=sUY5ABmJ>2v(*oZ0>WeaLX~qSH;R6P%v?bDfu%m<0S6KVAqu-U<>}p-~Ygo!;xv zA>KP$cFCAdvwT55n*Ua|<}>E(jG}^y5^|xqYh^?A+>6vgoe}*(HTE0dVn&>)ZU?OS zLU~l2P_n7w&GQ(!-ZN~08qY;t`x?6~vzranDuRsg+4d;`;`4s^8Az6P8JJ<0Bs8dp zM5lgbh@;0x(7d;mwWv~vgi|@BZtR&~$O|Km?(Oxa^ol)_2nj@pKXTW$ z*pN!82Avin6lw2mw_)|0e!e{h?d+rzt{bKuM9jCe?R%@+G6A|`@%_kRkJ1A-*(ahcu zxe@u3TfmC}IQG^P5UIOD5i1f};S;u7BeGso%Vkg5@Z0Yl+#g}`O3&g5%f9;KFNnt3 z%{-1FzPhuEI?DPniBQ z(G#Dz!yXW2Nez|fEG}XiOiC7^WX4BJmZ+bVrs@)lk*=W2Z8jQ9t+cv)nPYeC?aT=& zQ;lkV$D>n;h`Oi{?b_7~qpsV2@HlQ-BvG?n--^Rf^m{1~%rS@`=pz)6hC*@06!fTE z7!&@%*K}4AJ^k1cguh}sbUHQDDhjgnXv;*>nw$tdhu`)Ux~N*;d}q4Dm9h2xsf;Jb zS&oL$2Ca3c>^k>GK;cHWf(Z(bLGl0;}4Kq zpoA8w*D1ga2LP6-#t?!H5PI_=meh6{L+dzq=1wI?Kv z(KzDyL~CXrT`Ew>Lo?93&XT6e&IOI6#HiS? 
z3$ampM@f57@|S?Z_YGDIh5}pB0S=`t1F@Q`msgL5mX;A4r7Icgh5IK^&p)Kj$#zL# zL1x*LL$?(pH&WLnMTKAcxFK| zsj)C5(gV8G&<)rThb&2t%cHGH6FwSke#y*NGq{A-8%Ls(cw1TeQ*l z3AMMIo^-p;q4OP36P9pyE5w7Ni(aeEL7dE!vd#xZE7 z@s)85Wz$#Y9<|pUN0{CH$`1X4ovR(2>mBDbq_qk>^CGnjITa#n6l&2XPUaPw^*CI$ zga4Gw@7DsP#`KNSE>B1I*|XX+{OFl0AAP9QqEXnPkU{aHiJx!G7VC*JqIJT zeWII16_%=FwK^@A&9Ysx>y`W2iEc(Dhq1h%cEVV6fW`X`_Q{LPOs-2E?9$IFElji7 z?2+P^(-X|i04eA1QpQA%LOtFL)a5k->G;kTZUdRgoAV_ixgi-w#g`B~_1&{3AGCg$ zENH0#gwDOXzR*!;Sn4>PkMZxRxHstTneIC7)jBAp&nL!U2wl(WCzd@cu_sgo=)U44bqf96f0-c1#jh?44smh1%%7q}mD2^|aT z3SnQDfz>RR`}nFBHot;vmMx|6FM4YqLW_#kLu_U)vvB^|7qX**WhSJQyUK|Gow!4T z01PnO0V&hpQlQ=j*vKl6`o$9f(+02es4(Ifx)sA1iHVmIyiJ#sb54H&8IVH*#f<)C zQ4dV6CPZqOJLr8;3ERtH9N`-k;)|GKI(@y8?q}Jx9(wo|R@(Y@DXV6kHc`EI_BTF= z4lz?xJp_)C$;1^i7rEEvZWTLO7JkVowH}MY;3_;S$mLPhR!g&|_5~f8dKn>Y2n&I=PNW}d&DfXm?yM;a z^{ONXtIle0Nn7;Jc}hR?7GO|t`OzVcLUD6*AH-rI6LI--m7C?y&8%lZP+HR(Pz2>@ zBKTpB-d_|FmgHDcq6{OB`m_>S* zm6Fv0^8CKNI{?xtTgB|1jaws==2PVJR{I0Gl#j`Mk`oVFRUdo@q~cBA>2K4cNp8-~ zu7ht@l0fJVsYe0glIjMQ^5Ej47m}#zP5YIHL$x+%72&3#`YQB+gDwbD1Zyfm(Hkmw zKM)hf&3_qjkSnW0BK4DDe}dG1V4_NP7AjS)gUkcBvx{iUN+y1fJrNp}^`!_BioZHA z=>MI2W}6cP+~K+%)ce032nJ8?H(u_Vhe@ulKP<&JD_S{-VrtPy=15EZhir$Mx9Axq{yE_CAetwZ^BorwxtDi%Jr{%sz5kp%SiV?ZIV9YAn-m4+}aWzaR_TAz~cM;xKyJh^ysD~(Q5d%*6%ulwz* z5^;RS34%~TLy~fT)m}m-HP}wi*b6BS?E4C(Cg@TI$VAbUGUxzKMfwDlWBq`1)=gLT zhh#DP)^GoRY<&kj)%*Xyh6bgsjEJa|O-W=+X0rFLWUs^_;~Xt1GNR1vJx|3sI95?q z#<7lrV7m?>u@u8ct`tKcDw_JzvjJ2uUwfV+1{p&82Wf z>X1`-n6;q(ZkplHoEK|4F@z0)jFeli0CCG}-jaJb2-+$K^2?cFq)0<74#3e?)fR7!mm`Pw*AfCQl z$_3Iz0UZeWM&N-FU--CgYR&WMq_aIn|F$uMDkMR$ek#EZ3w4e-l0y-fZxEekBLJUQ ze2v0)Y2TUWEKDLY4+|8G)lD88j%1HT+%!G`i{zL_gc#SE$Jmy~Ao@&>;~omI6pG!r z2e8y-R^5Ght?7zEq4uYonADe^$Hp>qYhQ}ZmwMd}o0@+~17bBqgz@?74o{B@Kp#a9 zu{AAVwR2$p;By9K3dI@?{U3L$vZzw-x1y>k9Rru_;!xMu7VYYULrm%>e+hM8xeSGw zDLM`DZVx(D%%vP|acLlyA&MXCk z2PQ4KJ3d#U*XSkCNBg{a^$-L&Nc+MkL0g4uFBX?({tkHLc!V#%|KAh%hZcSW^>_pp zf9L%mII)94GYAo&)HSKbyz#7Vs{#ij&<$t@0!(>vO$_K5D-0ClHF9-f=Wvynq-Y`u 
zA$&am*{kn3euW-az_K~IPNh&lA1S&55EhtzUI6`7Z5^H2$9j++1@osR58Y^$3Fx=x zmoQKAentAqdk-7eJ9!D36WS=;AW09{OegHlPDf10@}8#H@Utp;sz9)kP8^?qNhU}? zt}-!(-Md#OgQhlBAa((No;0xDf4;V3FZv&eHHY5NgFawKtyYK2>rlLT&IUNT3+kqy zVm%#gTj2%-^<~9Qu0ac9JmHlK#Vl8rWAw=0qag}FPgS^53KJbi``8_o0>ci z9w8mc5D^x9_TQENApRfdApK5$Spk@cz#u#yC<5?f0^7f%6yl5jvQlb14AnqmP#=BM zUd0F-pXW0mr@aB10^j;s7Tx+4lhE?zOG~8NDAi{tq{$@Urkwv()e8<``rrLatkL@%4+TB6 z2V(~#R)DFi35OhR?Fd*2?DS9R;G)ZN&zCkMoq|^RVCT9O-;5sIlK(k`vv^<{77jQV zQ1KH98e8g)9DiHwoKMk61gX^bj%tPh>+Lhy`NmT~AH(dMqG0Qb8a z>mxv7AO;92KhBQI z-mpGZJ8NRB{KOq#3w7(qN`Dm=vYW2nUBSXn!1QOk*_>24X;dvtK#UxDxQ}6Jsy2l|lwtbr;B9WJq5beXLw4k< zMwl#1ENwMl8rZPBJv9jsS1jgm7)|Zrd$661fLr^omJ_8DH?-)?&g-$h=Beg>>s#_}Q}QjeLM=aIA2`wQT0)K7 zP?=-Jc%z7MnG`Zly%f>+BSjOm_CUL?R}AoquJl6Z9g4|OnbLgMOF%zWITlaiI9^2Y zXv#Cr=E9jU8C@}p6p|EL0~yQAr*yM-BSN!wM^CL~`fujVZamPA)JGYzPOGkTU3_t> z({nnXtp7b8dn97L4Wus*D#~W@o``6?e$NhdWR&~FF_LYmzbSugWiyiA^^i(9;^@g= zhNRFsgc>vbJ7kDqQ*htM4>N(%cXt_!b98KcbFq)bu>7_4t%EC9GkG8VZk|)_RVZGJ zREj%_}q5{dx(W-Ms8zE)L4od>e)UfCT!$25^7k0Bu}1f1(B)fv9|H*Lm4q zjIOi^9G#0To%l`UJ|k%xF_;112&X3}7~L3ptGhrQQCemrMxRN&0i2j~ETDEW#A)*_ z{KJDqndx3AZF6Fe{LK+(HtYjzYCgbKEG1KEJxYspP?*4w^AO$S5=UP3`@)BJr^was zOIx3N)PjzcQ+Bn3?JO{=Q3vh*Sp{zNrQB+vd zJuC8C(?T6a*>YGAw(A2Thl&I2vR?_s#%ttpojx3#$=Ni-2u?IFw`9?IlJR8Ds{vi$ z#p92Ekj_sXK)o!10Wq;pbycNiBsWf^Fqve7$#UM6d&kY9QraVvMb}o>G&=*luWbMq zYa0<|TkK?pL`{E3n(5!WUPI;*G6;JPGC5R)2MZKgs>+fVbR24<6(jne5_~I-YH7{r zgG|vqtsAs@S02_dd$nK>um!xw&nU@uWuH@7IQ)Bau|;k6LeogKsBr74{sR1nle%`L z0B#;BVz7=>CrjICK`7cNu;pZ`yantR0qgh=GvfZtyV7CKbThZJ&oXSAcq0WX+X@wf zp8APc56f=6B#&gYTw_Y z69F*e4|!gaL(BwMX|NRz^uCbYgE-%J-R}sKJCssglUP)}28e#C=5r7cyH{Dxr(t4$*_of9 z)1Pt~9(GCYXYFoy_pJ||pzOf8bhqp9>v1v-u8VA?xNz%bE7r7|h?t!EBvBkAy*pE% z8WL_QY?;|MG=h5N+q^OJO>X|O=$-zq_vIQfo|?A?#%ecz1u7=nTvA&?&OUZqxJXmJ z?wn_wkVTkVU(d}gP*2p*-$s3C9GQjhSV!jg58lE{ljeh1n7VTq`&-QjB*vaiwcBk^ z4!=?Z0);2G=RPphBt**rkYoc*TL%7~o}a{YYXRzs?x%>06;AiRUTYZ?6-pWc0tog; 
z**HuG@j^=>oJ&9%oU8#ek;&HJR6k&_+?do2x8*Ecc_Fwg@O6!mz<=me5RbA%kFxCI&XsPPrwX))b(ubKg6$35JFFD41 zg5LCNl-vArw)K3Sf5F}L1{c%29EN<&W4CiCF9w%P6R??gqxWeXhuROBPl4oZOV9(w zq+_2tm@HO}trr8BIAbScR}|857Y6c)EW>n9hE#aEAkTmpgBoSY|#|R!=OwjeEf&JQ@Iw ztygwQ+AQRImKGVns-|!38Zs>YRs|fW&Z-*Cs+)2fmmfRZ>2FzU0;|HcL+MOt>Nxa_EKmDqfFE$l|x`7#qNvECJk@GroS))WKc_ z`1XQAeNL98UIl>7FDA?73>tUG`w!d}-^+tf_?8v6aL|`C{32MVh(#j$$r7b|O;GSr zMC2^T@Svh(cv>OUXj=Alj0q0S2#6N>$<9g-ZQwi%)u<@AB|k4z(Vx$1MZL$Sd*=9o1eTLsN#+ zNmhHipS+#3haiBR{ z77pkX-O$mo;obRCUr_xiTG->eJUNW3=WFttxwSrEImIpaUeQt$DcuZ>H6Wej4hjPp z@o51o6`z>a2!B#fI#uffzRsu8Eu4dCaN8?lYkrVSuWlD6!3PI_@kX#NgM0{iNtRa6 zE~rm*)^KscgM$sFUOjiuop18qbgTl_fM}V1$)^p#!u)*IfGV?(F%;PBZijNW15sIp zHm4REEgELb{d!GvKa~i^?v}+l7Gq2D%r{?)l^d6|RwLe6!Z?$=zT3umirz0L!7y3H zbkuwTYR>6A)uBOr&`3l^_g3tk*%IOlgn&@>RDPeF^fK#w5G<-F+83}V6c{MoXMl!!|eoB*nKWnns<dAM>L(>LF&7!nHayxaYO7+CJSG3*>)@0Jx(>+iH+5ugxBW91GZ1i@t~by8>n)=Tw6qY7w&hR zhl*SkEi3C-5z}CAZ$c+~rtYqndh2fKbmi{qPFVo%X(q_-p`GuX_)Lnmw>OprR;fY^B(Kck}tX@-0@dC&XMH5F)8k@D zFe?Y%pf8fnm0vip(uSvPt1(v88lY_sb%IsU3rdaL1c4BLZJhw@pD>vt_8lhoEk=LZ zCcqen24Jb+ZAxSacKPFqJ`wbGvGyZaBF5f6YOrFshKdO)2BkNzR}-yKs&krM9^Rz* zipZPNep*a7egCFJB35wMO9MxT!d*-vk2cs2D*-*TTI z%*Sy2eNGZ09s{-bJkCs+%6xF>&+m_me|R4iXY?|Ad$stAwMC}IG^ja#S5?*~Sw{_d zcF4y#Pl_;?!X)ymaL4)enR5!$I{K@WP+=W46?+uWEpn*vi|*%?B>nL;P5gqlQwEG& z!Z|QNfa$76?KPJB8U&?>2=YSyKJwLJ)L#b|?AE^imKHEEi<#SVkTqr0=U#eNov7Z{ z4I%PS`O2eO5CIIK{1#66-{NAcxBD-4dx5S7lq9JIfz3lmC#QPpKc{C(b;1uoz~xr@ z*}%&^7m^;WRJP)-w4`L=`qypLl8JCHBsN@DU{HYKM6j+zFo&6+RA^XJR!C_E4QPvO zKYm4}NZ88YddUeANXlm4Xe@~<>@M=5w6&9O0ovQEs+4Y`=7>}cmd1BIw+DcXpL+PX z@d<^FynYTRh(EC3WcSfpG{3tV?gbmDv1%YVYdn0-#fNj!A|Ccho%km9-W|9A^x?Oi z8i*Sve$Od0-5O^@&ln`sU_u|(B8silo`Q|o5Ab1GRc|=f-!1ICdXZSps_l5HUk2hg zL0B^W(>< ziDDHZp$tBGRk^lQmD<06kj#p+wMQ zj5-H7VL07s;W_2wfPdqqxlJiFymd;Ob?}MJT(u3UdU4)1M-?gVJUqBv=MXra=#>Dz zy83L+84qn>0qit?txWy}(UE82ny247=l1jT8OLA#O(uYoLiD$b4K>U^? zp9rJFkg+Jw;r)AaaV#cjLz10GJ$TxIET|aaPsHx9i%sy8lZ%*! 
zhDRN%7Z*S@`4NpGo3CNv%k3Li|BZdW%e^svCSEyl{WabxqKW;TeM6AX9j}{xB1EQA z!Pf?UlhLB2HjZ~SzPWj1TTnawd|PMu3iUkRlg4j1Jr`^exhtut%23~#-hRSwFJ(|j zgH)@YH+Rotl5%n~n@fdt`<|^17Vv&ThNf9CXxh5hbJ-b|D{9a}a(sQD&FfbgY!^;o z>>N3YTHUvHybRe(0w_4|>wr-?etmxV2s^U%xlCuu9EntJf6TVV-Q*6!R^4=}z0T_z zJHB+lU2U{5qZAW$j#;e@j}5Feh*e$X<#&cJ0zu7(HCb2BU+nVarYhDE)G0L%BGQR8 z@EvJHaKCviHlxyr)Q7e%Hgw?jPVv$PZ8c#crjYt2Vmvc=f~TvuxZ{$P{UhkURqWgy zb$#7Win33C*GFJ}h7EB<$=^*}eQ=*h%l%w1``v51<{WD4XMw7H-*Lh{Qrm+PAkKYY zxc-QxX+)0ZIJp0e2l}j(v3+b0E29lWf$b%}nRbFS4Kam4@f8&7sSR`u=G+%J-hVx< zY5QacGInaD0m&tBQyK@RjlehdWOk$fHo(9Cn;`K^z{dM2xSQ&M(6|Vq!l7y1XjqXZXfs9T&069qy&{R#4H=v^FU(T+93Bf zNtgGee*V#UR_VrsdvUm{_@k}(zc^@S3DQK&W2~PlyVf*8>WV4r&GtT6(r=h_WDN*k zwNLv#kD=E4AGm9F9(7BwI=1*1>@~ZDzgv9)V10 z-r)3da2c2~0hvpC)%BX6CB@ZpwY`7hhntq)%$EGf*4Z11+Zk>d$wHF!!cxVq=Jmc7;HW~?~%MbvHkyF2BfHN(b3zc##oK$sa_k&DXnt9 z+)i1f#SIa@_vXWf58CVIah%2$bkAK1VUFqd^DwY>SjJ%J)VF^ySb&bsL|b8N@}u2xHc=ryPUWRR&wugtWT3BJQHw2YP2)YIQWT&!WpFS% z%wx^OT=-e0kw@Gk%(Ulds88`EP9Eu8`rdKSZDZ6G-Xp;Mqx>h$%yIx(9ygR0NdABp zZ5;Qgy;O!iIk5D`OVaa4F3qjy^}(tUkwc}lDf;shc24HN)(DXgo2T^yj;vU`SEk8> zE)h9Lk1U|kYNwuUZ1SM?t)qMht>*fd4kkeQxJ=-b^JI1AnwgFD@7`_R3o-ADTOV&b z$$P2;;0v>i|ZJ@QC-r0Fyopv^#QSNFCg*t;6o3@pju+J$}Xh~w?HXoyv+J_~?K zfS-`S!gM`9AIZs9O0`P&_qIJ4K|It{ACrD9Ut;TxTfW03va|`Qve2iLYp-RAyfO-Y zN$=*MZCjPeoHk3&v3ZTsB9GN#zd^i*VpPq$W`xn3`F9_Wyx_XI3H1rhT5!V%Xgmh} zu>ZJULQ?_I{<|99+6VK0`y&!Kvf%Vb1|TKP!YOw2)=g8BH%1RW);MTdT+KYsd7N@J z`fu8!0V#T<%YP4(tWG%sE$`I?M8Jo32%*YF-d*nypoPPjg!m*ZPZUJLXU{4nepIW&JeKfMf;#wilqRbCs8xADSxI+yN zNsLL3IlnMy{qc^~mY(6@!U2&4S1quKl|O&}d~s)?q7yMC-PN$NXa5IqPH;Jr$fX55 z6eA>BYfPCGpZ(pGqC*L>eCN8430&Tz4qxA}K{Vn)4a(HpbI*x|sl=ifG4W}Ts$eQ_ z5_x4+|5>DFV0l7Gat+7j@usj^;J^74Jo%FjXnDUT5s7z!g1;)<=RtoE8zc(-^Kg-! 
zJY|*s6@e?W@V#^A{DH?q8oBIrtuBD8gOekZF*|?+zIpwd>138VScLhfHx51vU!af= zJM%S^J26xe;H6jYf7B4VWi}3Zr2SFxsE2?f-~AB@D6a0$1|1bht1nrEdVqgFJ_`W3 z%^5)`yBDz7nEJNP&$mXhKH3ED{?AkH&+ORUCtL(zOhQ*{0ZSsyY#bVmzaN0}3_jHT zG3c!V|8yt*{76>d3WV0YhJNH-Vg~A{Kacz2JN&=TyTDoUvo_RZ{bO7HU)2jx)b7|6 zABUduWXFvcGWn-asXI0Ue_rVBPT;)YRq$~Il|Z}cFxS<8xC`JJpHJ?*|MQ*q|62)V zBCn$25c2$AI-i6}W=QdM7@)#T8-Tx4REIZTlkuBS2H&dJ7ov25{IcU?_^{T3wMUTM zBTYFht{FKSiJvM*H*0^ayx1M;BT$wGn%2p`N#BX2rrIV3wL`dZ9ONx$P#VwZ#MKp$ zpO>KIp*fV?V1q6vYTS_#CEfBIe48?-p3g*&xlclCb+Fn&j=)q!S0z2_=T zfIg@5N)eJxzR|P+dd*GXY?p04N!6Mh2;hAcfC6|Mw7N5tV8oZ; z>blFbNCH*$-Zjo>V1a7!D}Zv+bamSV8^aD8%$|DofQbA>c&GMSKZsJbk4%@~ouYsE! zUD}w<4vsZWy+|H@jQ1eq!9#ZXXUG-TJ9qB%pgjdpNRKVxgdA+s`&O5w+fxlfD28v$ z0l0M*0Xmb~%~C{b-dMA-kb(e%S)(yDadJnT3huTYCG-7#(A_p;oVqICvJndnq?J<8 zuM%+oXfk({HOZUznmo{CH7P1w%i?-c6PIdROcWU|Su3>Jf0WaAbx#I59%GSUO8ASr zbr5~d$H>&m5_h;|NCUJBPov1a)RQWqvL(6G`92oMW%*Qb0jn6`#?rpRo~S>FrK z`xyHm@(wB!eUiWP!Gi}k?C8FgHhrnWu)ktS*XCOcGf$%`92(>fXo~$_0|J(=Qc{#I z1knf{Ejd30=~V0@&*wY3$@z@R3hUbHul3481W&<3B(pkmQeRLc)+F*=bwQ{NjL$&7 z1#yn@H>EufppNbbQp8556DERI9FcyER+Cc&g(a>YXp;w}R!Q);8iur(FXiIi2I%VR-)IS^}0NvDK2bNS3o=H6?GH)7PT7!Yj_M zm(G?o5n0hrI@V)?bYK80%&>kIm#dMf=&LAAXDg?w^ljt^e3)x)E#{S`XILD-tvPLX zMjX^AV96rRJ{I5|QplY~jxIK?ux0A(lFdX!c?d(}qF%n^3s3@eN!$g1fS_wjLk7%X zNr~I{K;-we+OI%qBN)SHqxUyFWq+V?4X;I4zn2w z(PV^Q%hNcHt_+P6(6u-}NIkW%c1kP&OzDbuA#bzJW+VUUK!%a8(X zUYQh?BpG-JT5jA`A<1pd4r$AN)ds4u9}!7c$&DHX-G2@M(q`~TUFb}}x(Q_bRV5%R z;a_~MX*({^q#e+BCMOxj&Z<;)!WA`Jl)F~Qb=!bAmUfn09;sM8yuYQ2+JSk@8(R*% zG84;=Rg|3Yu4(&ljsm`;E3hGMzO-rJ3!^5|0r&ll$d+b1gu1 zD^`aT{WVQ|^5|J!Dei>|vx;N^DJl%`rcR8B>GEow8Z4Q1>982roo;R6(zIOL@n2B) zI*$TIp(3&W+kxDKJ~83A_VJj}-hnE;222q(dnsn9@TkXM zn8ot2LXqq46&ZyOpH2A|s74;az3pE_cz}b`w%t<`iPVy5zJ>eI-j5sFh{MBfH}yJ9 zJ3cO|iv_8Pu6yk=ue2iN5`cR@FpUs^x*qDHC2esL0L@1!{=RLY#&;SQc@*}c^R)5c zb27M+07j94yk1f>va+heE37XY$=l-FiQ51Z6w6QmbkN#NV$L)lUhnS!fl+9EpanWAS#i=$DI^AS|U^h4T zu+?N0QwOhDs~bhzl7kA5&OZT$!I*xM}@= z<0Y=u^MiCzEa&|(z&3TUrPlq<6?vTsvog*9ebdrG10{gqHo^@xlP*+Vt%iLJS?QK{ 
zZYjop0^zK_L!#!+`8TSVEt87EjCdCd@WcMS-x+-+m~Wjxp?u2;=?n*&?azy)zWwV$ zK1p1xiLN6{6o>^5R(tH}mg4tLW9F*}ym2He&Sx29CmmU60We*4k&4lY`!!5cAlsXF z18(pP0rbYqyL5aqh{U%+`R{*J0}rK*CQs*Fsq!I@OV@KpfM?!>OE)Hhm-#a6BgWfwW2x5(2esj?zV2-mx77m(go?0&!!Ppm%9AHn*L_`2FKh5Ye|>yXAKH+TP`crQzi9vr*Y z?0bFbCi9JSsa$AY7$bJp?|)o;puiMPs`q25o|d16|^WDh%I^a+8F-PMI=uCsD6VB8y4xm(Wc%Q6`-1 z&olCrOGyRPi(87!+Lw|#jliKar+f}VVwHW(hOEpa@03x%oeFH^^I0m(f*P3hP!5pD z_s1H?!&ug-=yn*;6JJVdKfkqqU+DErT}X&-6EENIF=(#T6L^~0Scn#}fhPFs)5Bqs zwO-JEK#qpo3`Ao8r4-aI_C6G)OPNZ*jL%=CN)V%5{>++5QA%uUffmaP-v4h<(s#ei zd*gJ^Hh`6hr~oLV7G5@1rjh4P984Sy8wSi=OKDHFnrz61=Z@D@r-Z0(Svt%25xhUa zEwSi9?KC?OeD1a5Hf7fLu?=H>l+jJKDTBc)Ul9urhtdX1W%*<{fPv6>|B+x@L6q&c z0nEK~KJn?5k$I9C`L=&RwdVR&A-tVhkze{K*#bm{O{+rJH&h&2}BxpJ1 ztJn0p*=X&w^)7YmA%-+K64pG5JCrEp)U0f6e4_m@lkr}Rak^GYCg36}#{8h~GO_#v zO=McO%>BELCw@%c!)yhtAZDWJuhxl?<=D^(p$BrGw6NiT(_*a}^j< zgVSoR28bRnn=YO%dE;)LYRy6AWLOjH72COFVsA}WFH|%2E^JMn0gfSMs%oWY6v3+3 zkh8-?*9A6tfc&rg`~98;ASU|bw{-i;A`j=P*}V(C*8-SXIqsWV^~>H=2U5G6y`ktY z594DEcTF0~#(8M)MzQg;C}KRfzCSFGL;HU)R;dy!#l2Z5>ShZNyORh1i){6mC`ZZB zTyXe$pbcv=|2)_lrI+1MGL&SbI^V0va*t^3Gg>1mqg&<-JOxI-H49L2a9BAG+gYc+ z1knV)C1WR;I3nqeIg`hJF+eZle%_9@(HKgE`3i`$P*4citMPy5wY&81!;Q2=L>y(ZP>IdQgm~HqHbPpa*?9#nre}H1WrAfYY6ihp@MPmkuQIf#XOn+N zziNtz44~4mn+tbB<=Nb|vYcFvNERWYKdgk0hsQtj`pvI8Cqsmm6nHx=pZIVdun0DR z3E|$FzcF{XMt2r9=`fvbc67dJ#`?VMQR&Gf;A?Q^-9Rbu7d|z2FI|&RMBa73ah~DU z;A)H&V1PBFMtsL^=?#S)k2t9!C43nGfM;Z&QWY-rzi+8xVszRZI3)XTds10aa*Z%!ADdx5?&M`FjQG7f@JOsZVc(tuiw#tNY76jhyHrWAUbE&GfC*ZxTf}=X93$wI zj~9B^IFC1hwyfB=K1ET5t5yOU&XiCHOr6aa2!m8w#5ls~&EzR?S)*MiVt-;t zOxODy1;B#%wb(+9K%g)5vkDd!Hl_q=1H5-8l_RxGj(jmX3VQ@h z4&&Q+B%Ui}xEUVyTSpiecY(Bwd)d>$(pIzg=oexp_T!F;K^81m(f@((E7qi$4lLErMU8ImO3-CIwt~ z=+V06b$})6q-yxRo>dAih>Q#IuSSb<3sv>*L}%ya=be*WWU z2cDt|#9ee6l2eA0P8(44b)%!T`WQLuKTk8g(pkN z#YIJbAV5K2@Asjys1zw@LErJu1BqBgD1gaDszV|oV_OKJM7=pb50t0l|MkleL;y2I zJzSb>T(dmL6&srb7#tX8k==j0kwF+|Nz~sS?a;Tl@8{5hXu2D*V}PpI_xbZv;Gbf4 zr|HVL9w*oMTW6i0ijSMA#m)F>&@hK=zO{A$$CqCLs5}NZ&Sc<$k)Z$7oPzpSrj=?U 
z=$`?*7j6d#%`O*RTzUU@Tfu_z;`s{r3QYI=E=55qA5ixQ%0j+$H=c!6P5uIx>d$>r z_!cx_p(cCQzhy>cEw0_^U-7fp>|l^`BPFhdjKBR_4e}LWsD%X|b+&@dK4Tf*&)m_9b zm%*i}@#MCp!RZ|4gA&?L4Dz)N53%hQVn1-3z4g|QmX;&gUzaFNYfV-Z$ELD1Re|{u z$0p3;SQu7i?NvFEG&!=se!6=t<<`(m(-j>C4bRA^mSY>-;ENi&K1QamV-1iNil4UR#smR$mLk`p6_n0<2wKuW#B z7TVQabciv+r-lgUkTB?+R!(tQjMUwAp9~co)+`X$0t?<03784m`j>k&TCX~|Qlf^* z6KZYX#gKOygmHkH;+28LPB%dGx@Q0#m=D!_!S2Za{Fc{m0TNffSfNMAA*MGDkeMUF zaeHkMqO4?dp9XPJgP@Q>wrq#auF%}rXX3Z5>KQHx=RHQ1Q`G?Pew!Nyy&-f@-gpIV z5(Ha~0vnlg0{Lt&vM&4ITJ`5zjv8LI8c<7;iCAsH#@G|`thJ?7_qP<^c2k<+(uE^n6>Zq5_u2ki?$2ZvTe_{MuZBg2lL&yU&{?DURJ0URX?DDa-D#id2TN_$ zFST38>YBD`qzojS?B$i4t9wI4a#}{pJ;PrDX>fTB+<6}DUr=8W)4&YIn@vD9d_ryw zSi#OIR$4tYO8Q(MF@0k4c&wI6#NF+KytA*{V?TgCp*CD|a--(w*xfj-nW;TTlzSWC z{4aaF2p#Fk_}NtMn~9`X>bC9608pMZe!2-VQDz^13RtKe53;LvbYc6kPTCag$e4y) zBy{j@yh-**y#-{bZ>3Iqp*T}&&W&t7vmTXzki^ZjpMdSSGb&wQg(5C8Gg)&MTji3+ z>Q`20RoCiGI;XqGfYE4!QMPi(av%ZR&M+GAFHnO67tsN??FWjUU>9DTtCyNT1F!sA z=McX1smZx72u#dFREu~9_$KE@8GZRWc*i8KK~#4^2i|S$0x+apTLo*UFp&g4`6IXs zB33CoHzZ^F_~>@Boi9)Kyxwg_hDr)Ptf@VgBorVeBbPQXKuuEUrEK?|CAe(r(Yiwn z0-B!0HD%e6{DDV75LK!4^;#%$pbV|mRO=VmLS8HEn$nwXM8gRvx6xdHxTggzUsrNK zg>SE?1+D)IvZLF}8W1Kbw@=nQjp+1DFY>`#GP+Vb2=l|JFQca;B1$~Kzd3d;;`IrJ z9hKlO9tbI%tieE$PdRNY^?3nI|M{52= z1%0YNJ^d)SbnTmBBrYTn9HG2XS40vu?jK#&~!F2{;G4vwe0$RK81-c zo7QF@WGv2KOu^eN1ytX*50IX*-dw<=U)ycI4&Vw&I#zcyOugWUbHR!cm}rW;@a z1Hyb)CB-k!_fu_7H&RhZxX*8E5?lZ*^uLzL(2P)^uSt5T#>;h6zJ~k=_e_#9aEku| zpsUaxl>H*#@!%G>xIvj7hk!!JclB+D$>e@GzC6pW+J2}+E^wQ297{W%jPfZiBul!r zD3ZTlBxlWx6_N%f6bBYm2H0+^z8L7i;-{+`?5yWV^H+&dR)6lCwR99M@^Eos#)7%vsuOo$S4sbS+Y= zjAIBVSetK8z}&hm_JYk~yz_NQhagUtbggLM^Du5Y6JE^wtCtG9gA2kcpo1qX{4jJ-ZBS^{k!p^^CYo~mv8|;isDIBOR zSJbt8_;k7^<%N^cu=I3RVONYqzN*wmyLedK&Hj+A=9o(5sEb9QV4fSDwF1_4!}vjG zCr7)r3tu)|?MX-7Ra&A31I{9mi-rAueYLA_yY_Fu*UEZxE8VNg1D*G{hZF(>n~JQr zpGHCX*;DeocXx&+ zt*p(I(wzJOnhMm*Qw@5Lo*n$ene>>Bey8Ye?L_h?zV+OHphIu&4V(Abl2_MWt|V>; zG*%z5#u93#ZLw3P-6+HnYa`|%e+x$v`C5*ACA<-KnF4M+&vO=Q^{;RrW*@tzNV>ON 
zdkj{FZPu&&r@JFgeF@I-*j<21NnaCxImF;ROP9vCq(UD~&Wa`TCCFNOzg++3W)XX6 zX`6nM`5+(ODftbuJpG)6MH$(}X~b1&@A5eltLuEWrcIS>zYKXXgy_55uyw!mqjzU? z53(343CMI1e-2k`(uIg`vx?Zgd%Un&D&6l&7a15+-0^jvV5>p&&dS=r*1ar z(K2U3f36qT3vd_i_2|6xxx;4P#t0u6(x2)93wNm-Rs&P6a%|aj>)iz-M%#ohLlN>& zJu@7-9DE%7o}3b0aB;|SDD=!OEPIjO@iP9h(l!&4O?$Ywr}QV4?%`Lxj-x?rCvdab zB<1e<+XR_?1J51GMw1oHciSa3H>)<=x44ui0O2tK5?|dRC~7)R<_}C%^Dq=wL%`m4 z-N6N|&%e(gA&>`&2du*u>b>+g5_XS{&J#-~Bje<%SzmBPVQVnx;IeGn?Uc7ap)_Z*}2v&4eI1wD6rz6W=nHM z3`!H;ihzBq#R?Gy*}qmidVG~uh8S`_k6e+KX;}b9>j7Dv3Puge-Cz$qpN{-Fe9YUh z(;llZqk0c?!0}>5zT7|gqC-n*QVNXb=zK(N-3eHN{fov|y*ZNH5kh^(4$LuF`zcuTtuvM3Q|2lK@w1WZc_r+8F zDWPZud-%3`hulU_SPjlfa%q zBm}k-Js-fBirBvVaCS<#lX%o)a954XA^q-)nhHpks)PG{Q_A98vQUMI?q%bK#{|2S zv3v$DomBg-S(pXZ9=~`0B8F6DP+F%blskgq^;+tSM+626)XwUOJ!az%aHyA^#%HyG z5~bg-9mD^_$1gj9?4BqSydRpT>$(A%TF1M>c28;H>)(EF}`-42C__BnVGk-3+x7Nb~)?V&&nX;O^ zD^-?X?v`IoOmj*zI=eUX{>|jEOxL)xzfqt#H@Pg6+tK{ScenG%ImumL_HuwJdqMcc zG#?9G+u?)UgC42URU2xWXZ|#;3-!hNEMI#3h>la;o7YBYy+Uj*Ddobr9-HZ}zpWZNG0y<*a})hjA5CKe5?LzA(FJfizvAll_pdzh z{+$Fo5Y8pP?OF%>!mHWtzP)PI234fXaJ;wx>Ah#%3Sczybf@_1_oYM^pRcreq6^BI zev>fJ8_R$wUDZX5*oLCeEqAP(4i=%@zqADorVrxqeBPM0LKAF7Q}>pTdiq-5UfmUF z4ZFefa?&RX>QcO4{!1wHEjvqWkZy(ab*N$wNKVTV(zK0QPAbRKI^` zM2GCDew$Kka8{=+w~eC8bXIa9!LR-&-PUI#^}?ubLj7X_ePh|#0mmYhmU~|jTqZZ= zJBBz#dfvWtnG;rFbiHCnNFnDjh(ekd+MpAztfz|_*WsVLu6FRPt3;=2fO3eh{am%& z7s}U7bM(kp&KD&A$%Vz(m96-_HkPWAAmDE^A|K83gqpFIZ)0B#VuvvG-=oEcRMq^y zYp#F-@KSXiV`SJt1>td{A>U06-Bz$)5zWu?qWu_dT0CRp^D-r3vo?YN&P4hD%tJCX z57{44-*QT=p;P>KbGTx3xt#*bcC#4zygW-;Pwu^OP~n{H z<<75(E@{-ge+>k`Jo-cE!-S3g(?Cmx7lzHwb;1xM6yhlFlTahMX3#E8AGC-)wlC|P z?xydEfSi_=mvm4f9`9D+vXmViscP$TF_cDtsoAwWnO z%urWmIixiaDzK|fXxW+LxM}~%`S>jQz1uf3{7?0-`(U@-rjBK=YwoNx`8!71&@}Uw ze^_ua`Hrp4cg0h0tM1z?T^~gt62$@m2ZTO4UCBpG*Vb@?Y2o+!3m;BAr);_V_PldL zoF2USAlc>6IhB;I26j~gwX4o+pih`pTM*u^)>)U*`>n1l8OGeTD(~TbVNG)()^rKg z^n)H$GeJc0OqvV%u30=RP3x>bmn350(n;~M((I;e&*`WshHkyzbL6@*avAg3l(czQ ze9{STzf#OxeNoy*GR6L`-t3A^JDlr^9$mVp@1kVz+Jh`4zfXIHPK~lYesQuTqUJeW 
zR(1JBe4r`=A3?|Gt9ufp0SBi?Y;?faNvsIn1hn|w&haX8wPZ+-Gcy$>pt`tW?%M=l zf>|`YrUlFOQw`PSwaI4`S1#ExY4JA5k^rf?d{noW;7072?55dk(kZ83(DggP*dAjT{mUuF2O=YB(-njUsncSwL zKp@Y;ji^3qi(&8lHf`Y9W1f>%Df3wsNJ8%-)6AbA-Q{dXLf%+jRuDnj{vxzC49X?>^bCgp0jA+PnqNj|VkOi4kfUJIKAtBEI!-SC zBA=fJGt=9twag{E^K?s~*aOsK7sAaB)za72$u83RfqptJBg>S>)VtefdsBZ;87HiQ zBO5VXbvn~untn*qCCll0a>zPh!5UpULK4HA9jkM2srX9ydfC8~x!QY@ldA_WldT_- zv`;S|!EoQNx6(;m?)+ho^jLTiYUt~t&`#r?K^(PLf#3WLtn{n<7+af8$Sv-t{)?}l z;RWV4Y_lT1gOllqeW^ye8vEJnisJL-q$8yfRJRDZByzsjX71vr>S4s^GN0_XBvy9p ztxfyQDraIw@qq87{H&$VrfM}N3JfYHigCMPGHGFIt(&-6uvorAXl^dL$_Pq_#=j}- zE}D>{K3_m+5KeImV+=XOk!R^}TnlWo7Q1+y_HdBteY^MU7MK`Cpg1va;B_sNWAn-Q z?T*ft)f`>8h|FAt5w19VK(Wom2K;}%C)MdByADj0Z-9?x;mHY6PGpa`*|XZjV7iEGw$sI#OPp+%oy+y!`FD(s;%F|G8?^f9$UiT7PvdU+Z9uFQvJ`d!~*PIHlOL zCL|}26TCC<9P(p#+Cp#_NUFzPzW%|@?{bj3T<4`wY#)PRnFEn~8L^((MrDsDK(Nyz zq#ko;=(E?4ISE%<`RopkQZkM3=)OzX(SWyJV+}vH-n}+G_}MU2#r5V%sIp9&h^J7# z!oE_<3U7JyWIy+44`QFMHoxdIsYF(gnSYC(b;gs38!}&1lu8GR8LQ+{FGKu01})Tg zrg1cmYiXi6Z4j#+o z9Zb+%Z|;N-uqOE)&@L3;fG)4qV_4gUVi#t1IGYXKM3i#1!OQ&-GHFb}XqM@wW z5x2OvCpY~U^+OD*;PiC=0CDHWK7Yx|q2LOI_Anskc3TGzU+YhAsp%FN6!p;WW@;rJ+JH~3?TBp+g< z$LyZe;xogw>!E8MhmtXnN9;w)Q$xUg@fj`r)xE9}R5i2`yLN^4V{T~1Er1<{y(Cy) zDjyvEdT2?Trc6%PaU|O4457(u;Rc}p?=$LqiCeu49G$=Z32tU#kY{mxMnXsmH5OCy z8DAyGdj;qrT1}f=fw6O@AIjya-^idgMH5K9H^L+H`=@)kj?^*Jwt9vdOeZP2TAS7zHS8BNz2KEat)7>lY!WoT&WQ(pS11bkq-cv0(QiKbsFmi|$*qma zOmLG+_{v*TZ9vj0Bt!>Ig(G=Tp88nYSXTz7?yfT{!h#3MQLr+fKJ25%B~{+I=YUX1 zQL$Nb4mcg6XZ%Ug4KL*OHLz77EH!5CKx1-TSooZgez(s2?o}rpRcF%ODs3fW9nr)v1BR z-1KZ^%xk#tWz>%fcZ*`G;aRia>${J}G{db+-~NGc9TG#LPw7yBoIUQ|1Nu3y;ZTDe zA=@9PVoeRa6tD1j8{$1^lHh(1x_SHWn!k~F60p1HD^$~XKYSLceY8FE;QJ_A)zu#&HunFfAd^#(PW{|VMISg9#vEmDcZC7@Lb1*jqcEX)zg$*+T_cTyX1ae z4zOA;sU9Lb^w2>ELrRj_vX!g)Bl zv+bdYf@kmRrbwNJ($&92;#&KeVBop_B6nokex;ini2C3eSPxrnL7j&J$ zI&D5^zA_^p{c!zp+v?-j^H#seL`V2m5HkWz&O9%RabW3^*Cf_%2yO^9Jvun0y@ZpG zW-iG6l5|fmf7P6L8-+15TMII_JpV30nLlLr=PUsPL< zfz3h%(QW$hC^v)AD$_JEIwJ3Ma5VSUp=*zi&73+N5S(A_x>2Cp5j4$zc!7VZS59dd 
zy}4NV)cZG9aa6iie_%jCrp*pjx5_^EPn^pE25n>MHzdw9C3LG%SelQ0)Mi?;-Wl|| z+tPfTX}CQ4_r8AogO{V+gXZR#^*K0yyx4gT{qx;fPIt-05d?TxXCVR4t0!f?i9%jj zi2T-LV3uL1yMNctS*Bpqbz{%cswHO{Kb z!+4Tg#I(Sb!SbdS71}Hq>!|mNi!J0r`! zA4+k>l$W)SkS06%!Cq8C|0SZAFn7K(WPGxBHCB%n9;p_vcJD>H%si)SMLd)Es|}8Q z?P-+Nn0)6(N|hpS`Qju2b@T``$d?B+E@wJj`w-F0mUXn# zxOLyzj@LLK6l#g1%;1Xs$R5$eUz{?AD>;9$h z>GQ#JBR9IK;UHJwcoA0rlri5**jh9+C-C2XE6V1d9*VRpwK-o=ltLo~3J1%>~r>VVl>}vWoCqXIGXf0!~ zVc)nOj`KnS6_3pmSWLPf-S1RLYMjW~NoOo;u*J`FJg1Lla#zbdkM57NqiOc&voq|t z0!~X5O>^dUoF0B`E6MrV{8(K{8++~S+X6Y0rS*!0i*vG@fX^e?Znkj@JO#4-TgzL@p;3^m3JSEY?~equsFs{_j7qSS zOL7kGyU^Y8qWVL)yjX}0PS{>E-^!VPB)Yhlh8Ub%;#SYy(i64B`zNp}+DHyBOj7+9 zN{k)x_ytGdrmTn;&nGlM4DFvulDJmvn;tmcmGaY7*-A9UIxXvd^%z!ee<+UK^AW!% zyVz74HSE%kF%UBuU1C49>Z|h6oUD$)(hT5S5Pjg^LM>frRzHQ_^b#EBZfJHGsC_@| zS<>F)LvUB}l*$)ohm$>xvDEIixaJZ|8k<_vR7fkec9_3*B`lGB$x;;b^T*2g1^8&~ zuMEJS2t@q3LkNFi^Ig(?^sLm|eOX=!T1?QaGmkPzak-T3+w(&lGOR8d+>Sf*1(K(5 zpYI=JY)Vy$~v^772!bkVTzJeR9?1R-s5r6W(5_6F!hBCd|58{k=|`@y!{) zX3Mj^7d6EX7gU!;EN@)8pBQ%<6=*>RHi1abw)4<})WyfV@%ZUZmTkmdt&%K`<$Eo+ zoFTE8(ceX;gOIQ-bF~%vhtF?ymmPTq!8y3=BlATua7yjtIFY-QTEe@K4C_W)NL82V zAkY3uJacJNJK-5lyaLXcw<7jEuW_1M7i|7OptG9Vo+SXJs4Q)AV<8NhxOT=HyZ13( zs%j(Ou4?A#9$#u;ZhQM>wm!&c`w7uE#*!<$f*QIa57 z25a_jY!JsXYzZ#KkL*4Gh!F&Dwg!|O#3ADaN0Up6ibqoC4{)WVI;)Ud>zr>OUwy(dMsxyP(zDpuQPvs1QD9rWaL^jU1iu-uzjun>8!sh*MM#+1tQxWHM z5_Ds&kESjbQwy`)V9!@%bIWrrZ(I|09u9*gb4>=js>d+GE=jRrJv9OQ$_lI9ob7}Y zYxd~jv3S+fDc1{s__D8?8_0C%akI5CVRfd5L{AykOYhApsmAdJY^x{uR~g0z1Qeo! 
zt1y{^bMKmsV#o?8A)V+h+>jQHriVnBq3vLjc6@B>65)ku8`R&4+iGC%cx)*(AZ^rN z9>uQIM1KS^@<8CSuU>CUoAt(mM-bn}HY)#Cz{#@^Oi}5a-*5j$h%TM>?67R5nd!F@ zR0%cY{sB~eON$hq_h(D$sg|T;R>GDcMJ4;Td~Y~>75Pr#K4*Tu0Rk|vzCcqaZ_9mBxXJsb9$P9b+1KZ^Z z$&;bIB&D;|sw-Jf^Z@i4#7^v%|IIJNkk(!->#u0T&1#vBk2<2Tgox}bJt-dan0@A#91eoBZ;&3ab#UquS1VAGbsU9 z=AqE3Ha(Rgy_crNd(eJ8(M@s7*4R~5kanp^2?sc*0l^9GE>jYmsu&dJD9p2G&9JVF zx9Lb-mo@5*Y~5Gt>7P0^in4D-F+yq4LY*tHAlm0KEG2N?cWImGAmb+FJ7ehzMkkQ1tOe;1I{o#Qqq#XXB2jCO5h-#n)~SaQ`e6Terv%t zvR(Ws?V7F7AN}+CU0`adcidM}z6G_P<|s1tx8{bP%x4T`L5PD8kbw6V1TqPRwY}%!OYNshejUT z3+sPW?AN>i6E^(DQ-)fXngDH2XsjfHCyQ-1u^yEG454&7)5-#M2ymY11pz22>9@+{bvH!Mw8ujr&+ z=#f|4`j^u2{z!>Et3B5Y4nC9{!mB#o;9A!u?B51Bz}~2P`s(JrJdj6COmpC;y2%c+ z-?ODp?>vh!FDiCkYk2uwQds_OPVN(T627wEGGw~j@?lcYQSQ>HPJ)D|4vl!si*&~N zr;2@rJys=8hsah)i&uU%^2;IjZ_wml*T&p@c#qP!Zz__!3#;{9#7;fuNb&7q&E0Bj zOpeo5ZvMoUG_T**)^4qvz7k=aUYcLMsBxfb8QXYt3VlWn*X0x0B>i&+R_kZeZb1hy z-_Z?6-~0C1a(BhUg(Q(%Zg}>21!d>}fUb`o6F`{UqcU^hpwovgQ^^)t(^^@fO>0@@ z*8&C2Nq-2Yoo2z7J{&KEpEE^}k?#D3@@j=ZSn%NWHlRC;3XVjaz7`#A{Jw(NGJ@Dh zsT^)8@i+kceQGsGAC}~ACl1zmgn4aBMYo>WZJJY99MVIn!js5AcjBIOX3SUoupjA-h%$Z27up zj5XEQPR7h7zr;g40V|(UchvyXf9H$s)!o4Y;VQl(%=6To9-A&}V^a9TPaXomlr2}J zI4M(|AC?Q`uHg~$Zp})gPUoUveW9%nKMX2U>aID!H1KYdMYT4ZVW=>xiHciA){ zP6eWvvJG))m;Yerj$VKzIY;rQx&a=W>436FeC`v;Ub>=b<(iK8UHxa3vbX*rh}XG4 z-Vpv3_BP+q8RqOE81--V`}u`gk?P&1n0Ka{E$|%5<^#O_gaV@BP{>s8QbWa>d%S^L z2ByPmYD!=%kkDco%6`2q>fcRLUn%zZh{p=W*H`SFbGGY3i>JnC*lgn54 zCwdwt^|VS*rC*PQqidvZ7nfwv*ZWgR!98{2bhS%M?97-?rtkV&7{Gl%B0KtPhJQ*l z;l_Glx?%n!hUJBctO#Nl3=?pFw$zCuW7M;Ro-{D%SiLck!pkygmvKS4s*N&mAt&ic zG(7Kj2~UO#Hb9B&(iS%OnR(%K>F%#-K?>qqV=|xMJLQV)~0rcT;C~FDPncK zaPy?sSI&CXGN79ToH5<Nw$@y%w*VDa=%YXU)aA_7btA*icP?yI zNw`k5D@ta+vcf8t$;0-Ue5Y`nAW~uaO|jpi*f?XOj+fm_1!Ry)gm4hfs9)dZUTz<9 zDAHiZ>lovj%f)9MZwR`PQAD_mIyznFQ$VL`Vm8S!?Y z{XQNJmmlPh42p<;K%G`dS%!a99uyx4paroJFEkepc{<^Uu@-h|@?t^{?_I}UmWw_$ zmi%qL0h6^Xu@*1;dO|^Ky#51r%}hvvjOMwx6b6n66e?JH6V(d5i#z>mMO@XRxMLlg z16D>=%D&n&=l}wxSK{}s->eL(CkiUQ_yX?2j!v-P_6&rTJi)uk^ClF?!HHNqhKZn| 
zP$zz^cBV9dIMlRI6NqVL+c(F1_VZ$M`B(84>{-u@McE2vc34tN-PpkS_BcDm&s{Bs z_3R4ZsD}-XugMB?G6kIvpRczmyu>k?DSOO5I??b02#0s_dwJyEe|n98j1QI!wPI_& zhdcOR%<)RIrAu5$rq~T8^b82~`WaBj3D+06v>yJddptwOYD znB4y}=BP~hBdrddWyt|)!L%+zr0&!YezkFWLnh>#jssoUzZzh0i8OB9qymFpwlfcR zN@s|~K~q)DIT=J94S%AAZed~LMOL=q<3ofxuhJkQb2B3an{aTMe->KpOAq@M2J9)! zLi22I52OEMsoNg_Vn->PD@_2>&jb(#6#sgs?y1iQ*9e0N1;gmjVfQ?8MeB+q2OLUg zVm-%%@YLrMqgp;~K=5{0k)CFl)#KK2aiiuZz{Tgb;U=TabM%MjfKp{ynaI zUn&Mdz4c4&;OXA!K>W$f5SuKldUQKSaQ;rY{Oi80&B?6@3_xMxkQNvEfq2i}9fYqt zJ0N#Wk<#S=196;m_mIm`SwV`! z$mXD(i=^gXfA5*NqWc!`Wl0czanLEa&oNDd>c9Zs*fpdGI3RCd^3b$_L%S|b0z9RU zi7?A>g#gD=BT!>*U$*voW@~T0S1vOEXp1YqysLlZum5qj9)7BIeTk9@)sjM`kP@RC z_;Tnkt^l4OngetYD_OK5YOPbmd@Zw;7!{S3>KyL%!9$iD321$_oTx8<$XqO^_Ezo7 zj))+*2u5fbpu?4br0Td6oS1mO{{Db9wpBaH0GZ@W%_WV})q~3hdI;fJ6l`cZqlvb$ zx7GhK0 z9f)wtx@RCTWBXFs1C~TtBIsOtezEif>>5u+bQoa#Tv1`8T-K~Sk14rC02^Iugww{l zHZb||jX!EI($xvf12TYaiNHg@0pmJ|KYnR&1DR$-X7fT*sg~uolw)OBtf7m8^%FD( zMADDsaeDY+vb6H;J4Ym8BEdis@mv@U+R6%Q4s?41<&h!Ya!EFQhO%3S-W7gKY z{c{{Z5M=E|Lj)XeXfh3fgC<@gqSm_g*+B!Bwc>;yVg1%_8XB6PD>Gbuy}t*mL{8m^ znv!>!8FPu35tNc|_HT$yt~0b~M-wt5(yu0=w z0dl53U^6z8m9B2c@382}-02IBiN79+kShWj700|k370l2`UWH`I?BdcCfshX*zf=7 zsXpulXu^#K{I5xQ9J2NgW!-K+t8+ z2ua_`|9J55go4wKUJ18z_rd1K$HJK5QM)D-N^r$eGeN+~QLcgsUd` zJz*Ja96O>_P)0skLGiH~M1KC$nql^&(g5ish>cNJ@8LT-CkcI3m})vT_OX!V1Jh^d zpP=PiD|BOY$V5(8+Dn1l^==}603j$Y&GSGTZp>IDw4bI)3s|K&hllIl*Uo*P2yh?b zqPJ88bPK06K{sHPd;clLXO^GTm_!3o9W=-B0Q2Q`M}4Ga6b$*Zh+e=D0Gu^pP9{Qq zcEAwPcgo`DGwS{Z*VUu{_PN7~w86E>OAo6G?MzsvJXixVkl zWicT-a=tMEH|OR<4H(iH6s&jJk%D??+h-SPV`cP`3x#abllv0%inB>TwFN%zLK6!- zs`mVrmG50v-(V}cSHOcKxZO0224u+zjx2d}!V|ms7?8xQ=}B~X@=}LIR5fOO4PbRG z?cIbd7nkVaN#KV%9C3Mek$MJXEu4|XxKVa^JNV$ko)MeNl0F9PCWZy$SC_WOH$i{} z;)%a^jsd_w3`^QWAzB*H<=hnd7-bcwVI@}Wv`bS=I-ue6gFd~OsPZkI7)&Rz!T}%LbAx~+_K1kBEmWGXW z$0?2uUcr>*XWT5QT9NcgwSL}RMH9G^Ol^9tN4l)O)LQ?cfO=o3_EiH_Tli+08okNs zqg$ty-09ucD+KI>&4&7*E^6>YlkjxQjp+TrD0(`Oc&WVz z<q;7uuS)^F}3mWS~wu6V5kuk-qdbrTYtv=^EYnyoSb$f87@0fBz$xxpm5l&VR}o8GFEfw 
z|50Ot(;Y-myP7Sm`X=-$wHA8ycwXwx;39+)LS5z4XTzaH?c58(#@sdus$hp}gz>jR zk)KfZFDbx8qS5xLM#>2pn|qH#URB4$Esa@zQxj7dI8ZKPj8SqLyqYIsqJ@Id7$2M3 zE3M@eKM(n)+U7{@d1s!7rYzHl7ab|<9d!f$dJ37<#37-5@&hO3i3e;Z#AZ|>C|h*X z*Y1F1;WAVMuVvvbme}l@S5L7-X@0n1`s54}oQPUrXm&9349s-%({96?XHW(YxE~KR z+KK}=Mh+MHW9;zm@Z_`K8hrSm2#u}FKJ)W>RRYd;Ij|^zfx2KNn!3Tsf>Jzl8C;^uVY%74qL*~`QS9^wVu z4Jt1bua7aORGEu9J70vYbHVQ&Q~>p#1%E_CK1*68?VJ0DL331^%I7y1jPLi60_RH> z9O4a(-b0qn*6GoGVrmd+EFQTs{#*W%XYntNyA3=W%_2ctrf&YK+<>{df4bq6{sTFh zn#=Y|_yokiGm*&X(>O4L=g41Qwr#Wi#Q#|RLU%BrKJXnMlmch?u%H&?8_&}3H&stP zfs4#tyJ&FtMZX>L)GbI^8ei82wtWvQ&it^mWTJ{EthkOV>Gss}R~MB9z#n8G5`cza4+mvofhM(r;GlvT zvp?t_h4^zcpM=*T16!RSjUKF11XvNUqx*JgJdE5DOtLpdB*Vs3lS&d1-)Z}stOOdp zjyaFwzK=8$fa6zfb_-;cflRBpHgIHTaywglwU9Ve>p`vY;DMr$4uf%`ZiX9GN4h2b zM=!@4X7DFn0;7@e3eJ;Qx8k*QPA5}`r=RMb2TmDFCaV*^!fi+`sm5-U-Aji}wKwnw zCP?K5(Qte7?{wTUst7i_YzkyRVywIz9x|XsCHW;_9Y=fG)=;FHt zQhjR#)aDyHy7QY1Y~=FcI-gPV&y6A;&C+w)rS2bmj)>E$yCNoOP12R=7?TE7)ELdi zOw{}G`{f5m{L`>03%U)C^2djW?z5tuU4c_9u7s%y3gTh%FkssYp(lc)TI!O||d1koQmvC{=Ks5fc zv3iOo%Jt0a{w${9#(-gWBy7d{tAdS=&#=o%it0@%%(-+OQuXDhoh7i|Q)VOFrVF!~ zhOjqaEEF23F97K9Udj}JS%E!Ear~C#9_XuYpRV$1|MQ?(2Epyu4?@~5E)6P1ZKQAL zDj?19m@rMdg+5jp*{U3w_R2-zgS}iVkx@~5{%-W*!RWp+yAq#*FJ7rJjt}v^=JzRJ z3hw168j3+1)d^WHFgzoikds%{LYp?69CB35acvqhrV>Pk3ax~Ld9gH*;XA$W|DHS}Y8n}utrNLt%mKCjM ztnDwaOmT`=eEmLGE~Pj~9QS%ap=u%|r?I`b$l>8dl$1LlvKb$%3P4AL{i?;mV~i>2 zZmje#86c%D2%jmFb{!scx%aI2%dlAc#+QR_+G}YMMUu8kHs~7Pb%lGN%d5NKh^Nmr zpTTitYzU(G%W2k6f>pP%nnc6oo|?}1|2vF>cIolgq?jF|DrWheq>=MUk~2k336x;n-|ga%VvdU*z5Gh$dLOCMxj0DYasDWUjcC7bR}&k_2ziqM<<*%N z8bF$NF_$8uD;heNsM;lAalbb%#;veV%VRhi$fBzhU-#9aSq=&TF6Um)$b;W?D?qNO z9i+M+&D_+_z1}MT2+v>FD=SmW3We;PUXRom5lYa^{L~EXDMg6Ff z+Y?ukDr;f?(n8MX)T?mNP||pu_{?265nU z2u`6yYRU7_f0xKYui(urV)I3*t5<5QYs&J~Zc1AZ+f@ZS7JqVB8Lsuko_jIHcbLZ* z%s%nMKVm1+LF$(}e=uKy3^g{IoSn*Jajc4DY-hW508qq-clG; z#3;C2J zZ#UoE&CmD>YMTrPlU0>m^72MDJJNF*3yUpS^D4H^U<&Xkmx>r9SC`)jNRC#w<$7nqjPS?2lHH6?tL65+tc&f||1B};^znY(=@G>1 
z>Ts;U0Zq$_IQIfpQ+NK?VH#wq(P(qhX%|jR<*~@BhuE`jK_@s!_4bJ1_at68mju-U zU8R1Ik5eAbodAokOKr5+MVSj5S_=3WIH-+!qG9?`E%&|Eu6jjs)AX|&FkgnfX>Tox z=tjzU=IN~dn3aLq8mcF0?is3^6s@V~s&LqSZ&(8A8~=5A=Igo(-=?1nq}QPuhzhbU z4>xC-u>TtllY{U&dFd_9`M&ps@Mb*k_A4B%l#u%yp%jx>NYHEyl4b^&g7e={l zeQ7$xsX}(+hwKQvV>FPFQb!B)4b5qd(qc|^T;6Di zWb~V?DSn_wquK5FeySaL~rC7(zW2 zT5UFPwB7%Re8H2kP5cL<1OM^r5oE&h&t-q*AFNksCs1&tUjav{EC1i+rP`r|I||cRbaSu zZ!)`Ap;E(T2zsRq$X~V;sA&uEM7McW@bi?vVm~x@7V}Qbt_&CT<o3%5k0x6VP@|corUnS8mEy4 z%puRQkM%t4`s$(wz98F>t2vW-q_+C8PayeNxr~~`gLH&`BO2JSOFB&&8-idn_9=>1lVHO1+1pw_NY3E&ps%qTc{76-)RCqu|lcT2{g^Hq_R#G?W5TBC#;1X+4uIl^YAnDtEMyboj?CJN*ze%k> z!(_(f>^rLevGH=ejtlDu9U|{_Lm{+)C2^`xa|MfQJmV*x3P#72@nB1xssR{47L%f%_6x4;NNo;ZGzp!$HjxobfK*w6 z5O>m)?urvNkk%{_)~*Y8Yz4ORnaKP$5|3~kwv!RQ4gTssKySse6om7qHm+L@2M~Sc zDK48qnDG!=_Q8EU+}qQ+4?Pb5D3!s&@h+*(;D$ruuMkb_zH15~GBw?M%xcS@ej1tJ?&M%r6&w&Ty%3S@bnW@PHxSY;V_~tKquBq50SImqIO&a%s`GHGNe7NG! z&eWUan{!&(xQcpjk9&8Vj=whfn)N8ltvE>62S&m_^JScEEP%;+mh~@j*-^aaBNNdK zGXb(Es=tYGwfVVEeEwJsctae%BpabY%2An@COfTlg;3dcof}WF{bI8U?c)L`8z0e6 zimxYva;ktrsupm8-vTz4!D)J8uipo#n`TYHKl_2gY2gDs z$@In3MgO+YL=ct6-e+ZBOgDs$yKlZsK)6s1o+QBD4t{RYZAd$dCYrSSpkHBb_CT}o ze2x!vjYDZoO+u^-geNW`^@*$W>5+Y!9RHYK<5#F%5Y~vb2$O zH63h?GhO8YmAI4vWb5`(Jj!&n!g@0E9kM=2;lT8|nSB{-bMf{T8c;(HXW&8d~{)F!M zy|8|xFt}Ft9PTJl0=LH>BV0hdj>&nH>wO2-ky4US`XXjRZ3sudipMA$wO=US8SXy3 zpj7;eZmFB%4uJY;ArZIJ>GGWB6)>4G4p|`g_2u!m@E^;jEsY$3H4Tfp|;nTTLMyiCJ%m#?*f%&8XePq; zRT(FJC;i76&mLRE_#@PqOl)f9IA~KxBve!gPlkWUt&WL9?p~Z&Cp5Xqw#WtU?SjkdN*Nlu-m|Yz!Mvt zDv;*6Pkx*_1Tb1DjmNgO=^{P`WL3Mbpr zQ60?Nof2Jw)!wQqed~Ynm46||Zh1Au-ux-2_nL4aNPZGLrfP_FTur<8#I6>y1+T;ZXX9G?|E64M&dsn;7 z=Q8;Rp|YR*yh;-Bo%oQpsm*lm(h8y>dl53Wy|iJm6jd{<7V5tK;NA;BA~=jIlZR8U z4STwT^h!`(@Bpm}LY?ap;f#19?~m<8{O>Gkuj%bzV7=l)fTq^}AjC$U4roQyK;=Gw z=d^(TpNFyVb;RBVF(7KPOl3>KO7hRSITYyTR=$*AH+a7oh_{eif!ulXRIbvHHErc* z%(uSn-|8*9^u`+u-x7%&IN_Gao_FYvycr1(XN`K|&k@|kDAp9S!LGR)mZL{8TY_D- z&L?c~r|B2vmGQ6q7dlKz6B+t>B&i^OSRTWtrVO6xhWZYO^J4ub+MbJ4sS_LVkBMi~ 
zpZ%&jBVc*h^eE&~Nw01!R3VFRj)o-oy?aDRr{Ip$x4-E>Gy>3~zv`qD;7Bi#icczu zgT>_gy_=e61JLM-kjnLbg`L@frb5LrJ1anM_ey1n@-Le6t2YFC3k_}`cp%=tyYi&A zl^Vl68a|yH@-W)d@8sYyPfa8uHtW-u-qo*-q|QIKaY_d;mqIYk-seooqV}$*-;YJ< z9ZK6^d|YE#-pg>;7yVr_!v`kfIokBxKRN#$Dum*YMile;fUzra^I!i4=Vz)s;! zAH#%B{hTWI7$%HYojvl98*dRo@PfTh*(eyfP8_v;a_olPH(#K_^6}6_rt2Z*GC^!p zFnUvGtvEhs`kNC?jeLzkh3o*`wB^$Y^mD_*0ibkg);ILB=?|JLfZtT49b?N+p=FAF zb>q4S)d-xL6hCif;YLjY~H@8F^Aum-HW{htv#I1s-2_ZByY zX@}f@U`{}ecU@a?0_)m4^O}PLUO#1@=sn0;oF=E%Q~KPywG1bx;!y_A$kNC<69CIo z^xphMoPbVHL&6LTN*K1qBaRCgS~=8XJt=8Dzl{Qf{;5+|9J6`?j?MLv2Y&ZqqZ=hN%mQoaX{^De~8yFKCk zPHME5Tk~h26$M!rrn-aJp8Encr_%5SKT`<*5df~qp*&Of#MNk+XqU!}drz(n)nCkQ z5IG}|UQPYx+;!rYl#To!xG85{bPCH4jb#+7ArT_9{C)a8;YycJ8nsKXEK|ls^g$bC zY3_*4y&_ofzL^ZRlc`O=@8m&!#gwxYIy$o{1?Qe=!qby}-5bjS@W+Sd%feYuKlxIE za^u%TkEflQ;u*WAkV=^A>dCy?P_bd|=^blX z;WvnE$^cBvL$&(@*3n>_a*8VAJFTCE9y?esmtL->+SuHC zH0mB%pfluPou_cUaSn6ZS1(xkWxkx(i+-QfpeU%vg&|9Z1=j_kYrgvtF9@o6^ZSvM zz&~;UCPr!>urM6=eGp?zkvL7R|0(3cms}njUDK5sHi@er-s>LUbwYu%vORbUS|WSb zz@Xc{b;$FyXi8P+8mib-mnD}8t4f*5N4b{bOHvTGtnJF>Kj*a+u+usdSLSI%yW;0* zDz+{^s*sZ~CbxYCOXgA*P2As-7CUGs=rw)$U$ma*7~;qojaP+nHQGZ~MjAj^-`m*T z`|$_vu-$mAh_vWEv$V?6l&V8^vLW#Nim{zjj`dSMP>M($j=Hi$I?y=e^qCf9uyj=O zu_aSmC%$b<&?aSIZhYGoMM{mR2_P}YD4fhdmx5pY#M$r%9^2}OznA>Xnw;1H*bkD1 zQ7z(y)`!h7Emv`pw7OGL5b=sP%p*n>}V3HSODjH9}R9QpZh^X z2*quUELWD$^pBkV=5o#x_6l(;1@Ww=O)YfT^WJTQgpCv{lnW+o-6~ncLS;Wa$`XTu$2^yx1Eb(irm9wAFBmstRK;> zy?!k9%EF93f#V$fM1!2-lgKf={q57VK1NCWAv9m%H;ikZ+5s+_y4k7CB+WEmIziU7 zMre^8+ji$yblsn;A!*5^vl?v?Md;mlayJ(|EyOQq1`q20yVN^?1opqUBX@QkI=F83 zqi5nVFX>2YIsYE3bLROcyv|*k@4N$(9fyWjwJIHd5o3W8swjQ#+M{Sxg-jB0Ls_c5 zE|<7;&mLwsb-THw=+sB8pM^)29+a340gkgSj@LZtbc7JY8Bu#6P9Qz=d*eo5N#F3( z&>te<0fPOy31pz(T9_FB`CIO=-UFAw)IGMg=maGy0JMzyqC}m`QUcRGyjI~1KPd|& zRkI8Eu)Q=!2zVG>7HqgCx)oQLw9C^oSs%5CIt2oDTeQmFMu#PmEhC(^*u#o4FRF36 z+$a)=%Kw0LV(Zn4JRIeHgt|E8_uZQpx_u`#X_SVSgBaY+*e2&pom3t>x1>9h>->uG zU$}|3yisoTI{#uFG)$L<-$mT%z~#_Cr09}cl?2AxN}mfIcE5tVn%?o7B;kBMmsHP% 
zy7X$&6K`4HyMlGPcPan*uO!>)(0&D3I5I&eCu{4JAIFJ&??W)jr< zumd={L!`}4i9<~lGOeeaK6k?3S1<+A%yhTIVbqY;I+i^vqSS(foWb_Yw%+N+yiN~o z+98xy^WUvdAOoczfcr8axA=y#Ck(<{02s#rv~_QxL+o##-*yAK*Tn?LKCO?baL|wi z<@uFnklTbDFeu)>egoE-s@_nGJr)A7C=gfAUM>~POe<;yRevkW3XtBntJt=|L)%yk zWn`;GeCyrH{#V}rNcy9fSBE_beh{VwaHu;|$DqDr-2ZA^_5X%*uuv$n7Y6C7E;Hf9 zQ|eV5WJC>Bc3b70{{x}|pVqYzI{s0gxBJLZw$Y~{TI~N-1cI-Q-mcNVfsSq%atmz`j%>;a@z2xfBG8qmcq-KYWRPaoYNVnqY; zZ&pYB^Q)u5R6Pdp_st`R0bF95H{R&~zo;VgM})T972c2w>&hWl2Y{s!#Bu8hfX?`O9}|j^5oiu{@#+FO{u8rU?S)kt z?ADBW2d;l1tiMVYMt3g?QbcXXiZ@|vfVI!xTcYE*NCvnIIX(6?Ze**{LaA36axEo^c z)KW!b^>2T31sqr%DnzB}R=BLTe{DjZv_7S*1#~n{o|d3PMC(?&oxO1*vTZ4)B8yiG zY4I6Gt<<0dvl3jZj5uM%r?VE!vM`Cw4CCM%GgTj>Iq0{@sn#$9_QRwxnR#B$7-o3# z{C?JhowTgojHhzw{3?SjgT4b_ijea^~kVa$=JnP8Trf zwDJj;d;0V9qO-C#GNWQ_56K+U{5QEsf!soEg5r(>C<5nMj)ajo*;ih9AFw&mEFA70 zxl}8%i%ZKHqsJIDxI4xy_XAM}~^T?l! zxh5KvZGG+*z)Y~cno&-eW}KOmX-M~)J(Q33kwrcu0c-Tkb_3O+cxQuq6!YV31URGB z|0T{dc2%Qix=}Sv40R^PWI%hJ=drSx^yydFeUI2qO-0TzMHh?8dUTR64Y?T<9n!tA z!gQi1u^e6UE{GtR2)CyDNQZ~+r(B*{&j}OfA zgtsyL?cF9+nYanS=y+t0{8x4J5^Zz2>p7v%WIngXB*RaQ+^nSp!j;e#Ps&*zXG_wG zN3^WV1FT?kwZGN5M@DUO2-_qjbqkgx!Rv#K-^1h|z4TU7U;PSuI&}QC@o9w}6zk%$ zxA$xHX*o4I_~p!R*X1P54@)cBXk+%C7t^BY)kCA9dIa(Uv)de~YjD%KYZmyKstQgU z_2;@NyB%~IJkAnGyXid^yomYbB<7i9fA``9FssYMc`7B-*?T0SKs7GaT^s3#E~@ka zZX?*X0L63dMD$h{OTVXKzs(l9Voh*v<=VX$=jj>qHBJnhNgheQdT#kkiZ5N( zSqOnWEwJ*f12&6CZ4Q%VcXS5sSi;J?X)coz1=2JHzXL}7K|M~#`%j3;*BHL$b7d#> z;3pJ$AIk2%s<9F#;;zp^8-wXf_THY*3w^kMK?P_x;4G;D@q49$>3uT1MSf6qV0KRs zKk_Hkq;gF-Hgv~$hkM!VSw{m;6yneD(eUO&dwv{B*~4O~o7XAxdK(@(k+%w%X)V?MC2j3~KW{sp^NOFmMx;sy5v53g&_f9X#1W9t1ZfFHMF=fK2qcD3-nC(z=b1U@Ip=)u_xV0DcxQ#VeZBdEkIhN9d_mQ*iekWYmyXUQ#lva#jIhSEgz=Hwe_w zuVD!jY@ZO%?b+SVBT}tFS~pGQ<_Pr*n6o5KY~i1+3}C?$pEZ2oFV6kU^;dCkzvacL z52)-w7Ygtktexx9O*lbkw0mL7k}IH;%4=!hbyLh5{ zIW1K2XG#5gtVz%w>ZXICR=`HIu)-5?X|-N_nCRoJdA^5g+X!J_eF~Dxo+Qk~$?G&I zHTwfLZv|CgPIth|>b$?jVMc@{;L59w)VIPJ6|-`zY}={jXS^9pvEO*29BXyP2MhO= zO7~x6X-3XK+;TBNact;TLshB^D2v@R+>wt4eTCFP7BO+W 
zWex$0*9-20PSr&5{k4<76;f{U#7;byKKne`jmO#G$cvHP z*@mvdV(sWS_6_2Q(+8QP1SDo{(hnE3J- zZ>b<;U7Xtf%-an4i7vACS!73kY|ewRzz6i2)v83W_eNNM zf%o2T_UI9B9`%(`fDeX5!lTQSnLGtFkSY_2;ZYT)5-{~G#k~$8L5`vZDJNd03 zp->Huiub2baeD5vd6JnQ;w~BpJ!Q{Q%a{fgP2(Rat+qHhx&b$IrLn`_=_JBoKt?=u%})oGDLO4*e$@ichN zR^?oS3{QNDaWrMt)~qquTCIT_TUx-panH9*=2mbHqOp?)) z-uZ}a1NZ!uqa$1WwYL92^+E0L#R>5Bd6D%y#dPD3(g;3*Q+n6Bf#6$YT5}TB!cW+3 zg#qkYoNIk*Ewhh!-b@fOAl(~3_P#Dw5?8i67SBq)VMm6vQh6JwMBT1RZmYvZ#>4IC zzP-;AISdAXK@tx4@E<6Y%1#iSx}j*8kO)=D&*6y6(-Xf*;ea6h@4pJP8yQ^cZToFA zSpQGS*_YT4zpt%w`r!w~wkKTUi4oT{dH~N3bbN3w+virDOqrcZf37e_b~H}GOXqvF zG|XD=lJ$fyaU>V#NJd=qJ2AcQw)qG=v%;8YWqgh7gL`tL)@3*j)XcZqATpvMCysZyYi#iO{0N8 z=#mu_5=b$)SI)hp-IdGjrY){%mUUKDz(@a7AvaOmp__ZJ&b98EZYQ4bgz-v3@ca{` z5W0Z=FhHxKtMr*)-q$K^pE;glKI^Db>yMeAp3b2Jn}bGtgca~?IN)dj@QnK&=tjO4 zu-?MaLhrQ{pKRU8?Gw~T*MCXv719193CbS?S><2zOOSzOg_Rrnp4*aA!)q^Gn_v0p zn&kaPes3Jr9UCX{Ea{9(5HVrA`$YU1iVxrILE63Lsoc~69Wz=k2-PO%0kl}&HT~kr zjRe7{w2%zrClTh{9PMa%`}E&lSua$awh1X6D%pxp%s2!sMppVd|RP7W?Df zy_;>jkP%u36*MxmU;lF={D(BuOo`{a*wrITK1A^srdvH3KgHUBRP7&i)(9Af=wB7Nk0gs5Ijbn)Kzo18 zvJ5w7fLVyU2?U6d51v|+3aJB4!zUqPY|6F#d|ioW2lIAYsv^~c-a+}(2Vc!lIf{cy ztFP1>qDn^p3WyLrl_XciYE!*ko6e0rFLs9Ia7QyL4C8$g)Vh}<4C+>^U|s$pPYH4- z#)uzSET3anSo()&B2Z0LxI;paaRYf^@jdI|@`%1F=(%y5xzk?ZPR|FFi01}X72H}U ztLNgZ;v%k%Vg^$-66=5A90aiMoBugvEq_}(xXc`VFQ43!YJ9IPmL(sRP2*s<9^m=n z9xv*W7ct?~zstdNaFm*GDujc0;9l^f_u*LWAw<3Mdt!JJ{nR`uBixBSA9GF~XVfh^mbH#mnml1j3}J)&DM8FhwK&)TllTxZH0Mvl#qT4t1da4=$UhVxDWWnn48 zV*>Rcm8TeV`o6~pi(u;w;%5d-qWslcuCO#G?5xFoV}Hv6qhgpIPy3I2Sz3N$nav+~ z#(zSXwOxYrE!%g=)fR&^I$7*?+&)uvIPJ{MMrEFd*Enu@vx&Ts=q)|mY;D0e%M}0x zyNie~5a){L>aF@@Fz7j{nzWSOnbl0*yIXvJ5^*_FhLTGUFb->e_(cszC9yIGRfqp_GGGgux;Juv<$%(fY zQZ8D?jbGP2%U8n${UA6kp0ozXRyZ3P?Q=#t8V{||!4VcPKJi^s< zG5EVT3JA)67z5QVw?!D~-M_#e|8suUpLsqAQU8@x1zN(c%Rnj224xHO&!Phyz}!lI zV;=%s3TF1B7Z`kz?X?Z6*hbXb(7j=+B?DaHeg)sv@y}SJpU^jEEO$d~x(+Tun-R2amqMqCf-Cq#TI5HUFZ9m;Gv1i(uU3Im*{_%o9=6f3 zt+zRG{~e>t2g)lFnm}jpu+^P+f^HcZ=kL4>n9c(nqyK_~k?#c1*>+I%)`-dgO{)_? 
zd1VhcRni;K0^DE-;bO25v8Tb$r!W3nOs+U(!&d`{HbEH{xM)5wEVieueBO(vcXm$m zW&it`1&vC=6KHqe|36yO|AZEghn%`}=gaS~&T+rBMw zKZa^1#Nt1Kw~p_BESNw9c}NKA*!d1T)Q+IxIuLiL*>SaCXTuk3AQ#jQ;tnvNXy)LC zG8va`ek!Xon@c(a0{x)NJ-u@0AE7jN00@RUjPv=CAXG3P$cG5OIzR`tnAckxOsG^t zuAB-WqgPY>2M^?ss0yfS7T3bNpmE!L703R_bfLfh%O~ugBu5DEx%mX_Bn}_JL1ue@ zOs|F)VVW$UcF(K^=Lk50#Xo4z;8TC6O#V0q`dKOMKk*`HT3P?dC3l0;pK{7Un|pTu zWGh0S=Q^~w+0p^I>LuVoN5UU3vz!0T*Gk6#2k^TeN5IxU0k-P)f~idu{6A?>gT8X( z(i*y}03M$zg{!(nV+fqoKL=VRj1{E*{ZnbX1Q9GhegX8Y!%&lb!b5=5hGrLoloqw7 zd4#Rf>km2mK^V&@_HWGvv`wa4;>8ITomsIv2$z()oJOm;eUfuZ-U#K zeukjf%?;HeA$EvInfAldUh`aqcgg-204Hss|lVSvqC3ZRizBtNz$ z_&rUy(VPL_VE`r9dpk-B{OpG$Fr_CE_#;I{u)j}hb``Quga zde??e9HcA(yrtckbHf({SAc(^Vsn&;7x>h_&JD;M6H>q;Z{F7b*VsJ-Kqwd0F0@Y0 zZzzO-+o|$nGKZ@1MDYT7OfKn@|HY0Y{%490j{Pe|`QxMZ-v<65Q7dvad>IcarIWba z@f%=2m_r|>8#lxS0dMi{wqn=m5EgYF@H8~N@{EU^W2u$ z0Kpwv0pii)lOPNHt^sX>q2jlG?0;a~LIo>g1+|c%M|%QnNFIm0K*->hJ@OQccEj?@ zoPWLBTIV`jbx#BKJw0x^d1TiXTh8+OTK#&_AlPyyOXxG8e1?(TZfoId;~D=ai3oi2 z->*|!_6MI!cMf!4Izz4&8v-61+t9|1w(XL`%s<&=b^kR0_{+2K*WLWDV=L>v@l61; z`}@)Wt(084pCOAhP@Kz#1T^AICtNpIVTCJv2(-o5j=ym@Cf6eZmEfP#TD#9tW%Q>+|seT?Oi2nWBhaq10+ zcw#V9I!J!Vl$J6(jU;KJrx z(a>APSV(Dg!oQv~43Qw*#q%_mK|lAoo>n?@dUR<^19o!{k}zq^0*x&9Hi}{>hpGIf zl~Pchw7qy97?VJImM>Hl17$ofSy0vZ6~9jN3L#`_=YZno27fK|be=Bn8JkAt$U(5B z4M?-Cp#r+{Jaf=Q(U9vN)O!u0s)uJzU#<_+oM~;(TUcm05d}4)pD_Sx81$>3yWHb= zbGRBf8_%L`df!nAz2er`=!Vj1-k6EH1;|VleA@Q;g2E(ZGUj(thrF$!np?|h)(y$A zC>uObVgTMBQT9kG=Pm%Voni$Z(LD`k{{ak~xokg_{@f&Bpz?vU6f9F;zm03{YV_T3 zP`T*_a@hs2yOTG{%u9=A6*$mMmY3KQhu>!MAnV7$`G>IQA^)NsT<7~kF7)hGmilx; zgx&J`-TG?%dL?_9j_GeRe2@m!e=f;}`~>vIPF}2R?`P~ZOB^_9)y`Pxrl7u9CRk9{XGNjCJXm z`moNKGO=6Q1yuWsS5rb}%QL{tuw}%baS_J4u4}xtFi8R-_{o$2ZL=lFEp^o@Wi|er zD<)cN%0IhP>1@5i z==5mrqIIGyXw&F)UQzm)dgtvP<=vh}z&y#Vu0F!D<5GQD@>h7$*OYjURQ{(d`f`-r z6UhFWtHC5b<0c)j@he81#^(burrWvl|0YbWQnV@r5B|RCH9LYD^i<%Gr^kLcPNd-7 zJ%Ar=F6gHY2uuT0(jV6h8Nlk_6@m(gXyEV6$MgltIz2r#z)7NsX=#BcG2_mI0E_#8Ol6@)yT zszo)zeZKCneueYBNtr1BVX3wDx#OB%pu5xBK0NMDI>N2K(?J@{7DAVOH!z7k=YgSt 
z=jq65kfKafWlsr^edY62 zbcIw%d7_H(ulvDW4aYZCPRS|zuRb!JOv*zH}u+g$0e|)3}6^bv#OMS z?d?s>`2z??fC6388pjU^OkUEG<)o!#LiLwaJsK?RI1 zxUhxQgN<5kf+KvU< zlZp1N?5`^d`0=$?>R_ki$WC>rTh0As+?woGE|cK|skjSK_+)XXvfnL&rYg*(RYz^U zNOO|z!YXNJJU7iyz<)_y64u?s1o1H^w2(Qo*2Xi?S!-#%zK-xXKI;O9d&K@9($@po z#me{mvH{6o6OdXP&nkG<;`_MAnB801kdHIE~25ozd3U_diSTU>u0)e zsVChw#g}i5`rvu1KHT3ypksM7pE|9phw$|stwHwGXMCK#INXcK?SLWt9p~2Jxg2_X zrb7!Qo0coRvrkq|)wg~gNRdU(Dbp*L6S_wLaMjan#Jo8Ep(7b0Rgc%R2o^+kjAy8sLFvVt!x-E<{9{^zUt0v!e{-O~E*y z7wXHqN=^w1;0USq^65wlMj{g0>Xj+2yHTr_WdunL|Lmv4{+^Dx1*Wx&7hx3dGk)_K zvl1mCsBVgQttV1pf8k^0&O%G`GOy}o_+Sp66o8Mvn?Frcn|l5hJCh$IT#zzq^z~jI zSIubueeEo+;$h$#w&pPpWW8lY*kFSWm3k~X)wUoqHJm*;VFk|3Go&g}wTQD#8=T%m zrOPkpCiaF_a7gt!Wo>Lpws=eIi=nRAPcxb=vu()=7enS|QfvFdx0b1Vui>O${Dm;; zn!Man&o-4gi6kSHF16T>OAcq#DGcH6j?d6O9ZNmm!tl64I9xQx1}#5$dqvYpWsb*s z)Fd$ZLe7dG-)IpHl}ZF>RfDix5Sd1~g;X8%h(8R@XYX}&M4oO^=-5Y5TPUA<^J`L`Oo!*II}Ufb9gRq>8TDA*PFhr3b{}TrN6!}ZdDvfkEcJS@!pSsR^v2 zx%QeoK=9O#C$4$|$Ihg6ZYlKWm@0uk_ySL>uIob(1S-5An z{+^iY9a3s|l^kRFOKoH>`i0f!YuM_9X1l(Qhs@x|_(IrF_!CO?kSw}B1E6nra(A3Y zb>ZoKfcqP{YG<5VGYVQzkGT3gjAHf za_0ED9kLnP^bUdpgk&qSR!&5zl&(}l`D%56*@gWHD_-lgdYLsw$Xe%<;pw$hpY-B> zHiPOYxAjINkV?QkS|SJ(g@puSDW;s%o)z1 z5bf3)O}QSOIsN@sq^`>T&I_6Id12533bdpjicW25Pl&||NKwEfh0=G^$ovCBzpR`L#+*4PrfSv3r-5I3 z>HS#^I4U)I7le(=9>Ry>hwKoZYl z2!Jk6*5_5%n!kt^3fv`ecv6*#mtNShj#h&RvoGW1}+=GN+$A79q4pGax7$}6JA!u5{w?>_)0QY#2-2RYEa*T?`5 zY4Sn_2h#w8_6D_Nx9V@$9Vs`4pt({+icU192ZLUI6VJpOw=DmGiVyqQ$ivrj&}+MF zkJnT^w=`o{?(iqa2U)dn^Daqz#;;H+nZt#egO{%32cLA2!L&6sX)G8k5)d2aW7pqTkqk~&lK9jw zdotsOLkC^7@tHYLifWdZK)u*noaYVOFIM=%<)g}E>e~Q0YKn)HZZ&ofYOsB&BGQ-( zLQE%>V07M(CP{baS~EAI04^?yTJ&P**PX}M_Zy~qa)U-YOYg3g0RhQSUz$YPpb5cB zP`izjkJ)mTe)7E8eF71gqf41yt=m0eeYzV7#CXt_>$%aG^iuZ~8%^&!y(%s1chW8e z`LUM<4pn|0aAyG%B)J?-@xfKzSbYDx8paa1rK9W%pC=H_$v$XA^to`1IkfNt_>$fC zU|XEXx7+6R%Wp2PMvLmi2>;FpSzer9##-2GWjE&a@zfOdJnueu*G=k_Bb%AW|3&9T z=`jt&gHWEZi4uLqk1O?RN6*d5281N(7Cx)#u=1ce2@ZNzq0%v4mFY8yk5~E6Zu}Ms 
zSXd51;NiP>PYxzV0UayvaVDsaJ$MCBSW7E#zrkWF?UEsKruR;c$s{<&_G0mT&IX zOliUCQUqarJlu{jhR?J7vv|8MW?GK(+gH%@`Q?-w6`7C+>D!sY z$*`IXP<@_pn@*~*qqg-c&2GXBJAtjY!qZ2W%m@Q*+9F+Sg zmgabY6^qcL!CtvJ_b^PJ;y}wac}olTX1N7$%8NYg*NP;lT~zI(QX8X-(>0Yz8fE|+ z^MbIOs@(7SW)~ht53h;JswTA%buiz%)}Uvv6(U9Nj_Rw|GjcemMlilp@afjt3RwTF z`5%z#ty!Fd2LxJ4ck(4&$qcX?$Rc&J+p&=;{XHW}XYTEz$+&dZtjp5F_d&|_7xSaU zypxI)Z1T!eP;U98iS?t$LRRK=9+V&MS;dMH4wC0p{O5)~M$y9bXBYv`kEjn9Mwm}H z1GmNCba}U;BID!j!L;!5RrNC)SMd0$=+v`3FhPxMrf4FbadEo-Hd$7u-m{`3%tJ~u5VX5XGj-Oya&bjir5e|^ zvnD*L8^~#coxKa^QnbiQ`SI>p5s9xmcN<~$?Psf9WiElK&caAEJBPGzgHtc$B^2^f z#qS0JQG)mDHp5bEe^1Fv`t0p(Anfr7@HG7`ObrD$3SpnKPas~HKHfbSyEtRA--GJM zG|KD%)1LvY(mrj|%cWQdgy7#Ow#Q8T+@`br&mqVR%B zIntLBSrkzh%!_E(6M6f3_9Qt{>VArw)2QDF$Dl84P^0wKre)Kd&hG!Y-L2x;r;p?8 z8pAg~XB3LhEvvs8?%IG9H-5T7G6A2Vl_2l7gaEGY-74hPEPKQI<;Scm=t?SN53o7? z@oG4v)}4I2ntczn`k2UCjvm}Se4C)88wlopy3&si0R;58=7m?>Fo=RWZeI^nO0wxJ zM$(_}H^hIo+~w%I@4o(K?7#7bt-!xldo2G_MAG?JXx9C-+D68(aa5?xU!d9SAFwp^ zCjKuR`h`3Ec~K~HK%B46LD(Txj){`0Bz<^jReye# z%Xa230QvvrBl3U4y{>VE=A14Q$b`x5jeFbohroDbg8lkED=X;E#97tet6DpZ2IYc3 zgl61Zf2e9h@Bi;U?KlhQXwC#s{xXAq0Ubn^kmMGWxPsUDQycw54e;{;3tzv0lovS! 
zgxsD0(PuAq72p~t7J;U~;FDGW(t=)`sR7kv2ciAHLF}L$j@3B{Q8^ADSp`Ll>yp>=Kfk*GY42_v!xgHx$(jh71SGtR$GfHNLxAheJ3x&m1_yut{P;<;NJuc- zW^1k_q*e}I#lV&w3_+-3fl0fu-zA`PO3J2>fj@21F8I32 zgEfBhr)I-9{#C32RVUJm%G}Y&U(WH16|zRaD)Yyv&L-wVtA4}tDgcQ3ZG8#B50iLv zCR|W=b6K=i-K>-x3jimV80EVgA+vdCXr1jw!Rov%5-5P{D|Q=i6j8vLuKv7HSKE1N zOZUdmEy+|4mEHqh#LzE+S21!^Lx3|`)(N)+5{9?fzLGH${{XcZ@Eatj%}));Z7ev& zQD8&KPj{Fl0TI~B^Fho4kfd(BI2{`gL~#`V5lHJhF^V@*!YXB))|mz?+x$f6>fKq9 zs3*|fwT*ppkh4H?y|e(p zXt@jliNfhDGyps*()Ue)D<*7yXXxtYQdeM??wV=-at-jnt#)Xgc+(9uXWX3zmuo}7 zR`U*MnN~BBJ5|?%3ms^PKg1AhmH)Wr8E$aRAAm@G(Gbwl!&ySlQSNV3#y^()+3<~O ztu3GUp%lqiSGIiM)?|$2W?*XkHW48%EV99-ekojXJ zCV-MObnyco(RV%n_SKu$^1rrhR{6iwF8}|sLH>WT9hl01 z(G9JDmdYnUdeC!$32}42Ktcc4GMbw;v}!nHxFM0_8|&)CGbJF`H+GS_`awfPM&wEp z`GVivuss~inJ3WF#iphiMGpFV4oXDLDs$*Y=uuim-WUu|_gs&t@EvCNFj47F8NN+* z!<_@vrpSILXLo9vs(l9KV`cp+?H!BAsvBXgfd(15wZ&Z3Js71764lh)Np(<`Qqt2K~G}l9=C?z2(va--W-$O(2KC`0=r3{bpV3DBNl5tc=y}?t<%8;B$z)kBRLYvUGqAM9N zfL@4GtLIiAY*d^>3fTKTP5Y?nc5RAMEzpR20!?Rs0-|}wmFf-}9gOK~k0&~@)z66q zY=uOi7PIpqLxR6JXnhr^Yyja5*&sS_vKZn?dHUW-!8H7~kmSG;ki*#>PT|J{0oK-m zE3_4`Y#oF;R}Ws{7Yzb6*`RIDjo|e-pq^#=ik^!c0cs?qMmIkA_fKCm0RQ(^d^wv6 z1QQ&Js4M(7b4OF5>I2lmYjr?fXok`UfW|w6Ziflwtou8UO|I&8D6Eo?DKsD1MUE6! 
z28Aj=%?ShCDB^xM(2~j}72gg9RM;n#BZ2uFmo?q-`E>vLu37^T$g3c6yNH65k9E#) zA@@ln;2orUJO@uSX~LACH+rwnjQL0=mr2VZ6ORxMmiKE;m@MCNPMuqIa#vZNkOX&< z%M`xBOtjU59L*d)S90?RNCt!CQEr!pejc$p5^?+kEI$T+ygQ9jjiM${lfCV>hVa}T z23*UB&F-8N86+ou3+>`FYE8lT{5{7c4kfQeLZ%mkoi3eF?RmFAhs)~+&hM-J#iNrM z?_m>Gof)bjT9yL0NgP@L6Ea3XGja`p{%-Hho}#Hvm9r8&&XEWMkB0@2M5Ar=Idu1) z?fq8tnIcaLr9}6}SxRmp%<*|o$W&@3%99!xnM`b>p53{ia$s%fDk4gn$70y~?H&?U zXa>}Rh|=XI3)Lmsn0%}w{(i;c`vK-KexghnXi=1=^F)p_cwQ_$qVn6cF$3s8pWNK4 z%G-`zUH%!UP+DT(6T9QCQ&eG?$YiCHX-VF0$e)n-cItHNLdDoOb6|KfE`+js0|{dz@C}zM2=@xNETGP!sCT<3(qAp z9ff*!t}n=~m#U1IZEts{Jg47?F8A8)b16rJVZ&0X@#yq5LaB^Bmv3<*d;9r|#K7}Q zbhmd$T>HA>vjR2fUgXf@>_M9)hcYUgquo`r^$ktlP1+5!3S@(a;&_Q34GPVczH+}n zZT|t0Gx%I#Ldr#$VcF;tRI=#jHur5Pns>HWo^k`Z z{6hU%o=?K3TN%btKs2Gnw&f83O%o;9ekwMcM|w(EJ0Y7N>jvlp@2 z8|k2KPg`?ayNz_2=}Mam0(KWprR(K(9WyeSzZ0>Uh+etOiZD4&qXo|$J)YpDyl=MZ zzU)BhF6F+yMFyl(myUfk%<9SD=@S>~J2+IpXhS^+K!txb^ewmie1_J3KBtanZV6$m zn-Pidp&9iXAq58_j1XJf=cU9BAp-DX#);n6TfgLeypW~-a;k{<&6>%Eyc9|qv%8f0 ztdcHd_*naLWbS`9K2jLawWlqC_hSy9WLp}QIF=(v5Ddr{LFq*-8KfHoMGA`1jxtOn zWBTSPC2ebD_JzCF*O!A`0pZ=~csJ4Yg^O3mMaC`Xj=AekLvZzBSMn`pB@Ywy$UVY@ zw)90OattFFh<$HHp#%J*8-f0!rkTQWV;GM&957)>Mfv;_#(6c#T()RDg7lgttib=g ztwew?Wr7N%d>kCG+(*pQel#(rlJS9Tk0WK zJZN$IT*)KH?2E+H5XUQsk~IVb;<(Wsh334=qc!|LK_27@NYnA(yXO?{*Aly|JIWrQ zW~A+jdF9G#c+L1$Sk^EPTRbc2o2#k%bKf>@Kk5zC&y3ol2liw`K?Mr$i=gaFEYAvx z$)o)}bJKBp6T>!I^>j)FgypzVLoCi7N8J*B;Gh)jF$tS2VvVdSv*> zy?)_DRyfyZG``{!@*1Oe1IkcBX^?mU7uO=z+wKvZz`vuwHQFdYC!Y5J6i{9)LS&Pm zr00{9)JhVK?_mk{^;8B?{1Z$2=ztKfQ(sk%wP1^`LpN-EKruluht@iuUpKSE+~~MZ zJ>v2i5~VXQp|pda%o9da>Pqx88E;WqB;yHQJoB-BUrUg(R#nBpL_k(JfJ!Po7c9)k zN%gd7%Ut#=80Bvxaq`3nBIJEvZ&VelKcOwqIRChtjwKd-wF<}H>brFN^|0V&z4-Ri zuF}o8m_@|$Ebx-DYCA1>8ztAGn8qmN3}L?ol1n9T5OC9#5pO)-=7G!P656v7U^Y1L zAIlT{GSK}Q5a=x7MMg2_B&!B?NK2i2*5C9XG0!#tB$%WGzfmtiQQ=*7IrP%sSGu}s zsC;h^a$o;rL|%DgVL%7oUQk#0rhPKnA1vvvYXwKILc;Tjoe*k#t3C^B7qPuf>E`hg z-i2O8upVD5$&hYj%Ub)SK!gH}-7U{7dUnqSR`f0|`3(osg@3 
z1me72m)z=wetFY8OiL1L$wRpz?b|JUVL#tP-sQ;QE3RpeC+3PkK&&Twc_eh`3fECHV8> zFLrikUV@lN%?_#8Rty;0aJ9sOl7S49{AW#uC#YNw-| zr7R=!L_gh7Byq>OA_Me4Eka4OX9&Psb<|%Oz0J4hpAf=E)?mLls{eZB&F^7zP+X>C zqocTCwxC;F()iRRq|uVc%}-S!anU~AsY42#8tuG%k#+>zXOWR7PCG$sn}IfD_cv{M zy?Snt(?YH=Da~Fzaf%v@yBIRjy#vqxLOVm^P*Q^h)@<+LU0)_pG{doMFrSE0znk7C zmY--I$vW$N6sJ>M@ZcV#c3;|kD>Av*e0zsatqT7yvu;!&ADo~j33bdo{31=K%WJY~ zij$dL*E14Ur6Z$r%>MJ+>TDY9F}ENEY@Ynn?W>|ub8#7P5mx@ot=mmw7&n*US1)4Zv zKb8HCwtROkvoB0nB;adId_1hbcZaE^;4M9oyOgvWj^ub21@^3X2FSc7d@dn&dfe)f zpIin`Z9zz{hs7xEq#58^K82P$UK)1%@z&d^(gDaV4W5^!j~Cbdv?zXv2~Zf>hc4#V zF9Lhm-+gQXzAlxEqi)T;u#)s;YSlj#mh!-jH*rQ%B|q1AUTa;X-RSbMhT5@8V{bQ> z2=)~=xNnasD_FHc+|W7tlPBVQ^&b92A{XlR2;_!!G2WG>5OMKwNrU|(X@#98+s`T_ zh=3}EPsh;R9=(hGTH9~-cnvy67CRoU0j1%?qr7X8F5Al3ZD}+J8~)U~*&(yj_LNo9 zi_shRa2rTdorzr|;(kM5$SKH^fmIirBl&ou&hr!`?Sgcd>{T?HFE>yxwAWF}Z5)>n zyec5pe&Q?!cDd!|ej2XR7-H4YsaE=sV6?YGbkLF|+N+#hzD?_s9)D{FhZIZs44 z!af?^Yi2TV0Ue8NIZSFB^jzv1etpK9wN^G)_hM@qidze@LG&1q-B{kex%fvew#8pF zl`%)U4lZX#f9+Zrqh@tR~i}=8iDVDVTDE3Z=68M??A`bZjmi4%)U~9PbRDWAKf`c@bMDBfbAY=&$ z-%AU_HL{!hv=)fdrQ-LCeSzrWVUUq)J=mOfzZuBOF%!qeh4R}8F}BE# zO0P6SwX;zRK;O8-r>zA4HKy7k-V;@u=0=Ge*6u3y3gNkw!|Of@%Q@ol^@z4=xugo@ z{wsVh`W!=?)@lDb)@|!^hhD29?fN|J8vXOtTk!!|MG6TX+ee38>D!^U>%=Iy(cJdo zhFQ;#L&=2Spq!KIaZlw2?k3E`6yKc`YA1=-$(zfHC)9o7FgVuf++tcAdlFU3;}3jhIw~;O?|7P^bE(c~r&j zcc+YbR1DOe+mCIVH_ATOy>mFgbT#(HS2feDmA4{m#lgppl6YeshjO{K6zU(VCscc$ zO}Mg!*&;951~c6kFc+kTjlI6LEY+3z>jvk!!S6;4gv3?pJ_yXao4C*A7@G9>q?p;8 zw{r`NshZ!;HtU*6kjnTTof8mkFOkX$*w@`xp823WKNvXlJb7q5_RRGa=vSH@&cWvH zeagCUHl#Fa*3DVECq&Tjx&4}uq99W@yJOC$?W|;UZ@0pwn z(}1pSZN#^+I z!M$XI-bet{c)@JYQPZ4iY%J~w2{eDnWGF;l(V-yo_;mtMVY?vdOA)o!HB18jVpg(;0c5hh0cRpWg~q%6&64bWor@S=1p% zV>IJC%av&b;{82+ymdM0gA*^x<5im*E2n1C3?7v9b`oMx9Yj}Y0bPA$IpgH2B6S6( z$z${M5uPkwB6U8&n`-9!hI^Ucf2`5olUfw``>Vg*U75=jaBrXVM!_e_|dN%DV#0IR8ey&sDKCwqCcG&{ z!{bw&rn9SwtG5T2QVtF}rel+VHMf@6jq*z)z=WmP9Tk~)2aNSfUWpL)9wG9=IwD>* zzuObL2W(!=PY{x)RQ4Z{<2mnSb9KqB;xyXMJP6zXVDPe$J!t46whG5`jaunEQUwXs 
z69`+%H#XV+K6c+HASUKmv>O02_nSOl%Ttv!x=N&)kLEWe@4LNS>lHcr#%=Xi0e7># z_5z8Oc*8mJ02TVf{Z@e^*s^nuImt~K? z_XOgGTI+Xx9~Ky>%>OPPf-81-gyAe#G>wZ^WWFnT;cc0$kdgfAw`qW21em`rp^PSH z4BHnTA?Z!HfsTq%;R`k$b#g*BYBX3vaFN@^{^ca_+!))UJL*zXB|Gqe=I-L5gy0lcT6R0wX5)b?WGV7^HaWDrGF1dn)4i#c+CN;wl`G;!q!8A_hK5{mR(ba0)gw~Vw^?>Z+-?Y#jD~N+BK8w!!%{ zSU^^oavAFY>Xv~(Z$3{JP^|Zw2hBl2)q41+kTCrddNB3klhcp@73-LPE55M&xQ99G zUVh}@GSL1n{SHJyYWw6Vn5j541mct6PiEI4bp`R}!`)0&rNJk-DrdIr4aTFn1n^|5BieKG`leFn z?g7-Yx13 zw=$#y`Nl(_=KBv^TP{fMO+-m2Q`oCj-V#z470-;~J{&2c3SUm(Q$Y_B8?7~@h(b{i@m>)zx{OW6MU3OJ0Xn$=}W(XMRz*Q z`M!OfEC50D2cTrpVYM%E3~0Uh4svM09d{JqgcAMHyHVz>=ro-<8MLd7KP3g8#J^XB zprkB+PQt}$&ie5BTy4Jm;TjA%pxa;OSsKcE%pQFpbwrAS5q70x8L(4E^mvcatW{W4 zrpMVFNaEPvMubC5q(7~GuAqcEk~)(==WB$EBXSzB^qsKgU|i6U-nz|=K5#o>Hsxr= z@wJ|l8Csn13H{q^E5MG-43q8_C|jKig`ahAy11pGE+Eo_1V|)(w;!H=p^eIcC-!sH zUjQLc>m3#fj#HGD45$V>VlP&(JV&6*0+V#up`Y5R3dgQ^V1Ov)!IS!FX3(vU*u+5; z#c#)d(o_G(6}9eK)BCaRqw(I&Id(6VyyGr^0)3BFDkj_I1fIExb9IN$K{e2ECJck5 zmMeWeFKdy+cBN{-!U&=?uRM2O@X^w43QVNLvjM|^;7G+-Jsgn2cV zo~lL0;3w)_-KN5OyTf@M9EIjUPx`nMd2M>`1o=jv1U1P%C#sL<96(>lvEDZ#)0H6X zIj!Q>EyOnK3cZYyYMbLUT-ucp5?mG3D#lgvQEewO^D{+_ZtoUoog*scmgfcOWj_sA z96maM+e=oiw6v!e=5NiQC2b(_)S82@YVHFA~vv4(?dx!0n*+FXL#BUA=8{Etcgzs&DLiYajnd*$Y zoflVQJpek7QE2u}CSriL#dWGaIz*c?QFBlQ!khNFh#4UC!jBPj2Ky{6wNYKR<0(oykv%o$R4x1*ZhcR|a^}<{$)T zEudHKJ9O_UdK-=Peb2hoFvY3|YVyX>0xgrfu8Lg+RM#StQ7V7K>le1_(jJ;8K0j0P zHoG6g?U1wMAN?L~$`P?dtmu>(Ffd8wjk|jVo@bHBW%s}$K;Je;E+Q^kJ2IZ3bMGXQ zZ@X#fy5|dt%vAS`RsU^|tX+&KOx{@BbiXm~Nn5rt-kK$J;`316SRx}2L(BgqY1Mc@ zt~s4wB6S*+rJe8GNO%`S$w*2m!jUGnn)SE`% z?O984Z5?{9145W4F~%i^EH(X3I%?$F_GOfurn5XUYP;y;xp-=4M#d1*x2?{i6s=#4 z9A8V~+!$8Q-EmbJU@+)WH@ZL1OeBIma@8Mq@*W%7v;+8B&|;`p7fooautkmSTDc=$ z3Pl98(fv*pzE%DPi4eJ#rMNpJ7R3s=wE8pkTk7bFBb7LEOaVPaqjK*qvtC;(ru;@i zPal-VzH~vZFK+kqKk=rOHzwzZ6V}IXw1~@bxvo&!g1@x@n>nK$su1P-Hp|8B=mG2A z4sH|1$PM4#FioS=igq85Bf z6JN~b?@tWXd2kk>T#N#=n98$#H+9WBg6gw|1q*_JX^d=;DmAI30Q(BfAKc3E#Ti8& z=HXj)PnKl*g{MHUl 
z>Ze-Zcw2+y>Es}_#up`ttmc#xGtx&f(PX+2m^!fL2uno5P@z@6FI}q`lnhpRq^UMZ zGb*2JR=a>v3~x?ld?#ZU&oJ_Wa};bT=$#EV)Ri(9D|)BR{ORK7$09cHljjFP@5|Oi zM5e<1PGUA)8DZBEra~Ps-TE?5DaFr^>fWB4nN3H{2_7qiLt`>|Vv|ClZT7119$GC$ z$@YT3Z-uuqh^8Bq! zg5=r!WiwZ4H=b{>rQux@I9W>TO6v+~)o~6rxC_17BkQZ0)%YuFyJP++nZ6~ca4r+^ ztFLM~3cZ&mXR{l$H{h;}%CU?P;!pkNj6c<`J_?5!pSkPKW^+r=$mc?Q)|20pT{8*s zs22Db(E2BQt*gm>n(l=7xegS)Ax?+wFB_@8yXc$L$z8+7`y?`dAvREK^1NGb?x_W? z9C{?`BE{@lsWff*cL9S%*?LR6#>F6J43+nkaDuIAI@PR#%)5R!-ySrNMW8I_@8~8Z zMjiF4z2lv_$N@El+F&6+!QZfq-)iQX9uz0 z+^CXJuIBGN`-1x?Air8fNH7iB|vQ(BDB}^ zJ1q6yf)Z-LsnN?$sU+DoS$F*k%acisU5 zYGK#!7m}xR1h4`Y7Y=&Vs2M?{k+;d{bIW&iu`+2tQ zcDZtc$1Y3Ke5)i6OYFS7)3qWW(8}|M#h;XZg3C}XdANKXYO6C&)!r(`6ZQ4O4n98Z zbSn0nL6ZW-5wr!1QIj+f+l6+?QtP-4q^gX@%s-bzj1DY}5n{YQ4y?TQHMGOyh}F-d zoyfp6(u;-nmwrJx!pqKL{n@ z3KMU+=f7J=KH2|*I%B|V&dD`lz`H*Bm(DoanIcQ>3%qKXU@#I*RL5E!<#(M|NpD*kYY^MO*`rH17|zO zh`u1ZL~B4094;%_k;EdC@pz0l>*6v|U&R~hpo=<%tEEQt@1#V;`#B(}k3@`4d`#~e zkUx=j!KKA)Do)fi4_j^qJxn!;J*Nt~I}I2eeRfB8xX+{cW#Kspa8F7OYn%3$q@wFb zyVqQsyg>xmHkVMwa6(tAO*~OyUPQ9ID;xRjh^zsxW3){|F{5AC+fg;H3>&Im6jgk0 z?4YQ7e$xghc@qounXQ!)jQ%mulV0042Yt}k+7YM2ceAE00zHm(6h@-fbf`y5T*+*g zau#Ug^x1jlj_y*h-0?xtAlX5UN#)cxtCQ=bNiO9#_cbB2*z{wbB>K@E-S8Q<8L-tH zB_3*GUSN-`ED?3c{N5Zk@WBl0rks^2Dki<3=5c4<|52s?oQGR#loMsX?p{X(8o52` z!h{}1hH9Km>8WVJBN#3ij?j!)^-w1I(CvteH5jVe6?ucHPq2ZuMK%-dHQfH}7T zFYWX;>-o3ghGPQe4B@3yiLu6{!w8i&mT#MITWz{_lycKGv-aoSHSHpyPHL_1niMLkDUAg$b+AmHgbjmrpfkZGEk)pu*8KJdS8~YsSfT#CN?K54)k^vG zPv(8nopvWn6Fi;*#U}5BDdzYaTA0KTk7yE#F`QBaO}}^-uQX=v6HH_plbkaX@fdlY zbU9oLF*#X{i0nQfZKmiou&R7p2QexAd1TujN+vpzJiS(SckX0nNck_Q*A5Pybx%SM zi|#y15=}zLxW)tkH(bLmx2aB*Dq(ot(1u_WlV+eM)qr6J9GYH zhviq(0-A!MMl`rSE;qinYaLyW{g38rqRv)Te`mO}FQI+csKRSHG=2-4)PvtE7u~f|K%L%7?EEJ(*~U z-0^U&ujlC+KU6!gnb5(DCy#1cHt>uSjMFytZ>Kb5hxEaZH(Y~^B!s9P1*wJ~RK#~t z$uHntFT)jd0>(cmlGLIS!Ta5^^v5R9A8=eZ?dtwXpf_S6%qH*5iMq0mWi>3>>z~7Q z>Yd!5+-zYxSuUi9>m+W>y6W17o)zf6x#<|2jyL=|y}5Jeg$G)&lF2!U2eLO<2^|TN zyE<{Fx^C|>F#%GB9A7!%KM1_=m3jrP%`n%!@z-6xN{C7JVex&DC0Pj#LRh 
z_0}Iog*tPmR$&!!_WeH#E(PZnteYMyEYbLA{-Ho$5AjZV&md1dZ2h|o>&P=&nNS=! zyS_NjpBzPdONj>i=K}BfM6s&l$iDNB>M_Slv*{Q8+EM7C$|cV#jqAw8q1CG6H&(8& z9Yl6VBiT@8k6kZd!|-c=4>vrYJG?4#x8d1W2ewUQHKtvvyE3!Q0oqnfFoBeg)M3!r zV(61Wxcb>2qk-kfy8NN$Mai9p`*j&Uq%2M66N?gg^z(~@~BXQ*s>Q7mNHZN>vjlaP|6er?%f3f`Q)W)I@ z2Krb@4HJ=iFb0cs?BDUt1&i24wQ(UF<}UQY&29^;u=p(pi;v~1vuA|!#4|wf!=j{W zXZ(PQ0u!J1f*c><4|3M7c{cV!hH`81A? z>N(zdpTj?@Y`Htg{=iea&Kg-b;VAuF;a5Ll*%h~}71;&LVbZWom?eSVL zrl~e!*dh5*417#&6u!IX@U+kTD(+r+EhZK+^QnZ4+HN<^Puh6Pz3EZm*l_Ji2wsQ5 zF18QpJ3qq>apPQIe;v(YoDNecz#hE6Lvh52l+mW8+B6n1X`7TDLj`c zMPMNaChVP)Xz(c0dAa8VF@5!Mvj@G~RcWK5=?!57#x4Jy*JrN@nMf*HSKbIP+Kz)> z*9c!?Z;xlIBT{ZdlXVMVQkQ4P-$i2!%BaoBp@h}RL6RWZ#zY!Xgi$oA5Ewc0LuL6 zh;zmXPUiZR4OhEbydo!w$EhxkEsuDQfl|}t1``#n`SryKb4iwOykq-r96FU5tl*aD zd$bl4@DqM!RBndQB0HqPdAHI#tWw(EurLv)o3Tfl8#+$eHSG{#nC)B4NIN9@j3cld z530nU$jq$kwpe#ChYVS|`8Jb%4ceTP`9i|(;Pi#Sq(6U&&-3E|j)Wv>B(Y5!1c^Jd zq@Rt5xzfcibmO`PkGaP_ah}3N8c~crzvKGLI7)=T@95@y6qMzcHSDHNonx7;W(wiXVbjE95{8Q%NY$HP6usk=0`9Cd=;F z?M37Pv2^TV1!4*yB4aA0zmML_ObMKcQVmpQoLF0H*`Xl+Q!9#*=E{t3JS^(UP%jo` zRQBR2YgZbgV?-2`Gl*F$(^YFEx(>jMJDYDmGJ>R!kn8PmO1IZ&i#a;jtuM{yMF6&l z8B@NNP}ve=z>)ADHYQZ9$DZnH1 zUFGie);AUNN6lZ4M_iGkF@2ORB~?mJUerMAsk5ZDQan|&7yzERlOH|>x0p<#$Z(9! 
z7Smh*X(s_W7nb4vph@jh26_tiYHs;*ZS!K<^5L%Y)RN^^v&lZo_{|so^|5T(^0Rzr zp(|oWtNc6){(_|!-i)Q+W$)WC*yIU;4F9uaStgkt!CjsfovVH0{nA*DF~Ecq|Tm<^9dW8v7m7e zgLQGgla8QG6PW_;%&(GkO58p(jXQgHthO zmfoJv+zo({%CE!M@d_XI0XSow!@hw?Z1+tk6#E%2WS%gdT2ExX(*XGoYxAA-=ssG14sM>dl}BmI+w@FD4h61Nmf$YyBD93RZ|r!oC0cmj+(vKSsNDH@g5ZR zJcRqz%1iCSBSF$8U;bl-E-@ZI`WSiT1N)7b*jE@GeygR?WnHgL60cAnDw)7L;W86x6d=R-Y zz^=(6+mLp{s~O_ow}}e_{=&`vc4Dg&iu?9Te-!&3B=H6 zSr%KX;W_CX)SAa`Gw-AM&eldTQ|1pWJ1|KBKxDp`Jq)xl9c2JpMZFhO8!Zi4Z@(H^ z?qz@-Rz-Jbmg&6tr_yO~R4qC@2T=5MU!@~34&{tL#$pHe1WYxln%ax=y9B7XxytIl z8u7&)FNkX@dw(-irx&L$T^$Q+W%EOsOAg7pRd}S+V941u?3(gkQa9aT0s$Uj^V|=Y z<5_9*#19Taw-J09_2lPcqk8=j$K?Pr<jN_<9D2H+t&m z#;;@BVbv?bE8Y^DsBcnw0mMH$ylPq%@41rMdooEQx5JZ`>8IfGui3HLC)yZI~ z9YYvXueE`N4!~xr%T<&IIS4h9C|_pRu$YxgljQYca+HO0h#U=_)p~9u zP-Im+bhOvwK4MrVVn5=q`Dl8WxL1a)y)FU`NZ!QG^|8oyv9aU|1&l4FbcBk} zJrX@q<#Av+ptzECBub`#piQ#cE0O36c$LaqeZ0(C47#~Sj&UQ>iR0r*u4t|Ocnf~E zc|V9)dpEgA+5J{o`8BT|RC5OtyY?(~IZDDN6gjl2*C3qSjUXLPF zvs*~fdQfmWQx{sMD-_w8ysu)F*$5d?^dJ85u|W8v)zxKRA^a*x;A?RSbYD#MsdZmX z)0sz~@XihQ;szduwpQw=7>KKzZa;VLwWW>Lc7Gq-nqt*-W=)*+%NBBwO|SK*5DVOp zg=hOx+Ep-sA9QE;I2By7;r4vbRATU67|&XA8~nUZ^K0YY1B=u{6eMB|ZPsJ0ME#&7 z3R*%CHt5_wvM?&p>YNEnRw`v8_knFVnNTx$%XX{3{Ie7ue%)Yey&2i(-8YFFT!Pm@ zL#pZ#QZA5G`g&~8Ox|Jc*I3>^KOZOZ8;|7>GnvM8L?3dQB_kh6=4f=34~B?n)WxCX z*8Jtd=tTRi?HUes-riNnH&&U2A13yU1jeTKqSZ^T>c>^a@PMya=d>m2T}f(JY8nQ* zD@8`5s)WyAmV5IS^BY~EyA@Pq-IUT7$*(>JvDE3RgPpG|AD#D@a~WAV=bwR(R%X17 za>yT*liQ_}pQ1_LjC<~yC;xz(U{mqx=-Cq)%^X;leJ?rUbAasu+6`TpZco!81|Mkd z#5ZL0IWiw3pW#f#OTgp9({v%V0SqwSh~`?XBtO2PcFMZH4IV*?D6xe7Ef3*z?6U^X zEd(cV>E0FfME(oQpgTq3vwCkTzk>0I*O*Apd%<-o^{81Cy+W0+88rbq(%s{(P&2Z# zlJoZD%eorU+@MjvwI&91gLtG+VIOcbuRYb}*bV67 zK8}>FJ|*=HDCHeBo20*%JEl-SnwjwxrG*|a5 zf^cJNDcYDrV%^(V*W$QW;@e@tQnmQMa4?^=`|5VZkJa^w$8H>2Vo_)11Xm_IBDTvF zrQjx?Z1L1jMaa(O!U;>9Q4{^*$`~T1g$i(aMR%q{XHA@u?oJUTn8IdWCrIOL@Giz1 zHyULOQ&xwzd3c&K9T%MxRY`aeYSkY?QK{(9CVif#eo;lc-+Vf8u~ONo-O`~*W>R7s zr3A*l�>GDK@!TdNn)(ckct)CV9HI&@`plMo2(uiN>wfA`89)N5@K|1Ek3zEgqf8 
z6qcfhx-euH^YtlX9_>?L5|VHn^FDR+OR%B3Mq7z$JutfNh%G+Y+m#A;N)h}vK|HS$ zKi1d_`XJ8*Pqzrd5ye4yN=p5tv~nLdf%%#zN>3GW!lh}ebyK8DVf6m4K1X1S4W35l;k1{=U>V{{3QqL$>(uOU|t!XHjUi!2^iL z^l=PdKRC53Q~396R`k?{)Cs(F+k3Ogp)Ah|>h)yQx%2Fwou*fh4n8wvGwAq>+A%WNN1}3XL zNxW#%8DD>j&yfgGo0Z7?oi;38ARYSW3;n>uGy~`bwfD6XgP_ z@%iJ_tX)i}!afGQgsdmKg`{J8$hVG5Pj_@;vH5o4dARAdtaI%P%f#~UA^H!kryCV4 zlNe+Tmle>G^!<9i&~>3af|J`Sf754c?%LzubIcuhh1c89ajb-N7|^3Ip&IC0B~jNe zVw4yK!T*UO{ID>^P2a0 z@p97(Wkk!zG~}f@v``_!Q}&bw8yC_uC3BqvT+Gwj3?yOC&<8`|2Pb@q&7N7 zTaUS0Zb)yhHx{|%dE{NqVY)}oWxdFQIQeQTz3MSb(@|CLN`|W8L2DiDwXuDD)Ka_m zhbVu;H46^qYj&VBi=JiLA@B@xbbp0g+Jy_w7QT4=~C9_h-@{sND=oI-p9 zU3WVW0W80Fh;pDH-T74V7`(riYW{eJdBQ3ZK}N$1?`(a!#55qLuki*C`;pYiHq%r7 z@^`IY_R@pD_*i~WGU@--`s68FNDaReCm7u^)kI#h#^Hx8H*%;HK4+5#&foC#8Pu`D zb}yG*xJGuRv&wBC+j?N9Bk5JE&SLKMi~dT#*KRYX^|ChWoRRviCi3~bPfkFgR7Bq2 zHR%y!;lB)YoDH+zfE3VgHMI41`g0#Tbvh6Bt9&|TtX=_{a>Ck7b@{v@9+0vFmttcqm?2R${c^<`=8V4oiL!mt!ueB#}0)87Hit zZ`nJ`;)J55&HBp8HF?)LPCISU8%1Pf>Z&>C76>t$#J6hWpTIWOT6jCb1(VAZ(MuPsSAnyy1^L;12fM8TYa@k_HU1nFZjlJ|A-vT0!m7d*E zAp4G25Cid^)R^OO9{%lJ_W*U{$Ehvy{O1&z4j0QNb}t9PbZhTW7=c>c4wgjU#D>76 zxdd;h|FoZjd*Bh`8XS#P%Keu{E_D9e^UgON$#sy?7D7~& z4<_wjxn;=h+WB3*5H5Mu|MBtRh$rH9rjO-5(jsz|d=Qk}66EL#@YN_33nx=3Ef{>d zXSlBVt?!0aYb^9$4Gf&&n3zIo`u%SHq7H_4*r; zg;N_5P3)a=AgObud^&4vy%7k_-yPgIy(Tf@N+>R*YsgLvR*i$8*%fjhr?y|0f@Glm z$NyXcU*9vC5~Ici6nhtqk9oqRSihlHXdy`QpM&N5vPM*ok#xKb^2W?vM1&=ZI#p{z zZ4t}{a1{#=8@>HYQ*{xda>tm{ zGmP2ArKthT#bb*2J8oc#R^;4atK;x0yxJ^*edI-DIv}kZJ#tpx4{KKyJfJ)|w|NCw zK+rWP9F_$IT>V$)R%);BOCg)een+`Rw#_Oyy^PpuAz4W_aoH%ml+5EZpYiG-W5yFj z_m%XH#KMb-xHR3c(35YH188k#{^irzItZ~mB{vA+J*jRXpv))Pk1))ex{gcQVjyR3 z#??sU{H3ed->U=aR?;w$kcC+QG}M3k9)sCy#mbwb%tH6idz%(esSzlURVw^kJl^Cv zl{~?1TbLSZh1v?atu6NrIO&U4l3!A7^_>ya$@eXqz3DD^gVNfBh;3S>x4yCw z)#mi`lIxqu#_q_6F4%p*Li~AoI&*GDri}nj>j30M63x}ad2p_A22bRO&-8qR$YD{j z`-XJKkY&q!B)4&3?zBm-YpU>`iVe~DP_WK6tl8il(=W`8uBpH7)0!DjV+{NZZN2SP zuX-*Z-BKNI$c*3O%dG6#gm`!(eA7T3Qdg>|YTU8~;fh5o4CXqrat7c0Pm-AbUrC4A 
zyz#(a2mbe8&E?ws-v9U&|K$*%|3@P4{{J)kKgJYp{95(yk5_ZwD Date: Fri, 14 Feb 2025 20:52:03 +0200 Subject: [PATCH 29/85] add disclosure --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 6546d1bc..1b627dbb 100644 --- a/README.md +++ b/README.md @@ -5,6 +5,8 @@

Decentralized Compute Infrastructure for AI

+> ⚠️ **IMPORTANT**: This project is still under active development. Currently, you can only run the protocol locally - connecting to public RPCs is not yet supported. Please check back later for updates. + Prime Network is a peer-to-peer compute and intelligence network that enables decentralized AI development at scale. This repository contains the core infrastructure for contributing compute resources to the network, including workers, validators, and the coordination layer. ## 📚 Table of Contents From 4c6e15b78a4f2a81c44146b25f3b75818da2b5b1 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Wed, 26 Feb 2025 19:56:50 +0100 Subject: [PATCH 30/85] add video to remote gpu setup in docs (#128) * add video to remote gpu setup in docs --- README.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/README.md b/README.md index 1b627dbb..3facd1bb 100644 --- a/README.md +++ b/README.md @@ -165,6 +165,22 @@ To gracefully shutdown all services: make down ``` +### Remote GPU Development Setup + +https://github.com/user-attachments/assets/8b25ad50-7183-4dd5-add6-f9acf3852b03 + +Start the local development environment: +``` +make up +``` +Set up your remote GPU worker: +1. Provision a GPU instance and ensure Docker is installed +2. 
Configure environment variables and start the remote worker: +``` +SSH_CONNECTION="ssh your-ssh-conn string" +EXTERNAL_IP="your-external-ip" +make remote-worker +``` ## Community - [Discord](https://discord.gg/primeintellect) From 047a514a321a1a4c901e7016b78ced75def60c89 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Thu, 27 Feb 2025 14:32:08 +0100 Subject: [PATCH 31/85] fix submodule setup (#129) --- .gitmodules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitmodules b/.gitmodules index 844c76b5..6750372b 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,3 @@ [submodule "smart-contracts"] path = smart-contracts - url = git@github.com:PrimeIntellect-ai/smart-contracts.git + url = https://github.com/PrimeIntellect-ai/smart-contracts.git From a46c301ea8eedc368763f88201bb4dff53946960 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Thu, 27 Feb 2025 14:36:41 +0100 Subject: [PATCH 32/85] Fix/submodule path (#130) * fix submodule setup * improve readme --- README.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 3facd1bb..9a43d9d0 100644 --- a/README.md +++ b/README.md @@ -57,7 +57,7 @@ Before running Prime Protocol, ensure you have the following requirements: ### 1. Clone Repository ```bash -git clone --recurse-submodules https://github.com/prime-ai/protocol.git +git clone --recurse-submodules https://github.com/PrimeIntellect-ai/protocol cd protocol git submodule update --init --recursive ``` @@ -85,7 +85,11 @@ gem install tmuxinator ``` ### 3. 
Configure Environment -- Enable "Allow the default Docker socket to be used" in Docker Desktop settings +- Enable "Allow the default Docker socket to be used" in Docker Desktop settings (MacOS) +- On Ubuntu, add your user to the docker group: +```bash +sudo usermod -aG docker $USER +``` - Create `.env` files in base folder and discovery folder ## Development From 10ea9d72ae08886edaa73b79fd228cbd52abfe44 Mon Sep 17 00:00:00 2001 From: Jannik Straube Date: Fri, 7 Mar 2025 10:59:28 +0100 Subject: [PATCH 33/85] do not run format on xl-runner --- .github/workflows/checks.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index d463ddde..7539be01 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -19,7 +19,6 @@ env: jobs: check: name: Format & Lint - runs-on: xl-runner steps: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@master From e217951661574d8b1e2b047e44be4d257a9618d6 Mon Sep 17 00:00:00 2001 From: Jannik Straube Date: Fri, 7 Mar 2025 16:11:06 +0100 Subject: [PATCH 34/85] fix runner setup --- .github/workflows/checks.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index 7539be01..c82969f2 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -19,6 +19,7 @@ env: jobs: check: name: Format & Lint + runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@master From b2fe49d72fdfef28d1f69d3235374cae7ea285bc Mon Sep 17 00:00:00 2001 From: JannikSt Date: Fri, 7 Mar 2025 17:08:53 +0100 Subject: [PATCH 35/85] add shared volume to containers to support model downloads (#134) --- worker/src/docker/docker_manager.rs | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/worker/src/docker/docker_manager.rs b/worker/src/docker/docker_manager.rs index a4963e17..fe4ae750 100644 --- a/worker/src/docker/docker_manager.rs +++ 
b/worker/src/docker/docker_manager.rs @@ -112,6 +112,7 @@ impl DockerManager { let mut final_volumes = Vec::new(); if self.storage_path.is_some() { + // Create task-specific data volume let volume_name = format!("{}_data", name); let path = format!( "{}/{}", @@ -134,6 +135,25 @@ impl DockerManager { .await?; final_volumes.push((volume_name, "/data".to_string(), false)); + + // Create shared volume if it doesn't exist + let shared_path = format!("{}/shared", self.storage_path.clone().unwrap()); + std::fs::create_dir_all(&shared_path)?; + + self.docker + .create_volume(CreateVolumeOptions { + name: "shared_data".to_string(), + driver: "local".to_string(), + driver_opts: HashMap::from([ + ("type".to_string(), "none".to_string()), + ("o".to_string(), "bind".to_string()), + ("device".to_string(), shared_path), + ]), + labels: HashMap::new(), + }) + .await?; + + final_volumes.push(("shared_data".to_string(), "/shared".to_string(), false)); } self.pull_image(image).await?; From 65753cccba1a888590047dacf0094e6caae6cd04 Mon Sep 17 00:00:00 2001 From: Kemal Erdem Date: Fri, 7 Mar 2025 17:21:32 +0100 Subject: [PATCH 36/85] - Readme improvements (#131) - Use modern docker command - Ignore all .pem keys Co-authored-by: JannikSt --- .gitignore | 4 +++- .tmuxinator.yml | 2 +- Makefile | 2 +- README.md | 19 ++++++++++++++++++- 4 files changed, 23 insertions(+), 4 deletions(-) diff --git a/.gitignore b/.gitignore index b33d0b9b..b930be96 100644 --- a/.gitignore +++ b/.gitignore @@ -24,4 +24,6 @@ Thumbs.db /private/ .env -.env.remote \ No newline at end of file +.env.remote + +*.pem \ No newline at end of file diff --git a/.tmuxinator.yml b/.tmuxinator.yml index 25ea8143..34f25cd9 100644 --- a/.tmuxinator.yml +++ b/.tmuxinator.yml @@ -11,5 +11,5 @@ windows: - background: layout: even-horizontal panes: - - bash -c 'tmux rename-window "Prime Dev Services" && docker-compose up' + - bash -c 'tmux rename-window "Prime Dev Services" && docker compose up' - bash -c 'while true; do make 
whitelist-provider; sleep 10; done' \ No newline at end of file diff --git a/Makefile b/Makefile index 704dc22d..bbdb5e50 100644 --- a/Makefile +++ b/Makefile @@ -56,7 +56,7 @@ setup-dev-env: up: tmuxinator start prime-dev down: - docker-compose down + docker compose down tmuxinator stop prime-dev pkill -f "target/debug/worker" 2>/dev/null || true pkill -f "target/debug/orchestrator" 2>/dev/null || true diff --git a/README.md b/README.md index 9a43d9d0..9395ac36 100644 --- a/README.md +++ b/README.md @@ -57,7 +57,7 @@ Before running Prime Protocol, ensure you have the following requirements: ### 1. Clone Repository ```bash -git clone --recurse-submodules https://github.com/PrimeIntellect-ai/protocol +git clone https://github.com/PrimeIntellect-ai/protocol.git cd protocol git submodule update --init --recursive ``` @@ -66,6 +66,10 @@ git submodule update --init --recursive ```bash # Install Foundry curl -L https://foundry.paradigm.xyz | bash + +# Reload .bashrc (or .bash_profile, depends on the system) +source ~/.bashrc + foundryup # Install Rust @@ -80,8 +84,21 @@ brew install redis # Install Redis (Ubuntu) # sudo apt-get install redis-server +# Install Ruby (MacOS) +brew install ruby + +# Install Ruby (Ubuntu) +# sudo apt-get install redis-server + # Install tmuxinator (do not use brew) gem install tmuxinator + +# Install Tmux (MacOS) +brew install tmux + +# Install Tmux (Ubuntu) +#sudo apt install tmux +#sudo apt-get install libssl-dev ``` ### 3. 
Configure Environment From 36bb0055becd7529c04eeedd1f307ec29148a177 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Mon, 10 Mar 2025 11:39:34 +0100 Subject: [PATCH 37/85] feature/toploc-integration (#127) * add latest changes from smart contracts to include work submission logic * start local setup with deployed work validation contract * ability to submit sample work via python example file and regularly query from validator side * support e2e flow on worker submission from genesys image to chain * support secure file upload to google s3 * allow validator to reject work --------- Co-authored-by: Matthew Di Ferrante --- .tmuxinator.yml | 4 +- Cargo.lock | 437 ++++++++++++++++-- Cargo.toml | 2 +- Makefile | 9 +- dev-utils/examples/create_domain.rs | 2 + discovery/src/api/routes/get_nodes.rs | 4 - docker-compose.yml | 6 +- ...a36d4940a5b8c40a989452d2304fc958ff3f354e7a | 1 + ...7bcd49e0-1e99-45a8-b9bb-ffb58f0f1f12.jsonl | 1 + examples/python/work_validation.py | 39 ++ orchestrator/Cargo.toml | 4 + orchestrator/src/api/routes/mod.rs | 1 + orchestrator/src/api/routes/storage.rs | 59 +++ orchestrator/src/api/server.rs | 5 + orchestrator/src/api/tests/helper.rs | 1 + orchestrator/src/main.rs | 7 +- orchestrator/src/utils/google_cloud.rs | 42 ++ orchestrator/src/utils/mod.rs | 1 + shared/Cargo.toml | 1 + shared/artifacts/abi/compute_pool.json | 52 +++ shared/artifacts/abi/deployments.json | 12 +- shared/artifacts/abi/prime_network.json | 23 + .../abi/synthetic_data_work_validator.json | 304 ++++++++++++ shared/src/web3/contracts/core/builder.rs | 16 + shared/src/web3/contracts/core/contract.rs | 4 + .../implementations/compute_pool_contract.rs | 18 + .../src/web3/contracts/implementations/mod.rs | 1 + .../implementations/prime_network_contract.rs | 42 ++ .../implementations/work_validators/mod.rs | 1 + .../synthetic_data_validator.rs | 168 +++++++ smart-contracts | 2 +- validator/Cargo.toml | 6 + validator/src/main.rs | 211 +++------ validator/src/validators/hardware.rs | 
164 +++++++ validator/src/validators/mod.rs | 10 + validator/src/validators/synthetic_data.rs | 353 ++++++++++++++ worker/Cargo.toml | 1 + worker/src/cli/command.rs | 20 +- worker/src/docker/service.rs | 1 - worker/src/docker/taskbridge/bridge.rs | 198 ++++++-- worker/src/docker/taskbridge/file_handler.rs | 155 +++++++ worker/src/docker/taskbridge/mod.rs | 1 + worker/src/main.rs | 1 + worker/src/operations/heartbeat/mod.rs | 1 - worker/src/operations/heartbeat/service.rs | 19 +- worker/src/state/mod.rs | 1 + .../state.rs => state/system_state.rs} | 30 +- 47 files changed, 2184 insertions(+), 257 deletions(-) create mode 100644 examples/python/5aa762ae383fbb727af3c7a36d4940a5b8c40a989452d2304fc958ff3f354e7a create mode 100644 examples/python/out_7bcd49e0-1e99-45a8-b9bb-ffb58f0f1f12.jsonl create mode 100644 examples/python/work_validation.py create mode 100644 orchestrator/src/api/routes/storage.rs create mode 100644 orchestrator/src/utils/google_cloud.rs create mode 100644 orchestrator/src/utils/mod.rs create mode 100644 shared/artifacts/abi/synthetic_data_work_validator.json create mode 100644 shared/src/web3/contracts/implementations/work_validators/mod.rs create mode 100644 shared/src/web3/contracts/implementations/work_validators/synthetic_data_validator.rs create mode 100644 validator/src/validators/hardware.rs create mode 100644 validator/src/validators/mod.rs create mode 100644 validator/src/validators/synthetic_data.rs create mode 100644 worker/src/docker/taskbridge/file_handler.rs create mode 100644 worker/src/state/mod.rs rename worker/src/{operations/heartbeat/state.rs => state/system_state.rs} (86%) diff --git a/.tmuxinator.yml b/.tmuxinator.yml index 34f25cd9..33d13ab2 100644 --- a/.tmuxinator.yml +++ b/.tmuxinator.yml @@ -4,12 +4,12 @@ windows: - services: layout: even-horizontal panes: - - bash -c 'tmux select-pane -T "Worker" && sleep 5 && cd smart-contracts && sh deploy.sh && cd .. 
&& make setup && clear' + - bash -c 'tmux select-pane -T "Worker" && sleep 5 && cd smart-contracts && sh deploy.sh && sh deploy_work_validation.sh && cd .. && make setup && clear' - bash -c 'tmux select-pane -T "Discovery" && sleep 10 && make watch-discovery' - bash -c 'tmux select-pane -T "Validator" && sleep 15 && make watch-validator' - bash -c 'tmux select-pane -T "Orchestrator" && sleep 20 && make watch-orchestrator' - background: - layout: even-horizontal + layout: even-horizontal panes: - bash -c 'tmux rename-window "Prime Dev Services" && docker compose up' - bash -c 'while true; do make whitelist-provider; sleep 10; done' \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 4fe7113f..2d568126 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -30,7 +30,7 @@ dependencies = [ "actix-service", "actix-utils", "ahash", - "base64", + "base64 0.22.1", "bitflags 2.6.0", "brotli", "bytes", @@ -368,7 +368,7 @@ dependencies = [ "itoa", "serde", "serde_json", - "winnow", + "winnow 0.6.24", ] [[package]] @@ -528,7 +528,7 @@ dependencies = [ "proptest", "rand 0.8.5", "ruint", - "rustc-hash", + "rustc-hash 2.1.0", "serde", "sha3", "tiny-keccak", @@ -799,7 +799,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1eda2711ab2e1fb517fc6e2ffa9728c9a232e296d16810810e6957b781a1b8bc" dependencies = [ "serde", - "winnow", + "winnow 0.6.24", ] [[package]] @@ -822,7 +822,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d17722a198f33bbd25337660787aea8b8f57814febb7c746bc30407bdfc39448" dependencies = [ "alloy-json-rpc", - "base64", + "base64 0.22.1", "futures-util", "futures-utils-wasm", "serde", @@ -1286,9 +1286,9 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.85" +version = "0.1.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" 
+checksum = "d556ec1359574147ec0c4fc5eb525f3f23263a592b1a9c07e0a75b427de55c97" dependencies = [ "proc-macro2", "quote", @@ -1340,6 +1340,31 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +[[package]] +name = "aws-lc-rs" +version = "1.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e4e8200b9a4a5801a769d50eeabc05670fec7e959a8cb7a63a93e4e519942ae" +dependencies = [ + "aws-lc-sys", + "paste", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f9dd2e03ee80ca2822dd6ea431163d2ef259f2066a4d6ccaca6d9dcb386aa43" +dependencies = [ + "bindgen", + "cc", + "cmake", + "dunce", + "fs_extra", + "paste", +] + [[package]] name = "backtrace" version = "0.3.74" @@ -1361,6 +1386,12 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + [[package]] name = "base64" version = "0.22.1" @@ -1379,6 +1410,29 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" +[[package]] +name = "bindgen" +version = "0.69.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" +dependencies = [ + "bitflags 2.6.0", + "cexpr", + "clang-sys", + "itertools 0.10.5", + "lazy_static", + "lazycell", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash 1.1.0", + "shlex", + "syn 2.0.92", + "which", +] + [[package]] name = "bit-set" version = 
"0.5.3" @@ -1464,7 +1518,7 @@ version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97ccca1260af6a459d75994ad5acc1651bcabcbdbc41467cc9786519ab854c30" dependencies = [ - "base64", + "base64 0.22.1", "bollard-stubs", "bytes", "futures-core", @@ -1649,6 +1703,15 @@ dependencies = [ "shlex", ] +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom 7.1.3", +] + [[package]] name = "cfg-if" version = "0.1.10" @@ -1669,9 +1732,9 @@ checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chrono" -version = "0.4.39" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" +checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" dependencies = [ "android-tzdata", "iana-time-zone", @@ -1679,7 +1742,18 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.6", + "windows-link", +] + +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", ] [[package]] @@ -1750,6 +1824,15 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "cmake" +version = "0.1.54" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" +dependencies = [ + "cc", +] + [[package]] name = "colorchoice" version = "1.0.3" @@ -2038,6 +2121,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ "const-oid", + "pem-rfc7468", 
"zeroize", ] @@ -2068,7 +2152,16 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d13202debe11181040ae9063d739fa32cfcaaebe2275fe387703460ae2365b30" dependencies = [ - "derive_builder_macro", + "derive_builder_macro 0.10.2", +] + +[[package]] +name = "derive_builder" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" +dependencies = [ + "derive_builder_macro 0.20.2", ] [[package]] @@ -2083,16 +2176,38 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "derive_builder_core" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" +dependencies = [ + "darling 0.20.10", + "proc-macro2", + "quote", + "syn 2.0.92", +] + [[package]] name = "derive_builder_macro" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "58a94ace95092c5acb1e97a7e846b310cfbd499652f72297da7493f618a98d73" dependencies = [ - "derive_builder_core", + "derive_builder_core 0.10.2", "syn 1.0.109", ] +[[package]] +name = "derive_builder_macro" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" +dependencies = [ + "derive_builder_core 0.20.2", + "syn 2.0.92", +] + [[package]] name = "derive_more" version = "0.99.18" @@ -2545,6 +2660,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "fsevent" version = "0.4.0" @@ -2712,8 +2833,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if 1.0.0", + "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", + "wasm-bindgen", ] [[package]] @@ -2753,6 +2876,101 @@ dependencies = [ "regex", ] +[[package]] +name = "google-cloud-auth" +version = "0.17.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e57a13fbacc5e9c41ded3ad8d0373175a6b7a6ad430d99e89d314ac121b7ab06" +dependencies = [ + "async-trait", + "base64 0.21.7", + "google-cloud-metadata", + "google-cloud-token", + "home", + "jsonwebtoken", + "reqwest", + "serde", + "serde_json", + "thiserror 1.0.69", + "time", + "tokio", + "tracing", + "urlencoding", +] + +[[package]] +name = "google-cloud-auth" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5572275b7f06b6fde8eec61a23d87c83aae362bee586bbeb8773b3f98658ae81" +dependencies = [ + "async-trait", + "base64 0.22.1", + "derive_builder 0.20.2", + "http 1.2.0", + "reqwest", + "rustls", + "rustls-pemfile", + "serde", + "serde_json", + "thiserror 2.0.11", + "time", + "tokio", +] + +[[package]] +name = "google-cloud-metadata" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d901aeb453fd80e51d64df4ee005014f6cf39f2d736dd64f7239c132d9d39a6a" +dependencies = [ + "reqwest", + "thiserror 1.0.69", + "tokio", +] + +[[package]] +name = "google-cloud-storage" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34a73d9e94d35665909050f02e035d8bdc82e419241b1b027ebf1ea51dc8a470" +dependencies = [ + "anyhow", + "async-stream", + "async-trait", + "base64 0.21.7", + "bytes", + "futures-util", + "google-cloud-auth 0.17.2", + "google-cloud-metadata", + "google-cloud-token", + "hex", + "once_cell", + "percent-encoding", + "pkcs8", + "regex", + "reqwest", + "reqwest-middleware", + "ring", + "serde", + "serde_json", + "sha2", + "thiserror 1.0.69", + "time", + "tokio", + 
"tracing", + "url", +] + +[[package]] +name = "google-cloud-token" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f49c12ba8b21d128a2ce8585955246977fbce4415f680ebf9199b6f9d6d725f" +dependencies = [ + "async-trait", +] + [[package]] name = "group" version = "0.13.0" @@ -3422,6 +3640,21 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "jsonwebtoken" +version = "9.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a87cc7a48537badeae96744432de36f4be2b4a34a05a5ef32e9dd8a1c169dde" +dependencies = [ + "base64 0.22.1", + "js-sys", + "pem", + "ring", + "serde", + "serde_json", + "simple_asn1", +] + [[package]] name = "k256" version = "0.13.4" @@ -3556,9 +3789,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.25" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" +checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" [[package]] name = "lru" @@ -3631,6 +3864,22 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" +[[package]] +name = "mime_guess" +version = "2.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" +dependencies = [ + "mime", + "unicase", +] + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "miniz_oxide" version = "0.8.2" @@ -3797,6 +4046,16 @@ dependencies = [ "version_check", ] +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + [[package]] name = "notify" version = "4.0.18" @@ -4065,9 +4324,13 @@ dependencies = [ "actix-web", "alloy", "anyhow", + "base64 0.22.1", + "chrono", "clap 4.5.27", "env_logger 0.11.6", "futures", + "google-cloud-auth 0.18.0", + "google-cloud-storage", "hex", "log", "redis", @@ -4177,6 +4440,25 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" +[[package]] +name = "pem" +version = "3.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" +dependencies = [ + "base64 0.22.1", + "serde", +] + +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.3.1" @@ -4337,6 +4619,16 @@ dependencies = [ "zerocopy 0.7.35", ] +[[package]] +name = "prettyplease" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" +dependencies = [ + "proc-macro2", + "syn 2.0.92", +] + [[package]] name = "primitive-types" version = "0.12.2" @@ -4675,7 +4967,7 @@ version = "0.12.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43e734407157c3c2034e0258f5e4473ddb361b1e85f95a66690d67264d7cd1da" dependencies = [ - "base64", + "base64 0.22.1", "bytes", "encoding_rs", "futures-core", @@ -4692,6 +4984,7 @@ dependencies = [ "js-sys", "log", "mime", + "mime_guess", "native-tls", "once_cell", "percent-encoding", @@ -4704,15 +4997,32 @@ dependencies = [ "system-configuration", "tokio", "tokio-native-tls", + 
"tokio-util", "tower", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", + "wasm-streams", "web-sys", "windows-registry", ] +[[package]] +name = "reqwest-middleware" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64e8975513bd9a7a43aad01030e79b3498e05db14e9d945df6483e8cf9b8c4c4" +dependencies = [ + "anyhow", + "async-trait", + "http 1.2.0", + "reqwest", + "serde", + "thiserror 1.0.69", + "tower-service", +] + [[package]] name = "rfc6979" version = "0.4.0" @@ -4786,6 +5096,12 @@ version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + [[package]] name = "rustc-hash" version = "2.1.0" @@ -4831,10 +5147,12 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.21" +version = "0.23.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f287924602bf649d949c63dc8ac8b235fa5387d394020705b80c4eb597ce5b8" +checksum = "47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395" dependencies = [ + "aws-lc-rs", + "log", "once_cell", "ring", "rustls-pki-types", @@ -4864,6 +5182,7 @@ version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ + "aws-lc-rs", "ring", "rustls-pki-types", "untrusted", @@ -5077,7 +5396,7 @@ version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6b6f7f2fcb69f747921f79f3926bd1e203fce4fef62c268dd3abfb6d86029aa" dependencies = [ - "base64", + "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", @@ -5168,6 +5487,7 @@ dependencies = [ "dashmap", "futures-util", "hex", + "log", "nalgebra", 
"redis", "serde", @@ -5221,6 +5541,18 @@ dependencies = [ "wide", ] +[[package]] +name = "simple_asn1" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" +dependencies = [ + "num-bigint", + "num-traits", + "thiserror 2.0.11", + "time", +] + [[package]] name = "siphasher" version = "1.0.1" @@ -5488,7 +5820,7 @@ checksum = "da31aef70da0f6352dbcb462683eb4dd2bfad01cf3fc96cf204547b9a839a585" dependencies = [ "dirs", "fnv", - "nom", + "nom 5.1.3", "phf", "phf_codegen", ] @@ -5718,9 +6050,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" +checksum = "cd87a5cdd6ffab733b2f74bc4fd7ee5fff6634124999ac278c35fc78c6120148" dependencies = [ "serde", "serde_spanned", @@ -5739,15 +6071,15 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.22" +version = "0.22.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" +checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" dependencies = [ "indexmap 2.7.0", "serde", "serde_spanned", "toml_datetime", - "winnow", + "winnow 0.7.3", ] [[package]] @@ -5876,6 +6208,12 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" +[[package]] +name = "unicase" +version = "2.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" + [[package]] name = "unicode-bidi" version = "0.3.18" @@ -5932,6 +6270,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "urlencoding" +version = "2.1.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + [[package]] name = "utf-8" version = "0.7.6" @@ -5974,7 +6318,9 @@ dependencies = [ "alloy", "anyhow", "clap 4.5.27", + "directories", "env_logger 0.11.6", + "hex", "log", "nalgebra", "rand 0.9.0", @@ -5982,7 +6328,9 @@ dependencies = [ "serde", "serde_json", "shared", + "tempfile", "tokio", + "toml", "url", ] @@ -6171,6 +6519,19 @@ version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6" +[[package]] +name = "wasm-streams" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "wasmtimer" version = "0.4.1" @@ -6193,7 +6554,7 @@ checksum = "38928d7ff5274e31594da2d46453a2c741fa340d1bf0ef6f2cb3e43537361265" dependencies = [ "clearscreen", "command-group", - "derive_builder", + "derive_builder 0.10.2", "glob", "globset", "lazy_static", @@ -6367,6 +6728,12 @@ dependencies = [ "syn 2.0.92", ] +[[package]] +name = "windows-link" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dccfd733ce2b1753b03b6d3c65edf020262ea35e20ccdf3e288043e6dd620e3" + [[package]] name = "windows-registry" version = "0.2.0" @@ -6570,6 +6937,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "winnow" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7f4ea97f6f78012141bcdb6a216b2609f0979ada50b20ca5b52dde2eac2bb1" +dependencies = [ + "memchr", +] + [[package]] name = "wit-bindgen-rt" version = "0.33.0" @@ -6611,6 +6987,7 @@ dependencies = [ "serde", "serde_json", "serial_test", + "sha2", "shared", "strip-ansi-escapes", 
"sysinfo", @@ -6749,7 +7126,7 @@ dependencies = [ "tracing", "uds_windows", "windows-sys 0.59.0", - "winnow", + "winnow 0.6.24", "xdg-home", "zbus_macros", "zbus_names", @@ -6779,7 +7156,7 @@ checksum = "856b7a38811f71846fd47856ceee8bccaec8399ff53fb370247e66081ace647b" dependencies = [ "serde", "static_assertions", - "winnow", + "winnow 0.6.24", "zvariant", ] @@ -6925,7 +7302,7 @@ dependencies = [ "enumflags2", "serde", "static_assertions", - "winnow", + "winnow 0.6.24", "zvariant_derive", "zvariant_utils", ] @@ -6954,5 +7331,5 @@ dependencies = [ "serde", "static_assertions", "syn 2.0.92", - "winnow", + "winnow 0.6.24", ] diff --git a/Cargo.toml b/Cargo.toml index e47f9598..967412ad 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,4 +8,4 @@ edition = "2021" [workspace.features] default = [] -testnet = [] \ No newline at end of file +testnet = [] diff --git a/Makefile b/Makefile index bbdb5e50..f13759e0 100644 --- a/Makefile +++ b/Makefile @@ -20,7 +20,7 @@ transfer-eth-to-pool-owner: create-domain: set -a; source ${ENV_FILE}; set +a; \ - cargo run -p dev-utils --example create_domain -- --domain-name "$${DOMAIN_NAME:-default_domain}" --domain-uri "$${DOMAIN_URI:-http://default.uri}" --key $${PRIVATE_KEY_FEDERATOR} --rpc-url $${RPC_URL} + cargo run -p dev-utils --example create_domain -- --domain-name "$${DOMAIN_NAME:-default_domain}" --domain-uri "$${DOMAIN_URI:-http://default.uri}" --key $${PRIVATE_KEY_FEDERATOR} --validation-logic $${WORK_VALIDATION_CONTRACT} --rpc-url $${RPC_URL} create-training-domain: set -a; source ${ENV_FILE}; set +a; \ @@ -77,11 +77,11 @@ watch-worker: watch-validator: set -a; source ${ENV_FILE}; set +a; \ - cargo watch -w validator/src -x "run --bin validator -- --validator-key $${PRIVATE_KEY_VALIDATOR} --rpc-url $${RPC_URL} " + cargo watch -w validator/src -x "run --bin validator -- --validator-key $${PRIVATE_KEY_VALIDATOR} --rpc-url $${RPC_URL} --pool-id 0 --work-validation-contract $${WORK_VALIDATION_CONTRACT} --leviticus-url 
$${LEVITICUS_URL}" watch-orchestrator: set -a; source ${ENV_FILE}; set +a; \ - cargo watch -w orchestrator/src -x "run --bin orchestrator -- -r $$RPC_URL -k $$POOL_OWNER_PRIVATE_KEY -d 0 -p 8090 -i 10 -u http://localhost:8090" + cargo watch -w orchestrator/src -x "run --bin orchestrator -- -r $$RPC_URL -k $$POOL_OWNER_PRIVATE_KEY -d 0 -p 8090 -i 10 -u http://localhost:8090 --s3-credentials $$S3_CREDENTIALS" build-worker: cargo build --release --bin worker @@ -108,6 +108,9 @@ setup-remote: . "$$HOME/.cargo/env"; \ if ! command -v cargo-watch > /dev/null; then \ cargo install cargo-watch; \ + fi; \ + if ! groups | grep -q docker; then \ + sudo usermod -aG docker $$USER; \ fi' # Setup SSH tunnel diff --git a/dev-utils/examples/create_domain.rs b/dev-utils/examples/create_domain.rs index 5e34a7d1..6e9f6627 100644 --- a/dev-utils/examples/create_domain.rs +++ b/dev-utils/examples/create_domain.rs @@ -48,6 +48,7 @@ async fn main() -> Result<()> { .unwrap(); let domain_name = args.domain_name.clone(); + let validation_logic = Address::from_str(&args.validation_logic).unwrap(); let domain_uri = args.domain_uri.clone(); @@ -56,6 +57,7 @@ async fn main() -> Result<()> { .create_domain(domain_name, validation_logic, domain_uri) .await; println!("Creating domain: {}", args.domain_name); + println!("Validation logic: {}", args.validation_logic); println!("Transaction: {:?}", tx); // TODO: Should print actual domain id here diff --git a/discovery/src/api/routes/get_nodes.rs b/discovery/src/api/routes/get_nodes.rs index bd1109db..bab39415 100644 --- a/discovery/src/api/routes/get_nodes.rs +++ b/discovery/src/api/routes/get_nodes.rs @@ -23,10 +23,6 @@ pub async fn get_nodes_for_pool( let id_clone = pool_id.clone(); let pool_contract_id: U256 = id_clone.parse::().unwrap(); let pool_id: u32 = pool_id.parse().unwrap(); - println!("Pool id: {:?}", pool_id); - println!("Pool contract id: {:?}", pool_contract_id); - let debug_address = req.headers().get("x-address"); - 
println!("Address: {:?}", debug_address); match data.contracts.clone() { Some(contracts) => { diff --git a/docker-compose.yml b/docker-compose.yml index a03d3205..bf0cf496 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -3,7 +3,10 @@ version: "3.8" networks: prime: driver: bridge - attachable: true + attachable: true + app-network: + name: app-network + external: true volumes: redis-data: @@ -17,6 +20,7 @@ services: - "8545:8545" networks: - prime + - app-network restart: always redis: image: redis:alpine diff --git a/examples/python/5aa762ae383fbb727af3c7a36d4940a5b8c40a989452d2304fc958ff3f354e7a b/examples/python/5aa762ae383fbb727af3c7a36d4940a5b8c40a989452d2304fc958ff3f354e7a new file mode 100644 index 00000000..84ed78b6 --- /dev/null +++ b/examples/python/5aa762ae383fbb727af3c7a36d4940a5b8c40a989452d2304fc958ff3f354e7a @@ -0,0 +1 @@ +"hello" \ No newline at end of file diff --git a/examples/python/out_7bcd49e0-1e99-45a8-b9bb-ffb58f0f1f12.jsonl b/examples/python/out_7bcd49e0-1e99-45a8-b9bb-ffb58f0f1f12.jsonl new file mode 100644 index 00000000..84ed78b6 --- /dev/null +++ b/examples/python/out_7bcd49e0-1e99-45a8-b9bb-ffb58f0f1f12.jsonl @@ -0,0 +1 @@ +"hello" \ No newline at end of file diff --git a/examples/python/work_validation.py b/examples/python/work_validation.py new file mode 100644 index 00000000..1740dc21 --- /dev/null +++ b/examples/python/work_validation.py @@ -0,0 +1,39 @@ +from random import randint +import socket +import time +import json +import os +import platform + +def get_default_socket_path() -> str: + """Returns the default socket path based on the operating system.""" + return "/tmp/com.prime.worker/metrics.sock" if platform.system() == "Darwin" else "/var/run/com.prime.worker/metrics.sock" + +def send_message(metric: dict, socket_path: str = None) -> bool: + socket_path = socket_path or os.getenv("PRIME_TASK_BRIDGE_SOCKET", get_default_socket_path()) + print("Sending message to socket: ", socket_path) + + try: + with 
socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock: + sock.connect(socket_path) + message = json.dumps(metric) + "\n" + sock.sendall(message.encode()) + return True + except Exception as e: + print(f"Failed to send message: {e}") + return False + +if __name__ == "__main__": + # Generate random SHA-256 hash (64 hex characters) + file_sha = ''.join([format(randint(0, 15), 'x') for _ in range(64)]) + file_name = "out_7bcd49e0-1e99-45a8-b9bb-ffb58f0f1f12.jsonl" + + if send_message({"label": "file_name", "value": file_name, "task_id": "4063bbd7-c458-4cd3-b082-6c2ea8f0e46a"}): + print(f"Sent: {file_name}") + else: + print(f"Failed to send: {file_name}") + + if send_message({"label": "file_sha", "value": file_sha, "task_id": "4063bbd7-c458-4cd3-b082-6c2ea8f0e46a"}): + print(f"Sent: {file_sha}") + else: + print(f"Failed to send: {file_sha}") \ No newline at end of file diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index 1aef7318..e4d6815e 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -7,9 +7,13 @@ edition.workspace = true actix-web = "4.9.0" alloy = "0.9.2" anyhow = "1.0.95" +base64 = "0.22.1" +chrono = "0.4.40" clap = { version = "4.5.26", features = ["derive"] } env_logger = "0.11.6" futures = "0.3.31" +google-cloud-auth = "0.18.0" +google-cloud-storage = "0.24.0" hex = "0.4.3" log = "0.4.25" redis = "0.28.1" diff --git a/orchestrator/src/api/routes/mod.rs b/orchestrator/src/api/routes/mod.rs index 65370c4b..b1232166 100644 --- a/orchestrator/src/api/routes/mod.rs +++ b/orchestrator/src/api/routes/mod.rs @@ -1,4 +1,5 @@ pub mod heartbeat; pub mod metrics; pub mod nodes; +pub mod storage; pub mod task; diff --git a/orchestrator/src/api/routes/storage.rs b/orchestrator/src/api/routes/storage.rs new file mode 100644 index 00000000..c2fa5660 --- /dev/null +++ b/orchestrator/src/api/routes/storage.rs @@ -0,0 +1,59 @@ +use crate::api::server::AppState; +use crate::utils::google_cloud::generate_upload_signed_url; +use actix_web::{ 
+ web::{self, post, Data}, + HttpResponse, Scope, +}; +use std::time::Duration; + +#[derive(serde::Deserialize)] +pub struct RequestUploadRequest { + pub file_name: String, + pub file_size: u64, + pub file_type: String, +} + +async fn request_upload( + request_upload: web::Json, + app_state: Data, +) -> HttpResponse { + let file_name = &request_upload.file_name; + let file_size = &request_upload.file_size; + let file_type = &request_upload.file_type; + println!("request_upload: {} {} {}", file_name, file_size, file_type); + + // Get credentials from app state + let credentials = match &app_state.s3_credentials { + Some(creds) => creds, + None => { + return HttpResponse::InternalServerError().json(serde_json::json!({ + "success": false, + "error": "Storage credentials not configured" + })) + } + }; + + // Generate signed upload URL + match generate_upload_signed_url( + "protocol-development-bucket", // TODO: Make configurable + file_name, + credentials, + Some(file_type.to_string()), + Duration::from_secs(3600), // 1 hour expiry + ) + .await + { + Ok(signed_url) => HttpResponse::Ok().json(serde_json::json!({ + "success": true, + "signed_url": signed_url + })), + Err(e) => HttpResponse::InternalServerError().json(serde_json::json!({ + "success": false, + "error": format!("Failed to generate upload URL: {}", e) + })), + } +} + +pub fn storage_routes() -> Scope { + web::scope("/storage").route("/request-upload", post().to(request_upload)) +} diff --git a/orchestrator/src/api/server.rs b/orchestrator/src/api/server.rs index 02b4aea3..628750bb 100644 --- a/orchestrator/src/api/server.rs +++ b/orchestrator/src/api/server.rs @@ -1,4 +1,5 @@ use crate::api::routes::nodes::nodes_routes; +use crate::api::routes::storage::storage_routes; use crate::api::routes::task::tasks_routes; use crate::api::routes::{heartbeat::heartbeat_routes, metrics::metrics_routes}; use crate::store::core::StoreContext; @@ -16,6 +17,7 @@ use std::sync::Arc; pub struct AppState { pub store_context: 
Arc, pub wallet: Arc, + pub s3_credentials: Option, } pub async fn start_server( @@ -24,11 +26,13 @@ pub async fn start_server( store_context: Arc, wallet: Arc, admin_api_key: String, + s3_credentials: Option, ) -> Result<(), Error> { info!("Starting server at http://{}:{}", host, port); let app_state = Data::new(AppState { store_context, wallet, + s3_credentials, }); let node_store = app_state.store_context.node_store.clone(); let node_store_clone = node_store.clone(); @@ -47,6 +51,7 @@ pub async fn start_server( .wrap(NormalizePath::new(TrailingSlash::Trim)) .app_data(web::PayloadConfig::default().limit(2_097_152)) .service(heartbeat_routes().wrap(ValidateSignature::new(validator_state.clone()))) + .service(storage_routes().wrap(ValidateSignature::new(validator_state.clone()))) .service(nodes_routes().wrap(api_key_middleware.clone())) .service(tasks_routes().wrap(api_key_middleware.clone())) .service(metrics_routes().wrap(api_key_middleware.clone())) diff --git a/orchestrator/src/api/tests/helper.rs b/orchestrator/src/api/tests/helper.rs index 4a3f6c96..6ccca2d8 100644 --- a/orchestrator/src/api/tests/helper.rs +++ b/orchestrator/src/api/tests/helper.rs @@ -40,6 +40,7 @@ pub async fn create_test_app_state() -> Data { ) .unwrap(), ), + s3_credentials: None, }) } diff --git a/orchestrator/src/main.rs b/orchestrator/src/main.rs index 65e8f348..bad94ef9 100644 --- a/orchestrator/src/main.rs +++ b/orchestrator/src/main.rs @@ -3,6 +3,7 @@ mod discovery; mod models; mod node; mod store; +mod utils; use crate::api::server::start_server; use crate::discovery::monitor::DiscoveryMonitor; use crate::node::invite::NodeInviter; @@ -68,6 +69,10 @@ struct Args { /// Disable instance ejection from chain #[arg(long)] disable_ejection: bool, + + /// S3 credentials + #[arg(long)] + s3_credentials: Option, } #[tokio::main] @@ -149,7 +154,7 @@ async fn main() -> Result<()> { let server_store_context = store_context.clone(); tokio::select! 
{ - res = start_server("0.0.0.0", port, server_store_context.clone(), server_wallet, args.admin_api_key) => { + res = start_server("0.0.0.0", port, server_store_context.clone(), server_wallet, args.admin_api_key, args.s3_credentials) => { if let Err(e) = res { error!("Server error: {}", e); } diff --git a/orchestrator/src/utils/google_cloud.rs b/orchestrator/src/utils/google_cloud.rs new file mode 100644 index 00000000..24162d29 --- /dev/null +++ b/orchestrator/src/utils/google_cloud.rs @@ -0,0 +1,42 @@ +use anyhow::Result; +use base64::{engine::general_purpose, Engine as _}; +use google_cloud_storage::client::google_cloud_auth::credentials::CredentialsFile; +use google_cloud_storage::client::{Client, ClientConfig}; +use google_cloud_storage::sign::{SignedURLMethod, SignedURLOptions}; +use std::time::Duration; + +pub async fn generate_upload_signed_url( + bucket: &str, + object_path: &str, + credentials_base64: &str, + content_type: Option, + expiration: Duration, +) -> Result { + // Decode base64 to JSON string + let credentials_json = general_purpose::STANDARD.decode(credentials_base64)?; + let credentials_str = String::from_utf8(credentials_json)?; + + // Create client config directly from the JSON string + let credentials = CredentialsFile::new_from_str(&credentials_str) + .await + .unwrap(); + let config = ClientConfig::default() + .with_credentials(credentials) + .await + .unwrap(); + let client = Client::new(config); + + // Set options for the signed URL + let options = SignedURLOptions { + method: SignedURLMethod::PUT, + expires: expiration, + content_type, + ..Default::default() + }; + + // Generate the signed URL + let signed_url = client + .signed_url(bucket, object_path, None, None, options) + .await?; + Ok(signed_url) +} diff --git a/orchestrator/src/utils/mod.rs b/orchestrator/src/utils/mod.rs new file mode 100644 index 00000000..e449e975 --- /dev/null +++ b/orchestrator/src/utils/mod.rs @@ -0,0 +1 @@ +pub mod google_cloud; diff --git 
a/shared/Cargo.toml b/shared/Cargo.toml index 2b7e80ce..b1c0d911 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -25,3 +25,4 @@ redis = "0.28.1" dashmap = "6.1.0" anyhow = "1.0.95" nalgebra = "0.33.2" +log = "0.4.26" diff --git a/shared/artifacts/abi/compute_pool.json b/shared/artifacts/abi/compute_pool.json index 7f0289e9..8b8f5e41 100644 --- a/shared/artifacts/abi/compute_pool.json +++ b/shared/artifacts/abi/compute_pool.json @@ -618,6 +618,35 @@ ], "stateMutability": "view" }, + { + "type": "function", + "name": "invalidateWork", + "inputs": [ + { + "name": "poolId", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ], + "outputs": [ + { + "name": "", + "type": "address", + "internalType": "address" + }, + { + "name": "", + "type": "address", + "internalType": "address" + } + ], + "stateMutability": "nonpayable" + }, { "type": "function", "name": "isNodeBlacklistedFromPool", @@ -988,6 +1017,29 @@ "outputs": [], "stateMutability": "nonpayable" }, + { + "type": "function", + "name": "submitWork", + "inputs": [ + { + "name": "poolId", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "node", + "type": "address", + "internalType": "address" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, { "type": "function", "name": "supportsInterface", diff --git a/shared/artifacts/abi/deployments.json b/shared/artifacts/abi/deployments.json index b80c811a..3be4ad7b 100644 --- a/shared/artifacts/abi/deployments.json +++ b/shared/artifacts/abi/deployments.json @@ -1,8 +1,8 @@ { - "AIToken": "0x959922bE3CAee4b8Cd9a407cc3ac1C251C2007B1", - "ComputePool": "0x4ed7c70F96B99c776995fB64377f0d4aB3B0e1C1", - "ComputeRegistry": "0x68B1D87F95878fE05B998F19b66F4baba5De1aed", - "DomainRegistry": "0x3Aa5ebB10DC797CAC828524e59A333d0A371443c", - "PrimeNetwork": "0x9A9f2CCfdE556A7E9Ff0848998Aa4a0CFD8863AE", 
- "StakeManager": "0xc6e7DF5E7b4f2A278906862b61205850344D4e7d" + "AIToken": "0x5FbDB2315678afecb367f032d93F642f64180aa3", + "ComputePool": "0x0165878A594ca255338adfa4d48449f69242Eb8F", + "ComputeRegistry": "0x9fE46736679d2D9a65F0992F2272dE9f3c7fa6e0", + "DomainRegistry": "0xCf7Ed3AccA5a467e9e704C703E8D87F634fB0Fc9", + "PrimeNetwork": "0xe7f1725E7734CE288F8367e1Bb143E90bb3F0512", + "StakeManager": "0xDc64a140Aa3E981100a9becA4E685f962f0cF6C9" } \ No newline at end of file diff --git a/shared/artifacts/abi/prime_network.json b/shared/artifacts/abi/prime_network.json index 546d8b58..aa965c6e 100644 --- a/shared/artifacts/abi/prime_network.json +++ b/shared/artifacts/abi/prime_network.json @@ -335,6 +335,29 @@ "outputs": [], "stateMutability": "nonpayable" }, + { + "type": "function", + "name": "invalidateWork", + "inputs": [ + { + "name": "poolId", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "penalty", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, { "type": "function", "name": "registerProvider", diff --git a/shared/artifacts/abi/synthetic_data_work_validator.json b/shared/artifacts/abi/synthetic_data_work_validator.json new file mode 100644 index 00000000..78d94eec --- /dev/null +++ b/shared/artifacts/abi/synthetic_data_work_validator.json @@ -0,0 +1,304 @@ +[ + { + "type": "constructor", + "inputs": [ + { + "name": "_domainId", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "_computePool", + "type": "address", + "internalType": "address" + }, + { + "name": "_workValidityPeriod", + "type": "uint256", + "internalType": "uint256" + } + ], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "getInvalidWorkKeys", + "inputs": [ + { + "name": "poolId", + "type": "uint256", + "internalType": "uint256" + } + ], + "outputs": [ + { + "name": "", + "type": "bytes32[]", + 
"internalType": "bytes32[]" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "getInvalidWorkSince", + "inputs": [ + { + "name": "poolId", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "timestamp", + "type": "uint256", + "internalType": "uint256" + } + ], + "outputs": [ + { + "name": "", + "type": "bytes32[]", + "internalType": "bytes32[]" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "getWorkInfo", + "inputs": [ + { + "name": "poolId", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "workKey", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "outputs": [ + { + "name": "", + "type": "tuple", + "internalType": "struct SyntheticDataWorkValidator.WorkInfo", + "components": [ + { + "name": "provider", + "type": "address", + "internalType": "address" + }, + { + "name": "nodeId", + "type": "address", + "internalType": "address" + }, + { + "name": "timestamp", + "type": "uint64", + "internalType": "uint64" + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "getWorkKeys", + "inputs": [ + { + "name": "poolId", + "type": "uint256", + "internalType": "uint256" + } + ], + "outputs": [ + { + "name": "", + "type": "bytes32[]", + "internalType": "bytes32[]" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "getWorkSince", + "inputs": [ + { + "name": "poolId", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "timestamp", + "type": "uint256", + "internalType": "uint256" + } + ], + "outputs": [ + { + "name": "", + "type": "bytes32[]", + "internalType": "bytes32[]" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "getWorkValidity", + "inputs": [ + { + "name": "poolId", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "workKey", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "outputs": [ + { + "name": "", + "type": "int256", + 
"internalType": "int256" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "invalidateWork", + "inputs": [ + { + "name": "poolId", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ], + "outputs": [ + { + "name": "", + "type": "address", + "internalType": "address" + }, + { + "name": "", + "type": "address", + "internalType": "address" + } + ], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "submitWork", + "inputs": [ + { + "name": "_domainId", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "poolId", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "provider", + "type": "address", + "internalType": "address" + }, + { + "name": "nodeId", + "type": "address", + "internalType": "address" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ], + "outputs": [ + { + "name": "", + "type": "bool", + "internalType": "bool" + } + ], + "stateMutability": "nonpayable" + }, + { + "type": "event", + "name": "WorkInvalidated", + "inputs": [ + { + "name": "poolId", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "provider", + "type": "address", + "indexed": false, + "internalType": "address" + }, + { + "name": "nodeId", + "type": "address", + "indexed": false, + "internalType": "address" + }, + { + "name": "workKey", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "WorkSubmitted", + "inputs": [ + { + "name": "poolId", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "provider", + "type": "address", + "indexed": false, + "internalType": "address" + }, + { + "name": "nodeId", + "type": "address", + "indexed": false, + "internalType": "address" + }, + { + "name": "workKey", + "type": "bytes32", + "indexed": false, + 
"internalType": "bytes32" + } + ], + "anonymous": false + } +] diff --git a/shared/src/web3/contracts/core/builder.rs b/shared/src/web3/contracts/core/builder.rs index 729a3fa5..55b76d75 100644 --- a/shared/src/web3/contracts/core/builder.rs +++ b/shared/src/web3/contracts/core/builder.rs @@ -1,3 +1,5 @@ +use alloy::primitives::Address; + use crate::web3::{ contracts::{ core::error::ContractError, // Using custom error ContractError @@ -5,6 +7,7 @@ use crate::web3::{ ai_token_contract::AIToken, compute_pool_contract::ComputePool, compute_registry_contract::ComputeRegistryContract, prime_network_contract::PrimeNetworkContract, + work_validators::synthetic_data_validator::SyntheticDataWorkValidator, }, }, wallet::Wallet, @@ -17,6 +20,7 @@ pub struct Contracts { pub ai_token: AIToken, pub prime_network: PrimeNetworkContract, pub compute_pool: ComputePool, + pub synthetic_data_validator: Option, } pub struct ContractBuilder<'a> { @@ -25,6 +29,7 @@ pub struct ContractBuilder<'a> { ai_token: Option, prime_network: Option, compute_pool: Option, + synthetic_data_validator: Option, } impl<'a> ContractBuilder<'a> { @@ -35,6 +40,7 @@ impl<'a> ContractBuilder<'a> { ai_token: None, prime_network: None, compute_pool: None, + synthetic_data_validator: None, } } @@ -61,6 +67,15 @@ impl<'a> ContractBuilder<'a> { self } + pub fn with_synthetic_data_validator(mut self, address: Option
) -> Self { + self.synthetic_data_validator = Some(SyntheticDataWorkValidator::new( + address.unwrap_or(Address::ZERO), + self.wallet, + "synthetic_data_work_validator.json", + )); + self + } + // TODO: This is not ideal yet - now you have to init all contracts all the time pub fn build(self) -> Result { // Using custom error ContractError @@ -85,6 +100,7 @@ impl<'a> ContractBuilder<'a> { Some(network) => network, None => return Err(ContractError::Other("PrimeNetwork not initialized".into())), // Custom error handling }, + synthetic_data_validator: self.synthetic_data_validator, }) } } diff --git a/shared/src/web3/contracts/core/contract.rs b/shared/src/web3/contracts/core/contract.rs index e77a51fa..5fc37866 100644 --- a/shared/src/web3/contracts/core/contract.rs +++ b/shared/src/web3/contracts/core/contract.rs @@ -15,6 +15,7 @@ macro_rules! include_abi { }}; } +#[derive(Clone)] pub struct Contract { instance: ContractInstance, WalletProvider, Ethereum>, provider: WalletProvider, @@ -41,6 +42,9 @@ impl Contract { "ai_token.json" => include_abi!("../../../../artifacts/abi/ai_token.json"), "prime_network.json" => include_abi!("../../../../artifacts/abi/prime_network.json"), "compute_pool.json" => include_abi!("../../../../artifacts/abi/compute_pool.json"), + "synthetic_data_work_validator.json" => { + include_abi!("../../../../artifacts/abi/synthetic_data_work_validator.json") + } _ => panic!("Unknown ABI file: {}", path), }; diff --git a/shared/src/web3/contracts/implementations/compute_pool_contract.rs b/shared/src/web3/contracts/implementations/compute_pool_contract.rs index 9928113c..97e75841 100644 --- a/shared/src/web3/contracts/implementations/compute_pool_contract.rs +++ b/shared/src/web3/contracts/implementations/compute_pool_contract.rs @@ -138,6 +138,24 @@ impl ComputePool { Ok(result) } + pub async fn submit_work( + &self, + pool_id: U256, + node: Address, + data: Vec, + ) -> Result, Box> { + let result = self + .instance + .instance() + 
.function("submitWork", &[pool_id.into(), node.into(), data.into()])? + .send() + .await? + .watch() + .await?; + println!("Result: {:?}", result); + Ok(result) + } + pub async fn blacklist_node( &self, pool_id: u32, diff --git a/shared/src/web3/contracts/implementations/mod.rs b/shared/src/web3/contracts/implementations/mod.rs index 65b2a1b5..488905cf 100644 --- a/shared/src/web3/contracts/implementations/mod.rs +++ b/shared/src/web3/contracts/implementations/mod.rs @@ -2,3 +2,4 @@ pub mod ai_token_contract; pub mod compute_pool_contract; pub mod compute_registry_contract; pub mod prime_network_contract; +pub mod work_validators; diff --git a/shared/src/web3/contracts/implementations/prime_network_contract.rs b/shared/src/web3/contracts/implementations/prime_network_contract.rs index dc66653f..9a5e906c 100644 --- a/shared/src/web3/contracts/implementations/prime_network_contract.rs +++ b/shared/src/web3/contracts/implementations/prime_network_contract.rs @@ -5,6 +5,7 @@ use alloy::dyn_abi::DynSolValue; use alloy::primitives::{Address, FixedBytes, U256}; use alloy::providers::Provider; +#[derive(Clone)] pub struct PrimeNetworkContract { pub instance: Contract, } @@ -101,6 +102,26 @@ impl PrimeNetworkContract { Ok(create_domain_tx) } + pub async fn update_validation_logic( + &self, + domain_id: U256, + validation_logic: Address, + ) -> Result, Box> { + let update_validation_logic_tx = self + .instance + .instance() + .function( + "updateValidationLogic", + &[domain_id.into(), validation_logic.into()], + )? + .send() + .await? 
+ .watch() + .await?; + + Ok(update_validation_logic_tx) + } + pub async fn set_stake_minimum( &self, min_stake_amount: U256, @@ -138,4 +159,25 @@ impl PrimeNetworkContract { Ok(whitelist_provider_tx) } + + pub async fn invalidate_work( + &self, + pool_id: U256, + penalty: U256, + data: Vec, + ) -> Result, Box> { + let invalidate_work_tx = self + .instance + .instance() + .function( + "invalidateWork", + &[pool_id.into(), penalty.into(), data.into()], + )? + .send() + .await? + .watch() + .await?; + + Ok(invalidate_work_tx) + } } diff --git a/shared/src/web3/contracts/implementations/work_validators/mod.rs b/shared/src/web3/contracts/implementations/work_validators/mod.rs new file mode 100644 index 00000000..cfa59981 --- /dev/null +++ b/shared/src/web3/contracts/implementations/work_validators/mod.rs @@ -0,0 +1 @@ +pub mod synthetic_data_validator; diff --git a/shared/src/web3/contracts/implementations/work_validators/synthetic_data_validator.rs b/shared/src/web3/contracts/implementations/work_validators/synthetic_data_validator.rs new file mode 100644 index 00000000..2a4ae53e --- /dev/null +++ b/shared/src/web3/contracts/implementations/work_validators/synthetic_data_validator.rs @@ -0,0 +1,168 @@ +use crate::web3::contracts::core::contract::Contract; +use crate::web3::wallet::Wallet; +use alloy::{ + dyn_abi::{DynSolValue, Word}, + primitives::{Address, U256}, +}; +use anyhow::Error; +use log::debug; +use serde::Deserialize; + +#[derive(Clone)] +pub struct SyntheticDataWorkValidator { + pub instance: Contract, +} + +#[derive(Debug, Deserialize)] +pub struct WorkInfo { + pub provider: Address, + pub node_id: Address, + pub timestamp: u64, +} + +impl SyntheticDataWorkValidator { + pub fn new(address: Address, wallet: &Wallet, abi_file_path: &str) -> Self { + let instance = Contract::new(address, wallet, abi_file_path); + Self { instance } + } + + pub async fn get_work_keys(&self, pool_id: U256) -> Result, Error> { + let result = self + .instance + .instance() + 
.function("getWorkKeys", &[pool_id.into()])? + .call() + .await?; + + let array_value = result + .into_iter() + .next() + .ok_or_else(|| Error::msg("No result returned from getWorkKeys"))?; + + let array = array_value + .as_array() + .ok_or_else(|| Error::msg("Result is not an array"))?; + + // Map each value to a hex string + let work_keys = array + .iter() + .map(|value| { + let bytes = value + .as_fixed_bytes() + .ok_or_else(|| Error::msg("Value is not fixed bytes"))?; + + // Ensure we have exactly 32 bytes + if bytes.0.len() != 32 { + return Err(Error::msg(format!( + "Expected 32 bytes, got {}", + bytes.0.len() + ))); + } + + // Convert bytes to string + Ok(hex::encode(bytes.0)) + }) + .collect::, Error>>()?; + + Ok(work_keys) + } + + pub async fn get_work_info(&self, pool_id: U256, work_key: &str) -> Result { + // Convert work_key from hex string to bytes32 + debug!("Processing work key: {}", work_key); + let work_key_bytes = hex::decode(work_key)?; + if work_key_bytes.len() != 32 { + return Err(Error::msg("Work key must be 32 bytes")); + } + debug!("Decoded work key bytes: {:?}", work_key_bytes); + + let fixed_bytes = DynSolValue::FixedBytes(Word::from_slice(&work_key_bytes), 32); + + let result = self + .instance + .instance() + .function("getWorkInfo", &[pool_id.into(), fixed_bytes])? 
+ .call() + .await?; + debug!("Got work info result: {:?}", result); + + let tuple = result + .into_iter() + .next() + .ok_or_else(|| Error::msg("No result returned from getWorkInfo"))?; + + let tuple_array = tuple + .as_tuple() + .ok_or_else(|| Error::msg("Result is not a tuple"))?; + + if tuple_array.len() != 3 { + return Err(Error::msg("Invalid tuple length")); + } + + let provider = tuple_array[0] + .as_address() + .ok_or_else(|| Error::msg("Provider is not an address"))?; + + let node_id = tuple_array[1] + .as_address() + .ok_or_else(|| Error::msg("Node ID is not an address"))?; + + let timestamp = u64::try_from( + tuple_array[2] + .as_uint() + .ok_or_else(|| Error::msg("Timestamp is not a uint"))? + .0, + ) + .map_err(|_| Error::msg("Timestamp conversion failed"))?; + + Ok(WorkInfo { + provider, + node_id, + timestamp, + }) + } + + pub async fn get_work_since( + &self, + pool_id: U256, + timestamp: U256, + ) -> Result, Error> { + let result = self + .instance + .instance() + .function("getWorkSince", &[pool_id.into(), timestamp.into()])? 
+ .call() + .await?; + + let array_value = result + .into_iter() + .next() + .ok_or_else(|| Error::msg("No result returned from getWorkSince"))?; + + let array = array_value + .as_array() + .ok_or_else(|| Error::msg("Result is not an array"))?; + + let work_keys = array + .iter() + .map(|value| { + let bytes = value + .as_fixed_bytes() + .ok_or_else(|| Error::msg("Value is not fixed bytes"))?; + + // Ensure we have exactly 32 bytes + if bytes.0.len() != 32 { + return Err(Error::msg(format!( + "Expected 32 bytes, got {}", + bytes.0.len() + ))); + } + + // Convert bytes to string + Ok(hex::encode(bytes.0)) + }) + .collect::, Error>>()?; + + Ok(work_keys) + } +} diff --git a/smart-contracts b/smart-contracts index 4f7ba13a..a19acc39 160000 --- a/smart-contracts +++ b/smart-contracts @@ -1 +1 @@ -Subproject commit 4f7ba13aa442d42a67db6551f9d107950e39bef2 +Subproject commit a19acc39824d107a16a44b82f75b0972e83f9f75 diff --git a/validator/Cargo.toml b/validator/Cargo.toml index 5b22a7c1..c5520be4 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -8,7 +8,9 @@ actix-web = "4.9.0" alloy = { version = "0.9.2", features = ["full"] } anyhow = "1.0.95" clap = { version = "4.5.26", features = ["derive"] } +directories = "6.0.0" env_logger = "0.11.6" +hex = "0.4.3" log = "0.4.25" nalgebra = "0.33.2" rand = "0.9.0" @@ -17,4 +19,8 @@ serde = "1.0.217" serde_json = "1.0.135" shared = { path = "../shared" } tokio = "1.43.0" +toml = "0.8.20" url = "2.5.4" + +[dev-dependencies] +tempfile = "=3.14.0" diff --git a/validator/src/main.rs b/validator/src/main.rs index 012122c7..3697f103 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1,22 +1,20 @@ +pub mod validators; use actix_web::{web, App, HttpResponse, HttpServer, Responder}; -use alloy::primitives::{hex, Address}; -use alloy::signers::Signer; +use alloy::primitives::Address; use anyhow::{Context, Result}; use clap::Parser; use log::LevelFilter; use log::{error, info}; -use rand::rng; -use rand::Rng; use 
serde_json::json; use shared::models::api::ApiResponse; -use shared::models::challenge::calc_matrix; -use shared::models::challenge::FixedF64; -use shared::models::challenge::{ChallengeRequest, ChallengeResponse}; use shared::models::node::DiscoveryNode; use shared::security::request_signer::sign_request; use shared::web3::contracts::core::builder::ContractBuilder; use shared::web3::wallet::Wallet; +use std::sync::Arc; use url::Url; +use validators::hardware::HardwareValidator; +use validators::synthetic_data::SyntheticDataValidator; async fn health_check() -> impl Responder { HttpResponse::Ok().json(json!({ "status": "ok" })) @@ -35,6 +33,23 @@ struct Args { /// Discovery url #[arg(long, default_value = "http://localhost:8089")] discovery_url: String, + + /// Optional: Work validation contract address + #[arg(long, default_value = None)] + work_validation_contract: Option, + + /// Optional: Pool Id for work validation + /// If not provided, the validator will not validate work + #[arg(long, default_value = None)] + pool_id: Option, + + /// Optional: Work validation interval in seconds + #[arg(long, default_value = "30")] + work_validation_interval: u64, + + /// Optional: Leviticus Validator URL + #[arg(long, default_value = None)] + leviticus_url: Option, } fn main() { let runtime = tokio::runtime::Runtime::new().unwrap(); @@ -64,22 +79,55 @@ fn main() { } }); + let work_validation_address: Option
= args + .work_validation_contract + .map(|address| address.parse::
().unwrap()); + let contracts = ContractBuilder::new(&validator_wallet) .with_compute_registry() .with_ai_token() .with_prime_network() .with_compute_pool() + .with_synthetic_data_validator(work_validation_address) .build() .unwrap(); + + let contracts = Arc::new(contracts); + let hardware_validator = HardwareValidator::new(&validator_wallet, contracts.clone()); + + let pool_id = args.pool_id.clone(); + let mut synthetic_validator = match contracts.synthetic_data_validator.clone() { + Some(validator) => { + if let Some(leviticus_url) = args.leviticus_url { + SyntheticDataValidator::new( + None, + pool_id.unwrap(), + validator, + contracts.prime_network.clone(), + leviticus_url, + ) + } else { + error!("Leviticus URL is not provided"); + std::process::exit(1); + } + } + None => { + error!("Synthetic data validator not found"); + std::process::exit(1); + } + }; + loop { + runtime.block_on(async { + let validation_result = synthetic_validator.validate_work().await; + println!("Validation result: {:?}", validation_result); + }); + async fn _generate_signature(wallet: &Wallet, message: &str) -> Result { - let signature = wallet - .signer - .sign_message(message.as_bytes()) + let signature = sign_request(message, wallet, None) .await - .context("Failed to sign message")? - .as_bytes(); - Ok(format!("0x{}", hex::encode(signature))) + .map_err(|e| anyhow::anyhow!("{}", e))?; + Ok(signature) } let nodes = match runtime.block_on(async { @@ -137,150 +185,25 @@ fn main() { } }; - let non_validated_nodes: Vec = nodes - .iter() - .filter(|node| !node.is_validated) - .cloned() - .collect(); - - info!("Non validated nodes: {:?}", non_validated_nodes); - - for node in non_validated_nodes { - let node_address = match node.id.trim_start_matches("0x").parse::
() { - Ok(addr) => addr, - Err(e) => { - error!("Failed to parse node address {}: {}", node.id, e); - continue; - } - }; - - let provider_address = match node - .provider_address - .trim_start_matches("0x") - .parse::
() - { - Ok(addr) => addr, - Err(e) => { - error!( - "Failed to parse provider address {}: {}", - node.provider_address, e - ); - continue; - } - }; - - let challenge_route = "/challenge/submit"; - let challenge_result = - runtime.block_on(challenge_node(&node, &validator_wallet, challenge_route)); - if challenge_result.is_err() { - error!( - "Failed to challenge node {}: {:?}", - node.id, challenge_result - ); - continue; - } - - if let Err(e) = runtime.block_on( - contracts - .prime_network - .validate_node(provider_address, node_address), - ) { - error!("Failed to validate node {}: {}", node.id, e); - } else { - info!("Successfully validated node: {}", node.id); - } + if let Err(e) = runtime.block_on(hardware_validator.validate_nodes(nodes)) { + error!("Error validating nodes: {:#}", e); } - std::thread::sleep(std::time::Duration::from_secs(10)); - } -} - -fn random_challenge( - rows_a: usize, - cols_a: usize, - rows_b: usize, - cols_b: usize, -) -> ChallengeRequest { - let mut rng = rng(); - - let data_a_vec: Vec = (0..(rows_a * cols_a)) - .map(|_| rng.random_range(0.0..1.0)) - .collect(); - - let data_b_vec: Vec = (0..(rows_b * cols_b)) - .map(|_| rng.random_range(0.0..1.0)) - .collect(); - - // convert to FixedF64 - let data_a: Vec = data_a_vec.iter().map(|x| FixedF64(*x)).collect(); - let data_b: Vec = data_b_vec.iter().map(|x| FixedF64(*x)).collect(); - - ChallengeRequest { - rows_a, - cols_a, - data_a, - rows_b, - cols_b, - data_b, - } -} -pub async fn challenge_node( - node: &DiscoveryNode, - wallet: &Wallet, - challenge_route: &str, -) -> Result> { - let node_url = format!("http://{}:{}", node.node.ip_address, node.node.port); - - let mut headers = reqwest::header::HeaderMap::new(); - - // create random challenge matrix - let challenge_matrix = random_challenge(3, 3, 3, 3); - let challenge_expected = calc_matrix(&challenge_matrix); - - let post_url = format!("{}{}", node_url, challenge_route); - - let address = 
wallet.wallet.default_signer().address().to_string(); - let challenge_matrix_value = serde_json::to_value(&challenge_matrix)?; - let signature = sign_request(challenge_route, wallet, Some(&challenge_matrix_value)).await?; - - headers.insert("x-address", address.parse().unwrap()); - headers.insert("x-signature", signature.parse().unwrap()); - - let response = reqwest::Client::new() - .post(post_url) - .headers(headers) - .json(&challenge_matrix_value) - .send() - .await?; - - let response_text = response.text().await?; - println!("Response text: {}", response_text); - let parsed_response: ApiResponse = serde_json::from_str(&response_text)?; - - if !parsed_response.success { - Err("Error fetching challenge from node".into()) - } else if challenge_expected.result == parsed_response.data.result { - info!("Challenge successful"); - Ok(0) - } else { - error!("Challenge failed"); - Err("Node failed challenge".into()) + std::thread::sleep(std::time::Duration::from_secs(10)); } } #[cfg(test)] mod tests { - use super::*; + use actix_web::{test, App}; use actix_web::{ web::{self, post}, HttpResponse, Scope, }; + use shared::models::challenge::{calc_matrix, ChallengeRequest, ChallengeResponse, FixedF64}; - pub async fn handle_challenge( - challenge: web::Json, - //app_state: Data, - ) -> HttpResponse { + pub async fn handle_challenge(challenge: web::Json) -> HttpResponse { let result = calc_matrix(&challenge); HttpResponse::Ok().json(result) } diff --git a/validator/src/validators/hardware.rs b/validator/src/validators/hardware.rs new file mode 100644 index 00000000..dfe271f1 --- /dev/null +++ b/validator/src/validators/hardware.rs @@ -0,0 +1,164 @@ +use alloy::primitives::Address; +use anyhow::{Context, Error, Result}; +use log::{error, info}; +use rand::{rng, Rng}; +use shared::models::api::ApiResponse; +use shared::models::challenge::{calc_matrix, ChallengeRequest, ChallengeResponse, FixedF64}; +use shared::models::node::DiscoveryNode; +use 
shared::security::request_signer::sign_request; +use shared::web3::contracts::core::builder::Contracts; +use shared::web3::wallet::Wallet; +use std::sync::Arc; + +pub struct HardwareValidator<'a> { + wallet: &'a Wallet, + contracts: Arc, +} + +impl<'a> HardwareValidator<'a> { + pub fn new(wallet: &'a Wallet, contracts: Arc) -> Self { + Self { wallet, contracts } + } + + pub async fn validate_nodes(&self, nodes: Vec) -> Result<()> { + let non_validated_nodes: Vec = nodes + .into_iter() + .filter(|node| !node.is_validated) + .collect(); + + info!("Non validated nodes: {:?}", non_validated_nodes); + + for node in non_validated_nodes { + let node_address = match node.id.trim_start_matches("0x").parse::
() { + Ok(addr) => addr, + Err(e) => { + error!("Failed to parse node address {}: {}", node.id, e); + continue; + } + }; + + let provider_address = match node + .provider_address + .trim_start_matches("0x") + .parse::
() + { + Ok(addr) => addr, + Err(e) => { + error!( + "Failed to parse provider address {}: {}", + node.provider_address, e + ); + continue; + } + }; + + let challenge_route = "/challenge/submit"; + let challenge_result = self.challenge_node(&node, challenge_route).await; + if challenge_result.is_err() { + error!( + "Failed to challenge node {}: {:?}", + node.id, challenge_result + ); + continue; + } + + if let Err(e) = self + .contracts + .prime_network + .validate_node(provider_address, node_address) + .await + { + error!("Failed to validate node {}: {}", node.id, e); + } else { + info!("Successfully validated node: {}", node.id); + } + } + + Ok(()) + } + + async fn challenge_node( + &self, + node: &DiscoveryNode, + challenge_route: &str, + ) -> Result { + let node_url = format!("http://{}:{}", node.node.ip_address, node.node.port); + + let mut headers = reqwest::header::HeaderMap::new(); + + // create random challenge matrix + let challenge_matrix = self.random_challenge(3, 3, 3, 3); + let challenge_expected = calc_matrix(&challenge_matrix); + + let post_url = format!("{}{}", node_url, challenge_route); + + let address = self.wallet.wallet.default_signer().address().to_string(); + let challenge_matrix_value = serde_json::to_value(&challenge_matrix)?; + let signature = sign_request(challenge_route, self.wallet, Some(&challenge_matrix_value)) + .await + .map_err(|e| anyhow::anyhow!("{}", e))?; + + headers.insert( + "x-address", + reqwest::header::HeaderValue::from_str(&address) + .context("Failed to create address header")?, + ); + headers.insert( + "x-signature", + reqwest::header::HeaderValue::from_str(&signature) + .context("Failed to create signature header")?, + ); + + let response = reqwest::Client::new() + .post(post_url) + .headers(headers) + .json(&challenge_matrix_value) + .send() + .await?; + + let response_text = response.text().await?; + println!("Response text: {}", response_text); + let parsed_response: ApiResponse = 
serde_json::from_str(&response_text)?; + + if !parsed_response.success { + Err(anyhow::anyhow!("Error fetching challenge from node")) + } else if challenge_expected.result == parsed_response.data.result { + info!("Challenge successful"); + Ok(0) + } else { + error!("Challenge failed"); + Err(anyhow::anyhow!("Node failed challenge")) + } + } + + fn random_challenge( + &self, + rows_a: usize, + cols_a: usize, + rows_b: usize, + cols_b: usize, + ) -> ChallengeRequest { + let mut rng = rng(); + + let data_a_vec: Vec = (0..(rows_a * cols_a)) + .map(|_| rng.random_range(0.0..1.0)) + .collect(); + + let data_b_vec: Vec = (0..(rows_b * cols_b)) + .map(|_| rng.random_range(0.0..1.0)) + .collect(); + + // convert to FixedF64 + let data_a: Vec = data_a_vec.iter().map(|x| FixedF64(*x)).collect(); + let data_b: Vec = data_b_vec.iter().map(|x| FixedF64(*x)).collect(); + + ChallengeRequest { + rows_a, + cols_a, + data_a, + rows_b, + cols_b, + data_b, + } + } +} diff --git a/validator/src/validators/mod.rs b/validator/src/validators/mod.rs new file mode 100644 index 00000000..85e0f1bb --- /dev/null +++ b/validator/src/validators/mod.rs @@ -0,0 +1,10 @@ +pub mod hardware; +pub mod synthetic_data; + +/// Common trait for all validators +pub trait Validator { + type Error; + + /// Returns the name of the validator + fn name(&self) -> &str; +} diff --git a/validator/src/validators/synthetic_data.rs b/validator/src/validators/synthetic_data.rs new file mode 100644 index 00000000..41972a03 --- /dev/null +++ b/validator/src/validators/synthetic_data.rs @@ -0,0 +1,353 @@ +use alloy::primitives::U256; +use anyhow::{Context, Error, Result}; +use directories::ProjectDirs; +use hex; +use log::debug; +use log::{error, info}; +use serde::{Deserialize, Serialize}; +use shared::web3::contracts::implementations::prime_network_contract::PrimeNetworkContract; +use std::fs; +use std::path::Path; +use std::path::PathBuf; +use toml; + +use crate::validators::Validator; +use 
shared::web3::contracts::implementations::work_validators::synthetic_data_validator::SyntheticDataWorkValidator; + +fn get_default_state_dir() -> Option { + ProjectDirs::from("com", "prime", "validator") + .map(|proj_dirs| proj_dirs.data_local_dir().to_string_lossy().into_owned()) +} + +fn state_filename(pool_id: &str) -> String { + format!("work_state_{}.toml", pool_id) +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +struct PersistedWorkState { + pool_id: U256, + last_validation_timestamp: U256, +} + +pub struct SyntheticDataValidator { + pool_id: U256, + validator: SyntheticDataWorkValidator, + prime_network: PrimeNetworkContract, + last_validation_timestamp: U256, + state_dir: Option, + leviticus_url: String, +} + +impl Validator for SyntheticDataValidator { + type Error = anyhow::Error; + + fn name(&self) -> &str { + "Synthetic Data Validator" + } +} + +impl SyntheticDataValidator { + pub fn new( + state_dir: Option, + pool_id_str: String, + validator: SyntheticDataWorkValidator, + prime_network: PrimeNetworkContract, + leviticus_url: String, + ) -> Self { + let pool_id = pool_id_str.parse::().expect("Invalid pool ID"); + let default_state_dir = get_default_state_dir(); + debug!("Default state dir: {:?}", default_state_dir); + let state_path = state_dir + .map(PathBuf::from) + .or_else(|| default_state_dir.map(PathBuf::from)); + debug!("State path: {:?}", state_path); + let mut last_validation_timestamp: Option<_> = None; + + // Try to load state, log info if creating new file + if let Some(path) = &state_path { + let state_file = path.join(state_filename(&pool_id.to_string())); + if !state_file.exists() { + debug!( + "No state file found at {:?}, will create on first state change", + state_file + ); + } else if let Ok(Some(loaded_state)) = + SyntheticDataValidator::load_state(path, &pool_id.to_string()) + { + debug!("Loaded previous state from {:?}", state_file); + last_validation_timestamp = Some(loaded_state.last_validation_timestamp); + } else { + 
debug!("Failed to load state from {:?}", state_file); + } + } + + // if no last time, set it to 24 hours ago, as nothing before that can be invalidated + if last_validation_timestamp.is_none() { + last_validation_timestamp = Some(U256::from( + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .expect("Failed to get current timestamp") + .as_secs() + .saturating_sub(24 * 60 * 60), + )); + } + + Self { + pool_id, + validator, + prime_network, + last_validation_timestamp: last_validation_timestamp.unwrap(), + state_dir: state_path.clone(), + leviticus_url, + } + } + + fn save_state(&self) -> Result<()> { + // Get values without block_on + let state = PersistedWorkState { + pool_id: self.pool_id, + last_validation_timestamp: self.last_validation_timestamp, + }; + + if let Some(ref state_dir) = self.state_dir { + fs::create_dir_all(state_dir)?; + let state_path = state_dir.join(state_filename(&self.pool_id.to_string())); + let toml = toml::to_string_pretty(&state)?; + fs::write(&state_path, toml)?; + debug!("Saved state to {:?}", state_path); + } + Ok(()) + } + + fn load_state(state_dir: &Path, pool_id: &str) -> Result> { + let state_path = state_dir.join(state_filename(pool_id)); + if state_path.exists() { + let contents = fs::read_to_string(state_path)?; + let state: PersistedWorkState = toml::from_str(&contents)?; + return Ok(Some(state)); + } + Ok(None) + } + + pub async fn invalidate_work(&self, work_key: &str) -> Result<()> { + let data = hex::decode(work_key) + .map_err(|e| Error::msg(format!("Failed to decode hex work key: {}", e)))?; + println!("Invalidating work: {}", work_key); + match self + .prime_network + .invalidate_work(self.pool_id, U256::from(1), data) + .await + { + Ok(_) => Ok(()), + Err(e) => { + error!("Failed to invalidate work {}: {}", work_key, e); + Err(Error::msg(format!("Failed to invalidate work: {}", e))) + } + } + } + + pub async fn validate_work(&mut self) -> Result<()> { + info!("Validating work for pool ID: {:?}", 
self.pool_id); + + // Get all work keys for the pool + let work_keys = self + .validator + .get_work_since(self.pool_id, self.last_validation_timestamp) + .await + .context("Failed to get work keys")?; + + info!("Found {} work keys to validate", work_keys.len()); + + // Process each work key + for work_key in work_keys { + info!("Processing work key: {}", work_key); + match self.validator.get_work_info(self.pool_id, &work_key).await { + Ok(work_info) => { + info!( + "Got work info - Provider: {:?}, Node: {:?}, Timestamp: {}", + work_info.provider, work_info.node_id, work_info.timestamp + ); + + // Start validation by calling validation endpoint with retries + let validate_url = format!("{}/validate/{}", self.leviticus_url, work_key); + let client = reqwest::Client::new(); + + let mut validate_attempts = 0; + const MAX_VALIDATE_ATTEMPTS: u32 = 3; + + let validation_result = loop { + let body = serde_json::json!({ + "file_sha": work_key + }); + + match client.post(&validate_url).json(&body).send().await { + Ok(_) => { + info!("Started validation for work key: {}", work_key); + break Ok(()); + } + Err(e) => { + validate_attempts += 1; + error!( + "Attempt {} failed to start validation for {}: {}", + validate_attempts, work_key, e + ); + + if validate_attempts >= MAX_VALIDATE_ATTEMPTS { + break Err(e); + } + + // Exponential backoff + tokio::time::sleep(tokio::time::Duration::from_secs( + 2u64.pow(validate_attempts), + )) + .await; + } + } + }; + + match validation_result { + Ok(_) => { + // Poll status endpoint until we get a proper response + let status_url = format!("{}/status/{}", self.leviticus_url, work_key); + let mut status_attempts = 0; + const MAX_STATUS_ATTEMPTS: u32 = 5; + + loop { + tokio::time::sleep(tokio::time::Duration::from_secs(2)).await; + + match client.get(&status_url).send().await { + Ok(response) => { + match response.json::().await { + Ok(status_json) => { + match status_json + .get("status") + .and_then(|s| s.as_str()) + { + Some(status) => { 
+ info!( + "Validation status for {}: {}", + work_key, status + ); + + match status { + "accept" => { + info!( + "Work {} was accepted", + work_key + ); + break; + } + "reject" => { + error!( + "Work {} was rejected", + work_key + ); + if let Err(e) = self + .invalidate_work(&work_key) + .await + { + error!("Failed to invalidate work {}: {}", work_key, e); + } else { + info!("Successfully invalidated work {}", work_key); + } + break; + } + "crashed" => { + error!( + "Validation crashed for {}", + work_key + ); + break; + } + "pending" => { + status_attempts += 1; + if status_attempts + >= MAX_STATUS_ATTEMPTS + { + error!("Max status attempts reached for {}", work_key); + break; + } + } + _ => { + status_attempts += 1; + error!( + "Unknown status {} for {}", + status, work_key + ); + if status_attempts + >= MAX_STATUS_ATTEMPTS + { + break; + } + } + } + } + None => { + status_attempts += 1; + error!( + "No status field in response for {}", + work_key + ); + if status_attempts >= MAX_STATUS_ATTEMPTS { + error!("Max status attempts reached for {}", work_key); + break; + } + } + } + } + Err(e) => { + status_attempts += 1; + error!("Attempt {} failed to parse status JSON for {}: {}", status_attempts, work_key, e); + + if status_attempts >= MAX_STATUS_ATTEMPTS { + error!( + "Max status attempts reached for {}", + work_key + ); + break; + } + } + } + } + Err(e) => { + status_attempts += 1; + error!( + "Attempt {} failed to get status for {}: {}", + status_attempts, work_key, e + ); + + if status_attempts >= MAX_STATUS_ATTEMPTS { + error!("Max status attempts reached for {}", work_key); + break; + } + } + } + } + } + Err(_) => { + error!("Failed all validation attempts for {}", work_key); + continue; + } + } + } + Err(e) => { + error!("Failed to get work info for key {}: {}", work_key, e); + continue; + } + } + } + + // Update last validation timestamp to current time + // TODO: We should only set this once we are sure that we have validated all keys + 
self.last_validation_timestamp = U256::from( + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .context("Failed to get current timestamp")? + .as_secs(), + ); + + self.save_state()?; + + Ok(()) + } +} diff --git a/worker/Cargo.toml b/worker/Cargo.toml index b2d36a17..144b57e7 100644 --- a/worker/Cargo.toml +++ b/worker/Cargo.toml @@ -42,6 +42,7 @@ serial_test = "0.5.1" directories = "6.0.0" strip-ansi-escapes = "0.2.1" nalgebra = "0.33.2" +sha2 = "0.10.8" [dev-dependencies] tempfile = "=3.14.0" diff --git a/worker/src/cli/command.rs b/worker/src/cli/command.rs index 7cd5480a..876f87b7 100644 --- a/worker/src/cli/command.rs +++ b/worker/src/cli/command.rs @@ -10,6 +10,7 @@ use crate::operations::heartbeat::service::HeartbeatService; use crate::operations::provider::ProviderError; use crate::operations::provider::ProviderOperations; use crate::services::discovery::DiscoveryService; +use crate::state::system_state::SystemState; use crate::TaskHandles; use alloy::primitives::U256; use clap::{Parser, Subcommand}; @@ -220,14 +221,28 @@ pub async fn execute_command( } }; + let state = Arc::new(SystemState::new( + state_dir_overwrite.clone(), + *disable_state_storing, + )); let metrics_store = Arc::new(MetricsStore::new()); let heartbeat_metrics_clone = metrics_store.clone(); - let task_bridge = Arc::new(TaskBridge::new(None, metrics_store)); + let bridge_contracts = contracts.clone(); + let bridge_wallet = node_wallet_instance.clone(); let docker_storage_path = match node_config.clone().compute_specs { Some(specs) => specs.storage_path.clone(), None => None, }; + let task_bridge = Arc::new(TaskBridge::new( + None, + metrics_store, + Some(bridge_contracts), + Some(node_config.clone()), + Some(bridge_wallet), + docker_storage_path.clone(), + state.clone(), + )); let system_memory = node_config .compute_specs @@ -253,13 +268,12 @@ pub async fn execute_command( }); let heartbeat_service = HeartbeatService::new( Duration::from_secs(10), - 
state_dir_overwrite.clone(), - *disable_state_storing, cancellation_token.clone(), task_handles.clone(), node_wallet_instance.clone(), docker_service.clone(), heartbeat_metrics_clone.clone(), + state, ); let mut attempts = 0; diff --git a/worker/src/docker/service.rs b/worker/src/docker/service.rs index 88ad27c5..df924520 100644 --- a/worker/src/docker/service.rs +++ b/worker/src/docker/service.rs @@ -104,7 +104,6 @@ impl DockerService { }) .cloned() .collect(); - Console::info("DockerService", &format!("Old tasks: {:?}", old_tasks)); if !old_tasks.is_empty() { for task in old_tasks { diff --git a/worker/src/docker/taskbridge/bridge.rs b/worker/src/docker/taskbridge/bridge.rs index 44883ea0..80b92440 100644 --- a/worker/src/docker/taskbridge/bridge.rs +++ b/worker/src/docker/taskbridge/bridge.rs @@ -1,7 +1,12 @@ +use crate::docker::taskbridge::file_handler; use crate::metrics::store::MetricsStore; +use crate::state::system_state::SystemState; use anyhow::Result; -use log::error; +use log::{debug, error, info}; use serde::{Deserialize, Serialize}; +use shared::models::node::Node; +use shared::web3::contracts::core::builder::Contracts; +use shared::web3::wallet::Wallet; #[cfg(unix)] use std::os::unix::fs::PermissionsExt; use std::sync::Arc; @@ -18,6 +23,11 @@ const DEFAULT_LINUX_SOCKET: &str = "/tmp/com.prime.worker/"; pub struct TaskBridge { pub socket_path: String, pub metrics_store: Arc, + pub contracts: Option>, + pub node_config: Option, + pub node_wallet: Option>, + pub docker_storage_path: Option, + pub state: Arc, } #[derive(Deserialize, Serialize, Debug)] @@ -27,8 +37,23 @@ struct MetricInput { value: f64, } +#[derive(Deserialize, Serialize, Debug)] +struct RequestUploadRequest { + file_name: String, + file_size: u64, + file_type: String, +} + impl TaskBridge { - pub fn new(socket_path: Option<&str>, metrics_store: Arc) -> Self { + pub fn new( + socket_path: Option<&str>, + metrics_store: Arc, + contracts: Option>, + node_config: Option, + node_wallet: 
Option>, + docker_storage_path: Option, + state: Arc, + ) -> Self { let path = match socket_path { Some(path) => path.to_string(), None => { @@ -43,18 +68,23 @@ impl TaskBridge { Self { socket_path: path, metrics_store, + contracts, + node_config, + node_wallet, + docker_storage_path, + state, } } pub async fn run(&self) -> Result<()> { let socket_path = Path::new(&self.socket_path); - log::info!("Setting up TaskBridge socket at: {}", socket_path.display()); + info!("Setting up TaskBridge socket at: {}", socket_path.display()); if let Some(parent) = socket_path.parent() { match fs::create_dir_all(parent) { - Ok(_) => log::debug!("Created parent directory: {}", parent.display()), + Ok(_) => debug!("Created parent directory: {}", parent.display()), Err(e) => { - log::error!( + error!( "Failed to create parent directory {}: {}", parent.display(), e @@ -67,9 +97,9 @@ impl TaskBridge { // Cleanup existing socket if present if socket_path.exists() { match fs::remove_file(socket_path) { - Ok(_) => log::debug!("Removed existing socket file"), + Ok(_) => debug!("Removed existing socket file"), Err(e) => { - log::error!("Failed to remove existing socket file: {}", e); + error!("Failed to remove existing socket file: {}", e); return Err(e.into()); } } @@ -77,26 +107,32 @@ impl TaskBridge { let listener = match UnixListener::bind(socket_path) { Ok(l) => { - log::info!("Successfully bound to Unix socket"); + info!("Successfully bound to Unix socket"); l } Err(e) => { - log::error!("Failed to bind Unix socket: {}", e); + error!("Failed to bind Unix socket: {}", e); return Err(e.into()); } }; // allow both owner and group to read/write match fs::set_permissions(socket_path, fs::Permissions::from_mode(0o660)) { - Ok(_) => log::debug!("Set socket permissions to 0o660"), + Ok(_) => debug!("Set socket permissions to 0o660"), Err(e) => { - log::error!("Failed to set socket permissions: {}", e); + error!("Failed to set socket permissions: {}", e); return Err(e.into()); } } loop { let 
store = self.metrics_store.clone(); + let node = self.node_config.clone(); + let contracts = self.contracts.clone(); + let wallet = self.node_wallet.clone(); + let storage_path_clone = self.docker_storage_path.clone(); + let state_clone = self.state.clone(); + match listener.accept().await { Ok((stream, _addr)) => { tokio::spawn(async move { @@ -114,23 +150,83 @@ impl TaskBridge { while current_pos < trimmed.len() { // Try to find a complete JSON object if let Some(json_str) = extract_next_json(&trimmed[current_pos..]) { - match serde_json::from_str::(json_str) { - Ok(input) => { - println!("Received metric: {:?}", input); - let _ = store - .update_metric( - input.task_id, - input.label, - input.value, + debug!("Received metric: {:?}", json_str); + if json_str.contains("file_name") { + let storage_path = match &storage_path_clone { + Some(path) => path, + None => { + println!( + "Storage path is not set - cannot upload file." + ); + continue; + } + }; + + if let Ok(file_info) = + serde_json::from_str::(json_str) + { + if let Some(file_name) = file_info["value"].as_str() { + let task_id = + file_info["task_id"].as_str().unwrap(); + + if let Err(e) = file_handler::handle_file_upload( + storage_path, + task_id, + file_name, + wallet.as_ref().unwrap(), + &state_clone, ) - .await; + .await + { + error!("Failed to handle file upload: {}", e); + } + } + } + } else if json_str.contains("file_sha") { + if let Ok(file_info) = + serde_json::from_str::(json_str) + { + if let Some(file_sha) = file_info["value"].as_str() { + if let (Some(contracts_ref), Some(node_ref)) = + (contracts.clone(), node.clone()) + { + if let Err(e) = + file_handler::handle_file_validation( + file_sha, + &contracts_ref, + &node_ref, + ) + .await + { + error!( + "Failed to handle file validation: {}", + e + ); + } + } + } } - Err(e) => { - log::error!( - "Failed to parse metric input: {} {}", - json_str, - e - ); + } else { + match serde_json::from_str::(json_str) { + Ok(input) => { + info!( + "📊 
Received metric - Task: {}, Label: {}, Value: {}", + input.task_id, input.label, input.value + ); + let _ = store + .update_metric( + input.task_id, + input.label, + input.value, + ) + .await; + } + Err(e) => { + error!( + "Failed to parse metric input: {} {}", + json_str, e + ); + } } } current_pos += json_str.len(); @@ -192,7 +288,16 @@ mod tests { let temp_dir = tempdir()?; let socket_path = temp_dir.path().join("test.sock"); let metrics_store = Arc::new(MetricsStore::new()); - let bridge = TaskBridge::new(Some(socket_path.to_str().unwrap()), metrics_store.clone()); + let state = Arc::new(SystemState::new(None, false)); + let bridge = TaskBridge::new( + Some(socket_path.to_str().unwrap()), + metrics_store.clone(), + None, + None, + None, + None, + state, + ); // Run the bridge in background let bridge_handle = tokio::spawn(async move { bridge.run().await }); @@ -214,7 +319,16 @@ mod tests { let temp_dir = tempdir()?; let socket_path = temp_dir.path().join("test.sock"); let metrics_store = Arc::new(MetricsStore::new()); - let bridge = TaskBridge::new(Some(socket_path.to_str().unwrap()), metrics_store.clone()); + let state = Arc::new(SystemState::new(None, false)); + let bridge = TaskBridge::new( + Some(socket_path.to_str().unwrap()), + metrics_store.clone(), + None, + None, + None, + None, + state, + ); // Run bridge in background let bridge_handle = tokio::spawn(async move { bridge.run().await }); @@ -224,8 +338,8 @@ mod tests { // Test client connection let stream = UnixStream::connect(&socket_path).await?; - // Print stream output to debug - println!("Connected to stream: {:?}", stream.peer_addr()); + // Log stream output for debugging + debug!("Connected to stream: {:?}", stream.peer_addr()); assert!(stream.peer_addr().is_ok()); @@ -238,7 +352,16 @@ mod tests { let temp_dir = tempdir()?; let socket_path = temp_dir.path().join("test.sock"); let metrics_store = Arc::new(MetricsStore::new()); - let bridge = TaskBridge::new(Some(socket_path.to_str().unwrap()), 
metrics_store.clone()); + let state = Arc::new(SystemState::new(None, false)); + let bridge = TaskBridge::new( + Some(socket_path.to_str().unwrap()), + metrics_store.clone(), + None, + None, + None, + None, + state, + ); let bridge_handle = tokio::spawn(async move { bridge.run().await }); @@ -251,7 +374,7 @@ mod tests { value: 10.0, }; let sample_metric = serde_json::to_string(&sample_metric)?; - println!("Sending {:?}", sample_metric); + debug!("Sending {:?}", sample_metric); let msg = format!("{}{}", sample_metric, "\n"); stream.write_all(msg.as_bytes()).await?; stream.flush().await?; @@ -276,7 +399,16 @@ mod tests { let temp_dir = tempdir()?; let socket_path = temp_dir.path().join("test.sock"); let metrics_store = Arc::new(MetricsStore::new()); - let bridge = TaskBridge::new(Some(socket_path.to_str().unwrap()), metrics_store.clone()); + let state = Arc::new(SystemState::new(None, false)); + let bridge = TaskBridge::new( + Some(socket_path.to_str().unwrap()), + metrics_store.clone(), + None, + None, + None, + None, + state, + ); let bridge_handle = tokio::spawn(async move { bridge.run().await }); diff --git a/worker/src/docker/taskbridge/file_handler.rs b/worker/src/docker/taskbridge/file_handler.rs new file mode 100644 index 00000000..a2f0c510 --- /dev/null +++ b/worker/src/docker/taskbridge/file_handler.rs @@ -0,0 +1,155 @@ +use crate::state::system_state::SystemState; +use alloy::primitives::{Address, U256}; +use anyhow::Result; +use log::{debug, error, info}; +use reqwest::header::HeaderValue; +use reqwest::Client; +use serde::{Deserialize, Serialize}; +use shared::models::node::Node; +use shared::security::request_signer::sign_request; +use shared::web3::contracts::core::builder::Contracts; +use shared::web3::wallet::Wallet; +use std::str::FromStr; +use std::sync::Arc; + +#[derive(Deserialize, Serialize, Debug)] +pub struct RequestUploadRequest { + pub file_name: String, + pub file_size: u64, + pub file_type: String, +} + +/// Handles a file upload request 
+pub async fn handle_file_upload( + storage_path: &str, + task_id: &str, + file_name: &str, + wallet: &Arc, + state: &Arc, +) -> Result<()> { + info!("📄 Received file upload request: {}", file_name); + + // Get orchestrator endpoint + let endpoint = state + .get_heartbeat_endpoint() + .await + .ok_or_else(|| { + error!("Orchestrator endpoint is not set - cannot upload file."); + anyhow::anyhow!("Orchestrator endpoint not set") + })? + .replace("/heartbeat", ""); + + // Construct file path + let file = format!("{}/prime-task-{}/{}", storage_path, task_id, file_name); + debug!("File: {:?}", file); + + // Get file size + let file_size = std::fs::metadata(&file).map(|m| m.len()).unwrap_or(0); + + // Calculate SHA + let file_sha = tokio::fs::read(&file) + .await + .map(|contents| { + use sha2::{Digest, Sha256}; + let mut hasher = Sha256::new(); + hasher.update(&contents); + format!("{:x}", hasher.finalize()) + }) + .unwrap_or_else(|e| { + error!("Failed to calculate file SHA: {}", e); + String::new() + }); + + debug!("File size: {:?}", file_size); + debug!("File SHA: {}", file_sha); + + // Create upload request + let client = Client::new(); + let request = RequestUploadRequest { + file_name: file_sha.to_string(), + file_size, + file_type: "application/json".to_string(), // Assume JSON + }; + + // Sign request + let request_value = serde_json::to_value(&request)?; + let signature = sign_request("/storage/request-upload", wallet, Some(&request_value)) + .await + .map_err(|e| anyhow::anyhow!(e.to_string()))?; + + // Prepare headers + let mut headers = reqwest::header::HeaderMap::new(); + headers.insert( + "x-address", + HeaderValue::from_str(&wallet.address().to_string())?, + ); + headers.insert("x-signature", HeaderValue::from_str(&signature)?); + + // Create upload URL + let upload_url = format!("{}/storage/request-upload", endpoint); + + // Send request + let response = client + .post(&upload_url) + .json(&request) + .headers(headers) + .send() + .await?; + + // Process 
response + let json = response.json::().await?; + + if let Some(signed_url) = json["signed_url"].as_str() { + info!("Got signed URL for upload: {}", signed_url); + + // Read file contents + let file_contents = tokio::fs::read(&file).await?; + + // Upload file to S3 using signed URL + client + .put(signed_url) + .body(file_contents) + .header("Content-Type", "application/json") + .send() + .await?; + + info!("Successfully uploaded file to S3"); + } else { + println!("Error: Missing signed_url in response"); + return Err(anyhow::anyhow!("Missing signed_url in response")); + } + + Ok(()) +} + +/// Handles a file validation request +pub async fn handle_file_validation( + file_sha: &str, + contracts: &Arc, + node: &Node, +) -> Result<()> { + info!("📄 Received file SHA for validation: {}", file_sha); + + let pool_id = node.compute_pool_id; + let node_address = &node.id; + + let decoded_sha = hex::decode(file_sha)?; + debug!( + "Decoded file sha: {:?} ({} bytes)", + decoded_sha, + decoded_sha.len() + ); + + let result = contracts + .compute_pool + .submit_work( + U256::from(pool_id), + Address::from_str(node_address)?, + decoded_sha.to_vec(), + ) + .await; + + debug!("Submit work result: {:?}", result); + + Ok(()) +} diff --git a/worker/src/docker/taskbridge/mod.rs b/worker/src/docker/taskbridge/mod.rs index a24fe1e6..e638ca11 100644 --- a/worker/src/docker/taskbridge/mod.rs +++ b/worker/src/docker/taskbridge/mod.rs @@ -1,3 +1,4 @@ pub mod bridge; +pub mod file_handler; pub use bridge::TaskBridge; diff --git a/worker/src/main.rs b/worker/src/main.rs index 92a1977c..a33e4f61 100644 --- a/worker/src/main.rs +++ b/worker/src/main.rs @@ -6,6 +6,7 @@ mod docker; mod metrics; mod operations; mod services; +mod state; use clap::Parser; use cli::{execute_command, Cli}; use log::{debug, LevelFilter}; diff --git a/worker/src/operations/heartbeat/mod.rs b/worker/src/operations/heartbeat/mod.rs index 1a056b19..1f278a4d 100644 --- a/worker/src/operations/heartbeat/mod.rs +++ 
b/worker/src/operations/heartbeat/mod.rs @@ -1,2 +1 @@ pub mod service; -pub mod state; diff --git a/worker/src/operations/heartbeat/service.rs b/worker/src/operations/heartbeat/service.rs index c8eb974c..b68b3852 100644 --- a/worker/src/operations/heartbeat/service.rs +++ b/worker/src/operations/heartbeat/service.rs @@ -1,7 +1,7 @@ -use super::state::HeartbeatState; use crate::console::Console; use crate::docker::DockerService; use crate::metrics::store::MetricsStore; +use crate::state::system_state::SystemState; use crate::TaskHandles; use log; use log::info; @@ -15,7 +15,7 @@ use tokio::time::{interval, Duration}; use tokio_util::sync::CancellationToken; #[derive(Clone)] pub struct HeartbeatService { - state: HeartbeatState, + state: Arc, interval: Duration, client: Client, cancellation_token: CancellationToken, @@ -36,16 +36,13 @@ impl HeartbeatService { #[allow(clippy::too_many_arguments)] pub fn new( interval: Duration, - state_dir_overwrite: Option, - disable_state_storing: bool, cancellation_token: CancellationToken, task_handles: TaskHandles, node_wallet: Arc, docker_service: Arc, metrics_store: Arc, + state: Arc, ) -> Result, HeartbeatError> { - let state = HeartbeatState::new(state_dir_overwrite.or(None), disable_state_storing); - let client = Client::builder() .timeout(Duration::from_secs(5)) // 5 second timeout .build() @@ -64,7 +61,7 @@ impl HeartbeatService { } pub async fn activate_heartbeat_if_endpoint_exists(&self) { - if let Some(endpoint) = self.state.get_endpoint().await { + if let Some(endpoint) = self.state.get_heartbeat_endpoint().await { info!("Starting heartbeat from recovered state"); self.start(endpoint).await.unwrap(); } @@ -91,18 +88,18 @@ impl HeartbeatService { if !state.is_running().await { break; } - match Self::send_heartbeat(&client, state.get_endpoint().await, wallet_clone.clone(), docker_service.clone(), metrics_store.clone()).await { + match Self::send_heartbeat(&client, state.get_heartbeat_endpoint().await, 
wallet_clone.clone(), docker_service.clone(), metrics_store.clone()).await { Ok(_) => { state.update_last_heartbeat().await; - log::info!("Heartbeat sent successfully"); + log::info!("Synced with orchestrator"); // Updated message to reflect sync } Err(e) => { - log::error!("Heartbeat failed: {:?}", e); + log::error!("{}", &format!("Failed to sync with orchestrator: {:?}", e)); // Updated error message } } } _ = cancellation_token.cancelled() => { - log::info!("Heartbeat service received cancellation signal"); + log::info!("Sync service received cancellation signal"); // Updated log message state.set_running(false, None).await; break; } diff --git a/worker/src/state/mod.rs b/worker/src/state/mod.rs new file mode 100644 index 00000000..bfe42102 --- /dev/null +++ b/worker/src/state/mod.rs @@ -0,0 +1 @@ +pub mod system_state; diff --git a/worker/src/operations/heartbeat/state.rs b/worker/src/state/system_state.rs similarity index 86% rename from worker/src/operations/heartbeat/state.rs rename to worker/src/state/system_state.rs index 0cc3d13a..21671abe 100644 --- a/worker/src/operations/heartbeat/state.rs +++ b/worker/src/state/system_state.rs @@ -16,12 +16,12 @@ fn get_default_state_dir() -> Option { } #[derive(Debug, Clone, Serialize, Deserialize)] -struct PersistedHeartbeatState { +struct PersistedSystemState { endpoint: Option, // Only store the endpoint } #[derive(Debug, Clone)] -pub struct HeartbeatState { +pub struct SystemState { last_heartbeat: Arc>>, is_running: Arc>, // Keep is_running in the normal heartbeat state endpoint: Arc>>, @@ -29,7 +29,7 @@ pub struct HeartbeatState { disable_state_storing: bool, } -impl HeartbeatState { +impl SystemState { pub fn new(state_dir: Option, disable_state_storing: bool) -> Self { let default_state_dir = get_default_state_dir(); debug!("Default state dir: {:?}", default_state_dir); @@ -47,7 +47,7 @@ impl HeartbeatState { "No state file found at {:?}, will create on first state change", state_file ); - } else if let 
Ok(Some(loaded_state)) = HeartbeatState::load_state(path) { + } else if let Ok(Some(loaded_state)) = SystemState::load_state(path) { debug!("Loaded previous state from {:?}", state_file); endpoint = loaded_state.endpoint; } else { @@ -68,7 +68,7 @@ impl HeartbeatState { if !self.disable_state_storing { if let Some(state_dir) = &self.state_dir_overwrite { // Get values without block_on - let state = PersistedHeartbeatState { + let state = PersistedSystemState { endpoint: heartbeat_endpoint, }; @@ -82,11 +82,11 @@ impl HeartbeatState { Ok(()) } - fn load_state(state_dir: &Path) -> Result> { + fn load_state(state_dir: &Path) -> Result> { let state_path = state_dir.join(STATE_FILENAME); if state_path.exists() { let contents = fs::read_to_string(state_path)?; - let state: PersistedHeartbeatState = toml::from_str(&contents)?; + let state: PersistedSystemState = toml::from_str(&contents)?; return Ok(Some(state)); } Ok(None) @@ -104,7 +104,7 @@ impl HeartbeatState { pub async fn set_running(&self, running: bool, heartbeat_endpoint: Option) { // Read current values let current_running = self.is_running().await; - let current_endpoint = self.get_endpoint().await; + let current_endpoint = self.get_heartbeat_endpoint().await; // Only update and save if values changed if running != current_running || heartbeat_endpoint != current_endpoint { @@ -122,7 +122,7 @@ impl HeartbeatState { } } - pub async fn get_endpoint(&self) -> Option { + pub async fn get_heartbeat_endpoint(&self) -> Option { let endpoint = self.endpoint.read().await; endpoint.clone() } @@ -150,7 +150,7 @@ mod tests { let temp_dir = setup_test_dir(); println!("Temp dir: {:?}", temp_dir.path()); - let state = HeartbeatState::new(Some(temp_dir.path().to_string_lossy().to_string()), false); + let state = SystemState::new(Some(temp_dir.path().to_string_lossy().to_string()), false); state .set_running(true, Some("http://localhost:8080/heartbeat".to_string())) .await; @@ -159,7 +159,7 @@ mod tests { 
assert!(state_file.exists()); let contents = fs::read_to_string(state_file).expect("Failed to read state file"); - let state: PersistedHeartbeatState = + let state: PersistedSystemState = toml::from_str(&contents).expect("Failed to parse state file"); assert_eq!( state.endpoint, @@ -173,9 +173,9 @@ mod tests { let state_file = temp_dir.path().join(STATE_FILENAME); fs::write(&state_file, "invalid_toml_content").expect("Failed to write to state file"); - let state = HeartbeatState::new(Some(temp_dir.path().to_string_lossy().to_string()), false); + let state = SystemState::new(Some(temp_dir.path().to_string_lossy().to_string()), false); assert!(!(state.is_running().await)); - assert_eq!(state.get_endpoint().await, None); + assert_eq!(state.get_heartbeat_endpoint().await, None); } #[tokio::test] @@ -188,9 +188,9 @@ mod tests { ) .expect("Failed to write to state file"); - let state = HeartbeatState::new(Some(temp_dir.path().to_string_lossy().to_string()), false); + let state = SystemState::new(Some(temp_dir.path().to_string_lossy().to_string()), false); assert_eq!( - state.get_endpoint().await, + state.get_heartbeat_endpoint().await, Some("http://localhost:8080/heartbeat".to_string()) ); } From 82a9b4d87e7d25dda90c473e7b5093d1ba0fcec6 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Wed, 12 Mar 2025 11:23:34 +0100 Subject: [PATCH 38/85] align smart contract module (#135) --- smart-contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/smart-contracts b/smart-contracts index a19acc39..7d8e0d44 160000 --- a/smart-contracts +++ b/smart-contracts @@ -1 +1 @@ -Subproject commit a19acc39824d107a16a44b82f75b0972e83f9f75 +Subproject commit 7d8e0d44c29b442df8138699a2addef37de73123 From 40fcaf8981adc1122c0b0ca3f9469baa5a2508ef Mon Sep 17 00:00:00 2001 From: JannikSt Date: Wed, 12 Mar 2025 15:19:22 +0100 Subject: [PATCH 39/85] support dynamic staking system and ability to automatically increase stake (#136) * support dynamic staking system and ability to 
automatically increase stake * remove provider stage arg on worker --- Makefile | 14 +-- shared/artifacts/abi/compute_registry.json | 38 ++++++++ shared/artifacts/abi/prime_network.json | 37 ++++++++ shared/src/web3/contracts/core/builder.rs | 11 ++- shared/src/web3/contracts/core/contract.rs | 1 + .../compute_registry_contract.rs | 18 +++- .../src/web3/contracts/implementations/mod.rs | 1 + .../implementations/prime_network_contract.rs | 16 ++++ .../implementations/stake_manager.rs | 54 +++++++++++ worker/src/cli/command.rs | 94 +++++++++++++++---- worker/src/operations/compute_node.rs | 3 +- worker/src/operations/provider.rs | 64 ++++++++++++- 12 files changed, 319 insertions(+), 32 deletions(-) create mode 100644 shared/src/web3/contracts/implementations/stake_manager.rs diff --git a/Makefile b/Makefile index f13759e0..ebf76160 100644 --- a/Makefile +++ b/Makefile @@ -73,22 +73,22 @@ watch-discovery: watch-worker: set -a; source ${ENV_FILE}; set +a; \ - cargo watch -w worker/src -x "run --bin worker -- run --private-key-provider $$PROVIDER_PRIVATE_KEY --private-key-node $$NODE_PRIVATE_KEY --port 8091 --external-ip 0.0.0.0 --compute-pool-id 0 --validator-address $$VALIDATOR_ADDRESS" + cargo watch -w worker/src -x "run --bin worker -- run --private-key-provider $$PROVIDER_PRIVATE_KEY --private-key-node $$NODE_PRIVATE_KEY --port 8091 --external-ip 0.0.0.0 --compute-pool-id $$WORKER_COMPUTE_POOL_ID --validator-address $$VALIDATOR_ADDRESS" watch-validator: set -a; source ${ENV_FILE}; set +a; \ - cargo watch -w validator/src -x "run --bin validator -- --validator-key $${PRIVATE_KEY_VALIDATOR} --rpc-url $${RPC_URL} --pool-id 0 --work-validation-contract $${WORK_VALIDATION_CONTRACT} --leviticus-url $${LEVITICUS_URL}" + cargo watch -w validator/src -x "run --bin validator -- --validator-key $${PRIVATE_KEY_VALIDATOR} --rpc-url $${RPC_URL} --pool-id 1 --work-validation-contract $${WORK_VALIDATION_CONTRACT} --leviticus-url $${LEVITICUS_URL}" watch-orchestrator: set -a; 
source ${ENV_FILE}; set +a; \ - cargo watch -w orchestrator/src -x "run --bin orchestrator -- -r $$RPC_URL -k $$POOL_OWNER_PRIVATE_KEY -d 0 -p 8090 -i 10 -u http://localhost:8090 --s3-credentials $$S3_CREDENTIALS" + cargo watch -w orchestrator/src -x "run --bin orchestrator -- -r $$RPC_URL -k $$POOL_OWNER_PRIVATE_KEY -d 0 -p 8090 -i 10 -u http://localhost:8090 --s3-credentials $$S3_CREDENTIALS --compute-pool-id $$WORKER_COMPUTE_POOL_ID" build-worker: cargo build --release --bin worker run-worker-bin: set -a; source .env; set +a; \ - ./target/release/worker run --private-key-provider $$PROVIDER_PRIVATE_KEY --private-key-node $$NODE_PRIVATE_KEY --port 8091 --external-ip 0.0.0.0 --compute-pool-id 0 --validator-address $$VALIDATOR_ADDRESS + ./target/release/worker run --private-key-provider $$PROVIDER_PRIVATE_KEY --private-key-node $$NODE_PRIVATE_KEY --port 8091 --external-ip 0.0.0.0 --compute-pool-id $$WORKER_COMPUTE_POOL_ID --validator-address $$VALIDATOR_ADDRESS SSH_CONNECTION ?= your-ssh-conn string EXTERNAL_IP ?= 0.0.0.0 @@ -143,8 +143,10 @@ watch-worker-remote: setup-remote setup-tunnel sync-remote --private-key-node \$$NODE_PRIVATE_KEY \ --port $(PORT) \ --external-ip \$$EXTERNAL_IP \ - --compute-pool-id 0 \ - --validator-address \$$VALIDATOR_ADDRESS 2>&1 | tee worker.log\"" + --compute-pool-id \$$WORKER_COMPUTE_POOL_ID \ + --validator-address \$$VALIDATOR_ADDRESS \ + 2>&1 | tee worker.log\"" + # Kill SSH tunnel .PHONY: kill-tunnel kill-tunnel: diff --git a/shared/artifacts/abi/compute_registry.json b/shared/artifacts/abi/compute_registry.json index d8fbace0..71dc2913 100644 --- a/shared/artifacts/abi/compute_registry.json +++ b/shared/artifacts/abi/compute_registry.json @@ -510,6 +510,25 @@ ], "stateMutability": "view" }, + { + "type": "function", + "name": "getProviderTotalCompute", + "inputs": [ + { + "name": "provider", + "type": "address", + "internalType": "address" + } + ], + "outputs": [ + { + "name": "", + "type": "uint256", + "internalType": "uint256" 
+ } + ], + "stateMutability": "view" + }, { "type": "function", "name": "getProviderTotalNodes", @@ -714,6 +733,25 @@ ], "stateMutability": "view" }, + { + "type": "function", + "name": "providerTotalCompute", + "inputs": [ + { + "name": "", + "type": "address", + "internalType": "address" + } + ], + "outputs": [ + { + "name": "", + "type": "uint256", + "internalType": "uint256" + } + ], + "stateMutability": "view" + }, { "type": "function", "name": "providers", diff --git a/shared/artifacts/abi/prime_network.json b/shared/artifacts/abi/prime_network.json index aa965c6e..0dfd0a0d 100644 --- a/shared/artifacts/abi/prime_network.json +++ b/shared/artifacts/abi/prime_network.json @@ -113,6 +113,30 @@ "outputs": [], "stateMutability": "nonpayable" }, + { + "type": "function", + "name": "calculateMinimumStake", + "inputs": [ + { + "name": "provider", + "type": "address", + "internalType": "address" + }, + { + "name": "computeUnits", + "type": "uint256", + "internalType": "uint256" + } + ], + "outputs": [ + { + "name": "", + "type": "uint256", + "internalType": "uint256" + } + ], + "stateMutability": "view" + }, { "type": "function", "name": "computePool", @@ -317,6 +341,19 @@ ], "stateMutability": "view" }, + { + "type": "function", + "name": "increaseStake", + "inputs": [ + { + "name": "additionalStake", + "type": "uint256", + "internalType": "uint256" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, { "type": "function", "name": "invalidateNode", diff --git a/shared/src/web3/contracts/core/builder.rs b/shared/src/web3/contracts/core/builder.rs index 55b76d75..d89d075a 100644 --- a/shared/src/web3/contracts/core/builder.rs +++ b/shared/src/web3/contracts/core/builder.rs @@ -6,7 +6,7 @@ use crate::web3::{ implementations::{ ai_token_contract::AIToken, compute_pool_contract::ComputePool, compute_registry_contract::ComputeRegistryContract, - prime_network_contract::PrimeNetworkContract, + prime_network_contract::PrimeNetworkContract, 
stake_manager::StakeManagerContract, work_validators::synthetic_data_validator::SyntheticDataWorkValidator, }, }, @@ -20,6 +20,7 @@ pub struct Contracts { pub ai_token: AIToken, pub prime_network: PrimeNetworkContract, pub compute_pool: ComputePool, + pub stake_manager: Option, pub synthetic_data_validator: Option, } @@ -29,6 +30,7 @@ pub struct ContractBuilder<'a> { ai_token: Option, prime_network: Option, compute_pool: Option, + stake_manager: Option, synthetic_data_validator: Option, } @@ -40,6 +42,7 @@ impl<'a> ContractBuilder<'a> { ai_token: None, prime_network: None, compute_pool: None, + stake_manager: None, synthetic_data_validator: None, } } @@ -76,6 +79,11 @@ impl<'a> ContractBuilder<'a> { self } + pub fn with_stake_manager(mut self) -> Self { + self.stake_manager = Some(StakeManagerContract::new(self.wallet, "stake_manager.json")); + self + } + // TODO: This is not ideal yet - now you have to init all contracts all the time pub fn build(self) -> Result { // Using custom error ContractError @@ -101,6 +109,7 @@ impl<'a> ContractBuilder<'a> { None => return Err(ContractError::Other("PrimeNetwork not initialized".into())), // Custom error handling }, synthetic_data_validator: self.synthetic_data_validator, + stake_manager: self.stake_manager, }) } } diff --git a/shared/src/web3/contracts/core/contract.rs b/shared/src/web3/contracts/core/contract.rs index 5fc37866..33ca72a0 100644 --- a/shared/src/web3/contracts/core/contract.rs +++ b/shared/src/web3/contracts/core/contract.rs @@ -45,6 +45,7 @@ impl Contract { "synthetic_data_work_validator.json" => { include_abi!("../../../../artifacts/abi/synthetic_data_work_validator.json") } + "stake_manager.json" => include_abi!("../../../../artifacts/abi/stake_manager.json"), _ => panic!("Unknown ABI file: {}", path), }; diff --git a/shared/src/web3/contracts/implementations/compute_registry_contract.rs b/shared/src/web3/contracts/implementations/compute_registry_contract.rs index 415dfc13..c35af83c 100644 --- 
a/shared/src/web3/contracts/implementations/compute_registry_contract.rs +++ b/shared/src/web3/contracts/implementations/compute_registry_contract.rs @@ -5,7 +5,7 @@ use super::{ use crate::web3::contracts::helpers::utils::get_selector; use crate::web3::wallet::Wallet; use alloy::dyn_abi::DynSolValue; -use alloy::primitives::Address; +use alloy::primitives::{Address, U256}; pub struct ComputeRegistryContract { instance: Contract, @@ -41,6 +41,22 @@ impl ComputeRegistryContract { Ok(provider) } + pub async fn get_provider_total_compute( + &self, + address: Address, + ) -> Result> { + let provider_response = self + .instance + .instance() + .function("getProviderTotalCompute", &[address.into()])? + .call() + .await?; + + Ok(U256::from( + provider_response.first().unwrap().as_uint().unwrap().0, + )) + } + pub async fn get_node( &self, #[allow(unused_variables)] provider_address: Address, diff --git a/shared/src/web3/contracts/implementations/mod.rs b/shared/src/web3/contracts/implementations/mod.rs index 488905cf..76567c0f 100644 --- a/shared/src/web3/contracts/implementations/mod.rs +++ b/shared/src/web3/contracts/implementations/mod.rs @@ -2,4 +2,5 @@ pub mod ai_token_contract; pub mod compute_pool_contract; pub mod compute_registry_contract; pub mod prime_network_contract; +pub mod stake_manager; pub mod work_validators; diff --git a/shared/src/web3/contracts/implementations/prime_network_contract.rs b/shared/src/web3/contracts/implementations/prime_network_contract.rs index 9a5e906c..a3530bcd 100644 --- a/shared/src/web3/contracts/implementations/prime_network_contract.rs +++ b/shared/src/web3/contracts/implementations/prime_network_contract.rs @@ -32,6 +32,22 @@ impl PrimeNetworkContract { Ok(register_tx) } + pub async fn stake( + &self, + additional_stake: U256, + ) -> Result, Box> { + let stake_tx = self + .instance + .instance() + .function("increaseStake", &[additional_stake.into()])? + .send() + .await? 
+ .watch() + .await?; + + Ok(stake_tx) + } + pub async fn add_compute_node( &self, node_address: Address, diff --git a/shared/src/web3/contracts/implementations/stake_manager.rs b/shared/src/web3/contracts/implementations/stake_manager.rs new file mode 100644 index 00000000..d9a74fb2 --- /dev/null +++ b/shared/src/web3/contracts/implementations/stake_manager.rs @@ -0,0 +1,54 @@ +use crate::web3::contracts::constants::addresses::STAKE_MANAGER_ADDRESS; +use crate::web3::contracts::core::contract::Contract; +use crate::web3::wallet::Wallet; +use alloy::primitives::{Address, U256}; + +pub struct StakeManagerContract { + instance: Contract, +} + +impl StakeManagerContract { + pub fn new(wallet: &Wallet, abi_file_path: &str) -> Self { + let instance = Contract::new(STAKE_MANAGER_ADDRESS, wallet, abi_file_path); + Self { instance } + } + + pub async fn get_stake_minimum(&self) -> Result> { + let result = self + .instance + .instance() + .function("getStakeMinimum", &[])? + .call() + .await?; + + let minimum: U256 = result + .into_iter() + .next() + .map(|value| value.as_uint().unwrap_or_default()) + .unwrap_or_default() + .0; + Ok(minimum) + } + pub async fn get_stake(&self, staker: Address) -> Result> { + let result = self + .instance + .instance() + .function("getStake", &[staker.into()])? 
+ .call() + .await?; + println!("Result: {:?}", result); + + Ok(result[0].as_uint().unwrap_or_default().0) + } + + pub async fn calculate_stake( + &self, + compute_units: U256, + provider_total_compute: U256, + ) -> Result> { + let min_stake_per_unit = self.get_stake_minimum().await?; + let total_compute = provider_total_compute + compute_units + U256::from(1); + let required_stake = total_compute * min_stake_per_unit; + Ok(required_stake) + } +} diff --git a/worker/src/cli/command.rs b/worker/src/cli/command.rs index 876f87b7..430b3890 100644 --- a/worker/src/cli/command.rs +++ b/worker/src/cli/command.rs @@ -78,10 +78,6 @@ pub enum Commands { #[arg(long)] discovery_url: Option, - // Amount of stake to use when provider is newly registered - #[arg(long, default_value = "10")] - provider_stake: i32, - #[arg(long, default_value = "0x0000000000000000000000000000000000000000")] validator_address: Option, }, @@ -103,7 +99,6 @@ pub async fn execute_command( compute_pool_id, dry_run: _, rpc_url, - provider_stake, discovery_url, state_dir_overwrite, disable_state_storing, @@ -153,6 +148,7 @@ pub async fn execute_command( .with_ai_token() .with_prime_network() .with_compute_pool() + .with_stake_manager() .build() .unwrap(), ); @@ -175,22 +171,25 @@ pub async fn execute_command( DiscoveryService::new(&node_wallet_instance, discovery_url.clone(), None); let pool_id = U256::from(*compute_pool_id as u32); + Console::progress("Loading pool info"); + println!("Loading pool info {}", pool_id); let pool_info = loop { match contracts.compute_pool.get_pool_info(pool_id).await { Ok(pool) if pool.status == PoolStatus::ACTIVE => break Arc::new(pool), Ok(_) => { - Console::error("❌ Pool is not active yet. Checking again in 15 seconds."); + Console::warning("Pool is not active yet. Checking again in 15 seconds."); tokio::select! 
{ _ = tokio::time::sleep(tokio::time::Duration::from_secs(15)) => {}, _ = cancellation_token.cancelled() => return Ok(()), } } Err(e) => { - Console::error(&format!("❌ Failed to get pool info. {}", e)); + Console::error(&format!("Failed to get pool info: {}", e)); return Ok(()); } } }; + println!("Pool info: {:?}", pool_info); let node_config = Node { id: node_wallet_instance @@ -278,10 +277,55 @@ pub async fn execute_command( let mut attempts = 0; let max_attempts = 100; - let stake = U256::from(*provider_stake); + let gpu_count: u32 = match &node_config.compute_specs { + Some(specs) => specs + .gpu + .as_ref() + .map(|gpu| gpu.count.unwrap_or(0)) + .unwrap_or(0), + None => 0, + }; + + let compute_units = U256::from(gpu_count * 1000); + + let provider_total_compute = match contracts + .compute_registry + .get_provider_total_compute( + provider_wallet_instance.wallet.default_signer().address(), + ) + .await + { + Ok(compute) => compute, + Err(e) => { + Console::error(&format!("❌ Failed to get provider total compute: {}", e)); + std::process::exit(1); + } + }; + let stake_manager = match contracts.stake_manager.as_ref() { + Some(stake_manager) => stake_manager, + None => { + Console::error("❌ Stake manager not initialized"); + std::process::exit(1); + } + }; + + let required_stake = match stake_manager + .calculate_stake(compute_units, provider_total_compute) + .await + { + Ok(stake) => stake, + Err(e) => { + Console::error(&format!("❌ Failed to calculate required stake: {}", e)); + std::process::exit(1); + } + }; + println!("Required stake: {}", required_stake); + + // TODO: Currently we do not increase stake when adding more nodes + while attempts < max_attempts { let spinner = Console::spinner("Registering provider..."); - if let Err(e) = provider_ops.register_provider(stake).await { + if let Err(e) = provider_ops.register_provider(required_stake).await { spinner.finish_and_clear(); // Finish spinner before logging error if let ProviderError::NotWhitelisted = e { 
Console::error("❌ Provider not whitelisted, retrying in 15 seconds..."); @@ -309,16 +353,32 @@ pub async fn execute_command( std::process::exit(1); }; - let gpu_count: u32 = match &node_config.compute_specs { - Some(specs) => specs - .gpu - .as_ref() - .map(|gpu| gpu.count.unwrap_or(0)) - .unwrap_or(0), - None => 0, + let provider_stake = match stake_manager + .get_stake(provider_wallet_instance.wallet.default_signer().address()) + .await + { + Ok(stake) => stake, + Err(e) => { + Console::error(&format!("❌ Failed to get provider stake: {}", e)); + std::process::exit(1); + } }; + Console::info("Provider stake:", &format!("{}", provider_stake)); + + if provider_stake < required_stake { + let spinner = Console::spinner("Increasing stake..."); + if let Err(e) = provider_ops + .increase_stake(required_stake - provider_stake) + .await + { + spinner.finish_and_clear(); + Console::error(&format!("❌ Failed to increase stake: {}", e)); + std::process::exit(1); + } + spinner.finish_and_clear(); + } - match compute_node_ops.add_compute_node(gpu_count).await { + match compute_node_ops.add_compute_node(compute_units).await { Ok(added_node) => { if added_node { // If we are adding a new compute node we wait for a proper diff --git a/worker/src/operations/compute_node.rs b/worker/src/operations/compute_node.rs index d53106fe..306d7bad 100644 --- a/worker/src/operations/compute_node.rs +++ b/worker/src/operations/compute_node.rs @@ -31,7 +31,7 @@ impl<'c> ComputeNodeOperations<'c> { // Returns true if the compute node was added, false if it already exists pub async fn add_compute_node( &self, - gpu_count: u32, + compute_units: U256, ) -> Result> { Console::section("🔄 Adding compute node"); let compute_node = self @@ -80,7 +80,6 @@ impl<'c> ComputeNodeOperations<'c> { .as_bytes(); // Create the signature bytes - let compute_units: U256 = U256::from(1000 * gpu_count); let add_node_tx = self .prime_network .add_compute_node(node_address, compute_units, signature.to_vec()) diff --git 
a/worker/src/operations/provider.rs b/worker/src/operations/provider.rs index 1f478582..5a353309 100644 --- a/worker/src/operations/provider.rs +++ b/worker/src/operations/provider.rs @@ -38,6 +38,12 @@ impl<'c> ProviderOperations<'c> { .await .map_err(|_| ProviderError::Other)?; + let eth_balance = self + .wallet + .get_balance() + .await + .map_err(|_| ProviderError::Other)?; + // Check if we are already provider let provider = self .compute_registry @@ -47,6 +53,7 @@ impl<'c> ProviderOperations<'c> { let provider_exists = provider.provider_address != Address::default(); Console::info("AI Token Balance", &format!("{} tokens", balance)); + Console::info("ETH Balance", &format!("{} ETH", eth_balance)); Console::info("Provider registered:", &format!("{}", provider_exists)); if !provider_exists { let spinner = Console::spinner("Approving AI Token for Stake transaction"); @@ -59,11 +66,13 @@ impl<'c> ProviderOperations<'c> { spinner.finish_and_clear(); let spinner = Console::spinner("Registering Provider"); - let register_tx = self - .prime_network - .register_provider(stake) - .await - .map_err(|_| ProviderError::Other)?; + let register_tx = match self.prime_network.register_provider(stake).await { + Ok(tx) => tx, + Err(e) => { + println!("Failed to register provider: {:?}", e); + return Err(ProviderError::Other); + } + }; Console::info( "Registration transaction completed: ", &format!("{:?}", register_tx), @@ -95,6 +104,51 @@ impl<'c> ProviderOperations<'c> { Ok(()) } + + pub async fn increase_stake(&self, additional_stake: U256) -> Result<(), ProviderError> { + Console::section("💰 Increasing Provider Stake"); + + let address = self.wallet.wallet.default_signer().address(); + let balance: U256 = self + .ai_token + .balance_of(address) + .await + .map_err(|_| ProviderError::Other)?; + + Console::info("Current AI Token Balance", &format!("{} tokens", balance)); + Console::info("Additional stake amount", &format!("{}", additional_stake)); + + if balance < 
additional_stake { + Console::error("Insufficient token balance for stake increase"); + return Err(ProviderError::Other); + } + + let spinner = Console::spinner("Approving AI Token for additional stake"); + let approve_tx = self + .ai_token + .approve(additional_stake) + .await + .map_err(|_| ProviderError::Other)?; + Console::info("Transaction approved", &format!("{:?}", approve_tx)); + spinner.finish_and_clear(); + + let spinner = Console::spinner("Increasing stake"); + let stake_tx = match self.prime_network.stake(additional_stake).await { + Ok(tx) => tx, + Err(e) => { + println!("Failed to increase stake: {:?}", e); + return Err(ProviderError::Other); + } + }; + Console::info( + "Stake increase transaction completed: ", + &format!("{:?}", stake_tx), + ); + spinner.finish_and_clear(); + + Console::success("Provider stake increased successfully"); + Ok(()) + } } #[derive(Debug)] From c76d6010da26eb25ff907316fd547ee267366ede Mon Sep 17 00:00:00 2001 From: JannikSt Date: Sat, 15 Mar 2025 10:30:50 +0100 Subject: [PATCH 40/85] fix worker unit formatting (#138) --- worker/src/cli/command.rs | 10 ++++++++-- worker/src/operations/provider.rs | 21 +++++++++++++++++---- 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/worker/src/cli/command.rs b/worker/src/cli/command.rs index 430b3890..e8bc2cd9 100644 --- a/worker/src/cli/command.rs +++ b/worker/src/cli/command.rs @@ -319,7 +319,10 @@ pub async fn execute_command( std::process::exit(1); } }; - println!("Required stake: {}", required_stake); + Console::info( + "Required stake", + &format!("{}", required_stake / U256::from(10u128.pow(18))), + ); // TODO: Currently we do not increase stake when adding more nodes @@ -363,7 +366,10 @@ pub async fn execute_command( std::process::exit(1); } }; - Console::info("Provider stake:", &format!("{}", provider_stake)); + Console::info( + "Provider stake", + &format!("{}", provider_stake / U256::from(10u128.pow(18))), + ); if provider_stake < required_stake { let spinner = 
Console::spinner("Increasing stake..."); diff --git a/worker/src/operations/provider.rs b/worker/src/operations/provider.rs index 5a353309..1d5ef132 100644 --- a/worker/src/operations/provider.rs +++ b/worker/src/operations/provider.rs @@ -52,9 +52,16 @@ impl<'c> ProviderOperations<'c> { .map_err(|_| ProviderError::Other)?; let provider_exists = provider.provider_address != Address::default(); - Console::info("AI Token Balance", &format!("{} tokens", balance)); - Console::info("ETH Balance", &format!("{} ETH", eth_balance)); + Console::info( + "AI Token Balance", + &format!("{} tokens", balance / U256::from(10u128.pow(18))), + ); + Console::info( + "ETH Balance", + &format!("{} ETH", eth_balance / U256::from(10u128.pow(18))), + ); Console::info("Provider registered:", &format!("{}", provider_exists)); + if !provider_exists { let spinner = Console::spinner("Approving AI Token for Stake transaction"); let approve_tx = self @@ -115,8 +122,14 @@ impl<'c> ProviderOperations<'c> { .await .map_err(|_| ProviderError::Other)?; - Console::info("Current AI Token Balance", &format!("{} tokens", balance)); - Console::info("Additional stake amount", &format!("{}", additional_stake)); + Console::info( + "Current AI Token Balance", + &format!("{} tokens", balance / U256::from(10u128.pow(18))), + ); + Console::info( + "Additional stake amount", + &format!("{}", additional_stake / U256::from(10u128.pow(18))), + ); if balance < additional_stake { Console::error("Insufficient token balance for stake increase"); From 541430f4d515a9b1651d18b7a68f0dbe332c69e5 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Sat, 15 Mar 2025 10:50:37 +0100 Subject: [PATCH 41/85] remove s3 from release pipeline (#141) --- .github/workflows/dev-release.yml | 7 ------- .github/workflows/prod-release.yml | 7 ------- 2 files changed, 14 deletions(-) diff --git a/.github/workflows/dev-release.yml b/.github/workflows/dev-release.yml index 8730c010..d39e5865 100644 --- a/.github/workflows/dev-release.yml +++ 
b/.github/workflows/dev-release.yml @@ -117,13 +117,6 @@ jobs: - name: Set up Google Cloud SDK uses: google-github-actions/setup-gcloud@v1 - # Upload to Google Cloud Storage - - name: Upload to GCS - run: | - gsutil -m cp -r release-artifacts/* gs://prime-protocol/${{ steps.tag.outputs.tag_name }}/ - gsutil -m cp -r gs://prime-protocol/${{ steps.tag.outputs.tag_name }}/* gs://prime-protocol/latest/ - gsutil -m setmeta -h "Cache-Control:no-cache, max-age=0" gs://prime-protocol/latest/**/* - - name: Generate Docker metadata id: meta run: | diff --git a/.github/workflows/prod-release.yml b/.github/workflows/prod-release.yml index f298a656..26729ddc 100644 --- a/.github/workflows/prod-release.yml +++ b/.github/workflows/prod-release.yml @@ -128,13 +128,6 @@ jobs: if: steps.check_tag.outputs.exists == 'false' uses: google-github-actions/setup-gcloud@v1 - - name: Upload to GCS - if: steps.check_tag.outputs.exists == 'false' - run: | - gsutil -m cp -r release-artifacts/* gs://prime-protocol/v${{ steps.get_version.outputs.version }}/ - gsutil -m cp -r release-artifacts/* gs://prime-protocol/stable/ - gsutil -m setmeta -h "Cache-Control:no-cache, max-age=0" gs://prime-protocol/stable/**/* - - name: Configure Docker for Artifact Registry if: steps.check_tag.outputs.exists == 'false' run: | From b72760d3d08b17a830f11bbf305eb8a45e575ef7 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Sat, 15 Mar 2025 17:26:34 +0100 Subject: [PATCH 42/85] support new env vars in validator docker (#143) --- validator/Dockerfile | 8 ++++++++ validator/src/main.rs | 47 ++++++++++++++++++++++++------------------- 2 files changed, 34 insertions(+), 21 deletions(-) diff --git a/validator/Dockerfile b/validator/Dockerfile index 63ac4fc3..4af52619 100644 --- a/validator/Dockerfile +++ b/validator/Dockerfile @@ -7,12 +7,20 @@ RUN chmod +x /usr/local/bin/validator ENV RPC_URL="http://localhost:8545" ENV VALIDATOR_KEY="" ENV DISCOVERY_URL="http://localhost:8089" +ENV WORK_VALIDATION_CONTRACT="" +ENV 
POOL_ID="" +ENV WORK_VALIDATION_INTERVAL="30" +ENV LEVITICUS_URL="" RUN echo '#!/bin/sh\n\ exec /usr/local/bin/validator \ --rpc-url "$RPC_URL" \ --validator-key "$VALIDATOR_KEY" \ --discovery-url "$DISCOVERY_URL" \ +--work-validation-contract "$WORK_VALIDATION_CONTRACT" \ +--pool-id "$POOL_ID" \ +--work-validation-interval "$WORK_VALIDATION_INTERVAL" \ +--leviticus-url "$LEVITICUS_URL" \ "$@"' > /entrypoint.sh && \ chmod +x /entrypoint.sh diff --git a/validator/src/main.rs b/validator/src/main.rs index 3697f103..d25c2d16 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -95,33 +95,38 @@ fn main() { let contracts = Arc::new(contracts); let hardware_validator = HardwareValidator::new(&validator_wallet, contracts.clone()); - let pool_id = args.pool_id.clone(); - let mut synthetic_validator = match contracts.synthetic_data_validator.clone() { - Some(validator) => { - if let Some(leviticus_url) = args.leviticus_url { - SyntheticDataValidator::new( - None, - pool_id.unwrap(), - validator, - contracts.prime_network.clone(), - leviticus_url, - ) - } else { - error!("Leviticus URL is not provided"); + let mut synthetic_validator = if let Some(pool_id) = args.pool_id.clone() { + match contracts.synthetic_data_validator.clone() { + Some(validator) => { + if let Some(leviticus_url) = args.leviticus_url { + Some(SyntheticDataValidator::new( + None, + pool_id, + validator, + contracts.prime_network.clone(), + leviticus_url, + )) + } else { + error!("Leviticus URL is not provided"); + std::process::exit(1); + } + } + None => { + error!("Synthetic data validator not found"); std::process::exit(1); } } - None => { - error!("Synthetic data validator not found"); - std::process::exit(1); - } + } else { + None }; loop { - runtime.block_on(async { - let validation_result = synthetic_validator.validate_work().await; - println!("Validation result: {:?}", validation_result); - }); + if let Some(validator) = &mut synthetic_validator { + runtime.block_on(async { + let 
validation_result = validator.validate_work().await; + println!("Validation result: {:?}", validation_result); + }); + } async fn _generate_signature(wallet: &Wallet, message: &str) -> Result { let signature = sign_request(message, wallet, None) From ea1da66337a5f1d7c499bee1670a07b3f08319d6 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Sat, 15 Mar 2025 18:06:05 +0100 Subject: [PATCH 43/85] improve orchestrator docker image (#144) --- orchestrator/Dockerfile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/orchestrator/Dockerfile b/orchestrator/Dockerfile index 528ef6eb..2f7e5c09 100644 --- a/orchestrator/Dockerfile +++ b/orchestrator/Dockerfile @@ -17,6 +17,7 @@ ENV REDIS_STORE_URL="redis://localhost:6380" ENV DISCOVERY_URL="http://localhost:8089" ENV ADMIN_API_KEY="admin" ENV DISABLE_EJECTION="false" +ENV S3_CREDENTIALS="" RUN echo '#!/bin/sh\n\ exec /usr/local/bin/orchestrator \ @@ -32,6 +33,7 @@ $([ ! -z "$HOST" ] && echo "--host $HOST") \ --discovery-url "$DISCOVERY_URL" \ --admin-api-key "$ADMIN_API_KEY" \ $([ "$DISABLE_EJECTION" = "true" ] && echo "--disable-ejection") \ +$([ ! 
-z "$S3_CREDENTIALS" ] && echo "--s3-credentials $S3_CREDENTIALS") \ "$@"' > /entrypoint.sh && \ chmod +x /entrypoint.sh From 4094f5e9a27c09d7f4c3801994c8ab42c2c09375 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Sat, 15 Mar 2025 20:02:58 +0100 Subject: [PATCH 44/85] fix stake setting approach (#142) --- dev-utils/examples/set_min_stake_amount.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/dev-utils/examples/set_min_stake_amount.rs b/dev-utils/examples/set_min_stake_amount.rs index 192792c7..d3a89d5d 100644 --- a/dev-utils/examples/set_min_stake_amount.rs +++ b/dev-utils/examples/set_min_stake_amount.rs @@ -1,3 +1,4 @@ +use alloy::primitives::utils::Unit; use alloy::primitives::U256; use clap::Parser; use eyre::Result; @@ -9,7 +10,7 @@ use url::Url; struct Args { /// Minimum stake amount to set #[arg(short = 'm', long)] - min_stake_amount: u64, + min_stake_amount: f64, /// Private key for transaction signing #[arg(short = 'k', long)] @@ -34,13 +35,13 @@ async fn main() -> Result<()> { .build() .unwrap(); - let min_stake_amount = U256::from(args.min_stake_amount); + let min_stake_amount = U256::from(args.min_stake_amount) * Unit::ETHER.wei(); + println!("Min stake amount: {}", min_stake_amount); let tx = contracts .prime_network .set_stake_minimum(min_stake_amount) .await; - println!("Setting minimum stake amount: {}", args.min_stake_amount); println!("Transaction: {:?}", tx); Ok(()) From 7f306a67e8d4091492ea5c6617c53c4a8f27ceea Mon Sep 17 00:00:00 2001 From: JannikSt Date: Mon, 17 Mar 2025 10:17:00 +0100 Subject: [PATCH 45/85] Feature/docker compose setup (#145) * Support launching aux services using docker compose * Update smart-contracts submodule to latest version * add error logging to orchestrator, use parquet file in synthetic data validator, improve Makefile --------- Co-authored-by: Manveer --- .env.example | 9 +- .tmuxinator.yml | 2 +- Makefile | 4 +- README.md | 22 ++++ dev-utils/examples/mint_ai_token.rs | 5 +- 
docker-compose.yml | 116 ++++++++++++++++++- orchestrator/src/api/routes/storage.rs | 11 +- smart-contracts | 2 +- validator/src/validators/synthetic_data.rs | 6 +- worker/src/docker/taskbridge/file_handler.rs | 14 ++- 10 files changed, 172 insertions(+), 19 deletions(-) diff --git a/.env.example b/.env.example index af529456..13b8826c 100644 --- a/.env.example +++ b/.env.example @@ -1,6 +1,8 @@ RPC_URL= NETWORK_ID= MIN_STAKE_AMOUNT= +WORKER_COMPUTE_POOL_ID= +WORKER_EXTERNAL_IP= # Private keys of privileged accounts PRIVATE_KEY_FEDERATOR= @@ -24,4 +26,9 @@ AI_TOKEN_ADDRESS= COMPUTE_REGISTRY_ADDRESS= DOMAIN_REGISTRY_ADDRESS= STAKE_MANAGER_ADDRESS= -COMPUTE_POOL_ADDRESS= \ No newline at end of file +COMPUTE_POOL_ADDRESS= + +WORK_VALIDATION_CONTRACT= + +LEVITICUS_URL= +S3_CREDENTIALS= \ No newline at end of file diff --git a/.tmuxinator.yml b/.tmuxinator.yml index 33d13ab2..e1e05980 100644 --- a/.tmuxinator.yml +++ b/.tmuxinator.yml @@ -11,5 +11,5 @@ windows: - background: layout: even-horizontal panes: - - bash -c 'tmux rename-window "Prime Dev Services" && docker compose up' + - bash -c 'tmux rename-window "Prime Dev Services" && docker compose up anvil redis' - bash -c 'while true; do make whitelist-provider; sleep 10; done' \ No newline at end of file diff --git a/Makefile b/Makefile index ebf76160..4f2b6abf 100644 --- a/Makefile +++ b/Makefile @@ -73,11 +73,11 @@ watch-discovery: watch-worker: set -a; source ${ENV_FILE}; set +a; \ - cargo watch -w worker/src -x "run --bin worker -- run --private-key-provider $$PROVIDER_PRIVATE_KEY --private-key-node $$NODE_PRIVATE_KEY --port 8091 --external-ip 0.0.0.0 --compute-pool-id $$WORKER_COMPUTE_POOL_ID --validator-address $$VALIDATOR_ADDRESS" + cargo watch -w worker/src -x "run --bin worker -- run --private-key-provider $$PROVIDER_PRIVATE_KEY --private-key-node $$NODE_PRIVATE_KEY --port 8091 --external-ip $${WORKER_EXTERNAL_IP:-localhost} --compute-pool-id $$WORKER_COMPUTE_POOL_ID --validator-address $$VALIDATOR_ADDRESS" 
watch-validator: set -a; source ${ENV_FILE}; set +a; \ - cargo watch -w validator/src -x "run --bin validator -- --validator-key $${PRIVATE_KEY_VALIDATOR} --rpc-url $${RPC_URL} --pool-id 1 --work-validation-contract $${WORK_VALIDATION_CONTRACT} --leviticus-url $${LEVITICUS_URL}" + cargo watch -w validator/src -x "run --bin validator -- --validator-key $${PRIVATE_KEY_VALIDATOR} --rpc-url $${RPC_URL} --pool-id $${WORKER_COMPUTE_POOL_ID} --work-validation-contract $${WORK_VALIDATION_CONTRACT} --leviticus-url $${LEVITICUS_URL}" watch-orchestrator: set -a; source ${ENV_FILE}; set +a; \ diff --git a/README.md b/README.md index 9395ac36..dd05f92f 100644 --- a/README.md +++ b/README.md @@ -126,6 +126,24 @@ This will launch: - Redis instance - Supporting infrastructure +### Running in docker compose +You can run all supporting services (chain, validator, discovery, orchestrator) in docker compose. + +1.Start docker compose: +```bash +docker compose up +``` + +2. Run Setup: +``` +make setup +``` + +3. You can now launch a worker. +- Adjust the .env var `WORKER_EXTERNAL_IP` to: `WORKER_EXTERNAL_IP=host.docker.internal` +- Launch the worker using `make watch-worker` +- whitelist the worker once you see the whitelist alert using: `make whitelist-provider` + ### Running a Worker Node Once the core services are running, you can start a worker node in a new terminal: @@ -213,3 +231,7 @@ We welcome contributions! Please see our [Contributing Guidelines](CONTRIBUTING. ## Security See [SECURITY.md](SECURITY.md) for security policies and reporting vulnerabilities. 
+ +## Additional Resources + +- [Anvil Testchain Deployment Guide](./docs/testchain-deployment.md) - Commands and troubleshooting for setting up an Anvil testchain diff --git a/dev-utils/examples/mint_ai_token.rs b/dev-utils/examples/mint_ai_token.rs index 2cff0242..d78871d0 100644 --- a/dev-utils/examples/mint_ai_token.rs +++ b/dev-utils/examples/mint_ai_token.rs @@ -1,3 +1,4 @@ +use alloy::primitives::utils::Unit; use alloy::primitives::Address; use alloy::primitives::U256; use clap::Parser; @@ -22,7 +23,7 @@ struct Args { rpc_url: String, /// Amount to mint - #[arg(short = 'm', long, default_value = "1000")] + #[arg(short = 'm', long, default_value = "30000")] amount: u64, } @@ -41,7 +42,7 @@ async fn main() -> Result<()> { .unwrap(); let address = Address::from_str(&args.address).unwrap(); - let amount = U256::from(args.amount); + let amount = U256::from(args.amount) * Unit::ETHER.wei(); let tx = contracts.ai_token.mint(address, amount).await; println!("Minting to address: {}", args.address); println!("Transaction: {:?}", tx); diff --git a/docker-compose.yml b/docker-compose.yml index bf0cf496..3bd29c8a 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,12 +1,11 @@ version: "3.8" - networks: prime: driver: bridge attachable: true app-network: - name: app-network - external: true + driver: bridge + external: true volumes: redis-data: @@ -22,9 +21,118 @@ services: - prime - app-network restart: always + healthcheck: + test: ["CMD-SHELL", "timeout 5 bash -c 'echo > /dev/tcp/localhost/8545' || exit 1"] + interval: 3s + timeout: 5s + retries: 3 + start_period: 10s + + contract-deployer: + image: ghcr.io/foundry-rs/foundry:latest + platform: linux/amd64 + volumes: + - ./smart-contracts:/app + working_dir: /app + environment: + - RPC_URL=http://anvil:8545 + entrypoint: ["/bin/sh", "-c"] + command: | + " + set -x + echo 'Starting contract deployment...' 
+ chmod +x ./deploy.sh + chmod +x ./deploy_work_validation.sh + set +e + ./deploy.sh && ./deploy_work_validation.sh + set -e + echo 'Deployment completed.' + " + networks: + - prime + depends_on: + anvil: + condition: service_healthy + redis: image: redis:alpine ports: - "6380:6379" networks: - - prime \ No newline at end of file + - prime + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 5s + timeout: 3s + retries: 5 + + discovery: + image: ghcr.io/primeintellect-ai/protocol/discovery:dev + platform: linux/amd64 + ports: + - "8089:8089" + environment: + - RPC_URL=http://anvil:8545 + - REDIS_URL=redis://redis:6379 + - VALIDATOR_ADDRESS=${VALIDATOR_ADDRESS} + - PLATFORM_API_KEY=prime + networks: + - prime + depends_on: + anvil: + condition: service_healthy + redis: + condition: service_healthy + contract-deployer: + condition: service_started + + orchestrator: + image: ghcr.io/primeintellect-ai/protocol/orchestrator:dev + platform: linux/amd64 + ports: + - "8090:8090" + environment: + - RPC_URL=http://anvil:8545 + - REDIS_STORE_URL=redis://redis:6379 + - DISCOVERY_URL=http://discovery:8089 + - URL=http://localhost:8090 + - COORDINATOR_KEY=${POOL_OWNER_PRIVATE_KEY} + - COMPUTE_POOL_ID=${WORKER_COMPUTE_POOL_ID} + - DOMAIN_ID=0 + - PORT=8090 + - DISCOVERY_REFRESH_INTERVAL=10 + - S3_CREDENTIALS=${S3_CREDENTIALS} + networks: + - prime + depends_on: + anvil: + condition: service_healthy + redis: + condition: service_healthy + discovery: + condition: service_started + + validator: + image: ghcr.io/primeintellect-ai/protocol/validator:dev + platform: linux/amd64 + ports: + - "8099:8080" + extra_hosts: + - "host.docker.internal:host-gateway" + networks: + - prime + environment: + - RPC_URL=http://anvil:8545 + - DISCOVERY_URL=http://discovery:8089 + - VALIDATOR_KEY=${PRIVATE_KEY_VALIDATOR} + - WORK_VALIDATION_CONTRACT=${WORK_VALIDATION_CONTRACT} + - POOL_ID=${WORKER_COMPUTE_POOL_ID} + - WORK_VALIDATION_INTERVAL=30 + - LEVITICUS_URL=${LEVITICUS_URL} + depends_on: 
+ anvil: + condition: service_healthy + redis: + condition: service_healthy + discovery: + condition: service_started \ No newline at end of file diff --git a/orchestrator/src/api/routes/storage.rs b/orchestrator/src/api/routes/storage.rs index c2fa5660..b37f6a70 100644 --- a/orchestrator/src/api/routes/storage.rs +++ b/orchestrator/src/api/routes/storage.rs @@ -47,10 +47,13 @@ async fn request_upload( "success": true, "signed_url": signed_url })), - Err(e) => HttpResponse::InternalServerError().json(serde_json::json!({ - "success": false, - "error": format!("Failed to generate upload URL: {}", e) - })), + Err(e) => { + log::error!("Failed to generate upload URL: {}", e); + HttpResponse::InternalServerError().json(serde_json::json!({ + "success": false, + "error": format!("Failed to generate upload URL: {}", e) + })) + } } } diff --git a/smart-contracts b/smart-contracts index 7d8e0d44..0a343de8 160000 --- a/smart-contracts +++ b/smart-contracts @@ -1 +1 @@ -Subproject commit 7d8e0d44c29b442df8138699a2addef37de73123 +Subproject commit 0a343de8af7c8da76ae55c67592ee1bf03569343 diff --git a/validator/src/validators/synthetic_data.rs b/validator/src/validators/synthetic_data.rs index 41972a03..de9bb54c 100644 --- a/validator/src/validators/synthetic_data.rs +++ b/validator/src/validators/synthetic_data.rs @@ -169,7 +169,8 @@ impl SyntheticDataValidator { ); // Start validation by calling validation endpoint with retries - let validate_url = format!("{}/validate/{}", self.leviticus_url, work_key); + let validate_url = + format!("{}/validate/{}.parquet", self.leviticus_url, work_key); let client = reqwest::Client::new(); let mut validate_attempts = 0; @@ -208,7 +209,8 @@ impl SyntheticDataValidator { match validation_result { Ok(_) => { // Poll status endpoint until we get a proper response - let status_url = format!("{}/status/{}", self.leviticus_url, work_key); + let status_url = + format!("{}/status/{}.parquet", self.leviticus_url, work_key); let mut status_attempts = 
0; const MAX_STATUS_ATTEMPTS: u32 = 5; diff --git a/worker/src/docker/taskbridge/file_handler.rs b/worker/src/docker/taskbridge/file_handler.rs index a2f0c510..994603ab 100644 --- a/worker/src/docker/taskbridge/file_handler.rs +++ b/worker/src/docker/taskbridge/file_handler.rs @@ -39,8 +39,14 @@ pub async fn handle_file_upload( })? .replace("/heartbeat", ""); + // Clean filename by removing /data prefix if present + let clean_file_name = file_name.trim_start_matches("/data/"); + // Construct file path - let file = format!("{}/prime-task-{}/{}", storage_path, task_id, file_name); + let file = format!( + "{}/prime-task-{}/{}", + storage_path, task_id, clean_file_name + ); debug!("File: {:?}", file); // Get file size @@ -66,7 +72,11 @@ pub async fn handle_file_upload( // Create upload request let client = Client::new(); let request = RequestUploadRequest { - file_name: file_sha.to_string(), + file_name: if clean_file_name.ends_with(".parquet") { + format!("{}.parquet", file_sha) + } else { + file_sha + }, file_size, file_type: "application/json".to_string(), // Assume JSON }; From 9420a14aa2c7b1b06a6fe665f5a8eb9813f633eb Mon Sep 17 00:00:00 2001 From: JannikSt Date: Tue, 18 Mar 2025 23:08:58 +0100 Subject: [PATCH 46/85] add basic ability to generate new wallets from worker cli (#147) --- worker/src/cli/command.rs | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/worker/src/cli/command.rs b/worker/src/cli/command.rs index e8bc2cd9..879425cb 100644 --- a/worker/src/cli/command.rs +++ b/worker/src/cli/command.rs @@ -13,6 +13,7 @@ use crate::services::discovery::DiscoveryService; use crate::state::system_state::SystemState; use crate::TaskHandles; use alloy::primitives::U256; +use alloy::signers::local::PrivateKeySigner; use clap::{Parser, Subcommand}; use log::debug; use shared::models::node::Node; @@ -81,8 +82,10 @@ pub enum Commands { #[arg(long, default_value = "0x0000000000000000000000000000000000000000")] validator_address: 
Option, }, - /// Run system checks to verify hardware and software compatibility Check {}, + + /// Generate new wallets for provider and node + GenerateWallets {}, } pub async fn execute_command( @@ -464,5 +467,23 @@ pub async fn execute_command( let _ = software_check::run_software_check(); Ok(()) } + Commands::GenerateWallets {} => { + let provider_signer = PrivateKeySigner::random(); + let node_signer = PrivateKeySigner::random(); + + println!("Provider wallet:"); + println!(" Address: {}", provider_signer.address()); + println!( + " Private key: {}", + hex::encode(provider_signer.credential().to_bytes()) + ); + println!("\nNode wallet:"); + println!(" Address: {}", node_signer.address()); + println!( + " Private key: {}", + hex::encode(node_signer.credential().to_bytes()) + ); + Ok(()) + } } } From a5716fcd26a492bb38e50d8ad869ab73fc25a2f3 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Wed, 19 Mar 2025 01:36:28 +0100 Subject: [PATCH 47/85] support toploc-server auth (#146) --- Makefile | 2 +- docker-compose.yml | 1 + validator/Dockerfile | 2 ++ validator/src/main.rs | 5 +++++ validator/src/validators/synthetic_data.rs | 23 +++++++++++++++++++++- 5 files changed, 31 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 4f2b6abf..46280dc2 100644 --- a/Makefile +++ b/Makefile @@ -77,7 +77,7 @@ watch-worker: watch-validator: set -a; source ${ENV_FILE}; set +a; \ - cargo watch -w validator/src -x "run --bin validator -- --validator-key $${PRIVATE_KEY_VALIDATOR} --rpc-url $${RPC_URL} --pool-id $${WORKER_COMPUTE_POOL_ID} --work-validation-contract $${WORK_VALIDATION_CONTRACT} --leviticus-url $${LEVITICUS_URL}" + cargo watch -w validator/src -x "run --bin validator -- --validator-key $${PRIVATE_KEY_VALIDATOR} --rpc-url $${RPC_URL} --pool-id $${WORKER_COMPUTE_POOL_ID} --work-validation-contract $${WORK_VALIDATION_CONTRACT} --leviticus-url $${LEVITICUS_URL} --leviticus-token $${LEVITICUS_TOKEN}" watch-orchestrator: set -a; source ${ENV_FILE}; set +a; \ diff 
--git a/docker-compose.yml b/docker-compose.yml index 3bd29c8a..ea9f5d70 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -129,6 +129,7 @@ services: - POOL_ID=${WORKER_COMPUTE_POOL_ID} - WORK_VALIDATION_INTERVAL=30 - LEVITICUS_URL=${LEVITICUS_URL} + - LEVITICUS_TOKEN=${LEVITICUS_TOKEN} depends_on: anvil: condition: service_healthy diff --git a/validator/Dockerfile b/validator/Dockerfile index 4af52619..9546666c 100644 --- a/validator/Dockerfile +++ b/validator/Dockerfile @@ -11,6 +11,7 @@ ENV WORK_VALIDATION_CONTRACT="" ENV POOL_ID="" ENV WORK_VALIDATION_INTERVAL="30" ENV LEVITICUS_URL="" +ENV LEVITICUS_TOKEN="" RUN echo '#!/bin/sh\n\ exec /usr/local/bin/validator \ @@ -21,6 +22,7 @@ exec /usr/local/bin/validator \ --pool-id "$POOL_ID" \ --work-validation-interval "$WORK_VALIDATION_INTERVAL" \ --leviticus-url "$LEVITICUS_URL" \ +$([ ! -z "$LEVITICUS_TOKEN" ] && echo "--leviticus-token $LEVITICUS_TOKEN") \ "$@"' > /entrypoint.sh && \ chmod +x /entrypoint.sh diff --git a/validator/src/main.rs b/validator/src/main.rs index d25c2d16..2c352e1d 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -50,6 +50,10 @@ struct Args { /// Optional: Leviticus Validator URL #[arg(long, default_value = None)] leviticus_url: Option, + + /// Optional: Leviticus Auth Token + #[arg(long, default_value = None)] + leviticus_token: Option, } fn main() { let runtime = tokio::runtime::Runtime::new().unwrap(); @@ -105,6 +109,7 @@ fn main() { validator, contracts.prime_network.clone(), leviticus_url, + args.leviticus_token, )) } else { error!("Leviticus URL is not provided"); diff --git a/validator/src/validators/synthetic_data.rs b/validator/src/validators/synthetic_data.rs index de9bb54c..a53f7d21 100644 --- a/validator/src/validators/synthetic_data.rs +++ b/validator/src/validators/synthetic_data.rs @@ -36,6 +36,7 @@ pub struct SyntheticDataValidator { last_validation_timestamp: U256, state_dir: Option, leviticus_url: String, + leviticus_token: Option, } impl 
Validator for SyntheticDataValidator { @@ -53,6 +54,7 @@ impl SyntheticDataValidator { validator: SyntheticDataWorkValidator, prime_network: PrimeNetworkContract, leviticus_url: String, + leviticus_token: Option, ) -> Self { let pool_id = pool_id_str.parse::().expect("Invalid pool ID"); let default_state_dir = get_default_state_dir(); @@ -99,6 +101,7 @@ impl SyntheticDataValidator { last_validation_timestamp: last_validation_timestamp.unwrap(), state_dir: state_path.clone(), leviticus_url, + leviticus_token, } } @@ -171,7 +174,25 @@ impl SyntheticDataValidator { // Start validation by calling validation endpoint with retries let validate_url = format!("{}/validate/{}.parquet", self.leviticus_url, work_key); - let client = reqwest::Client::new(); + let mut client = reqwest::Client::builder(); + + // Add auth token if provided + if let Some(token) = &self.leviticus_token { + client = client.default_headers({ + let mut headers = reqwest::header::HeaderMap::new(); + headers.insert( + reqwest::header::AUTHORIZATION, + reqwest::header::HeaderValue::from_str(&format!( + "Bearer {}", + token + )) + .expect("Invalid token"), + ); + headers + }); + } + + let client = client.build().expect("Failed to build HTTP client"); let mut validate_attempts = 0; const MAX_VALIDATE_ATTEMPTS: u32 = 3; From 624128fefe562d4ba5709c7feb2ea6cc954bd39a Mon Sep 17 00:00:00 2001 From: JannikSt Date: Wed, 19 Mar 2025 09:50:58 -0700 Subject: [PATCH 48/85] move private keys into env files rather than cli args (#150) --- Makefile | 12 +++++++----- orchestrator/src/api/routes/storage.rs | 1 + orchestrator/src/utils/google_cloud.rs | 8 +++++++- worker/src/cli/command.rs | 19 +++++++------------ 4 files changed, 22 insertions(+), 18 deletions(-) diff --git a/Makefile b/Makefile index 46280dc2..af42f3d3 100644 --- a/Makefile +++ b/Makefile @@ -73,7 +73,9 @@ watch-discovery: watch-worker: set -a; source ${ENV_FILE}; set +a; \ - cargo watch -w worker/src -x "run --bin worker -- run --private-key-provider 
$$PROVIDER_PRIVATE_KEY --private-key-node $$NODE_PRIVATE_KEY --port 8091 --external-ip $${WORKER_EXTERNAL_IP:-localhost} --compute-pool-id $$WORKER_COMPUTE_POOL_ID --validator-address $$VALIDATOR_ADDRESS" + PRIVATE_KEY_PROVIDER=$${PROVIDER_PRIVATE_KEY} \ + PRIVATE_KEY_NODE=$${NODE_PRIVATE_KEY} \ + cargo watch -w worker/src -x "run --bin worker -- run --port 8091 --external-ip $${WORKER_EXTERNAL_IP:-localhost} --compute-pool-id $$WORKER_COMPUTE_POOL_ID --validator-address $$VALIDATOR_ADDRESS" watch-validator: set -a; source ${ENV_FILE}; set +a; \ @@ -87,8 +89,10 @@ build-worker: cargo build --release --bin worker run-worker-bin: + @test -n "$$PRIVATE_KEY_PROVIDER" || (echo "PRIVATE_KEY_PROVIDER is not set" && exit 1) + @test -n "$$PRIVATE_KEY_NODE" || (echo "PRIVATE_KEY_NODE is not set" && exit 1) set -a; source .env; set +a; \ - ./target/release/worker run --private-key-provider $$PROVIDER_PRIVATE_KEY --private-key-node $$NODE_PRIVATE_KEY --port 8091 --external-ip 0.0.0.0 --compute-pool-id $$WORKER_COMPUTE_POOL_ID --validator-address $$VALIDATOR_ADDRESS + ./target/release/worker run --port 8091 --external-ip 0.0.0.0 --compute-pool-id $$WORKER_COMPUTE_POOL_ID --validator-address $$VALIDATOR_ADDRESS SSH_CONNECTION ?= your-ssh-conn string EXTERNAL_IP ?= 0.0.0.0 @@ -138,9 +142,7 @@ watch-worker-remote: setup-remote setup-tunnel sync-remote . 
\"\$$HOME/.cargo/env\" && \ set -a && source .env && set +a && \ export EXTERNAL_IP=$(EXTERNAL_IP) && \ - RUST_BACKTRACE=1 RUST_LOG=debug cargo watch -w worker/src -x \"run --bin worker -- run \ - --private-key-provider \$$PROVIDER_PRIVATE_KEY \ - --private-key-node \$$NODE_PRIVATE_KEY \ + RUST_BACKTRACE=1 RUST_LOG=debug PRIVATE_KEY_PROVIDER="$$PROVIDER_PRIVATE_KEY" PRIVATE_KEY_NODE="$$NODE_PRIVATE_KEY" cargo watch -w worker/src -x \"run --bin worker -- run \ --port $(PORT) \ --external-ip \$$EXTERNAL_IP \ --compute-pool-id \$$WORKER_COMPUTE_POOL_ID \ diff --git a/orchestrator/src/api/routes/storage.rs b/orchestrator/src/api/routes/storage.rs index b37f6a70..4fb23ef7 100644 --- a/orchestrator/src/api/routes/storage.rs +++ b/orchestrator/src/api/routes/storage.rs @@ -40,6 +40,7 @@ async fn request_upload( credentials, Some(file_type.to_string()), Duration::from_secs(3600), // 1 hour expiry + Some(*file_size), ) .await { diff --git a/orchestrator/src/utils/google_cloud.rs b/orchestrator/src/utils/google_cloud.rs index 24162d29..b1eb0618 100644 --- a/orchestrator/src/utils/google_cloud.rs +++ b/orchestrator/src/utils/google_cloud.rs @@ -11,6 +11,7 @@ pub async fn generate_upload_signed_url( credentials_base64: &str, content_type: Option, expiration: Duration, + max_bytes: Option, // Maximum file size in bytes ) -> Result { // Decode base64 to JSON string let credentials_json = general_purpose::STANDARD.decode(credentials_base64)?; @@ -27,13 +28,18 @@ pub async fn generate_upload_signed_url( let client = Client::new(config); // Set options for the signed URL - let options = SignedURLOptions { + let mut options = SignedURLOptions { method: SignedURLMethod::PUT, expires: expiration, content_type, ..Default::default() }; + // Set max bytes if specified + if let Some(bytes) = max_bytes { + options.headers = vec![format!("content-length:{}", bytes)]; + } + // Generate the signed URL let signed_url = client .signed_url(bucket, object_path, None, None, options) diff --git 
a/worker/src/cli/command.rs b/worker/src/cli/command.rs index 879425cb..880d32b9 100644 --- a/worker/src/cli/command.rs +++ b/worker/src/cli/command.rs @@ -35,14 +35,6 @@ pub struct Cli { #[derive(Subcommand)] pub enum Commands { Run { - /// Wallet private key (as a hex string) - #[arg(long)] - private_key_provider: String, - - /// Wallet private key (as a hex string) - #[arg(long)] - private_key_node: String, - /// RPC URL #[arg(long, default_value = "http://localhost:8545")] rpc_url: String, @@ -95,8 +87,6 @@ pub async fn execute_command( ) -> Result<(), Box> { match command { Commands::Run { - private_key_provider, - private_key_node, port, external_ip, compute_pool_id, @@ -115,6 +105,11 @@ pub async fn execute_command( std::process::exit(1); } + let private_key_provider = + std::env::var("PRIVATE_KEY_PROVIDER").expect("PRIVATE_KEY_PROVIDER must be set"); + let private_key_node = + std::env::var("PRIVATE_KEY_NODE").expect("PRIVATE_KEY_NODE must be set"); + let mut recover_last_state = *auto_recover; let version = env!("CARGO_PKG_VERSION"); Console::section("🚀 PRIME MINER INITIALIZATION"); @@ -123,7 +118,7 @@ pub async fn execute_command( Initialize Wallet instances */ let provider_wallet_instance = Arc::new( - match Wallet::new(private_key_provider, Url::parse(rpc_url).unwrap()) { + match Wallet::new(&private_key_provider, Url::parse(rpc_url).unwrap()) { Ok(wallet) => wallet, Err(err) => { Console::error(&format!("❌ Failed to create wallet: {}", err)); @@ -133,7 +128,7 @@ pub async fn execute_command( ); let node_wallet_instance = Arc::new( - match Wallet::new(private_key_node, Url::parse(rpc_url).unwrap()) { + match Wallet::new(&private_key_node, Url::parse(rpc_url).unwrap()) { Ok(wallet) => wallet, Err(err) => { Console::error(&format!("❌ Failed to create wallet: {}", err)); From 089b8dc5e8a49db70b53be4708dc8a1007d00b90 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Wed, 19 Mar 2025 09:51:09 -0700 Subject: [PATCH 49/85] only allow discovery svc upload if you 
have ai token (#149) --- discovery/src/api/routes/node.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/discovery/src/api/routes/node.rs b/discovery/src/api/routes/node.rs index 5f2e07bb..2ac18437 100644 --- a/discovery/src/api/routes/node.rs +++ b/discovery/src/api/routes/node.rs @@ -3,6 +3,7 @@ use actix_web::{ web::{self, put, Data}, HttpResponse, Scope, }; +use alloy::primitives::U256; use shared::models::api::ApiResponse; use shared::models::node::Node; @@ -11,6 +12,20 @@ pub async fn register_node( data: Data, req: actix_web::HttpRequest, ) -> HttpResponse { + if let Some(contracts) = data.contracts.clone() { + let balance = contracts + .ai_token + .balance_of(node.provider_address.parse().unwrap()) + .await + .unwrap_or_default(); + if balance == U256::ZERO { + return HttpResponse::BadRequest().json(ApiResponse::new( + false, + "Node provider address does not hold AI tokens", + )); + } + } + let node_store = data.node_store.clone(); // Check for the x-address header From d2576a9799b8dbf119482eb08f9c4b4d93be9150 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Wed, 19 Mar 2025 09:51:43 -0700 Subject: [PATCH 50/85] support setting penalty in validator arg (#137) * support setting penalty in validator start argument --- validator/src/main.rs | 10 +++++++++- validator/src/validators/synthetic_data.rs | 5 ++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/validator/src/main.rs b/validator/src/main.rs index 2c352e1d..b1b3d0c6 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1,6 +1,7 @@ pub mod validators; use actix_web::{web, App, HttpResponse, HttpServer, Responder}; -use alloy::primitives::Address; +use alloy::primitives::utils::Unit; +use alloy::primitives::{Address, U256}; use anyhow::{Context, Result}; use clap::Parser; use log::LevelFilter; @@ -54,6 +55,11 @@ struct Args { /// Optional: Leviticus Auth Token #[arg(long, default_value = None)] leviticus_token: Option, + + /// Optional: Validator penalty in 
whole tokens + /// Note: This value will be multiplied by 10^18 (1 token = 10^18 wei) + #[arg(long, default_value = "1000")] + validator_penalty: u64, } fn main() { let runtime = tokio::runtime::Runtime::new().unwrap(); @@ -100,6 +106,7 @@ fn main() { let hardware_validator = HardwareValidator::new(&validator_wallet, contracts.clone()); let mut synthetic_validator = if let Some(pool_id) = args.pool_id.clone() { + let penalty = U256::from(args.validator_penalty) * Unit::ETHER.wei(); match contracts.synthetic_data_validator.clone() { Some(validator) => { if let Some(leviticus_url) = args.leviticus_url { @@ -110,6 +117,7 @@ fn main() { contracts.prime_network.clone(), leviticus_url, args.leviticus_token, + penalty, )) } else { error!("Leviticus URL is not provided"); diff --git a/validator/src/validators/synthetic_data.rs b/validator/src/validators/synthetic_data.rs index a53f7d21..b53a76f8 100644 --- a/validator/src/validators/synthetic_data.rs +++ b/validator/src/validators/synthetic_data.rs @@ -37,6 +37,7 @@ pub struct SyntheticDataValidator { state_dir: Option, leviticus_url: String, leviticus_token: Option, + penalty: U256, } impl Validator for SyntheticDataValidator { @@ -55,6 +56,7 @@ impl SyntheticDataValidator { prime_network: PrimeNetworkContract, leviticus_url: String, leviticus_token: Option, + penalty: U256, ) -> Self { let pool_id = pool_id_str.parse::().expect("Invalid pool ID"); let default_state_dir = get_default_state_dir(); @@ -102,6 +104,7 @@ impl SyntheticDataValidator { state_dir: state_path.clone(), leviticus_url, leviticus_token, + penalty, } } @@ -138,7 +141,7 @@ impl SyntheticDataValidator { println!("Invalidating work: {}", work_key); match self .prime_network - .invalidate_work(self.pool_id, U256::from(1), data) + .invalidate_work(self.pool_id, self.penalty, data) .await { Ok(_) => Ok(()), From f503fb7fdea0d4580fc210933f235b2eb720e6a7 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Wed, 19 Mar 2025 09:58:31 -0700 Subject: [PATCH 51/85] 
Feature/discovery blacklist detection (#139) * fix worker unit formatting * discovery svc blacklist detection --- discovery/src/api/routes/get_nodes.rs | 2 - discovery/src/api/routes/node.rs | 1 + discovery/src/chainsync/sync.rs | 2 + shared/src/models/node.rs | 4 ++ .../implementations/compute_pool_contract.rs | 42 +++++++++++++++++++ .../compute_registry_contract.rs | 1 - .../implementations/stake_manager.rs | 1 - 7 files changed, 49 insertions(+), 4 deletions(-) diff --git a/discovery/src/api/routes/get_nodes.rs b/discovery/src/api/routes/get_nodes.rs index bab39415..dc8f1c0b 100644 --- a/discovery/src/api/routes/get_nodes.rs +++ b/discovery/src/api/routes/get_nodes.rs @@ -33,8 +33,6 @@ pub async fn get_nodes_for_pool( .unwrap(); let owner = pool_info.creator; let manager = pool_info.compute_manager_key; - println!("Owner: {:?}", owner); - println!("Manager: {:?}", manager); let address_str = match req.headers().get("x-address") { Some(address) => match address.to_str() { Ok(addr) => addr.to_string(), diff --git a/discovery/src/api/routes/node.rs b/discovery/src/api/routes/node.rs index 2ac18437..7293ac20 100644 --- a/discovery/src/api/routes/node.rs +++ b/discovery/src/api/routes/node.rs @@ -168,6 +168,7 @@ mod tests { node, is_validated: true, is_active: true, + is_blacklisted: false, }; app_state.node_store.update_node(validated); diff --git a/discovery/src/chainsync/sync.rs b/discovery/src/chainsync/sync.rs index 5f8c5eda..b8649ccc 100644 --- a/discovery/src/chainsync/sync.rs +++ b/discovery/src/chainsync/sync.rs @@ -44,10 +44,12 @@ impl ChainSync { let mut n = node.clone(); let provider_address = Address::from_str(&node.provider_address).unwrap(); let node_address = Address::from_str(&node.id).unwrap(); + let is_blacklisted = contracts_clone.compute_pool.is_node_blacklisted(node.node.compute_pool_id, node_address).await.unwrap(); let node_info = contracts_clone.compute_registry.get_node(provider_address, node_address).await.unwrap(); let (is_active, 
is_validated) = node_info; n.is_active = is_active; n.is_validated = is_validated; + n.is_blacklisted = is_blacklisted; node_store_clone.update_node(n); } } diff --git a/shared/src/models/node.rs b/shared/src/models/node.rs index b7eb125e..fce22c64 100644 --- a/shared/src/models/node.rs +++ b/shared/src/models/node.rs @@ -44,6 +44,8 @@ pub struct DiscoveryNode { pub node: Node, pub is_validated: bool, pub is_active: bool, + #[serde(default)] + pub is_blacklisted: bool, } impl DiscoveryNode { @@ -52,6 +54,7 @@ impl DiscoveryNode { node: new_node, is_validated: self.is_validated, is_active: self.is_active, + is_blacklisted: self.is_blacklisted, } } } @@ -70,6 +73,7 @@ impl From for DiscoveryNode { node, is_validated: false, // Default values for new discovery nodes is_active: false, + is_blacklisted: false, } } } diff --git a/shared/src/web3/contracts/implementations/compute_pool_contract.rs b/shared/src/web3/contracts/implementations/compute_pool_contract.rs index 97e75841..96823f32 100644 --- a/shared/src/web3/contracts/implementations/compute_pool_contract.rs +++ b/shared/src/web3/contracts/implementations/compute_pool_contract.rs @@ -177,6 +177,48 @@ impl ComputePool { Ok(result) } + pub async fn is_node_blacklisted( + &self, + pool_id: u32, + node: Address, + ) -> Result> { + let arg_pool_id: U256 = U256::from(pool_id); + let result = self + .instance + .instance() + .function( + "isNodeBlacklistedFromPool", + &[arg_pool_id.into(), node.into()], + )? + .call() + .await?; + Ok(result.first().unwrap().as_bool().unwrap()) + } + + pub async fn get_blacklisted_nodes( + &self, + pool_id: u32, + ) -> Result, Box> { + let arg_pool_id: U256 = U256::from(pool_id); + let result = self + .instance + .instance() + .function("getBlacklistedNodes", &[arg_pool_id.into()])? 
+ .call() + .await?; + + let blacklisted_nodes = result + .first() + .unwrap() + .as_array() + .unwrap() + .iter() + .map(|node| node.as_address().unwrap()) + .collect(); + + Ok(blacklisted_nodes) + } + pub async fn eject_node( &self, pool_id: u32, diff --git a/shared/src/web3/contracts/implementations/compute_registry_contract.rs b/shared/src/web3/contracts/implementations/compute_registry_contract.rs index c35af83c..57fdb9df 100644 --- a/shared/src/web3/contracts/implementations/compute_registry_contract.rs +++ b/shared/src/web3/contracts/implementations/compute_registry_contract.rs @@ -73,7 +73,6 @@ impl ComputeRegistryContract { )? .call() .await; - // TODO: This should be cleaned up - either we add additional check if this is actually the no-exist error or work on the contract response match node_response { Ok(response) => { diff --git a/shared/src/web3/contracts/implementations/stake_manager.rs b/shared/src/web3/contracts/implementations/stake_manager.rs index d9a74fb2..a5a8f814 100644 --- a/shared/src/web3/contracts/implementations/stake_manager.rs +++ b/shared/src/web3/contracts/implementations/stake_manager.rs @@ -36,7 +36,6 @@ impl StakeManagerContract { .function("getStake", &[staker.into()])? 
.call() .await?; - println!("Result: {:?}", result); Ok(result[0].as_uint().unwrap_or_default().0) } From 5980e8dde1e42e92f095d4a08ae276f37081531d Mon Sep 17 00:00:00 2001 From: JannikSt Date: Wed, 19 Mar 2025 13:24:07 -0700 Subject: [PATCH 52/85] Improvement/discovery security check (#151) * simplify discovery svc security check --- discovery/src/api/routes/node.rs | 35 ++++++++++--------- .../compute_registry_contract.rs | 33 +++++++++-------- 2 files changed, 36 insertions(+), 32 deletions(-) diff --git a/discovery/src/api/routes/node.rs b/discovery/src/api/routes/node.rs index 7293ac20..62b6e0ee 100644 --- a/discovery/src/api/routes/node.rs +++ b/discovery/src/api/routes/node.rs @@ -3,7 +3,6 @@ use actix_web::{ web::{self, put, Data}, HttpResponse, Scope, }; -use alloy::primitives::U256; use shared::models::api::ApiResponse; use shared::models::node::Node; @@ -12,22 +11,6 @@ pub async fn register_node( data: Data, req: actix_web::HttpRequest, ) -> HttpResponse { - if let Some(contracts) = data.contracts.clone() { - let balance = contracts - .ai_token - .balance_of(node.provider_address.parse().unwrap()) - .await - .unwrap_or_default(); - if balance == U256::ZERO { - return HttpResponse::BadRequest().json(ApiResponse::new( - false, - "Node provider address does not hold AI tokens", - )); - } - } - - let node_store = data.node_store.clone(); - // Check for the x-address header let address_str = match req.headers().get("x-address") { Some(address) => match address.to_str() { @@ -47,6 +30,24 @@ pub async fn register_node( return HttpResponse::BadRequest() .json(ApiResponse::new(false, "Invalid x-address header")); } + if let Some(contracts) = data.contracts.clone() { + if (contracts + .compute_registry + .get_node( + node.provider_address.parse().unwrap(), + node.id.parse().unwrap(), + ) + .await) + .is_err() + { + return HttpResponse::BadRequest().json(ApiResponse::new( + false, + "Node not found in compute registry", + )); + } + } + + let node_store = 
data.node_store.clone(); node_store.register_node(node.clone()); HttpResponse::Ok().json(ApiResponse::new(true, "Node registered successfully")) diff --git a/shared/src/web3/contracts/implementations/compute_registry_contract.rs b/shared/src/web3/contracts/implementations/compute_registry_contract.rs index 57fdb9df..fd207267 100644 --- a/shared/src/web3/contracts/implementations/compute_registry_contract.rs +++ b/shared/src/web3/contracts/implementations/compute_registry_contract.rs @@ -59,7 +59,7 @@ impl ComputeRegistryContract { pub async fn get_node( &self, - #[allow(unused_variables)] provider_address: Address, + provider_address: Address, node_address: Address, ) -> Result<(bool, bool), Box> { let get_node_selector = get_selector("getNode(address,address)"); @@ -72,21 +72,24 @@ impl ComputeRegistryContract { &[provider_address.into(), node_address.into()], )? .call() - .await; - // TODO: This should be cleaned up - either we add additional check if this is actually the no-exist error or work on the contract response - match node_response { - Ok(response) => { - if let Some(_node_data) = response.first() { - let node_tuple = _node_data.as_tuple().unwrap(); - let active = node_tuple[5].as_bool().unwrap(); - let validated = node_tuple[6].as_bool().unwrap(); - Ok((active, validated)) - } else { - println!("Node is not registered. 
Proceeding to add the node."); - Err("Node is not registered".into()) - } + .await?; + + if let Some(_node_data) = node_response.first() { + let node_tuple = _node_data.as_tuple().unwrap(); + + // Check that provider and subkey match + let node_provider = node_tuple[0].as_address().unwrap(); + let node_subkey = node_tuple[1].as_address().unwrap(); + + if node_provider != provider_address || node_subkey != node_address { + return Err("Node does not match provider or subkey".into()); } - Err(_) => Err("Node is not registered".into()), + + let active = node_tuple[5].as_bool().unwrap(); + let validated = node_tuple[6].as_bool().unwrap(); + Ok((active, validated)) + } else { + Err("Node is not registered".into()) } } } From 527d7dde99f0f650e70ae9cd19d6f736ef5caac2 Mon Sep 17 00:00:00 2001 From: Johannes Hagemann Date: Thu, 20 Mar 2025 06:14:27 +0100 Subject: [PATCH 53/85] update email in security.md (#154) --- SECURITY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SECURITY.md b/SECURITY.md index 4963a1d7..c04fa844 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -4,7 +4,7 @@ We take security vulnerabilities very seriously and appreciate efforts to responsibly disclose findings. -Please report security vulnerabilities by emailing contact@primeintellect.ai. +Please report security vulnerabilities by emailing security@primeintellect.ai. 
**Please do not report security vulnerabilities through public GitHub issues, discussions, or pull requests.** From f0bce1cc856553ca2da8e522efa4bc3a5397412e Mon Sep 17 00:00:00 2001 From: JannikSt Date: Thu, 20 Mar 2025 14:45:04 -0700 Subject: [PATCH 54/85] add install scripts (#155) * add install scripts --------- Co-authored-by: manveer --- worker/scripts/install.sh | 78 +++++++++++++++++++++++++++++++++++++ worker/scripts/uninstall.sh | 41 +++++++++++++++++++ 2 files changed, 119 insertions(+) create mode 100755 worker/scripts/install.sh create mode 100644 worker/scripts/uninstall.sh diff --git a/worker/scripts/install.sh b/worker/scripts/install.sh new file mode 100755 index 00000000..4e173dfd --- /dev/null +++ b/worker/scripts/install.sh @@ -0,0 +1,78 @@ +#!/usr/bin/env bash +set -e + +# Colors for pretty output +RED='\033[0;31m' +GREEN='\033[0;32m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Configuration +BINARY_NAME="prime-worker" +RELEASE_URL="https://github.com/PrimeIntellect-ai/protocol/releases" +BINARY_URL="$RELEASE_URL/latest/download/worker-linux-x86_64" +INSTALL_DIR="/usr/local/bin" + +# Check if dev flag is set +if [[ "$1" == "--dev" ]]; then + # Get latest dev tag + LATEST_DEV_TAG=$(curl -s "$RELEASE_URL" | grep -o 'dev-[0-9]\{8\}-[a-z0-9]\+' | head -1) + if [[ -z "$LATEST_DEV_TAG" ]]; then + echo -e "${RED}✗ Could not find latest dev release${NC}" + exit 1 + fi + BINARY_URL="$RELEASE_URL/download/$LATEST_DEV_TAG/worker-linux-x86_64" +fi + +# Print banner and download URL +echo -e "${BLUE}" +echo "╔═══════════════════════════════════════════╗" +echo "║ ║" +echo "║ Prime Intellect Protocol Worker ║" +echo "║ ║" +echo "╚═══════════════════════════════════════════╝" + +# Check operating system +echo -e "${BLUE}→ Checking system compatibility...${NC}" +if [[ "$(uname -s)" != "Linux" ]]; then + echo -e "${RED}✗ This installer is for Linux only.${NC}" + exit 1 +fi + +if [[ "$(uname -m)" != "x86_64" ]]; then + echo -e "${RED}✗ This installer is for 
x86_64 architecture only.${NC}" + exit 1 +fi +echo -e "${GREEN}✓ System is compatible${NC}" + +# Create temporary directory +TMP_DIR=$(mktemp -d) +trap 'rm -rf "$TMP_DIR"' EXIT + +# Download binary +echo -e "${BLUE}→ Downloading Prime Intellect Protocol Worker...${NC}" +curl -sSL "$BINARY_URL" -o "$TMP_DIR/$BINARY_NAME" +chmod +x "$TMP_DIR/$BINARY_NAME" +echo -e "${GREEN}✓ Download complete${NC}" + +# Install binary +echo -e "${BLUE}→ Installing to $INSTALL_DIR...${NC}" +if [[ -w "$INSTALL_DIR" ]]; then + mv "$TMP_DIR/$BINARY_NAME" "$INSTALL_DIR/$BINARY_NAME" + echo -e "${GREEN}✓ Installation complete${NC}" +else + echo -e "${RED}✗ Cannot write to $INSTALL_DIR${NC}" + echo -e "${BLUE}→ Please run this script with sudo to install to $INSTALL_DIR${NC}" + exit 1 +fi + +# Verify installation +echo -e "${BLUE}→ Verifying installation...${NC}" +if command -v "$BINARY_NAME" &> /dev/null; then + echo -e "${GREEN}✓ Prime Intellect Protocol Worker successfully installed!${NC}" + echo -e "${BLUE}→ Run '$BINARY_NAME --help' to get started${NC}" +else + echo -e "${RED}✗ Installation verification failed.${NC}" + echo -e "${BLUE}→ The binary is located at: $INSTALL_DIR/$BINARY_NAME${NC}" +fi +echo -e "\n${GREEN}The Prime Intellect Protocol Worker was successfully installed${NC}" diff --git a/worker/scripts/uninstall.sh b/worker/scripts/uninstall.sh new file mode 100644 index 00000000..e43c6903 --- /dev/null +++ b/worker/scripts/uninstall.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash +set -e + +# Colors for pretty output +RED='\033[0;31m' +GREEN='\033[0;32m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Configuration +BINARY_NAME="prime-worker" +INSTALL_DIR="/usr/local/bin" + +# Print banner +echo -e "${BLUE}" +echo "╔═══════════════════════════════════════════╗" +echo "║ ║" +echo "║ Prime Intellect Protocol Worker ║" +echo "║ Uninstallation ║" +echo "╚═══════════════════════════════════════════╝" + +# Check for binary +echo -e "${BLUE}→ Checking for installed binary...${NC}" +if [[ 
! -f "$INSTALL_DIR/$BINARY_NAME" ]]; then + echo -e "${RED}✗ Prime Worker binary not found in $INSTALL_DIR${NC}" + exit 1 +fi +echo -e "${GREEN}✓ Found installed binary${NC}" + +# Remove binary +echo -e "${BLUE}→ Removing Prime Intellect Protocol Worker...${NC}" +if [[ -w "$INSTALL_DIR" ]]; then + rm -f "$INSTALL_DIR/$BINARY_NAME" + echo -e "${GREEN}✓ Binary removed${NC}" +else + echo -e "${RED}✗ Cannot remove from $INSTALL_DIR${NC}" + echo -e "${BLUE}→ Please run this script with sudo to remove from $INSTALL_DIR${NC}" + exit 1 +fi + +echo -e "\n${GREEN}Prime Intellect Protocol Worker successfully uninstalled!${NC}" From 914aaf9b08c2adffee7652438a26c2578756ae76 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Thu, 20 Mar 2025 17:57:30 -0700 Subject: [PATCH 55/85] Feature/node chain sync (#148) * add discovery based chain sync * add whitelist status to discovery, add ejected state to orchestrator, fix state logging bug on worker, add provider and compute node monitoring on worker * fix state issue causing wrong logs * improve command flow for provider and compute node registration, improve stake manager --- .tmuxinator.yml | 5 +- Makefile | 5 + dev-utils/examples/eject_node.rs | 71 +++++++ discovery/src/api/routes/node.rs | 1 + discovery/src/chainsync/sync.rs | 2 + orchestrator/src/api/server.rs | 12 +- orchestrator/src/discovery/monitor.rs | 41 +++- orchestrator/src/main.rs | 3 + orchestrator/src/models/node.rs | 1 + shared/src/models/node.rs | 4 + smart-contracts | 2 +- validator/src/main.rs | 5 +- worker/src/cli/command.rs | 207 +++++++++++---------- worker/src/docker/docker_manager.rs | 2 +- worker/src/docker/service.rs | 33 ++-- worker/src/docker/state.rs | 7 + worker/src/operations/compute_node.rs | 88 +++++++-- worker/src/operations/heartbeat/service.rs | 13 +- worker/src/operations/provider.rs | 193 ++++++++++++++++--- worker/src/services/discovery.rs | 1 - 20 files changed, 522 insertions(+), 174 deletions(-) create mode 100644 dev-utils/examples/eject_node.rs 
diff --git a/.tmuxinator.yml b/.tmuxinator.yml index e1e05980..b9ad8e81 100644 --- a/.tmuxinator.yml +++ b/.tmuxinator.yml @@ -7,9 +7,8 @@ windows: - bash -c 'tmux select-pane -T "Worker" && sleep 5 && cd smart-contracts && sh deploy.sh && sh deploy_work_validation.sh && cd .. && make setup && clear' - bash -c 'tmux select-pane -T "Discovery" && sleep 10 && make watch-discovery' - bash -c 'tmux select-pane -T "Validator" && sleep 15 && make watch-validator' - - bash -c 'tmux select-pane -T "Orchestrator" && sleep 20 && make watch-orchestrator' + - bash -c 'tmux select-pane -T "Orchestrator" && sleep 20 && make watch-orchestrator' - background: layout: even-horizontal panes: - - bash -c 'tmux rename-window "Prime Dev Services" && docker compose up anvil redis' - - bash -c 'while true; do make whitelist-provider; sleep 10; done' \ No newline at end of file + - bash -c 'tmux rename-window "Prime Dev Services" && docker compose up anvil redis' \ No newline at end of file diff --git a/Makefile b/Makefile index af42f3d3..7c8ac483 100644 --- a/Makefile +++ b/Makefile @@ -160,3 +160,8 @@ kill-tunnel: remote-worker: @trap 'make kill-tunnel' EXIT; \ make watch-worker-remote + +# testing: +eject-node: + set -a; source ${ENV_FILE}; set +a; \ + cargo run -p dev-utils --example eject_node -- --pool-id $${WORKER_COMPUTE_POOL_ID} --node $${NODE_ADDRESS} --provider-address $${PROVIDER_ADDRESS} --key $${POOL_OWNER_PRIVATE_KEY} --rpc-url $${RPC_URL} diff --git a/dev-utils/examples/eject_node.rs b/dev-utils/examples/eject_node.rs new file mode 100644 index 00000000..d7cd4ddc --- /dev/null +++ b/dev-utils/examples/eject_node.rs @@ -0,0 +1,71 @@ +use alloy::primitives::Address; +use clap::Parser; +use eyre::Result; +use shared::web3::contracts::core::builder::ContractBuilder; +use shared::web3::wallet::Wallet; +use std::str::FromStr; +use url::Url; + +#[derive(Parser)] +struct Args { + /// Private key for transaction signing + /// The address of this key must be the pool creator or 
manager + #[arg(short = 'k', long)] + key: String, + + /// RPC URL + #[arg(short = 'r', long)] + rpc_url: String, + + /// Pool ID + #[arg(short = 'p', long)] + pool_id: u32, + + /// Provider address + #[arg(short = 'a', long)] + provider_address: String, + + /// Node address to eject + #[arg(short = 'n', long)] + node: String, +} + +#[tokio::main] +async fn main() -> Result<()> { + let args = Args::parse(); + let wallet = Wallet::new(&args.key, Url::parse(&args.rpc_url)?).unwrap(); + + // Build all contracts + let contracts = ContractBuilder::new(&wallet) + .with_compute_registry() + .with_ai_token() + .with_prime_network() + .with_compute_pool() + .build() + .unwrap(); + + let node_address = Address::from_str(&args.node).expect("Invalid node address"); + let provider_address = + Address::from_str(&args.provider_address).expect("Invalid provider address"); + + let node_info = contracts + .compute_registry + .get_node(provider_address, node_address) + .await; + println!("Node info: {:?}", node_info); + + let tx = contracts + .compute_pool + .eject_node(args.pool_id, node_address) + .await; + println!("Ejected node {} from pool {}", args.node, args.pool_id); + println!("Transaction: {:?}", tx); + + let node_info = contracts + .compute_registry + .get_node(provider_address, node_address) + .await; + println!("Post ejection node info: {:?}", node_info); + + Ok(()) +} diff --git a/discovery/src/api/routes/node.rs b/discovery/src/api/routes/node.rs index 62b6e0ee..31fb6bb4 100644 --- a/discovery/src/api/routes/node.rs +++ b/discovery/src/api/routes/node.rs @@ -169,6 +169,7 @@ mod tests { node, is_validated: true, is_active: true, + is_provider_whitelisted: false, is_blacklisted: false, }; diff --git a/discovery/src/chainsync/sync.rs b/discovery/src/chainsync/sync.rs index b8649ccc..32c609dc 100644 --- a/discovery/src/chainsync/sync.rs +++ b/discovery/src/chainsync/sync.rs @@ -46,9 +46,11 @@ impl ChainSync { let node_address = Address::from_str(&node.id).unwrap(); let 
is_blacklisted = contracts_clone.compute_pool.is_node_blacklisted(node.node.compute_pool_id, node_address).await.unwrap(); let node_info = contracts_clone.compute_registry.get_node(provider_address, node_address).await.unwrap(); + let provider_info = contracts_clone.compute_registry.get_provider(provider_address).await.unwrap(); let (is_active, is_validated) = node_info; n.is_active = is_active; n.is_validated = is_validated; + n.is_provider_whitelisted = provider_info.is_whitelisted; n.is_blacklisted = is_blacklisted; node_store_clone.update_node(n); } diff --git a/orchestrator/src/api/server.rs b/orchestrator/src/api/server.rs index 628750bb..d93588fc 100644 --- a/orchestrator/src/api/server.rs +++ b/orchestrator/src/api/server.rs @@ -2,6 +2,7 @@ use crate::api::routes::nodes::nodes_routes; use crate::api::routes::storage::storage_routes; use crate::api::routes::task::tasks_routes; use crate::api::routes::{heartbeat::heartbeat_routes, metrics::metrics_routes}; +use crate::models::node::NodeStatus; use crate::store::core::StoreContext; use actix_web::middleware::{Compress, NormalizePath, TrailingSlash}; use actix_web::{middleware, web::Data, App, HttpServer}; @@ -36,10 +37,13 @@ pub async fn start_server( }); let node_store = app_state.store_context.node_store.clone(); let node_store_clone = node_store.clone(); - let validator_state = Arc::new( - ValidatorState::new(vec![]) - .with_validator(move |address| node_store_clone.get_node(address).is_some()), - ); + let validator_state = Arc::new(ValidatorState::new(vec![]).with_validator(move |address| { + if let Some(node) = node_store_clone.get_node(address) { + node.status != NodeStatus::Ejected + } else { + false + } + })); let api_key_middleware = Arc::new(ApiKeyMiddleware::new(admin_api_key)); diff --git a/orchestrator/src/discovery/monitor.rs b/orchestrator/src/discovery/monitor.rs index 394226ab..061101e0 100644 --- a/orchestrator/src/discovery/monitor.rs +++ b/orchestrator/src/discovery/monitor.rs @@ -1,3 +1,4 
@@ +use crate::models::node::NodeStatus; use crate::models::node::OrchestratorNode; use crate::store::core::StoreContext; use anyhow::Error; @@ -55,8 +56,7 @@ impl<'b> DiscoveryMonitor<'b> { } } } - - pub async fn fetch_nodes_from_discovery(&self) -> Result, Error> { + pub async fn fetch_nodes_from_discovery(&self) -> Result, Error> { let discovery_route = format!("/api/pool/{}", self.compute_pool_id); let address = self.coordinator_wallet.address().to_string(); @@ -112,18 +112,42 @@ impl<'b> DiscoveryMonitor<'b> { let nodes = nodes .into_iter() .filter(|node| node.is_validated) - .map(OrchestratorNode::from) - .collect::>(); + .collect::>(); Ok(nodes) } pub async fn get_nodes(&self) -> Result, Error> { - let nodes = self.fetch_nodes_from_discovery().await?; + let discovery_nodes = self.fetch_nodes_from_discovery().await?; - for node in &nodes { + for discovery_node in &discovery_nodes { + let node = OrchestratorNode::from(discovery_node.clone()); match self.store_context.node_store.get_node(&node.address) { Some(existing_node) => { + if discovery_node.is_validated && !discovery_node.is_provider_whitelisted { + self.store_context + .node_store + .update_node_status(&node.address, NodeStatus::Ejected); + } + if !discovery_node.is_active && existing_node.status == NodeStatus::Healthy { + // Node is active False but we have it in store and it is healthy + // This means that the node likely got kicked by e.g. the validator + // We simply remove it from the store now and will rediscover it later? 
+ println!( + "Node {} is no longer active on chain, marking as dead", + node.address + ); + if !discovery_node.is_provider_whitelisted { + self.store_context + .node_store + .update_node_status(&node.address, NodeStatus::Ejected); + } else { + self.store_context + .node_store + .update_node_status(&node.address, NodeStatus::Dead); + } + } + if existing_node.ip_address != node.ip_address { info!( "Node {} IP changed from {} to {}", @@ -139,6 +163,9 @@ impl<'b> DiscoveryMonitor<'b> { } } - Ok(nodes) + Ok(discovery_nodes + .into_iter() + .map(OrchestratorNode::from) + .collect()) } } diff --git a/orchestrator/src/main.rs b/orchestrator/src/main.rs index bad94ef9..6d543a3c 100644 --- a/orchestrator/src/main.rs +++ b/orchestrator/src/main.rs @@ -139,6 +139,9 @@ async fn main() -> Result<()> { inviter.run().await }); + // The node status updater is responsible for checking the heartbeats + // and calculating the status of the node. + // It also ejects nodes when they are dead. let status_update_store_context = store_context.clone(); tasks.spawn(async move { let status_updater = NodeStatusUpdater::new( diff --git a/orchestrator/src/models/node.rs b/orchestrator/src/models/node.rs index a00df4ab..6b2b2814 100644 --- a/orchestrator/src/models/node.rs +++ b/orchestrator/src/models/node.rs @@ -53,4 +53,5 @@ pub enum NodeStatus { Healthy, Unhealthy, Dead, + Ejected, } diff --git a/shared/src/models/node.rs b/shared/src/models/node.rs index fce22c64..4a4a3a00 100644 --- a/shared/src/models/node.rs +++ b/shared/src/models/node.rs @@ -45,6 +45,8 @@ pub struct DiscoveryNode { pub is_validated: bool, pub is_active: bool, #[serde(default)] + pub is_provider_whitelisted: bool, + #[serde(default)] pub is_blacklisted: bool, } @@ -54,6 +56,7 @@ impl DiscoveryNode { node: new_node, is_validated: self.is_validated, is_active: self.is_active, + is_provider_whitelisted: self.is_provider_whitelisted, is_blacklisted: self.is_blacklisted, } } @@ -73,6 +76,7 @@ impl From for DiscoveryNode { 
node, is_validated: false, // Default values for new discovery nodes is_active: false, + is_provider_whitelisted: false, is_blacklisted: false, } } diff --git a/smart-contracts b/smart-contracts index 0a343de8..85b83dfe 160000 --- a/smart-contracts +++ b/smart-contracts @@ -1 +1 @@ -Subproject commit 0a343de8af7c8da76ae55c67592ee1bf03569343 +Subproject commit 85b83dfe2cec7b7776d7323f41347d08c907b0d2 diff --git a/validator/src/main.rs b/validator/src/main.rs index b1b3d0c6..061f63e5 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -136,8 +136,9 @@ fn main() { loop { if let Some(validator) = &mut synthetic_validator { runtime.block_on(async { - let validation_result = validator.validate_work().await; - println!("Validation result: {:?}", validation_result); + if let Err(e) = validator.validate_work().await { + error!("Failed to validate work: {}", e); + } }); } diff --git a/worker/src/cli/command.rs b/worker/src/cli/command.rs index 880d32b9..a899986e 100644 --- a/worker/src/cli/command.rs +++ b/worker/src/cli/command.rs @@ -7,7 +7,6 @@ use crate::docker::DockerService; use crate::metrics::store::MetricsStore; use crate::operations::compute_node::ComputeNodeOperations; use crate::operations::heartbeat::service::HeartbeatService; -use crate::operations::provider::ProviderError; use crate::operations::provider::ProviderOperations; use crate::services::discovery::DiscoveryService; use crate::state::system_state::SystemState; @@ -151,18 +150,15 @@ pub async fn execute_command( .unwrap(), ); - let provider_ops = ProviderOperations::new( - &provider_wallet_instance, - &contracts.compute_registry, - &contracts.ai_token, - &contracts.prime_network, - ); + let provider_ops = + ProviderOperations::new(provider_wallet_instance.clone(), contracts.clone()); + + let provider_ops_cancellation = cancellation_token.clone(); let compute_node_ops = ComputeNodeOperations::new( &provider_wallet_instance, &node_wallet_instance, - &contracts.compute_registry, - 
&contracts.prime_network, + contracts.clone(), ); let discovery_service = @@ -170,7 +166,6 @@ pub async fn execute_command( let pool_id = U256::from(*compute_pool_id as u32); Console::progress("Loading pool info"); - println!("Loading pool info {}", pool_id); let pool_info = loop { match contracts.compute_pool.get_pool_info(pool_id).await { Ok(pool) if pool.status == PoolStatus::ACTIVE => break Arc::new(pool), @@ -273,8 +268,6 @@ pub async fn execute_command( state, ); - let mut attempts = 0; - let max_attempts = 100; let gpu_count: u32 = match &node_config.compute_specs { Some(specs) => specs .gpu @@ -283,22 +276,17 @@ pub async fn execute_command( .unwrap_or(0), None => 0, }; + let compute_units = U256::from(std::cmp::max(1, gpu_count * 1000)); - let compute_units = U256::from(gpu_count * 1000); - - let provider_total_compute = match contracts - .compute_registry - .get_provider_total_compute( - provider_wallet_instance.wallet.default_signer().address(), - ) - .await - { - Ok(compute) => compute, + // Check if provider exists first + let provider_exists = match provider_ops.check_provider_exists().await { + Ok(exists) => exists, Err(e) => { - Console::error(&format!("❌ Failed to get provider total compute: {}", e)); + Console::error(&format!("❌ Failed to check if provider exists: {}", e)); std::process::exit(1); } }; + let stake_manager = match contracts.stake_manager.as_ref() { Some(stake_manager) => stake_manager, None => { @@ -307,95 +295,126 @@ pub async fn execute_command( } }; - let required_stake = match stake_manager - .calculate_stake(compute_units, provider_total_compute) - .await - { - Ok(stake) => stake, - Err(e) => { - Console::error(&format!("❌ Failed to calculate required stake: {}", e)); - std::process::exit(1); - } - }; - Console::info( - "Required stake", - &format!("{}", required_stake / U256::from(10u128.pow(18))), - ); - - // TODO: Currently we do not increase stake when adding more nodes - - while attempts < max_attempts { - let spinner = 
Console::spinner("Registering provider..."); - if let Err(e) = provider_ops.register_provider(required_stake).await { - spinner.finish_and_clear(); // Finish spinner before logging error - if let ProviderError::NotWhitelisted = e { - Console::error("❌ Provider not whitelisted, retrying in 15 seconds..."); - tokio::select! { - _ = tokio::time::sleep(tokio::time::Duration::from_secs(15)) => {} - _ = cancellation_token.cancelled() => { - return Ok(()); // or return an error if you prefer - } - } - attempts += 1; - continue; // Retry registration - } else { - Console::error(&format!("❌ Failed to register provider: {}", e)); + if !provider_exists { + let required_stake = match stake_manager + .calculate_stake(compute_units, U256::from(0)) + .await + { + Ok(stake) => stake, + Err(e) => { + Console::error(&format!("❌ Failed to calculate required stake: {}", e)); std::process::exit(1); } + }; + Console::info( + "Required stake", + &format!("{}", required_stake / U256::from(10u128.pow(18))), + ); + + const MAX_REGISTER_PROVIDER_ATTEMPTS: u32 = 100; + if let Err(e) = provider_ops + .retry_register_provider( + required_stake, + MAX_REGISTER_PROVIDER_ATTEMPTS, + cancellation_token.clone(), + ) + .await + { + Console::error(&format!("❌ Failed to register provider: {}", e)); + std::process::exit(1); } - spinner.finish_and_clear(); // Finish spinner if registration is successful - break; // Exit loop if registration is successful } - if attempts >= max_attempts { - Console::error(&format!( - "❌ Failed to register provider after {} attempts", - attempts - )); - std::process::exit(1); - }; - let provider_stake = match stake_manager - .get_stake(provider_wallet_instance.wallet.default_signer().address()) - .await - { - Ok(stake) => stake, + provider_ops.start_monitoring(provider_ops_cancellation); + + let compute_node_exists = match compute_node_ops.check_compute_node_exists().await { + Ok(exists) => exists, Err(e) => { - Console::error(&format!("❌ Failed to get provider stake: {}", 
e)); + Console::error(&format!("❌ Failed to check if compute node exists: {}", e)); std::process::exit(1); } }; - Console::info( - "Provider stake", - &format!("{}", provider_stake / U256::from(10u128.pow(18))), - ); - if provider_stake < required_stake { - let spinner = Console::spinner("Increasing stake..."); - if let Err(e) = provider_ops - .increase_stake(required_stake - provider_stake) + if compute_node_exists { + // TODO: What if we have two nodes? + Console::info( + "Compute node status", + "Already exists, recovering from previous state", + ); + recover_last_state = true; + } else { + let provider_total_compute = match contracts + .compute_registry + .get_provider_total_compute( + provider_wallet_instance.wallet.default_signer().address(), + ) .await { - spinner.finish_and_clear(); - Console::error(&format!("❌ Failed to increase stake: {}", e)); - std::process::exit(1); - } - spinner.finish_and_clear(); - } + Ok(compute) => compute, + Err(e) => { + Console::error(&format!("❌ Failed to get provider total compute: {}", e)); + std::process::exit(1); + } + }; - match compute_node_ops.add_compute_node(compute_units).await { - Ok(added_node) => { - if added_node { - // If we are adding a new compute node we wait for a proper - // invite and do not recover from previous state - recover_last_state = false; + let provider_stake = stake_manager + .get_stake(provider_wallet_instance.wallet.default_signer().address()) + .await + .unwrap_or_default(); + + let required_stake = match stake_manager + .calculate_stake(compute_units, provider_total_compute) + .await + { + Ok(stake) => stake, + Err(e) => { + Console::error(&format!("❌ Failed to calculate required stake: {}", e)); + std::process::exit(1); + } + }; + + if required_stake > provider_stake { + Console::info( + "Provider stake is less than required stake", + &format!( + "Required: {} tokens, Current: {} tokens", + required_stake / U256::from(10u128.pow(18)), + provider_stake / U256::from(10u128.pow(18)) + ), + ); 
+ + match provider_ops + .increase_stake(required_stake - provider_stake) + .await + { + Ok(_) => { + Console::success("Successfully increased stake"); + } + Err(e) => { + Console::error(&format!("❌ Failed to increase stake: {}", e)); + std::process::exit(1); + } } } - Err(e) => { - Console::error(&format!("❌ Failed to add compute node: {}", e)); - std::process::exit(1); + + match compute_node_ops.add_compute_node(compute_units).await { + Ok(added_node) => { + if added_node { + // If we are adding a new compute node we wait for a proper + // invite and do not recover from previous state + recover_last_state = false; + } + } + Err(e) => { + Console::error(&format!("❌ Failed to add compute node: {}", e)); + std::process::exit(1); + } } } + // Start monitoring compute node status on chain + compute_node_ops.start_monitoring(cancellation_token.clone()); + if let Err(e) = discovery_service.upload_discovery_info(&node_config).await { Console::error(&format!("❌ Failed to upload discovery info: {}", e)); std::process::exit(1); diff --git a/worker/src/docker/docker_manager.rs b/worker/src/docker/docker_manager.rs index fe4ae750..c6125fc1 100644 --- a/worker/src/docker/docker_manager.rs +++ b/worker/src/docker/docker_manager.rs @@ -335,7 +335,7 @@ impl DockerManager { .unwrap_or_default(), }; - info!("Retrieved details for container {}", container_id); + debug!("Retrieved details for container {}", container_id); Ok(info) } diff --git a/worker/src/docker/service.rs b/worker/src/docker/service.rs index df924520..25e9b989 100644 --- a/worker/src/docker/service.rs +++ b/worker/src/docker/service.rs @@ -71,6 +71,7 @@ impl DockerService { let manager_clone = manager.clone(); let terminate_manager = manager_clone.clone(); + let task_state_clone = state.clone(); loop { tokio::select! 
{ _ = cancellation_token.cancelled() => { @@ -88,9 +89,8 @@ impl DockerService { tasks.retain(|handle| !handle.is_finished()); } - let current_task = state.get_current_task().await; + let current_task = task_state_clone.get_current_task().await; let task_id = generate_task_id(¤t_task); - let task_clone = current_task.clone(); let all_containers = manager.list_containers(true).await.unwrap(); @@ -130,7 +130,7 @@ impl DockerService { if has_running_tasks { Console::info("DockerService", "Container is still starting ..."); } else { - let last_started_time = match state.get_last_started().await { + let last_started_time = match task_state_clone.get_last_started().await { Some(time) => time, None => DateTime::from_timestamp(0, 0).unwrap(), }; @@ -140,14 +140,13 @@ impl DockerService { Console::info("DockerService", "Waiting before starting new container ..."); } else { Console::info("DockerService", "Starting new container ..."); - let task_clone = task_clone.clone(); let manager_clone = manager_clone.clone(); - let state_clone = state.clone(); + let state_clone = task_state_clone.clone(); let has_gpu = self.has_gpu; let system_memory_mb = self.system_memory_mb; let task_bridge_socket_path = self.task_bridge_socket_path.clone(); let handle = tokio::spawn(async move { - let payload = task_clone.unwrap(); + let payload = state_clone.get_current_task().await.unwrap(); let cmd_full = (payload.command, payload.args); let cmd = match cmd_full { (Some(c), Some(a)) => { @@ -200,12 +199,12 @@ impl DockerService { let container_status = container_match.unwrap().clone(); let status = manager.get_container_details(&container_status.id).await.unwrap(); - let task_state_current = task_clone.clone().unwrap().state; + let task_state_current = task_state_clone.get_current_task().await.unwrap().state; // handle edge case where container instantly dies due to invalid command if status.status == Some(ContainerStateStatusEnum::CREATED) && task_state_current == TaskState::FAILED { 
Console::info("DockerService", "Task failed, waiting for new command from manager ..."); } else { - let task_state = match status.status { + let task_state_live = match status.status { Some(ContainerStateStatusEnum::RUNNING) => TaskState::RUNNING, Some(ContainerStateStatusEnum::CREATED) => TaskState::PENDING, Some(ContainerStateStatusEnum::EXITED) => TaskState::COMPLETED, @@ -215,9 +214,14 @@ impl DockerService { Some(ContainerStateStatusEnum::REMOVING) => TaskState::UNKNOWN, _ => TaskState::UNKNOWN, }; - if task_state == TaskState::FAILED { - Console::info("DockerService", "Task failed, waiting for new command from manager and restarting container"); - let terminate_manager_clone = terminate_manager.clone(); + + // Only log if state changed + if task_state_live != task_state_current { + Console::info("DockerService", &format!("Task state changed from {:?} to {:?}", task_state_current, task_state_live)); + + if task_state_live == TaskState::FAILED { + Console::info("DockerService", "Task failed, waiting for new command from manager and restarting container"); + let terminate_manager_clone = terminate_manager.clone(); let handle = tokio::spawn(async move { let termination = terminate_manager_clone.remove_container(&container_status.id).await; match termination { @@ -226,9 +230,12 @@ impl DockerService { } }); terminating_container_tasks.lock().await.push(handle); + } + } + + if let Some(task) = task_state_clone.get_current_task().await { + task_state_clone.update_task_state(task.id, task_state_live).await; } - Console::info("DockerService", &format!("Task state of task {}: {:?}", &task_id.unwrap(), task_state)); - state.update_task_state(task_clone.unwrap().id, task_state).await; } } } diff --git a/worker/src/docker/state.rs b/worker/src/docker/state.rs index 89b55abe..7d98ed2b 100644 --- a/worker/src/docker/state.rs +++ b/worker/src/docker/state.rs @@ -20,6 +20,11 @@ impl DockerState { pub async fn set_current_task(&self, task: Option) { let mut current_task = 
self.current_task.lock().await; + if let (Some(new_task), Some(existing_task)) = (&task, &*current_task) { + if new_task.id == existing_task.id { + return; + } + } *current_task = task; } @@ -29,6 +34,8 @@ impl DockerState { if task.id == task_id { task.state = state; } + } else { + println!("No current task found when trying to update state"); } } diff --git a/worker/src/operations/compute_node.rs b/worker/src/operations/compute_node.rs index 306d7bad..a3177e76 100644 --- a/worker/src/operations/compute_node.rs +++ b/worker/src/operations/compute_node.rs @@ -1,40 +1,80 @@ use crate::console::Console; use alloy::{primitives::utils::keccak256 as keccak, primitives::U256, signers::Signer}; -use shared::web3::contracts::implementations::{ - compute_registry_contract::ComputeRegistryContract, - prime_network_contract::PrimeNetworkContract, -}; +use shared::web3::contracts::core::builder::Contracts; use shared::web3::wallet::Wallet; +use std::sync::Arc; +use tokio::time::{sleep, Duration}; +use tokio_util::sync::CancellationToken; pub struct ComputeNodeOperations<'c> { provider_wallet: &'c Wallet, node_wallet: &'c Wallet, - compute_registry: &'c ComputeRegistryContract, - prime_network: &'c PrimeNetworkContract, + contracts: Arc, } impl<'c> ComputeNodeOperations<'c> { pub fn new( provider_wallet: &'c Wallet, node_wallet: &'c Wallet, - compute_registry: &'c ComputeRegistryContract, - prime_network: &'c PrimeNetworkContract, + contracts: Arc, ) -> Self { Self { provider_wallet, node_wallet, - compute_registry, - prime_network, + contracts, } } + pub fn start_monitoring(&self, cancellation_token: CancellationToken) { + let provider_address = self.provider_wallet.wallet.default_signer().address(); + let node_address = self.node_wallet.wallet.default_signer().address(); + let contracts = self.contracts.clone(); + let mut last_active = false; + let mut last_validated = false; + let mut first_check = true; - // Returns true if the compute node was added, false if it already 
exists - pub async fn add_compute_node( - &self, - compute_units: U256, - ) -> Result> { - Console::section("🔄 Adding compute node"); + tokio::spawn(async move { + loop { + tokio::select! { + _ = cancellation_token.cancelled() => { + Console::info("Monitor", "Shutting down node status monitor..."); + break; + } + _ = async { + match contracts.compute_registry.get_node(provider_address, node_address).await { + Ok((active, validated)) => { + if first_check { + Console::info("Initial node status", &format!("Part of compute pool: {}, Validated: {}", active, validated)); + first_check = false; + last_active = active; + last_validated = validated; + } else if active != last_active { + Console::info( + "Node pool membership status changed on chain", + &format!("Part of compute pool: {}", active), + ); + last_active = active; + } else if validated != last_validated { + Console::info( + "Node validation status changed on chain", + &format!("Validated: {}", validated), + ); + last_validated = validated; + } + } + Err(e) => { + Console::error(&format!("Failed to get node status: {}", e)); + } + } + sleep(Duration::from_secs(5)).await; + } => {} + } + } + }); + } + + pub async fn check_compute_node_exists(&self) -> Result> { let compute_node = self + .contracts .compute_registry .get_node( self.provider_wallet.wallet.default_signer().address(), @@ -45,15 +85,28 @@ impl<'c> ComputeNodeOperations<'c> { match compute_node { Ok(_) => { Console::info("Compute node status", "Compute node already exists"); - return Ok(false); + Ok(true) } Err(_) => { Console::info( "Compute node status", "Compute node does not exist - creating", ); + Ok(false) } } + } + + // Returns true if the compute node was added, false if it already exists + pub async fn add_compute_node( + &self, + compute_units: U256, + ) -> Result> { + Console::section("🔄 Adding compute node"); + + if self.check_compute_node_exists().await? 
{ + return Ok(false); + } Console::progress("Adding compute node"); Console::info( @@ -81,6 +134,7 @@ impl<'c> ComputeNodeOperations<'c> { // Create the signature bytes let add_node_tx = self + .contracts .prime_network .add_compute_node(node_address, compute_units, signature.to_vec()) .await?; diff --git a/worker/src/operations/heartbeat/service.rs b/worker/src/operations/heartbeat/service.rs index b68b3852..56d20b0b 100644 --- a/worker/src/operations/heartbeat/service.rs +++ b/worker/src/operations/heartbeat/service.rs @@ -187,9 +187,20 @@ impl HeartbeatService { let heartbeat_response = response.data.clone(); log::debug!("Heartbeat response: {:?}", heartbeat_response); + + // Get current task before updating + let current_task = docker_service.state.get_current_task().await; + let task = match heartbeat_response.current_task { Some(task) => { - log::info!("Current task is to run image: {:?}", task.image); + // Only log if task image changed or there was no previous task + if current_task + .as_ref() + .map(|t| t.image != task.image) + .unwrap_or(true) + { + log::info!("Current task is to run image: {:?}", task.image); + } Some(task) } None => None, diff --git a/worker/src/operations/provider.rs b/worker/src/operations/provider.rs index 1d5ef132..c3d131f0 100644 --- a/worker/src/operations/provider.rs +++ b/worker/src/operations/provider.rs @@ -1,31 +1,165 @@ use crate::console::Console; use alloy::primitives::{Address, U256}; -use shared::web3::contracts::implementations::{ - ai_token_contract::AIToken, compute_registry_contract::ComputeRegistryContract, - prime_network_contract::PrimeNetworkContract, -}; +use shared::web3::contracts::core::builder::Contracts; use shared::web3::wallet::Wallet; use std::fmt; -pub struct ProviderOperations<'c> { - wallet: &'c Wallet, - compute_registry: &'c ComputeRegistryContract, - ai_token: &'c AIToken, - prime_network: &'c PrimeNetworkContract, +use std::sync::Arc; +use tokio::time::{sleep, Duration}; +use 
tokio_util::sync::CancellationToken; + +pub struct ProviderOperations { + wallet: Arc, + contracts: Arc, } -impl<'c> ProviderOperations<'c> { - pub fn new( - wallet: &'c Wallet, - compute_registry: &'c ComputeRegistryContract, - ai_token: &'c AIToken, - prime_network: &'c PrimeNetworkContract, - ) -> Self { - Self { - wallet, - compute_registry, - ai_token, - prime_network, +impl ProviderOperations { + pub fn new(wallet: Arc, contracts: Arc) -> Self { + Self { wallet, contracts } + } + pub fn start_monitoring(&self, cancellation_token: CancellationToken) { + let provider_address = self.wallet.wallet.default_signer().address(); + let contracts = self.contracts.clone(); + + // Only start monitoring if we have a stake manager + let mut last_stake = U256::ZERO; + let mut last_balance = U256::ZERO; + let mut last_whitelist_status = false; + let mut first_check = true; + + tokio::spawn(async move { + loop { + tokio::select! { + _ = cancellation_token.cancelled() => { + Console::info("Monitor", "Shutting down provider status monitor..."); + break; + } + _ = async { + let stake_manager = match contracts.stake_manager.as_ref() { + Some(sm) => sm, + None => { + Console::error("Cannot start monitoring - stake manager not initialized"); + return; + } + }; + + // Monitor stake + match stake_manager.get_stake(provider_address).await { + Ok(stake) => { + if first_check || stake != last_stake { + Console::info("Provider stake", &format!("{} tokens", stake / U256::from(10u128.pow(18)))); + if !first_check { + Console::info("Stake changed", &format!("From {} to {} tokens", + last_stake / U256::from(10u128.pow(18)), + stake / U256::from(10u128.pow(18)) + )); + } + last_stake = stake; + } + Some(stake) + }, + Err(e) => { + Console::error(&format!("Failed to get stake: {}", e)); + None + } + }; + + // Monitor AI token balance + match contracts.ai_token.balance_of(provider_address).await { + Ok(balance) => { + if first_check || balance != last_balance { + Console::info("AI Token 
Balance", &format!("{} tokens", balance / U256::from(10u128.pow(18)))); + if !first_check { + Console::info("Balance changed", &format!("From {} to {} tokens", + last_balance / U256::from(10u128.pow(18)), + balance / U256::from(10u128.pow(18)) + )); + } + last_balance = balance; + } + Some(balance) + }, + Err(e) => { + Console::error(&format!("Failed to get AI token balance: {}", e)); + None + } + }; + + // Monitor whitelist status + match contracts.compute_registry.get_provider(provider_address).await { + Ok(provider) => { + if first_check || provider.is_whitelisted != last_whitelist_status { + Console::info("Whitelist status", &format!("{}", provider.is_whitelisted)); + if !first_check { + Console::info("Whitelist status changed", &format!("From {} to {}", + last_whitelist_status, + provider.is_whitelisted + )); + } + last_whitelist_status = provider.is_whitelisted; + } + }, + Err(e) => { + Console::error(&format!("Failed to get provider whitelist status: {}", e)); + } + }; + + first_check = false; + sleep(Duration::from_secs(5)).await; + } => {} + } + } + }); + } + + pub async fn check_provider_exists(&self) -> Result { + let address = self.wallet.wallet.default_signer().address(); + + let provider = self + .contracts + .compute_registry + .get_provider(address) + .await + .map_err(|_| ProviderError::Other)?; + + Ok(provider.provider_address != Address::default()) + } + pub async fn retry_register_provider( + &self, + stake: U256, + max_attempts: u32, + cancellation_token: CancellationToken, + ) -> Result<(), ProviderError> { + let mut attempts = 0; + while attempts < max_attempts { + let spinner = Console::spinner("Registering provider..."); + match self.register_provider(stake).await { + Ok(_) => { + spinner.finish_and_clear(); + return Ok(()); + } + Err(e) => { + spinner.finish_and_clear(); + if let ProviderError::NotWhitelisted = e { + Console::error("❌ Provider not whitelisted, retrying in 15 seconds..."); + tokio::select! 
{ + _ = tokio::time::sleep(tokio::time::Duration::from_secs(15)) => {} + _ = cancellation_token.cancelled() => { + return Err(e); + } + } + attempts += 1; + continue; + } else { + return Err(e); + } + } + } } + Console::error(&format!( + "❌ Failed to register provider after {} attempts", + attempts + )); + Err(ProviderError::Other) } pub async fn register_provider(&self, stake: U256) -> Result<(), ProviderError> { @@ -33,6 +167,7 @@ impl<'c> ProviderOperations<'c> { let address = self.wallet.wallet.default_signer().address(); let balance: U256 = self + .contracts .ai_token .balance_of(address) .await @@ -44,14 +179,8 @@ impl<'c> ProviderOperations<'c> { .await .map_err(|_| ProviderError::Other)?; - // Check if we are already provider - let provider = self - .compute_registry - .get_provider(address) - .await - .map_err(|_| ProviderError::Other)?; + let provider_exists = self.check_provider_exists().await?; - let provider_exists = provider.provider_address != Address::default(); Console::info( "AI Token Balance", &format!("{} tokens", balance / U256::from(10u128.pow(18))), @@ -65,6 +194,7 @@ impl<'c> ProviderOperations<'c> { if !provider_exists { let spinner = Console::spinner("Approving AI Token for Stake transaction"); let approve_tx = self + .contracts .ai_token .approve(stake) .await @@ -73,7 +203,7 @@ impl<'c> ProviderOperations<'c> { spinner.finish_and_clear(); let spinner = Console::spinner("Registering Provider"); - let register_tx = match self.prime_network.register_provider(stake).await { + let register_tx = match self.contracts.prime_network.register_provider(stake).await { Ok(tx) => tx, Err(e) => { println!("Failed to register provider: {:?}", e); @@ -90,6 +220,7 @@ impl<'c> ProviderOperations<'c> { // Get provider details again - cleanup later let spinner = Console::spinner("Getting provider details"); let provider = self + .contracts .compute_registry .get_provider(address) .await @@ -117,6 +248,7 @@ impl<'c> ProviderOperations<'c> { let address = 
self.wallet.wallet.default_signer().address(); let balance: U256 = self + .contracts .ai_token .balance_of(address) .await @@ -138,6 +270,7 @@ impl<'c> ProviderOperations<'c> { let spinner = Console::spinner("Approving AI Token for additional stake"); let approve_tx = self + .contracts .ai_token .approve(additional_stake) .await @@ -146,7 +279,7 @@ impl<'c> ProviderOperations<'c> { spinner.finish_and_clear(); let spinner = Console::spinner("Increasing stake"); - let stake_tx = match self.prime_network.stake(additional_stake).await { + let stake_tx = match self.contracts.prime_network.stake(additional_stake).await { Ok(tx) => tx, Err(e) => { println!("Failed to increase stake: {:?}", e); diff --git a/worker/src/services/discovery.rs b/worker/src/services/discovery.rs index b9f4c992..154d4b9d 100644 --- a/worker/src/services/discovery.rs +++ b/worker/src/services/discovery.rs @@ -43,7 +43,6 @@ impl<'b> DiscoveryService<'b> { ); headers.insert("x-signature", signature_string.parse().unwrap()); let request_url = format!("{}{}", self.base_url, &self.endpoint); - println!("Request URL: {:?}", request_url); let response = reqwest::Client::new() .put(&request_url) From fb7c9e33e969e4bcd90b68ec2761d9677c28b54b Mon Sep 17 00:00:00 2001 From: JannikSt Date: Thu, 20 Mar 2025 22:54:40 -0700 Subject: [PATCH 56/85] improve boot sequence (#157) --- .env.example | 4 +- Cargo.lock | 1 + Makefile | 21 +++--- worker/Cargo.toml | 1 + worker/src/api/server.rs | 2 - worker/src/checks/hardware/gpu.rs | 1 + worker/src/checks/hardware/hardware_check.rs | 10 +-- worker/src/checks/hardware/memory.rs | 2 +- worker/src/checks/hardware/storage.rs | 2 +- worker/src/checks/software/docker.rs | 22 ++---- worker/src/checks/software/software_check.rs | 3 +- worker/src/cli/command.rs | 48 +++++++----- worker/src/console/logger.rs | 78 +++++++++++++------- worker/src/docker/taskbridge/bridge.rs | 4 +- worker/src/operations/compute_node.rs | 62 ++++++---------- worker/src/operations/provider.rs | 66 
+++++++++-------- worker/src/services/discovery.rs | 2 +- 17 files changed, 170 insertions(+), 159 deletions(-) diff --git a/.env.example b/.env.example index 13b8826c..5ba16b6b 100644 --- a/.env.example +++ b/.env.example @@ -11,9 +11,9 @@ PRIVATE_KEY_VALIDATOR= VALIDATOR_ADDRESS= # Provider with their node -PROVIDER_PRIVATE_KEY= +PRIVATE_KEY_PROVIDER= PROVIDER_ADDRESS= -NODE_PRIVATE_KEY= +PRIVATE_KEY_NODE= NODE_ADDRESS= # Pool Owner diff --git a/Cargo.lock b/Cargo.lock index 2d568126..28ee9bf6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6996,6 +6996,7 @@ dependencies = [ "tokio", "tokio-util", "toml", + "unicode-width 0.2.0", "url", "uuid", "validator 0.16.1", diff --git a/Makefile b/Makefile index 7c8ac483..5c1ac311 100644 --- a/Makefile +++ b/Makefile @@ -73,8 +73,6 @@ watch-discovery: watch-worker: set -a; source ${ENV_FILE}; set +a; \ - PRIVATE_KEY_PROVIDER=$${PROVIDER_PRIVATE_KEY} \ - PRIVATE_KEY_NODE=$${NODE_PRIVATE_KEY} \ cargo watch -w worker/src -x "run --bin worker -- run --port 8091 --external-ip $${WORKER_EXTERNAL_IP:-localhost} --compute-pool-id $$WORKER_COMPUTE_POOL_ID --validator-address $$VALIDATOR_ADDRESS" watch-validator: @@ -140,14 +138,17 @@ watch-worker-remote: setup-remote setup-tunnel sync-remote $(SSH_CONNECTION) -t "cd ~/$(notdir $(CURDIR)) && \ export PATH=\"\$$HOME/.cargo/bin:\$$PATH\" && \ . 
\"\$$HOME/.cargo/env\" && \ - set -a && source .env && set +a && \ - export EXTERNAL_IP=$(EXTERNAL_IP) && \ - RUST_BACKTRACE=1 RUST_LOG=debug PRIVATE_KEY_PROVIDER="$$PROVIDER_PRIVATE_KEY" PRIVATE_KEY_NODE="$$NODE_PRIVATE_KEY" cargo watch -w worker/src -x \"run --bin worker -- run \ - --port $(PORT) \ - --external-ip \$$EXTERNAL_IP \ - --compute-pool-id \$$WORKER_COMPUTE_POOL_ID \ - --validator-address \$$VALIDATOR_ADDRESS \ - 2>&1 | tee worker.log\"" + export TERM=xterm-256color && \ + bash --login -i -c '\ + set -a && source .env && set +a && \ + export EXTERNAL_IP=$(EXTERNAL_IP) && \ + clear && \ + RUST_BACKTRACE=1 RUST_LOG=debug cargo watch -w worker/src -x \"run --bin worker -- run \ + --port $(PORT) \ + --external-ip \$$EXTERNAL_IP \ + --compute-pool-id \$$WORKER_COMPUTE_POOL_ID \ + --validator-address \$$VALIDATOR_ADDRESS \ + 2>&1 | tee worker.log\"'" # Kill SSH tunnel .PHONY: kill-tunnel diff --git a/worker/Cargo.toml b/worker/Cargo.toml index 144b57e7..0e071d32 100644 --- a/worker/Cargo.toml +++ b/worker/Cargo.toml @@ -43,6 +43,7 @@ directories = "6.0.0" strip-ansi-escapes = "0.2.1" nalgebra = "0.33.2" sha2 = "0.10.8" +unicode-width = "0.2.0" [dev-dependencies] tempfile = "=3.14.0" diff --git a/worker/src/api/server.rs b/worker/src/api/server.rs index 0262e4df..e7e72ba8 100644 --- a/worker/src/api/server.rs +++ b/worker/src/api/server.rs @@ -33,8 +33,6 @@ pub async fn start_server( pool_info: Arc, validator_address: String, ) -> std::io::Result<()> { - println!("Starting server at http://{}:{}", host, port); - let app_state = Data::new(AppState { contracts, node_wallet, diff --git a/worker/src/checks/hardware/gpu.rs b/worker/src/checks/hardware/gpu.rs index 5164ac6a..60987661 100644 --- a/worker/src/checks/hardware/gpu.rs +++ b/worker/src/checks/hardware/gpu.rs @@ -24,6 +24,7 @@ enum GpuDevice { } pub fn detect_gpu() -> Option { + Console::title("GPU Detection"); // Changed return type to GpuSpecs match get_gpu_status() { GpuDevice::Available { diff --git 
a/worker/src/checks/hardware/hardware_check.rs b/worker/src/checks/hardware/hardware_check.rs index 661616b4..b137f51e 100644 --- a/worker/src/checks/hardware/hardware_check.rs +++ b/worker/src/checks/hardware/hardware_check.rs @@ -24,7 +24,6 @@ impl HardwareChecker { ) -> Result> { self.collect_system_info(&mut node_config)?; self.print_system_info(&node_config); - Console::success("All hardware checks passed"); Ok(node_config) } @@ -32,6 +31,7 @@ impl HardwareChecker { &self, node_config: &mut Node, ) -> Result<(), Box> { + Console::section("Hardware Checks"); if self.sys.cpus().is_empty() { return Err(Box::new(std::io::Error::new( std::io::ErrorKind::Other, @@ -95,10 +95,8 @@ impl HardwareChecker { } fn print_system_info(&self, node_config: &Node) { - Console::section("Hardware Requirements Check"); - // Print CPU Info - Console::section("CPU Information:"); + Console::title("CPU Information:"); if let Some(compute_specs) = &node_config.compute_specs { if let Some(cpu) = &compute_specs.cpu { Console::info("Cores", &cpu.cores.unwrap_or(0).to_string()); @@ -116,7 +114,7 @@ impl HardwareChecker { // Print Storage Info if let Some(storage_gb) = &compute_specs.storage_gb { - Console::section("Storage Information:"); + Console::title("Storage Information:"); Console::info("Total Storage", &format!("{} GB", storage_gb)); } if let Some(storage_path) = &compute_specs.storage_path { @@ -141,8 +139,6 @@ impl HardwareChecker { 0.0 }; Console::info("Memory", &format!("{:.0} GB", memory_gb)); - } else { - Console::warning("No compatible GPU detected"); } } else { Console::warning("No compute specs available"); diff --git a/worker/src/checks/hardware/memory.rs b/worker/src/checks/hardware/memory.rs index d6639f3e..53c15556 100644 --- a/worker/src/checks/hardware/memory.rs +++ b/worker/src/checks/hardware/memory.rs @@ -16,7 +16,7 @@ pub fn convert_to_mb(memory: u64) -> u64 { pub fn print_memory_info(total_memory: u64, free_memory: u64) { let total_gb = (total_memory + 
BYTES_TO_GB / 2) / BYTES_TO_GB; let free_gb = (free_memory + BYTES_TO_GB / 2) / BYTES_TO_GB; - Console::section("Memory Information:"); + Console::title("Memory Information:"); Console::info("Total Memory", &format!("{:.1} GB", total_gb)); Console::info("Free Memory", &format!("{:.1} GB", free_gb)); } diff --git a/worker/src/checks/hardware/storage.rs b/worker/src/checks/hardware/storage.rs index 5ece9a9c..28d32054 100644 --- a/worker/src/checks/hardware/storage.rs +++ b/worker/src/checks/hardware/storage.rs @@ -131,7 +131,7 @@ pub fn find_largest_storage() -> Option { pub fn print_storage_info() { match get_storage_info() { Ok((total, free)) => { - Console::section("Storage Information:"); + Console::title("Storage Information:"); Console::info("Total Storage", &format!("{:.1} GB", total)); Console::info("Free Storage", &format!("{:.1} GB", free)); } diff --git a/worker/src/checks/software/docker.rs b/worker/src/checks/software/docker.rs index c87d60f1..4bd7e94c 100644 --- a/worker/src/checks/software/docker.rs +++ b/worker/src/checks/software/docker.rs @@ -1,5 +1,6 @@ +use crate::console::Console; + use super::types::SoftwareCheckError; -use colored::*; pub fn check_docker_installed() -> Result<(), SoftwareCheckError> { let docker_path = std::process::Command::new("which") @@ -24,6 +25,8 @@ pub fn check_docker_installed() -> Result<(), SoftwareCheckError> { return Err(SoftwareCheckError::DockerNotRunning); } + Console::success("Docker ready"); + // Check if NVIDIA Container Toolkit is installed using which command let nvidia_toolkit = std::process::Command::new("which") .arg("nvidia-ctk") @@ -38,23 +41,12 @@ pub fn check_docker_installed() -> Result<(), SoftwareCheckError> { .map_err(|e| SoftwareCheckError::Other(format!("Failed to run nvidia-ctk: {}", e)))?; if version_check.status.success() { - println!( - "{}", - "✓ Docker check passed - Docker and NVIDIA Container Toolkit are installed and running" - .green() - ); + Console::success("NVIDIA toolkit ready"); } 
else { - println!( - "{}", - "⚠ Docker is running but NVIDIA Container Toolkit is not properly configured" - .yellow() - ); + Console::error("NVIDIA toolkit not configured"); } } else { - println!( - "{}", - "⚠ Docker is running but NVIDIA Container Toolkit is not installed".yellow() - ); + Console::error("NVIDIA toolkit not found"); } Ok(()) diff --git a/worker/src/checks/software/software_check.rs b/worker/src/checks/software/software_check.rs index 339372a3..4b8040aa 100644 --- a/worker/src/checks/software/software_check.rs +++ b/worker/src/checks/software/software_check.rs @@ -16,12 +16,11 @@ impl std::fmt::Display for SoftwareCheckError { impl Error for SoftwareCheckError {} pub fn run_software_check() -> Result<(), SoftwareCheckError> { - Console::section("Software Requirements Check"); + Console::section("Software Checks"); // Check Docker installation and connectivity Console::title("Docker:"); check_docker_installed()?; - Console::success("All software checks passed"); Ok(()) } diff --git a/worker/src/cli/command.rs b/worker/src/cli/command.rs index a899986e..342761c3 100644 --- a/worker/src/cli/command.rs +++ b/worker/src/cli/command.rs @@ -99,7 +99,7 @@ pub async fn execute_command( } => { if *disable_state_storing && *auto_recover { Console::error( - "❌ Cannot disable state storing and enable auto recover at the same time.", + "Cannot disable state storing and enable auto recover at the same time.", ); std::process::exit(1); } @@ -111,8 +111,8 @@ pub async fn execute_command( let mut recover_last_state = *auto_recover; let version = env!("CARGO_PKG_VERSION"); - Console::section("🚀 PRIME MINER INITIALIZATION"); - Console::info("Version:", version); + Console::section("🚀 PRIME WORKER INITIALIZATION"); + Console::info("Version", version); /* Initialize Wallet instances */ @@ -120,7 +120,7 @@ pub async fn execute_command( match Wallet::new(&private_key_provider, Url::parse(rpc_url).unwrap()) { Ok(wallet) => wallet, Err(err) => { - Console::error(&format!("❌ 
Failed to create wallet: {}", err)); + Console::error(&format!("Failed to create wallet: {}", err)); std::process::exit(1); } }, @@ -165,7 +165,6 @@ pub async fn execute_command( DiscoveryService::new(&node_wallet_instance, discovery_url.clone(), None); let pool_id = U256::from(*compute_pool_id as u32); - Console::progress("Loading pool info"); let pool_info = loop { match contracts.compute_pool.get_pool_info(pool_id).await { Ok(pool) if pool.status == PoolStatus::ACTIVE => break Arc::new(pool), @@ -182,7 +181,6 @@ pub async fn execute_command( } } }; - println!("Pool info: {:?}", pool_info); let node_config = Node { id: node_wallet_instance @@ -278,6 +276,8 @@ pub async fn execute_command( }; let compute_units = U256::from(std::cmp::max(1, gpu_count * 1000)); + Console::section("Syncing with Network"); + // Check if provider exists first let provider_exists = match provider_ops.check_provider_exists().await { Ok(exists) => exists, @@ -295,7 +295,18 @@ pub async fn execute_command( } }; - if !provider_exists { + Console::title("Provider Status"); + let is_whitelisted = match provider_ops.check_provider_whitelisted().await { + Ok(is_whitelisted) => is_whitelisted, + Err(e) => { + Console::error(&format!("Failed to check provider whitelist status: {}", e)); + std::process::exit(1); + } + }; + + if provider_exists && is_whitelisted { + Console::success("Provider is registered and whitelisted"); + } else { let required_stake = match stake_manager .calculate_stake(compute_units, U256::from(0)) .await @@ -311,7 +322,7 @@ pub async fn execute_command( &format!("{}", required_stake / U256::from(10u128.pow(18))), ); - const MAX_REGISTER_PROVIDER_ATTEMPTS: u32 = 100; + const MAX_REGISTER_PROVIDER_ATTEMPTS: u32 = 200; if let Err(e) = provider_ops .retry_register_provider( required_stake, @@ -325,8 +336,6 @@ pub async fn execute_command( } } - provider_ops.start_monitoring(provider_ops_cancellation); - let compute_node_exists = match 
compute_node_ops.check_compute_node_exists().await { Ok(exists) => exists, Err(e) => { @@ -335,12 +344,10 @@ pub async fn execute_command( } }; + Console::title("Compute Node Status"); if compute_node_exists { // TODO: What if we have two nodes? - Console::info( - "Compute node status", - "Already exists, recovering from previous state", - ); + Console::success("Compute node is registered"); recover_last_state = true; } else { let provider_total_compute = match contracts @@ -412,15 +419,18 @@ pub async fn execute_command( } } - // Start monitoring compute node status on chain - compute_node_ops.start_monitoring(cancellation_token.clone()); - if let Err(e) = discovery_service.upload_discovery_info(&node_config).await { Console::error(&format!("❌ Failed to upload discovery info: {}", e)); std::process::exit(1); } - Console::success("✅ Discovery info uploaded"); + Console::success("Discovery info uploaded"); + + Console::section("Starting Worker"); + + // Start monitoring compute node status on chain + provider_ops.start_monitoring(provider_ops_cancellation); + compute_node_ops.start_monitoring(cancellation_token.clone()); // 6. Start HTTP Server to receive challenges and invites to join cluster Console::info( @@ -455,7 +465,7 @@ pub async fn execute_command( Ok(()) } Commands::Check {} => { - Console::section("🔍 PRIME MINER SYSTEM CHECK"); + Console::section("🔍 PRIME WORKER SYSTEM CHECK"); Console::info("═", &"═".repeat(50)); // Run hardware checks diff --git a/worker/src/console/logger.rs b/worker/src/console/logger.rs index d4ea2704..ec7176cc 100644 --- a/worker/src/console/logger.rs +++ b/worker/src/console/logger.rs @@ -1,68 +1,94 @@ -use console::style; +use console::{style, Term}; +use indicatif::{ProgressBar, ProgressStyle}; +use std::cmp; use std::time::Duration; +use unicode_width::UnicodeWidthStr; pub struct Console; impl Console { - const fn get_width() -> usize { - 40 // Base width for horizontal lines + /// Maximum content width for the box. 
+ const MAX_WIDTH: usize = 80; + + /// Calculates the content width for boxes. + /// It uses the available terminal width (minus a margin) and caps it at MAX_WIDTH. + fn get_content_width() -> usize { + let term_width = Term::stdout().size().1 as usize; + // Leave a margin of 10 columns. + let available = if term_width > 10 { + term_width - 10 + } else { + term_width + }; + cmp::min(available, Self::MAX_WIDTH) } - fn horizontal_border() -> String { - "═".repeat(Self::get_width()) + /// Centers a given text within a given width based on its display width. + fn center_text(text: &str, width: usize) -> String { + let text_width = UnicodeWidthStr::width(text); + if width > text_width { + let total_padding = width - text_width; + let left = total_padding / 2; + let right = total_padding - left; + format!("{}{}{}", " ".repeat(left), text, " ".repeat(right)) + } else { + text.to_string() + } } + /// Prints a section header as an aligned box. pub fn section(title: &str) { - println!(); - let width = Self::get_width(); - let formatted_title = format!("{:^width$}", title, width = width); - let border = Self::horizontal_border(); + let content_width = Self::get_content_width(); + let top_border = format!("╔{}╗", "═".repeat(content_width)); + let centered_title = Self::center_text(title, content_width); + let middle_line = format!("║{}║", centered_title); + let bottom_border = format!("╚{}╝", "═".repeat(content_width)); - println!("{}", style(format!("╔{}╗", border)).magenta().bold()); - println!("{}", style(formatted_title).magenta().bold()); - println!("{}", style(format!("╚{}╝", border)).magenta().bold()); + println!(); + println!("{}", style(top_border).white().bold()); + println!("{}", style(middle_line).white().bold()); + println!("{}", style(bottom_border).white().bold()); } + /// Prints a sub-title. 
pub fn title(text: &str) { println!(); - let width = Self::get_width(); - let formatted_text = format!("{:^width$}", text, width = width); - let border = Self::horizontal_border(); - - println!("{}", style(format!("╔{}╗", border)).magenta().bold()); - println!("{}", style(formatted_text).magenta().bold()); - println!("{}", style(format!("╚{}╝", border)).magenta().bold()); + println!("{}", style(text).white().bold()); } + /// Prints an informational message. pub fn info(label: &str, value: &str) { - println!("{}: {}", style(label).dim().magenta(), style(value).white()); + println!("{}: {}", style(label).dim().white(), style(value).white()); } + /// Prints a success message. pub fn success(text: &str) { println!("{} {}", style("✓").green().bold(), style(text).green()); } + /// Prints a warning message. pub fn warning(text: &str) { println!("{} {}", style("⚠").yellow().bold(), style(text).yellow()); } + /// Prints an error message. pub fn error(text: &str) { println!("{} {}", style("✗").red().bold(), style(text).red()); } + /// Prints a progress message. pub fn progress(text: &str) { println!("{} {}", style("→").cyan().bold(), style(text).cyan()); } - pub fn spinner(text: &str) -> indicatif::ProgressBar { - let pb = indicatif::ProgressBar::new_spinner(); + /// Creates and returns an enhanced spinner with a dark-themed style. 
+ pub fn spinner(_text: &str) -> ProgressBar { + let pb = ProgressBar::new_spinner(); pb.set_style( - indicatif::ProgressStyle::default_spinner() - .tick_strings(&["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]) - .template("{spinner:.magenta} {msg}") + ProgressStyle::default_spinner() + .template("{spinner:.white} {msg}") .unwrap(), ); - pb.set_message(text.to_string()); pb.enable_steady_tick(Duration::from_millis(100)); pb } diff --git a/worker/src/docker/taskbridge/bridge.rs b/worker/src/docker/taskbridge/bridge.rs index 80b92440..352ebb30 100644 --- a/worker/src/docker/taskbridge/bridge.rs +++ b/worker/src/docker/taskbridge/bridge.rs @@ -78,7 +78,7 @@ impl TaskBridge { pub async fn run(&self) -> Result<()> { let socket_path = Path::new(&self.socket_path); - info!("Setting up TaskBridge socket at: {}", socket_path.display()); + debug!("Setting up TaskBridge socket at: {}", socket_path.display()); if let Some(parent) = socket_path.parent() { match fs::create_dir_all(parent) { @@ -107,7 +107,7 @@ impl TaskBridge { let listener = match UnixListener::bind(socket_path) { Ok(l) => { - info!("Successfully bound to Unix socket"); + debug!("Successfully bound to Unix socket"); l } Err(e) => { diff --git a/worker/src/operations/compute_node.rs b/worker/src/operations/compute_node.rs index a3177e76..c4c09e18 100644 --- a/worker/src/operations/compute_node.rs +++ b/worker/src/operations/compute_node.rs @@ -31,7 +31,6 @@ impl<'c> ComputeNodeOperations<'c> { let mut last_active = false; let mut last_validated = false; let mut first_check = true; - tokio::spawn(async move { loop { tokio::select! 
{ @@ -42,24 +41,28 @@ impl<'c> ComputeNodeOperations<'c> { _ = async { match contracts.compute_registry.get_node(provider_address, node_address).await { Ok((active, validated)) => { - if first_check { - Console::info("Initial node status", &format!("Part of compute pool: {}, Validated: {}", active, validated)); - first_check = false; - last_active = active; - last_validated = validated; - } else if active != last_active { - Console::info( - "Node pool membership status changed on chain", - &format!("Part of compute pool: {}", active), - ); + if first_check || active != last_active { + Console::info("🔄 Chain Sync - Node pool membership", &format!("{}", active)); + if !first_check { + Console::info("🔄 Chain Sync - Pool membership changed", &format!("From {} to {}", + last_active, + active + )); + } last_active = active; - } else if validated != last_validated { - Console::info( - "Node validation status changed on chain", - &format!("Validated: {}", validated), - ); + } + + if first_check || validated != last_validated { + Console::info("🔄 Chain Sync - Node validation", &format!("{}", validated)); + if !first_check { + Console::info("🔄 Chain Sync - Validation changed", &format!("From {} to {}", + last_validated, + validated + )); + } last_validated = validated; } + first_check = false; } Err(e) => { Console::error(&format!("Failed to get node status: {}", e)); @@ -83,17 +86,8 @@ impl<'c> ComputeNodeOperations<'c> { .await; match compute_node { - Ok(_) => { - Console::info("Compute node status", "Compute node already exists"); - Ok(true) - } - Err(_) => { - Console::info( - "Compute node status", - "Compute node does not exist - creating", - ); - Ok(false) - } + Ok(_) => Ok(true), + Err(_) => Ok(false), } } @@ -102,25 +96,13 @@ impl<'c> ComputeNodeOperations<'c> { &self, compute_units: U256, ) -> Result> { - Console::section("🔄 Adding compute node"); + Console::title("🔄 Adding compute node"); if self.check_compute_node_exists().await? 
{ return Ok(false); } Console::progress("Adding compute node"); - Console::info( - "Provider wallet", - &format!( - "{:?}", - self.provider_wallet.wallet.default_signer().address() - ), - ); - Console::info( - "Node wallet", - &format!("{:?}", self.node_wallet.wallet.default_signer().address()), - ); - let provider_address = self.provider_wallet.wallet.default_signer().address(); let node_address = self.node_wallet.wallet.default_signer().address(); let digest = keccak([provider_address.as_slice(), node_address.as_slice()].concat()); diff --git a/worker/src/operations/provider.rs b/worker/src/operations/provider.rs index c3d131f0..89c44d45 100644 --- a/worker/src/operations/provider.rs +++ b/worker/src/operations/provider.rs @@ -46,9 +46,9 @@ impl ProviderOperations { match stake_manager.get_stake(provider_address).await { Ok(stake) => { if first_check || stake != last_stake { - Console::info("Provider stake", &format!("{} tokens", stake / U256::from(10u128.pow(18)))); + Console::info("🔄 Chain Sync - Provider stake", &format!("{} tokens", stake / U256::from(10u128.pow(18)))); if !first_check { - Console::info("Stake changed", &format!("From {} to {} tokens", + Console::info("🔄 Chain Sync - Stake changed", &format!("From {} to {} tokens", last_stake / U256::from(10u128.pow(18)), stake / U256::from(10u128.pow(18)) )); @@ -67,9 +67,9 @@ impl ProviderOperations { match contracts.ai_token.balance_of(provider_address).await { Ok(balance) => { if first_check || balance != last_balance { - Console::info("AI Token Balance", &format!("{} tokens", balance / U256::from(10u128.pow(18)))); + Console::info("🔄 Chain Sync - AI Token Balance", &format!("{} tokens", balance / U256::from(10u128.pow(18)))); if !first_check { - Console::info("Balance changed", &format!("From {} to {} tokens", + Console::info("🔄 Chain Sync - Balance changed", &format!("From {} to {} tokens", last_balance / U256::from(10u128.pow(18)), balance / U256::from(10u128.pow(18)) )); @@ -88,9 +88,9 @@ impl 
ProviderOperations { match contracts.compute_registry.get_provider(provider_address).await { Ok(provider) => { if first_check || provider.is_whitelisted != last_whitelist_status { - Console::info("Whitelist status", &format!("{}", provider.is_whitelisted)); + Console::info("🔄 Chain Sync - Whitelist status", &format!("{}", provider.is_whitelisted)); if !first_check { - Console::info("Whitelist status changed", &format!("From {} to {}", + Console::info("🔄 Chain Sync - Whitelist status changed", &format!("From {} to {}", last_whitelist_status, provider.is_whitelisted )); @@ -123,12 +123,27 @@ impl ProviderOperations { Ok(provider.provider_address != Address::default()) } + + pub async fn check_provider_whitelisted(&self) -> Result { + let address = self.wallet.wallet.default_signer().address(); + + let provider = self + .contracts + .compute_registry + .get_provider(address) + .await + .map_err(|_| ProviderError::Other)?; + + Ok(provider.is_whitelisted) + } + pub async fn retry_register_provider( &self, stake: U256, max_attempts: u32, cancellation_token: CancellationToken, ) -> Result<(), ProviderError> { + Console::title("Registering Provider"); let mut attempts = 0; while attempts < max_attempts { let spinner = Console::spinner("Registering provider..."); @@ -140,9 +155,9 @@ impl ProviderOperations { Err(e) => { spinner.finish_and_clear(); if let ProviderError::NotWhitelisted = e { - Console::error("❌ Provider not whitelisted, retrying in 15 seconds..."); + Console::error("Provider not whitelisted, retrying in 10 seconds..."); tokio::select! 
{ - _ = tokio::time::sleep(tokio::time::Duration::from_secs(15)) => {} + _ = tokio::time::sleep(tokio::time::Duration::from_secs(10)) => {} _ = cancellation_token.cancelled() => { return Err(e); } @@ -163,8 +178,6 @@ impl ProviderOperations { } pub async fn register_provider(&self, stake: U256) -> Result<(), ProviderError> { - Console::section("🏗️ Registering Provider"); - let address = self.wallet.wallet.default_signer().address(); let balance: U256 = self .contracts @@ -181,39 +194,31 @@ impl ProviderOperations { let provider_exists = self.check_provider_exists().await?; - Console::info( - "AI Token Balance", - &format!("{} tokens", balance / U256::from(10u128.pow(18))), - ); - Console::info( - "ETH Balance", - &format!("{} ETH", eth_balance / U256::from(10u128.pow(18))), - ); - Console::info("Provider registered:", &format!("{}", provider_exists)); - if !provider_exists { + Console::info( + "AI Token Balance", + &format!("{} tokens", balance / U256::from(10u128.pow(18))), + ); + Console::info( + "ETH Balance", + &format!("{} ETH", eth_balance / U256::from(10u128.pow(18))), + ); let spinner = Console::spinner("Approving AI Token for Stake transaction"); - let approve_tx = self - .contracts + self.contracts .ai_token .approve(stake) .await .map_err(|_| ProviderError::Other)?; - Console::info("Transaction approved", &format!("{:?}", approve_tx)); spinner.finish_and_clear(); let spinner = Console::spinner("Registering Provider"); let register_tx = match self.contracts.prime_network.register_provider(stake).await { Ok(tx) => tx, - Err(e) => { - println!("Failed to register provider: {:?}", e); + Err(_) => { return Err(ProviderError::Other); } }; - Console::info( - "Registration transaction completed: ", - &format!("{:?}", register_tx), - ); + Console::info("Registration tx", &format!("{:?}", register_tx)); spinner.finish_and_clear(); } @@ -227,7 +232,6 @@ impl ProviderOperations { .map_err(|_| ProviderError::Other)?; spinner.finish_and_clear(); spinner.abandon(); - 
Console::info("Is whitelisted", &format!("{:?}", provider.is_whitelisted)); let provider_exists = provider.provider_address != Address::default(); if !provider_exists { @@ -244,7 +248,7 @@ impl ProviderOperations { } pub async fn increase_stake(&self, additional_stake: U256) -> Result<(), ProviderError> { - Console::section("💰 Increasing Provider Stake"); + Console::title("💰 Increasing Provider Stake"); let address = self.wallet.wallet.default_signer().address(); let balance: U256 = self diff --git a/worker/src/services/discovery.rs b/worker/src/services/discovery.rs index 154d4b9d..caf0c0c2 100644 --- a/worker/src/services/discovery.rs +++ b/worker/src/services/discovery.rs @@ -22,7 +22,7 @@ impl<'b> DiscoveryService<'b> { &self, node_config: &Node, ) -> Result<(), Box> { - Console::section("📦 Uploading discovery info"); + Console::title("📦 Uploading discovery info"); let request_data = serde_json::to_value(node_config) .map_err(|e| Box::new(e) as Box)?; From 5721523b3de28018fc04abf61539a702726d3230 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Thu, 20 Mar 2025 23:16:20 -0700 Subject: [PATCH 57/85] Improvement/manual transcation approval (#152) * ask user to manually accept staking transactions * ability to auto accept transactions --- Makefile | 1 + worker/src/cli/command.rs | 12 ++++++-- worker/src/operations/provider.rs | 46 +++++++++++++++++++++++++++++-- 3 files changed, 54 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 5c1ac311..e70b81ef 100644 --- a/Makefile +++ b/Makefile @@ -148,6 +148,7 @@ watch-worker-remote: setup-remote setup-tunnel sync-remote --external-ip \$$EXTERNAL_IP \ --compute-pool-id \$$WORKER_COMPUTE_POOL_ID \ --validator-address \$$VALIDATOR_ADDRESS \ + --auto-accept \ 2>&1 | tee worker.log\"'" # Kill SSH tunnel diff --git a/worker/src/cli/command.rs b/worker/src/cli/command.rs index 342761c3..3c5f94a9 100644 --- a/worker/src/cli/command.rs +++ b/worker/src/cli/command.rs @@ -72,6 +72,10 @@ pub enum Commands { 
#[arg(long, default_value = "0x0000000000000000000000000000000000000000")] validator_address: Option, + + /// Auto accept transactions + #[arg(long, default_value = "false")] + auto_accept: bool, }, Check {}, @@ -96,6 +100,7 @@ pub async fn execute_command( disable_state_storing, auto_recover, validator_address, + auto_accept, } => { if *disable_state_storing && *auto_recover { Console::error( @@ -150,8 +155,11 @@ pub async fn execute_command( .unwrap(), ); - let provider_ops = - ProviderOperations::new(provider_wallet_instance.clone(), contracts.clone()); + let provider_ops = ProviderOperations::new( + provider_wallet_instance.clone(), + contracts.clone(), + *auto_accept, + ); let provider_ops_cancellation = cancellation_token.clone(); diff --git a/worker/src/operations/provider.rs b/worker/src/operations/provider.rs index 89c44d45..88ebe6ce 100644 --- a/worker/src/operations/provider.rs +++ b/worker/src/operations/provider.rs @@ -2,20 +2,43 @@ use crate::console::Console; use alloy::primitives::{Address, U256}; use shared::web3::contracts::core::builder::Contracts; use shared::web3::wallet::Wallet; -use std::fmt; +use std::io::Write; use std::sync::Arc; +use std::{fmt, io}; use tokio::time::{sleep, Duration}; use tokio_util::sync::CancellationToken; pub struct ProviderOperations { wallet: Arc, contracts: Arc, + auto_accept: bool, } impl ProviderOperations { - pub fn new(wallet: Arc, contracts: Arc) -> Self { - Self { wallet, contracts } + pub fn new(wallet: Arc, contracts: Arc, auto_accept: bool) -> Self { + Self { + wallet, + contracts, + auto_accept, + } } + + fn prompt_user_confirmation(&self, message: &str) -> bool { + if self.auto_accept { + return true; + } + + print!("{} [y/N]: ", message); + io::stdout().flush().unwrap(); + + let mut input = String::new(); + if io::stdin().read_line(&mut input).is_ok() { + input.trim().to_lowercase() == "y" + } else { + false + } + } + pub fn start_monitoring(&self, cancellation_token: CancellationToken) { let 
provider_address = self.wallet.wallet.default_signer().address(); let contracts = self.contracts.clone(); @@ -204,6 +227,13 @@ impl ProviderOperations { &format!("{} ETH", eth_balance / U256::from(10u128.pow(18))), ); let spinner = Console::spinner("Approving AI Token for Stake transaction"); + if !self.prompt_user_confirmation(&format!( + "Do you want to approve staking {} tokens?", + stake / U256::from(10u128.pow(18)) + )) { + Console::info("Operation cancelled by user", "Staking approval declined"); + return Err(ProviderError::UserCancelled); + } self.contracts .ai_token .approve(stake) @@ -272,6 +302,14 @@ impl ProviderOperations { return Err(ProviderError::Other); } + if !self.prompt_user_confirmation(&format!( + "Do you want to approve staking {} additional tokens?", + additional_stake / U256::from(10u128.pow(18)) + )) { + Console::info("Operation cancelled by user", "Staking approval declined"); + return Err(ProviderError::UserCancelled); + } + let spinner = Console::spinner("Approving AI Token for additional stake"); let approve_tx = self .contracts @@ -304,6 +342,7 @@ impl ProviderOperations { #[derive(Debug)] pub enum ProviderError { NotWhitelisted, + UserCancelled, Other, } @@ -311,6 +350,7 @@ impl fmt::Display for ProviderError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::NotWhitelisted => write!(f, "Provider is not whitelisted"), + Self::UserCancelled => write!(f, "Operation cancelled by user"), Self::Other => write!(f, "Provider could not be registered"), } } From f5e40f4e94aad7449c455a1e6554dfc2280af8fc Mon Sep 17 00:00:00 2001 From: samsja <55492238+samsja@users.noreply.github.com> Date: Thu, 20 Mar 2025 23:20:46 -0700 Subject: [PATCH 58/85] fix typo (#158) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index dd05f92f..b90e0a42 100644 --- a/README.md +++ b/README.md @@ -164,7 +164,7 @@ First, you need to create a local worker (after you have all other services 
runn make watch-worker ``` -check that the worker as been registered on the orchestrator: +check that the worker has been registered on the orchestrator: ```bash curl -X GET http://localhost:8090/nodes -H "Authorization: Bearer admin" From 8c981bed5fcb5c9623429dc7d5fd7767ef2cc13a Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Fri, 21 Mar 2025 14:07:55 +0100 Subject: [PATCH 59/85] add taskrequest extension to support port binding for containers --- orchestrator/src/api/routes/heartbeat.rs | 1 + orchestrator/src/api/routes/task.rs | 3 +++ shared/src/models/task.rs | 3 +++ worker/src/docker/docker_manager.rs | 3 +++ worker/src/docker/service.rs | 20 +++++++++++++++++++- 5 files changed, 29 insertions(+), 1 deletion(-) diff --git a/orchestrator/src/api/routes/heartbeat.rs b/orchestrator/src/api/routes/heartbeat.rs index 4653623d..a385a63d 100644 --- a/orchestrator/src/api/routes/heartbeat.rs +++ b/orchestrator/src/api/routes/heartbeat.rs @@ -104,6 +104,7 @@ mod tests { command: None, args: None, env_vars: None, + ports: None, }; app_state.store_context.task_store.set_task(task.into()); diff --git a/orchestrator/src/api/routes/task.rs b/orchestrator/src/api/routes/task.rs index b669b775..ebea39ca 100644 --- a/orchestrator/src/api/routes/task.rs +++ b/orchestrator/src/api/routes/task.rs @@ -75,6 +75,7 @@ mod tests { command: None, args: None, env_vars: None, + ports: None }; let req = test::TestRequest::post() .uri("/tasks") @@ -117,6 +118,7 @@ mod tests { command: None, args: None, env_vars: None, + ports: Some(vec!["14141".to_string()]), } .into(); @@ -159,6 +161,7 @@ mod tests { command: None, args: None, env_vars: None, + ports: None, } .into(); diff --git a/shared/src/models/task.rs b/shared/src/models/task.rs index 40d9ea96..213da102 100644 --- a/shared/src/models/task.rs +++ b/shared/src/models/task.rs @@ -53,6 +53,7 @@ pub struct TaskRequest { pub env_vars: Option>, pub command: Option, pub args: Option>, + pub ports: Option>, } #[derive(Debug, Clone, 
Serialize, Deserialize)] @@ -63,6 +64,7 @@ pub struct Task { pub env_vars: Option>, pub command: Option, pub args: Option>, + pub ports: Option>, pub state: TaskState, } @@ -75,6 +77,7 @@ impl From for Task { command: request.command, args: request.args, env_vars: request.env_vars, + ports: request.ports, state: TaskState::PENDING, } } diff --git a/worker/src/docker/docker_manager.rs b/worker/src/docker/docker_manager.rs index c6125fc1..6609fa59 100644 --- a/worker/src/docker/docker_manager.rs +++ b/worker/src/docker/docker_manager.rs @@ -7,6 +7,7 @@ use bollard::image::CreateImageOptions; use bollard::models::ContainerStateStatusEnum; use bollard::models::DeviceRequest; use bollard::models::HostConfig; +use bollard::models::PortBinding; use bollard::volume::CreateVolumeOptions; use bollard::Docker; use futures_util::StreamExt; @@ -103,6 +104,7 @@ impl DockerManager { name: &str, env_vars: Option>, command: Option>, + port_bindings: Option>>>, gpu_enabled: bool, // Simple Vec of (host_path, container_path, read_only) volumes: Option>, @@ -201,6 +203,7 @@ impl DockerManager { }]), binds: volume_binds, shm_size: shm_size.map(|s| s as i64), + port_bindings: port_bindings, ..Default::default() }) } else { diff --git a/worker/src/docker/service.rs b/worker/src/docker/service.rs index 25e9b989..f7081e6d 100644 --- a/worker/src/docker/service.rs +++ b/worker/src/docker/service.rs @@ -3,6 +3,7 @@ use super::DockerManager; use super::DockerState; use crate::console::Console; use bollard::models::ContainerStateStatusEnum; +use bollard::models::PortBinding; use chrono::{DateTime, Utc}; use shared::models::task::Task; use shared::models::task::TaskState; @@ -23,6 +24,7 @@ pub struct DockerService { } const TASK_PREFIX: &str = "prime-task"; +const BINDABLE_PORTS_START : u16 = 20000; impl DockerService { pub fn new( @@ -157,6 +159,20 @@ impl DockerService { (Some(c), None) => vec![c], _ => vec!["sleep".to_string(), "infinity".to_string()], }; + let mut port_bindings = 
::std::collections::HashMap::new(); + if let Some(ports) = &payload.ports { + let mut next_bound_port = BINDABLE_PORTS_START; + for port in ports { + port_bindings.insert( + port.clone(), + Some(vec![PortBinding { + host_ip: Some(String::from("127.0.0.1")), + host_port: Some(next_bound_port.to_string()), + }]), + ); + next_bound_port += 1; + } + } let mut env_vars: HashMap = HashMap::new(); if let Some(env) = &payload.env_vars { @@ -179,7 +195,7 @@ impl DockerService { 67108864 // Default to 64MB in bytes } }; - match manager_clone.start_container(&payload.image, &container_task_id, Some(env_vars), Some(cmd), has_gpu, Some(volumes), Some(shm_size)).await { + match manager_clone.start_container(&payload.image, &container_task_id, Some(env_vars), Some(cmd), Some(port_bindings), has_gpu, Some(volumes), Some(shm_size)).await { Ok(container_id) => { Console::info("DockerService", &format!("Container started with id: {}", container_id)); }, @@ -303,6 +319,7 @@ mod tests { env_vars: None, command: Some("sleep".to_string()), args: Some(vec!["100".to_string()]), + ports: None, state: TaskState::PENDING, }; let task_clone = task.clone(); @@ -350,6 +367,7 @@ mod tests { env_vars: None, command: Some("invalid_command".to_string()), args: None, + ports: None, state: TaskState::PENDING, }; From ebac7168fb12f315a896569c2120eddb020a8eec Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Fri, 21 Mar 2025 14:11:40 +0100 Subject: [PATCH 60/85] fix clippy --- orchestrator/src/api/routes/task.rs | 2 +- worker/src/docker/docker_manager.rs | 2 +- worker/src/docker/service.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/orchestrator/src/api/routes/task.rs b/orchestrator/src/api/routes/task.rs index ebea39ca..34ac0078 100644 --- a/orchestrator/src/api/routes/task.rs +++ b/orchestrator/src/api/routes/task.rs @@ -75,7 +75,7 @@ mod tests { command: None, args: None, env_vars: None, - ports: None + ports: None, }; let req = test::TestRequest::post() .uri("/tasks") 
diff --git a/worker/src/docker/docker_manager.rs b/worker/src/docker/docker_manager.rs index 6609fa59..c5273c0d 100644 --- a/worker/src/docker/docker_manager.rs +++ b/worker/src/docker/docker_manager.rs @@ -203,7 +203,7 @@ impl DockerManager { }]), binds: volume_binds, shm_size: shm_size.map(|s| s as i64), - port_bindings: port_bindings, + port_bindings, ..Default::default() }) } else { diff --git a/worker/src/docker/service.rs b/worker/src/docker/service.rs index f7081e6d..cf5c767c 100644 --- a/worker/src/docker/service.rs +++ b/worker/src/docker/service.rs @@ -24,7 +24,7 @@ pub struct DockerService { } const TASK_PREFIX: &str = "prime-task"; -const BINDABLE_PORTS_START : u16 = 20000; +const BINDABLE_PORTS_START: u16 = 20000; impl DockerService { pub fn new( From b65d7888f077009089c94f4355cd502369a2d0c3 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Fri, 21 Mar 2025 17:14:37 -0700 Subject: [PATCH 61/85] support legacy private key setup (#160) * support legacy private key parameters in CLI for backwards compatibility --- worker/src/cli/command.rs | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/worker/src/cli/command.rs b/worker/src/cli/command.rs index 3c5f94a9..1d5a12fe 100644 --- a/worker/src/cli/command.rs +++ b/worker/src/cli/command.rs @@ -73,6 +73,14 @@ pub enum Commands { #[arg(long, default_value = "0x0000000000000000000000000000000000000000")] validator_address: Option, + /// Private key for the provider (not recommended, use environment variable PRIVATE_KEY_PROVIDER instead) + #[arg(long)] + private_key_provider: Option, + + /// Private key for the node (not recommended, use environment variable PRIVATE_KEY_NODE instead) + #[arg(long)] + private_key_node: Option, + /// Auto accept transactions #[arg(long, default_value = "false")] auto_accept: bool, @@ -100,6 +108,8 @@ pub async fn execute_command( disable_state_storing, auto_recover, validator_address, + private_key_provider, + private_key_node, auto_accept, 
} => { if *disable_state_storing && *auto_recover { @@ -109,10 +119,19 @@ pub async fn execute_command( std::process::exit(1); } - let private_key_provider = - std::env::var("PRIVATE_KEY_PROVIDER").expect("PRIVATE_KEY_PROVIDER must be set"); - let private_key_node = - std::env::var("PRIVATE_KEY_NODE").expect("PRIVATE_KEY_NODE must be set"); + let private_key_provider = if let Some(key) = private_key_provider { + Console::warning("Using private key from command line is not recommended. Consider using PRIVATE_KEY_PROVIDER environment variable instead."); + key.clone() + } else { + std::env::var("PRIVATE_KEY_PROVIDER").expect("PRIVATE_KEY_PROVIDER must be set") + }; + + let private_key_node = if let Some(key) = private_key_node { + Console::warning("Using private key from command line is not recommended. Consider using PRIVATE_KEY_NODE environment variable instead."); + key.clone() + } else { + std::env::var("PRIVATE_KEY_NODE").expect("PRIVATE_KEY_NODE must be set") + }; let mut recover_last_state = *auto_recover; let version = env!("CARGO_PKG_VERSION"); From 19f9ad4bc56ffb097a289c92ab60a240a3523ecc Mon Sep 17 00:00:00 2001 From: JannikSt Date: Fri, 21 Mar 2025 18:33:01 -0700 Subject: [PATCH 62/85] disable spinner (#161) --- worker/src/console/logger.rs | 14 -------------- worker/src/operations/provider.rs | 20 ++++++-------------- 2 files changed, 6 insertions(+), 28 deletions(-) diff --git a/worker/src/console/logger.rs b/worker/src/console/logger.rs index ec7176cc..7e4f5271 100644 --- a/worker/src/console/logger.rs +++ b/worker/src/console/logger.rs @@ -1,7 +1,5 @@ use console::{style, Term}; -use indicatif::{ProgressBar, ProgressStyle}; use std::cmp; -use std::time::Duration; use unicode_width::UnicodeWidthStr; pub struct Console; @@ -80,16 +78,4 @@ impl Console { pub fn progress(text: &str) { println!("{} {}", style("→").cyan().bold(), style(text).cyan()); } - - /// Creates and returns an enhanced spinner with a dark-themed style. 
- pub fn spinner(_text: &str) -> ProgressBar { - let pb = ProgressBar::new_spinner(); - pb.set_style( - ProgressStyle::default_spinner() - .template("{spinner:.white} {msg}") - .unwrap(), - ); - pb.enable_steady_tick(Duration::from_millis(100)); - pb - } } diff --git a/worker/src/operations/provider.rs b/worker/src/operations/provider.rs index 88ebe6ce..7b2e858f 100644 --- a/worker/src/operations/provider.rs +++ b/worker/src/operations/provider.rs @@ -169,14 +169,12 @@ impl ProviderOperations { Console::title("Registering Provider"); let mut attempts = 0; while attempts < max_attempts { - let spinner = Console::spinner("Registering provider..."); + Console::progress("Registering provider..."); match self.register_provider(stake).await { Ok(_) => { - spinner.finish_and_clear(); return Ok(()); } Err(e) => { - spinner.finish_and_clear(); if let ProviderError::NotWhitelisted = e { Console::error("Provider not whitelisted, retrying in 10 seconds..."); tokio::select! { @@ -226,7 +224,7 @@ impl ProviderOperations { "ETH Balance", &format!("{} ETH", eth_balance / U256::from(10u128.pow(18))), ); - let spinner = Console::spinner("Approving AI Token for Stake transaction"); + Console::progress("Approving AI Token for Stake transaction"); if !self.prompt_user_confirmation(&format!( "Do you want to approve staking {} tokens?", stake / U256::from(10u128.pow(18)) @@ -239,9 +237,8 @@ impl ProviderOperations { .approve(stake) .await .map_err(|_| ProviderError::Other)?; - spinner.finish_and_clear(); - let spinner = Console::spinner("Registering Provider"); + Console::progress("Registering Provider"); let register_tx = match self.contracts.prime_network.register_provider(stake).await { Ok(tx) => tx, Err(_) => { @@ -249,19 +246,16 @@ impl ProviderOperations { } }; Console::info("Registration tx", &format!("{:?}", register_tx)); - spinner.finish_and_clear(); } // Get provider details again - cleanup later - let spinner = Console::spinner("Getting provider details"); + 
Console::progress("Getting provider details"); let provider = self .contracts .compute_registry .get_provider(address) .await .map_err(|_| ProviderError::Other)?; - spinner.finish_and_clear(); - spinner.abandon(); let provider_exists = provider.provider_address != Address::default(); if !provider_exists { @@ -310,7 +304,7 @@ impl ProviderOperations { return Err(ProviderError::UserCancelled); } - let spinner = Console::spinner("Approving AI Token for additional stake"); + Console::progress("Approving AI Token for additional stake"); let approve_tx = self .contracts .ai_token @@ -318,9 +312,8 @@ impl ProviderOperations { .await .map_err(|_| ProviderError::Other)?; Console::info("Transaction approved", &format!("{:?}", approve_tx)); - spinner.finish_and_clear(); - let spinner = Console::spinner("Increasing stake"); + Console::progress("Increasing stake"); let stake_tx = match self.contracts.prime_network.stake(additional_stake).await { Ok(tx) => tx, Err(e) => { @@ -332,7 +325,6 @@ impl ProviderOperations { "Stake increase transaction completed: ", &format!("{:?}", stake_tx), ); - spinner.finish_and_clear(); Console::success("Provider stake increased successfully"); Ok(()) From a514b9f0bd963973815c50026dbc374321837377 Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Sat, 22 Mar 2025 15:37:36 +0100 Subject: [PATCH 63/85] switch to using docker manager directly to get around stability issues, add some more lock guards --- shared/src/models/gpu_challenge.rs | 156 +++++++ shared/src/models/mod.rs | 1 + validator/src/main.rs | 2 - validator/src/validators/hardware.rs | 455 +++++++++++++++++++- worker/src/api/routes/gpu_challenge.rs | 563 +++++++++++++++++++++++++ worker/src/api/routes/mod.rs | 1 + worker/src/api/server.rs | 2 + worker/src/docker/service.rs | 2 +- 8 files changed, 1156 insertions(+), 26 deletions(-) create mode 100644 shared/src/models/gpu_challenge.rs create mode 100644 worker/src/api/routes/gpu_challenge.rs diff --git 
a/shared/src/models/gpu_challenge.rs b/shared/src/models/gpu_challenge.rs new file mode 100644 index 00000000..a25055da --- /dev/null +++ b/shared/src/models/gpu_challenge.rs @@ -0,0 +1,156 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuChallengeInitResponse { + pub seed: String, + pub n: u64, + pub session_id: String, +} + +// Response from setting A,B matrices +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuSetABResponse { + pub status: String, +} + +// Response containing the commitment root +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuCommitmentResponse { + pub commitment_root: String, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuChallengeVectorResponse { + pub challenge_vector: String, // base64 encoded float array +} + +// disable snake case warning +#[allow(non_snake_case)] +// Response containing C*r result +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuComputeCRResponse { + pub Cr: String, // base64 encoded float array +} + +// Single row proof structure +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuRowProof { + pub row_idx: u64, + pub row_data: String, // base64 encoded float array + pub merkle_path: Vec, // array of hex strings +} + +// Response containing multiple row proofs +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuRowProofsResponse { + pub rows: Vec, +} + +// Individual row check result +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuRowCheckResult { + pub row_idx: u64, + pub pass: bool, +} + +// Final row check response +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuMultiRowCheckResponse { + pub all_passed: bool, + pub results: Vec, +} + +// Request to start a GPU challenge - keep everything as strings +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuChallengeInitRequest { + pub n: u64, +} + +// Request to submit commitment +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuCommitmentRequest { 
+ pub session_id: String, + pub commitment_root: String, +} + +// Request to compute C*r +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuComputeCRRequest { + pub r: String, // base64 encoded challenge vector +} + +// disable snake case warning +#[allow(non_snake_case)] +// Request to get row challenge +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuRowChallengeRequest { + pub session_id: String, + pub Cr: String, // base64 encoded result +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuRowChallengeResponse { + pub freivalds_ok: bool, // base64 encoded float array + pub spot_rows: Vec, // array of ints +} + +// Request to get row proofs +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuRowProofsRequest { + pub row_idxs: Vec, +} + +// Request to check multiple rows +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuMultiRowCheckRequest { + pub session_id: String, + pub rows: Vec, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuChallengeResponse { + pub session_id: String, + pub master_seed: String, + pub n: u64, +} + +// Response for status check +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuChallengeStatus { + pub session_id: String, + pub status: String, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuChallengeWorkerStart { + pub session_id: String, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuChallengeWorkerGetStatus { + pub session_id: String, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuChallengeWorkerComputeCommitment { + pub session_id: String, + pub master_seed: String, + pub n: u64, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuChallengeWorkerComputeCR { + pub session_id: String, + pub r: String, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuChallengeWorkerComputeRowProofs { + pub session_id: String, + pub row_idxs: Vec, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct 
GpuChallengeWorkerComputeRowProofsResponse { + pub rows: Vec, +} diff --git a/shared/src/models/mod.rs b/shared/src/models/mod.rs index bbb59159..5e1f6aec 100644 --- a/shared/src/models/mod.rs +++ b/shared/src/models/mod.rs @@ -1,5 +1,6 @@ pub mod api; pub mod challenge; +pub mod gpu_challenge; pub mod heartbeat; pub mod invite; pub mod metric; diff --git a/validator/src/main.rs b/validator/src/main.rs index 061f63e5..ebe5b730 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -207,8 +207,6 @@ fn main() { if let Err(e) = runtime.block_on(hardware_validator.validate_nodes(nodes)) { error!("Error validating nodes: {:#}", e); } - - std::thread::sleep(std::time::Duration::from_secs(10)); } } diff --git a/validator/src/validators/hardware.rs b/validator/src/validators/hardware.rs index dfe271f1..4a942e40 100644 --- a/validator/src/validators/hardware.rs +++ b/validator/src/validators/hardware.rs @@ -2,31 +2,52 @@ use alloy::primitives::Address; use anyhow::{Context, Error, Result}; use log::{error, info}; use rand::{rng, Rng}; +use reqwest::header::{HeaderMap, HeaderValue}; +use serde_json::{json, Value}; use shared::models::api::ApiResponse; use shared::models::challenge::{calc_matrix, ChallengeRequest, ChallengeResponse, FixedF64}; +use shared::models::gpu_challenge::*; use shared::models::node::DiscoveryNode; use shared::security::request_signer::sign_request; use shared::web3::contracts::core::builder::Contracts; use shared::web3::wallet::Wallet; +use std::env; use std::sync::Arc; +use tokio::sync::Mutex; + +#[derive(Debug, Clone)] +pub struct NodeChallengeState { + pub session_id: Option, + pub timestamp: u64, +} pub struct HardwareValidator<'a> { wallet: &'a Wallet, contracts: Arc, + verifier_service_url: String, + node_sessions: Arc>>, } impl<'a> HardwareValidator<'a> { pub fn new(wallet: &'a Wallet, contracts: Arc) -> Self { - Self { wallet, contracts } + let verifier_url = env::var("VERIFIER_SERVICE_URL") + .unwrap_or_else(|_| 
"http://localhost:14141".to_string()); + + Self { + wallet, + contracts, + verifier_service_url: verifier_url, + node_sessions: Arc::new(Mutex::new(std::collections::HashMap::new())), + } } pub async fn validate_nodes(&self, nodes: Vec) -> Result<()> { - let non_validated_nodes: Vec = nodes - .into_iter() - .filter(|node| !node.is_validated) - .collect(); + //let non_validated_nodes: Vec = nodes + // .into_iter() + // .filter(|node| !node.is_validated) + // .collect(); - info!("Non validated nodes: {:?}", non_validated_nodes); + let non_validated_nodes = nodes; for node in non_validated_nodes { let node_address = match node.id.trim_start_matches("0x").parse::
() { @@ -52,16 +73,33 @@ impl<'a> HardwareValidator<'a> { } }; - let challenge_route = "/challenge/submit"; - let challenge_result = self.challenge_node(&node, challenge_route).await; - if challenge_result.is_err() { + // First, validate with a simple matrix challenge + let basic_challenge_result = self.basic_challenge_node(&node).await; + if basic_challenge_result.is_err() { + error!( + "Node {} failed basic challenge: {:?}", + node.id, basic_challenge_result + ); + continue; + } + + // skip if node is already being checked + if self.node_sessions.lock().await.contains_key(&node.id) { + info!("Node {} is already being checked", node.id); + continue; + } + + // Then perform the GPU challenge + let gpu_challenge_result = self.gpu_challenge_node(&node).await; + if gpu_challenge_result.is_err() { error!( - "Failed to challenge node {}: {:?}", - node.id, challenge_result + "Node {} failed GPU challenge: {:?}", + node.id, gpu_challenge_result ); continue; } + // If both challenges pass, validate the node on-chain if let Err(e) = self .contracts .prime_network @@ -77,16 +115,12 @@ impl<'a> HardwareValidator<'a> { Ok(()) } - async fn challenge_node( - &self, - node: &DiscoveryNode, - challenge_route: &str, - ) -> Result { + async fn basic_challenge_node(&self, node: &DiscoveryNode) -> Result<(), Error> { let node_url = format!("http://{}:{}", node.node.ip_address, node.node.port); - + let challenge_route = "/challenge/submit"; let mut headers = reqwest::header::HeaderMap::new(); - // create random challenge matrix + // Create random challenge matrix let challenge_matrix = self.random_challenge(3, 3, 3, 3); let challenge_expected = calc_matrix(&challenge_matrix); @@ -117,17 +151,392 @@ impl<'a> HardwareValidator<'a> { .await?; let response_text = response.text().await?; - println!("Response text: {}", response_text); let parsed_response: ApiResponse = serde_json::from_str(&response_text)?; if !parsed_response.success { Err(anyhow::anyhow!("Error fetching challenge from 
node")) } else if challenge_expected.result == parsed_response.data.result { - info!("Challenge successful"); - Ok(0) + info!("Basic challenge successful"); + Ok(()) } else { - error!("Challenge failed"); - Err(anyhow::anyhow!("Node failed challenge")) + error!("Basic challenge failed"); + Err(anyhow::anyhow!("Node failed basic challenge")) + } + } + + async fn verifier_send( + &self, + endpoint: &str, + payload: Option, + ) -> Result { + info!("Sending request to verifier service: {}", endpoint); + let client = reqwest::Client::new(); + + let mut request = client.post(format!("{}{}", self.verifier_service_url, endpoint)); + + // Add signature and address headers + let address = self.wallet.wallet.default_signer().address().to_string(); + let signature = sign_request(endpoint, self.wallet, payload.as_ref()) + .await + .map_err(|e| anyhow::anyhow!("{}", e))?; + + let mut headers = HeaderMap::new(); + headers.insert("x-address", HeaderValue::from_str(&address)?); + headers.insert("x-signature", HeaderValue::from_str(&signature)?); + request = request.headers(headers); + + if let Some(payload) = payload { + request = request.json(&payload); + } + + let response = request.send().await?; + + if !response.status().is_success() { + let error_text = response.text().await?; + error!("Endpoint failure: {}, {}", endpoint, error_text); + return Err(anyhow::anyhow!("Verifier request failed: {}", error_text)); + } + + response + .json::() + .await + .context("Failed to deserialize verifier response") + } + + async fn worker_send< + T: serde::de::DeserializeOwned + serde::Serialize + std::fmt::Debug, + P: serde::Serialize, + >( + &self, + node: &DiscoveryNode, + endpoint: &str, + payload_struct: Option

, + ) -> Result { + info!("Sending request to worker node: {}", endpoint); + let client = reqwest::Client::new(); + let node_url = format!("http://{}:{}", node.node.ip_address, node.node.port); + + let mut request = client.post(format!("{}{}", node_url, endpoint)); + + let payload = payload_struct + .map(|p| serde_json::to_value(p)) + .transpose()?; + + // Add signature and address headers + let address = self.wallet.wallet.default_signer().address().to_string(); + let signature = sign_request(endpoint, self.wallet, payload.as_ref()) + .await + .map_err(|e| anyhow::anyhow!("{}", e))?; + + let mut headers = HeaderMap::new(); + headers.insert("x-address", HeaderValue::from_str(&address)?); + headers.insert("x-signature", HeaderValue::from_str(&signature)?); + request = request.headers(headers); + + if let Some(payload) = payload { + request = request.json(&payload); + } + + let response = request.send().await?; + + if !response.status().is_success() { + let error_text = response.text().await?; + error!("Endpoint failure: {}, {}", endpoint, error_text); + return Err(anyhow::anyhow!("Worker request failed: {}", error_text)); + } + + // Clone the response so we can read the body twice + let response_text = response.text().await?; + info!("Worker response body: {}", response_text); + + let parsed_response: ApiResponse = serde_json::from_str(&response_text)?; + + info!("Parsed worker response: {:?}", parsed_response.data); + + // process this json into T struct + Ok(parsed_response.data) + } + + async fn worker_get( + &self, + node: &DiscoveryNode, + endpoint: &str, + payload_struct: Option, + ) -> Result { + info!("Sending request to worker node: {}", endpoint); + let client = reqwest::Client::new(); + let node_url = format!("http://{}:{}", node.node.ip_address, node.node.port); + + let mut request = client.get(format!("{}{}", node_url, endpoint)); + + let payload = payload_struct.map(serde_json::to_value).transpose()?; + + if let Some(payload) = payload { + // For GET 
requests, add the payload as query parameters instead of JSON body + let query_params = + serde_json::from_value::>(payload) + .map_err(|e| { + anyhow::anyhow!("Failed to convert payload to query params: {}", e) + })?; + + for (key, value) in query_params { + request = request.query(&[(key, value)]); + } + } + + // get the built /endpoint?query=param URL + let request_url = request + .try_clone() + .unwrap() + .build() + .map_err(|e| anyhow::anyhow!("Failed to build request URL: {}", e))?; + let request_url_str = request_url.url().to_string(); + + // Add signature and address headers + let address = self.wallet.wallet.default_signer().address().to_string(); + let signature = sign_request(&request_url_str, self.wallet, None) + .await + .map_err(|e| anyhow::anyhow!("{}", e))?; + + let mut headers = HeaderMap::new(); + headers.insert("x-address", HeaderValue::from_str(&address)?); + headers.insert("x-signature", HeaderValue::from_str(&signature)?); + request = request.headers(headers); + + let response = request.send().await?; + + if !response.status().is_success() { + let error_text = response.text().await?; + error!("Endpoint failure: {}, {}", endpoint, error_text); + Err(anyhow::anyhow!("Worker request failed: {}", error_text)) + } else { + let response_text = response.text().await?; + info!("Worker response body: {}", response_text); + Ok(response_text) + } + } + + async fn gpu_challenge_node(&self, node: &DiscoveryNode) -> Result<(), Error> { + // check if node is already running a challenge session + { + // Create a separate scope for the lock + let mut sessions = self.node_sessions.lock().await; + + if let Some(session) = sessions.get(&node.id) { + let current_time = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs(); + let session_age = current_time - session.timestamp; + + if session_age < 600 { + return Err(anyhow::anyhow!( + "Node is already running a challenge session" + )); + } else { + info!("Removing expired 
session for node: {}", node.id); + sessions.remove(&node.id); + } + } else { + info!("No session found for node, creating: {}", node.id); + sessions.insert( + node.id.clone(), + NodeChallengeState { + session_id: None, + timestamp: std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs(), + }, + ); + } + } + + // STEP 1: Initialize a new challenge with the verifier service + let init_request = GpuChallengeInitRequest { + n: 8192, // Default matrix size + }; + + let init_data: GpuChallengeResponse = self + .verifier_send("/init", Some(json!(init_request))) + .await + .context("Failed to initialize GPU challenge")?; + + info!("Initialized verifier session: {}", init_data.session_id); + + // store session id in node_sessions + let challenge_state = NodeChallengeState { + session_id: Some(init_data.session_id.clone()), + timestamp: std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs(), + }; + + self.node_sessions + .lock() + .await + .insert(node.id.clone(), challenge_state.clone()); + + info!( + "Starting challenge state: {}, {}", + challenge_state.session_id.unwrap(), + challenge_state.timestamp + ); + + // STEP 2: Start GPU challenge on worker node + let start_route = "/gpu-challenge/init-container"; + + let start_payload = GpuChallengeWorkerStart { + session_id: init_data.session_id.clone(), + }; + + let response: GpuChallengeStatus = match self + .worker_send(node, start_route, Some(start_payload)) + .await + { + Ok(status) => status, + Err(e) => { + error!("Failed to start GPU challenge on worker node: {}", e); + return Err(anyhow::anyhow!("Failed to start GPU challenge: {}", e)); + } + }; + + info!("Worker response: {:?}", response); + + if response.status != "initializing" { + return Err(anyhow::anyhow!( + "Failed to start GPU challenge on worker node" + )); + } + + // STEP 3: Check status until worker is ready + let status_route = "/gpu-challenge/status"; + + let mut max_attempts = 
100; + + loop { + match self.worker_get(node, status_route, None).await { + Ok(response_text) => { + let status_response: GpuChallengeStatus = serde_json::from_str(&response_text)?; + info!("Worker status: {}", status_response.status); + if status_response.status == "ready" { + info!("Worker node is ready for GPU challenge"); + break; + } + } + Err(e) => { + error!("Failed to get worker status: {}", e); + return Err(anyhow::anyhow!("Failed to get worker status: {}", e)); + } + } + + max_attempts -= 1; + if max_attempts == 0 { + return Err(anyhow::anyhow!( + "Worker node GPU container did not become ready in time" + )); + } + + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + } + + // STEP 4: Send initial challenge parameters to worker, get commitment + let compute_commitment_route = "/gpu-challenge/compute-commitment"; + + let compute_commitment_payload = GpuChallengeWorkerComputeCommitment { + session_id: init_data.session_id.clone(), + master_seed: init_data.master_seed, + n: init_data.n, + }; + + let commitment_response: GpuCommitmentResponse = self + .worker_send( + node, + compute_commitment_route, + Some(compute_commitment_payload), + ) + .await?; + + // STEP 5: Send commitment to verifier + let commitment_request = GpuCommitmentRequest { + session_id: init_data.session_id.clone(), + commitment_root: commitment_response.commitment_root, + }; + + let commitment_result: GpuChallengeVectorResponse = self + .verifier_send("/commitment", Some(json!(commitment_request))) + .await + .context("Failed to submit commitment to verifier")?; + + // STEP 6: Compute C*r on worker + let compute_cr_route = "/gpu-challenge/compute-cr"; + + let compute_cr_payload = GpuChallengeWorkerComputeCR { + session_id: init_data.session_id.clone(), + r: commitment_result.challenge_vector, + }; + + let cr_response: GpuComputeCRResponse = self + .worker_send(node, compute_cr_route, Some(compute_cr_payload)) + .await?; + + // STEP 6: Send Cr to verifier + let cr_request = 
GpuRowChallengeRequest { + session_id: init_data.session_id.clone(), + Cr: cr_response.Cr, + }; + + let cr_result: GpuRowChallengeResponse = self + .verifier_send("/row_challenge", Some(json!(cr_request))) + .await + .context("Failed to submit Cr to verifier")?; + + if !cr_result.freivalds_ok { + return Err(anyhow::anyhow!("GPU challenge failed")); + } + + // STEP 7: Get row proofs from worker + let row_proofs_route = "/gpu-challenge/compute-row-proofs"; + + let row_proofs_payload = GpuRowProofsRequest { + row_idxs: cr_result.spot_rows, + }; + + let row_proofs_response: GpuChallengeWorkerComputeRowProofsResponse = self + .worker_send(node, row_proofs_route, Some(row_proofs_payload)) + .await?; + + // STEP 8: Send row proofs to verifier + let row_proofs_request = GpuMultiRowCheckRequest { + session_id: init_data.session_id.clone(), + rows: row_proofs_response.rows, + }; + + let row_proofs_result: GpuMultiRowCheckResponse = self + .verifier_send("/multi_row_check", Some(json!(row_proofs_request))) + .await + .context("Failed to submit row proofs to verifier")?; + + // STEP 9: Check verifier response + if row_proofs_result.all_passed { + info!("GPU challenge successful"); + Ok(()) + } else { + info!("GPU challenge: not all rows passed"); + // pass anyway if >= 50% of rows passed + let total_rows = row_proofs_result.results.len(); + let total_passed = row_proofs_result.results.iter().filter(|r| r.pass).count(); + if total_passed as f64 / total_rows as f64 >= 0.5 { + info!( + "GPU challenge passed with {} out of {} rows", + total_passed, total_rows + ); + Ok(()) + } else { + error!("GPU challenge failed"); + Err(anyhow::anyhow!("GPU challenge failed")) + } } } diff --git a/worker/src/api/routes/gpu_challenge.rs b/worker/src/api/routes/gpu_challenge.rs new file mode 100644 index 00000000..e8120c78 --- /dev/null +++ b/worker/src/api/routes/gpu_challenge.rs @@ -0,0 +1,563 @@ +use crate::{api::server::AppState, docker::docker_manager::ContainerDetails}; +use actix_web::{ + 
web::{self, get, post, Data}, + HttpRequest, HttpResponse, Scope, +}; +use bollard::errors::Error as DockerError; +use bollard::models::ContainerStateStatusEnum; +use bollard::models::PortBinding; +use log::info; +use serde::{Deserialize, Serialize}; +use serde_json::{self, json}; +use shared::models::api::ApiResponse; +use shared::models::gpu_challenge::*; +use shared::models::task::{Task, TaskRequest}; +use std::collections::HashMap; +use std::sync::Arc; +use tokio::sync::Mutex; + +const PROVER_CONTAINER_ID: &str = "prime-core-gpu-challenge"; +const TASK_NAME: &str = "gpu-challenge"; +const IMAGE_NAME: &str = "matrix-prover"; + +// Store raw responses from the prover as strings +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct GpuChallengeResult { + // Store raw JSON strings instead of parsed values + pub session_id: String, + pub commitment_root: String, + pub cr_result_json: String, + pub row_proofs_json: String, +} + +// Challenge status with results +#[derive(Debug, Clone)] +struct GpuChallengeStateData { + session_id: Option, + container_id: Option, + status: Option, + result: Option, + error: Option, +} + +// Simple challenge state storage +struct ChallengeStateStorage { + state: GpuChallengeStateData, +} + +impl ChallengeStateStorage { + fn new() -> Self { + Self { + state: GpuChallengeStateData { + session_id: None, + container_id: None, + status: None, + result: None, + error: None, + }, + } + } + + fn new_session(&mut self, state: GpuChallengeStateData) { + self.state = state; + } + + fn set_status(&mut self, status: &str) { + self.state.status = Some(status.to_string()); + } + + fn set_container_id(&mut self, container_id: &str) { + self.state.container_id = Some(container_id.to_string()); + } + + fn set_result(&mut self, result: GpuChallengeResult) { + self.state.result = Some(result); + } + + fn mut_result(&mut self) -> Option<&mut GpuChallengeResult> { + self.state.result.as_mut() + } + + fn set_error(&mut self, error: String) { + 
self.state.error = Some(error); + } + + fn get_session_id(&self) -> Option { + self.state.session_id.clone() + } + + fn get_status(&self) -> Option { + self.state.status.clone() + } + + fn get_container_id(&self) -> Option { + self.state.container_id.clone() + } + + fn wipe(&mut self) { + self.state = GpuChallengeStateData { + session_id: None, + container_id: None, + status: None, + result: None, + error: None, + }; + } +} + +// Thread-safe state storage +lazy_static::lazy_static! { + static ref CURRENT_CHALLENGE: Arc> = Arc::new(Mutex::new(ChallengeStateStorage::new())); +} + +// allow unused +#[allow(dead_code)] +pub async fn start_task_via_service(app_state: Data) { + // Set environment variables for container + let mut env_vars = HashMap::new(); + env_vars.insert("PORT".to_string(), "12121".to_string()); + + // Launch Docker container with GPU support + let task: Task = TaskRequest { + image: IMAGE_NAME.to_string(), + name: TASK_NAME.to_string(), + env_vars: Some(env_vars), + command: Some("/usr/bin/python3".to_string()), + args: Some(vec!["prover.py".to_string()]), + ports: Some(vec!["12121/tcp".to_string()]), + } + .into(); + let task_clone = task.clone(); + let docker_service = app_state.docker_service.clone(); + + // sleep for 2 minutes + tokio::time::sleep(tokio::time::Duration::from_secs(120)).await; + + while !docker_service.state.get_is_running().await { + tokio::time::sleep(tokio::time::Duration::from_secs(5)).await; + info!("Waiting for Docker service to start"); + } + + // sleep + tokio::time::sleep(tokio::time::Duration::from_secs(10)).await; + loop { + // don't run it while there's a pending task + if docker_service.state.get_current_task().await.is_none() { + break; + } + tokio::time::sleep(tokio::time::Duration::from_secs(5)).await; + info!("Waiting for previous task to finish"); + } + docker_service + .state + .set_current_task(Some(task_clone)) + .await; + // sleep for a minute + tokio::time::sleep(tokio::time::Duration::from_secs(15)).await; 
+ // Spawn a background task to wait for the container to start + tokio::spawn(async move { + let mut retries = 0; + while retries < 30 { + if let Some(current_task) = docker_service.state.get_current_task().await { + if current_task.state == shared::models::task::TaskState::RUNNING + && current_task.name == TASK_NAME + { + let mut state = CURRENT_CHALLENGE.lock().await; + state.set_status("ready"); + break; + } + info!("Current task: {:?}", current_task); + } + tokio::time::sleep(tokio::time::Duration::from_secs(10)).await; + retries += 1; + } + if retries >= 30 { + let mut state = CURRENT_CHALLENGE.lock().await; + state.wipe(); + info!("Failed to start GPU challenge container"); + // shut down docker + docker_service.state.set_current_task(None).await; + } + }); +} + +pub async fn start_task_via_manager(app_state: Data) -> anyhow::Result<()> { + let manager = app_state.docker_service.docker_manager.clone(); + + // before trying to start a new container, check that there isn't a stale one already running + match manager.get_container_details(PROVER_CONTAINER_ID).await { + Ok(container_details) => { + manager.remove_container(&container_details.id).await?; + info!("Stopped stale GPU challenge container."); + } + Err(_) => { + info!("No stale containers, we're safe to proceed."); + } + } + + let image = IMAGE_NAME.to_string(); + let container_task_id = PROVER_CONTAINER_ID.to_string(); + let has_gpu = true; + let mut env_vars: HashMap = HashMap::new(); + env_vars.insert("PORT".to_string(), "12121".to_string()); + let cmd = vec!["/usr/bin/python3".to_string(), "prover.py".to_string()]; + let volumes: Vec<(String, String, bool)> = Vec::new(); + + let ports = Some(vec!["12121/tcp".to_string()]); + + let mut port_bindings = ::std::collections::HashMap::new(); + if let Some(ports) = ports { + let mut next_bound_port = 20000; + for port in ports { + port_bindings.insert( + port.clone(), + Some(vec![PortBinding { + host_ip: Some(String::from("127.0.0.1")), + host_port: 
Some(next_bound_port.to_string()), + }]), + ); + next_bound_port += 1; + } + } + + match manager + .start_container( + &image, + &container_task_id, + Some(env_vars), + Some(cmd), + Some(port_bindings), + has_gpu, + Some(volumes), + Some(67108864), + ) + .await + { + Ok(container_id) => { + info!("Started GPU challenge container."); + let mut state: tokio::sync::MutexGuard<'_, ChallengeStateStorage> = + CURRENT_CHALLENGE.lock().await; + state.set_status("initializing"); + state.set_container_id(&container_id); + Ok(()) + } + Err(e) => { + info!("Failed to start GPU challenge container: {:?}", e); + let mut state = CURRENT_CHALLENGE.lock().await; + state.wipe(); + Err(anyhow::anyhow!("Failed to start GPU challenge container")) + } + } +} + +pub async fn stop_task_via_manager(app_state: Data) -> anyhow::Result<()> { + let manager = app_state.docker_service.docker_manager.clone(); + + let state = CURRENT_CHALLENGE.lock().await; + match state.get_container_id() { + Some(container_is) => { + info!("Stopping GPU challenge container."); + match manager.remove_container(&container_is).await { + Ok(_) => Ok(()), + Err(e) => Err(anyhow::anyhow!( + "Failed to stop GPU challenge container: {:?}", + e + )), + } + } + None => Err(anyhow::anyhow!("No GPU challenge container to stop")), + } +} + +pub async fn get_container_status( + app_state: Data, +) -> Result { + let manager = app_state.docker_service.docker_manager.clone(); + let state = CURRENT_CHALLENGE.lock().await; + return manager + .get_container_details(&state.get_container_id().unwrap()) + .await; +} + +// Start a GPU challenge +pub async fn init_container( + challenge_req: web::Json, + app_state: Data, +) -> HttpResponse { + info!( + "Received GPU challenge request: session_id={}", + challenge_req.session_id + ); + + let session_id = challenge_req.session_id.clone(); + + // if state is anything other than empty, skip + { + let mut state = CURRENT_CHALLENGE.lock().await; + info!("Challenge state: {:?}", state.state); + 
if state.get_status().is_some() { + return HttpResponse::Ok().json(ApiResponse::new( + true, + GpuChallengeStatus { + session_id: state.get_session_id().unwrap(), + status: state.get_status().unwrap(), + }, + )); + } else { + // immediately create new challenge session + state.new_session(GpuChallengeStateData { + session_id: Some(session_id.clone()), + container_id: None, + status: Some("initializing".to_string()), + result: None, + error: None, + }); + } + } + + match start_task_via_manager(app_state.clone()).await { + Ok(_) => { + // Spawn a background task to wait for the container to start + tokio::spawn(async move { + let mut retries = 0; + while retries < 30 { + { + match get_container_status(app_state.clone()).await { + Ok(container_details) => { + if container_details.status.unwrap() + == ContainerStateStatusEnum::RUNNING + { + let mut state = CURRENT_CHALLENGE.lock().await; + state.set_status("ready"); + break; + } + } + Err(e) => { + info!("Failed to get container status: {:?}", e); + } + } + } + tokio::time::sleep(tokio::time::Duration::from_secs(10)).await; + retries += 1; + } + if retries >= 30 { + let mut state = CURRENT_CHALLENGE.lock().await; + info!("Failed to start GPU challenge container"); + // shut down docker + stop_task_via_manager(app_state.clone()).await.unwrap(); + state.wipe(); + } + }); + } + Err(e) => { + return HttpResponse::InternalServerError() + .json(ApiResponse::new(false, e.to_string())); + } + } + + info!("Started GPU challenge container."); + + // Return success with the session ID + HttpResponse::Ok().json(ApiResponse::new( + true, + GpuChallengeStatus { + session_id, + status: "initializing".to_string(), + }, + )) +} + +// Get challenge status +pub async fn get_status(req: HttpRequest) -> HttpResponse { + info!("Received status request: {:?}", req); + + let state = CURRENT_CHALLENGE.lock().await; + info!("Current session ID: {:?}", state.get_session_id()); + + if let Some(status) = &state.get_status() { + let response = 
GpuChallengeStatus { + session_id: state.get_session_id().unwrap(), + status: status.clone(), + }; + HttpResponse::Ok().json(ApiResponse::new(true, response)) + } else { + HttpResponse::NotFound().json(ApiResponse::new(false, "No challenge currently running.")) + } +} + +async fn prover_send( + endpoint: &str, + payload: Option, +) -> anyhow::Result { + let client = reqwest::Client::new(); + let mut builder = client.post(format!("http://localhost:20000{}", endpoint)); + + if let Some(json) = payload { + builder = builder.json(&json); + } + + let response = builder.send().await?; + + if !response.status().is_success() { + let error_text = response.text().await?; + return Err(anyhow::anyhow!("Prover request failed: {}", error_text)); + } + + response.json::().await.map_err(anyhow::Error::from) +} + +pub async fn compute_commitment( + challenge_req: web::Json, + // app_state: Data, +) -> HttpResponse { + let session_id = &challenge_req.session_id; + let n = challenge_req.n; + let master_seed = challenge_req.master_seed.clone(); + + // Call prover's setAB endpoint + match prover_send::( + "/setAB", + Some(json!({ + "n": n, + "seed": master_seed + })), + ) + .await + { + Ok(_) => { + // Get commitment from prover + match prover_send::("/getCommitment", None).await { + Ok(commitment_data) => { + let commitment_root = commitment_data.commitment_root; + + // Store result + let mut state = CURRENT_CHALLENGE.lock().await; + state.set_result(GpuChallengeResult { + session_id: session_id.clone(), + commitment_root: commitment_root.clone(), + cr_result_json: "".to_string(), + row_proofs_json: "".to_string(), + }); + + HttpResponse::Ok().json(ApiResponse::new( + true, + json!({ + "commitment_root": commitment_root + }), + )) + } + Err(e) => { + let mut state = CURRENT_CHALLENGE.lock().await; + state.set_error(e.to_string()); + HttpResponse::InternalServerError().json(ApiResponse::new(false, e.to_string())) + } + } + } + Err(e) => { + let mut state = 
CURRENT_CHALLENGE.lock().await; + state.set_error(e.to_string()); + HttpResponse::InternalServerError().json(ApiResponse::new(false, e.to_string())) + } + } +} + +pub async fn compute_cr( + challenge_req: web::Json, + // app_state: Data, +) -> HttpResponse { + let session_id = &challenge_req.session_id; + let challenge_vector = challenge_req.r.clone(); + + // check that session ID matches + let state = CURRENT_CHALLENGE.lock().await; + if session_id != state.get_session_id().as_deref().unwrap_or("") { + return HttpResponse::NotFound().json(ApiResponse::new( + false, + format!("No challenge found with session ID: {}", session_id), + )); + } + + match prover_send::( + "/computeCR", + Some(json!({ + "r": challenge_vector + })), + ) + .await + { + Ok(cr_data) => { + let cr = cr_data.Cr; + + // Store result + let mut state = CURRENT_CHALLENGE.lock().await; + if let Some(result) = state.mut_result() { + result.cr_result_json = cr.clone(); + } + + HttpResponse::Ok().json(ApiResponse::new( + true, + json!({ + "Cr": cr + }), + )) + } + Err(e) => { + let mut state = CURRENT_CHALLENGE.lock().await; + state.set_error(e.to_string()); + HttpResponse::InternalServerError().json(ApiResponse::new(false, e.to_string())) + } + } +} + +pub async fn compute_row_proofs( + challenge_req: web::Json, + // app_state: Data, +) -> HttpResponse { + let session_id = &challenge_req.session_id; + let row_indices = challenge_req.row_idxs.clone(); + + // check that session ID matches + let state = CURRENT_CHALLENGE.lock().await; + if session_id != state.get_session_id().as_deref().unwrap_or("") { + return HttpResponse::NotFound().json(ApiResponse::new( + false, + format!("No challenge found with session ID: {}", session_id), + )); + } + + match prover_send::( + "/getRowProofs", + Some(json!({ + "row_idxs": row_indices + })), + ) + .await + { + Ok(proof_data) => { + let proofs_json = serde_json::to_string(&proof_data).unwrap_or_default(); + + // Store result + let mut state = 
CURRENT_CHALLENGE.lock().await; + if let Some(result) = state.mut_result() { + result.row_proofs_json = proofs_json.clone(); + } + + HttpResponse::Ok().json(ApiResponse::new(true, proof_data)) + } + Err(e) => { + let mut state = CURRENT_CHALLENGE.lock().await; + state.set_error(e.to_string()); + HttpResponse::InternalServerError().json(ApiResponse::new(false, e.to_string())) + } + } +} + +// Register the routes +pub fn gpu_challenge_routes() -> Scope { + web::scope("/gpu-challenge") + .route("/init-container", post().to(init_container)) + .route("/status", get().to(get_status)) + .route("/compute-commitment", post().to(compute_commitment)) + .route("/compute-cr", post().to(compute_cr)) + .route("/compute-row-proofs", post().to(compute_row_proofs)) +} diff --git a/worker/src/api/routes/mod.rs b/worker/src/api/routes/mod.rs index d1e00d34..f1195704 100644 --- a/worker/src/api/routes/mod.rs +++ b/worker/src/api/routes/mod.rs @@ -1,4 +1,5 @@ pub mod challenge; +pub mod gpu_challenge; pub mod invite; pub mod task; pub mod types; diff --git a/worker/src/api/server.rs b/worker/src/api/server.rs index e7e72ba8..fefe46fd 100644 --- a/worker/src/api/server.rs +++ b/worker/src/api/server.rs @@ -1,4 +1,5 @@ use crate::api::routes::challenge::challenge_routes; +use crate::api::routes::gpu_challenge::gpu_challenge_routes; use crate::api::routes::invite::invite_routes; use crate::api::routes::task::task_routes; use crate::docker::DockerService; @@ -54,6 +55,7 @@ pub async fn start_server( .service(invite_routes()) .service(task_routes()) .service(challenge_routes()) + .service(gpu_challenge_routes()) }) .bind((host, port))? 
.run() diff --git a/worker/src/docker/service.rs b/worker/src/docker/service.rs index cf5c767c..fe249579 100644 --- a/worker/src/docker/service.rs +++ b/worker/src/docker/service.rs @@ -15,7 +15,7 @@ use tokio::time::{interval, Duration}; use tokio_util::sync::CancellationToken; pub struct DockerService { - docker_manager: Arc, + pub docker_manager: Arc, cancellation_token: CancellationToken, pub state: Arc, has_gpu: bool, From c5330dcd88149df230b7bee88c08a4915ccae0ef Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Sat, 22 Mar 2025 15:45:06 +0100 Subject: [PATCH 64/85] ensure there's no deadlocks --- worker/src/api/routes/gpu_challenge.rs | 86 +++++++++++++++----------- 1 file changed, 51 insertions(+), 35 deletions(-) diff --git a/worker/src/api/routes/gpu_challenge.rs b/worker/src/api/routes/gpu_challenge.rs index e8120c78..d6ca7cec 100644 --- a/worker/src/api/routes/gpu_challenge.rs +++ b/worker/src/api/routes/gpu_challenge.rs @@ -279,6 +279,28 @@ pub async fn get_container_status( .await; } + +async fn prover_send( + endpoint: &str, + payload: Option, +) -> anyhow::Result { + let client = reqwest::Client::new(); + let mut builder = client.post(format!("http://localhost:20000{}", endpoint)); + + if let Some(json) = payload { + builder = builder.json(&json); + } + + let response = builder.send().await?; + + if !response.status().is_success() { + let error_text = response.text().await?; + return Err(anyhow::anyhow!("Prover request failed: {}", error_text)); + } + + response.json::().await.map_err(anyhow::Error::from) +} + // Start a GPU challenge pub async fn init_container( challenge_req: web::Json, @@ -385,27 +407,6 @@ pub async fn get_status(req: HttpRequest) -> HttpResponse { } } -async fn prover_send( - endpoint: &str, - payload: Option, -) -> anyhow::Result { - let client = reqwest::Client::new(); - let mut builder = client.post(format!("http://localhost:20000{}", endpoint)); - - if let Some(json) = payload { - builder = builder.json(&json); - } - - 
let response = builder.send().await?; - - if !response.status().is_success() { - let error_text = response.text().await?; - return Err(anyhow::anyhow!("Prover request failed: {}", error_text)); - } - - response.json::().await.map_err(anyhow::Error::from) -} - pub async fn compute_commitment( challenge_req: web::Json, // app_state: Data, @@ -414,6 +415,17 @@ pub async fn compute_commitment( let n = challenge_req.n; let master_seed = challenge_req.master_seed.clone(); + { + // check that session ID matches + let state = CURRENT_CHALLENGE.lock().await; + if session_id != state.get_session_id().as_deref().unwrap_or("") { + return HttpResponse::NotFound().json(ApiResponse::new( + false, + format!("No challenge found with session ID: {}", session_id), + )); + } + } + // Call prover's setAB endpoint match prover_send::( "/setAB", @@ -468,13 +480,15 @@ pub async fn compute_cr( let session_id = &challenge_req.session_id; let challenge_vector = challenge_req.r.clone(); - // check that session ID matches - let state = CURRENT_CHALLENGE.lock().await; - if session_id != state.get_session_id().as_deref().unwrap_or("") { - return HttpResponse::NotFound().json(ApiResponse::new( - false, - format!("No challenge found with session ID: {}", session_id), - )); + { + // check that session ID matches + let state = CURRENT_CHALLENGE.lock().await; + if session_id != state.get_session_id().as_deref().unwrap_or("") { + return HttpResponse::NotFound().json(ApiResponse::new( + false, + format!("No challenge found with session ID: {}", session_id), + )); + } } match prover_send::( @@ -516,13 +530,15 @@ pub async fn compute_row_proofs( let session_id = &challenge_req.session_id; let row_indices = challenge_req.row_idxs.clone(); - // check that session ID matches - let state = CURRENT_CHALLENGE.lock().await; - if session_id != state.get_session_id().as_deref().unwrap_or("") { - return HttpResponse::NotFound().json(ApiResponse::new( - false, - format!("No challenge found with session ID: {}", 
session_id), - )); + { + // check that session ID matches + let state = CURRENT_CHALLENGE.lock().await; + if session_id != state.get_session_id().as_deref().unwrap_or("") { + return HttpResponse::NotFound().json(ApiResponse::new( + false, + format!("No challenge found with session ID: {}", session_id), + )); + } } match prover_send::( From 2e37d692e9e71fd2c86c081ed19a5cfedbfaaf7e Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Sat, 22 Mar 2025 16:55:25 +0100 Subject: [PATCH 65/85] add retries, blacklisting, better state tracking --- validator/src/validators/hardware.rs | 177 ++++++++++++++++++------- worker/src/api/routes/gpu_challenge.rs | 1 - 2 files changed, 132 insertions(+), 46 deletions(-) diff --git a/validator/src/validators/hardware.rs b/validator/src/validators/hardware.rs index 4a942e40..5b45b6f2 100644 --- a/validator/src/validators/hardware.rs +++ b/validator/src/validators/hardware.rs @@ -15,10 +15,35 @@ use std::env; use std::sync::Arc; use tokio::sync::Mutex; +const VALIDATION_TIMEOUT: u64 = 600; +const ERROR_TIME_BUFFER: u64 = 30; +const MATRIX_CHALLENGE_SIZE: u64 = 8192; +const MAX_CHALLENGE_ATTEMPTS: u64 = 3; +const H100_TIME_CUTOFF: u64 = 150; + +#[derive(Debug, Clone, PartialEq)] +pub enum NodeChallengeStatus { + Init, + Running, + Completed, + Failed, + Blacklisted, +} + #[derive(Debug, Clone)] pub struct NodeChallengeState { pub session_id: Option, pub timestamp: u64, + pub commitment_time: u64, + pub status: NodeChallengeStatus, + pub attempts: u64, +} + +fn get_time_as_secs() -> u64 { + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs() } pub struct HardwareValidator<'a> { @@ -42,12 +67,10 @@ impl<'a> HardwareValidator<'a> { } pub async fn validate_nodes(&self, nodes: Vec) -> Result<()> { - //let non_validated_nodes: Vec = nodes - // .into_iter() - // .filter(|node| !node.is_validated) - // .collect(); - - let non_validated_nodes = nodes; + let non_validated_nodes: Vec = nodes + 
.into_iter() + .filter(|node| !node.is_validated) + .collect(); for node in non_validated_nodes { let node_address = match node.id.trim_start_matches("0x").parse::

() { @@ -83,10 +106,53 @@ impl<'a> HardwareValidator<'a> { continue; } - // skip if node is already being checked - if self.node_sessions.lock().await.contains_key(&node.id) { - info!("Node {} is already being checked", node.id); - continue; + // skip if node is already being checked and hasn't timed out + { + let mut sessions = self.node_sessions.lock().await; + if let Some(session) = sessions.get_mut(&node.id) { + let current_time = get_time_as_secs(); + let session_age = current_time - session.timestamp; + + match session.status { + NodeChallengeStatus::Init | NodeChallengeStatus::Running => { + info!("Node {} challenge is still pending", node.id); + if session_age < VALIDATION_TIMEOUT { + info!("Node {} challenge is still pending", node.id); + continue; + } else { + session.attempts += 1; + session.status = NodeChallengeStatus::Init; + session.timestamp = get_time_as_secs(); + session.session_id = None; + } + } + NodeChallengeStatus::Failed => { + info!("Node {} challenge failed", node.id); + if session.attempts >= MAX_CHALLENGE_ATTEMPTS { + session.status = NodeChallengeStatus::Blacklisted; + info!("Node {} is blacklisted", node.id); + } else { + let failure_age = current_time - session.timestamp; + if failure_age > VALIDATION_TIMEOUT + ERROR_TIME_BUFFER { + info!("Node {} challenge is still pending", node.id); + session.status = NodeChallengeStatus::Init; + session.timestamp = get_time_as_secs(); + session.session_id = None; + session.attempts += 1; + } + } + continue; + } + NodeChallengeStatus::Completed => { + info!("Node {} challenge has completed", node.id); + return Ok(()); + } + NodeChallengeStatus::Blacklisted => { + info!("Node {} is blacklisted", node.id); + continue; + } + } + } } // Then perform the GPU challenge @@ -96,7 +162,27 @@ impl<'a> HardwareValidator<'a> { "Node {} failed GPU challenge: {:?}", node.id, gpu_challenge_result ); + let mut sessions = self.node_sessions.lock().await; + if let Some(session) = sessions.get_mut(&node.id) { + 
session.status = NodeChallengeStatus::Failed; + session.timestamp = get_time_as_secs(); + } continue; + } else { + let mut sessions = self.node_sessions.lock().await; + let session = sessions.get_mut(&node.id).unwrap(); + if session.commitment_time != 0 && session.commitment_time < H100_TIME_CUTOFF { + info!( + "Node {} is validated as having H100 level performance", + node.id + ); + session.status = NodeChallengeStatus::Completed; + } else { + info!("Node {} did not meet performance requirements", node.id); + session.status = NodeChallengeStatus::Failed; + session.timestamp = get_time_as_secs(); + continue; + } } // If both challenges pass, validate the node on-chain @@ -323,19 +409,12 @@ impl<'a> HardwareValidator<'a> { let mut sessions = self.node_sessions.lock().await; if let Some(session) = sessions.get(&node.id) { - let current_time = std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_secs(); - let session_age = current_time - session.timestamp; - - if session_age < 600 { + // if we get here, it should be in the init stage + if session.status != NodeChallengeStatus::Init { + info!("Node {} already has a challenge session running", node.id); return Err(anyhow::anyhow!( - "Node is already running a challenge session" + "Node already has a challenge session running" )); - } else { - info!("Removing expired session for node: {}", node.id); - sessions.remove(&node.id); } } else { info!("No session found for node, creating: {}", node.id); @@ -343,10 +422,10 @@ impl<'a> HardwareValidator<'a> { node.id.clone(), NodeChallengeState { session_id: None, - timestamp: std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_secs(), + timestamp: get_time_as_secs(), + status: NodeChallengeStatus::Init, + attempts: 0, + commitment_time: 0, }, ); } @@ -354,7 +433,7 @@ impl<'a> HardwareValidator<'a> { // STEP 1: Initialize a new challenge with the verifier service let init_request = GpuChallengeInitRequest { - 
n: 8192, // Default matrix size + n: MATRIX_CHALLENGE_SIZE, // Default matrix size }; let init_data: GpuChallengeResponse = self @@ -364,25 +443,19 @@ impl<'a> HardwareValidator<'a> { info!("Initialized verifier session: {}", init_data.session_id); - // store session id in node_sessions - let challenge_state = NodeChallengeState { - session_id: Some(init_data.session_id.clone()), - timestamp: std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_secs(), - }; - - self.node_sessions - .lock() - .await - .insert(node.id.clone(), challenge_state.clone()); - - info!( - "Starting challenge state: {}, {}", - challenge_state.session_id.unwrap(), - challenge_state.timestamp - ); + { + let mut sessions = self.node_sessions.lock().await; + let session = sessions.get_mut(&node.id).unwrap(); + session.session_id = Some(init_data.session_id.clone()); + session.status = NodeChallengeStatus::Running; + session.timestamp = get_time_as_secs(); + + info!( + "Starting challenge state: {}, {}", + session.session_id.as_ref().unwrap(), + session.timestamp + ); + } // STEP 2: Start GPU challenge on worker node let start_route = "/gpu-challenge/init-container"; @@ -441,6 +514,13 @@ impl<'a> HardwareValidator<'a> { tokio::time::sleep(std::time::Duration::from_secs(5)).await; } + // reset timestamp to calculate time taken to compute commitment + { + let mut sessions = self.node_sessions.lock().await; + let session = sessions.get_mut(&node.id).unwrap(); + session.timestamp = get_time_as_secs(); + } + // STEP 4: Send initial challenge parameters to worker, get commitment let compute_commitment_route = "/gpu-challenge/compute-commitment"; @@ -458,6 +538,13 @@ impl<'a> HardwareValidator<'a> { ) .await?; + { + let mut sessions = self.node_sessions.lock().await; + let session = sessions.get_mut(&node.id).unwrap(); + // the following value is the time it took the node to compute the A*B matmul + session.commitment_time = get_time_as_secs() - session.timestamp; + } 
+ // STEP 5: Send commitment to verifier let commitment_request = GpuCommitmentRequest { session_id: init_data.session_id.clone(), diff --git a/worker/src/api/routes/gpu_challenge.rs b/worker/src/api/routes/gpu_challenge.rs index d6ca7cec..0fac9c3d 100644 --- a/worker/src/api/routes/gpu_challenge.rs +++ b/worker/src/api/routes/gpu_challenge.rs @@ -279,7 +279,6 @@ pub async fn get_container_status( .await; } - async fn prover_send( endpoint: &str, payload: Option, From 573fb59625f9dabaff06b08683b4717e20760ef6 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Sat, 22 Mar 2025 18:44:51 -0700 Subject: [PATCH 66/85] add ability to sign message from CLI (#163) * add ability to sign message from CLI --- Makefile | 4 +++ worker/src/cli/command.rs | 56 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/Makefile b/Makefile index e70b81ef..729d9fee 100644 --- a/Makefile +++ b/Makefile @@ -167,3 +167,7 @@ remote-worker: eject-node: set -a; source ${ENV_FILE}; set +a; \ cargo run -p dev-utils --example eject_node -- --pool-id $${WORKER_COMPUTE_POOL_ID} --node $${NODE_ADDRESS} --provider-address $${PROVIDER_ADDRESS} --key $${POOL_OWNER_PRIVATE_KEY} --rpc-url $${RPC_URL} + +sign-message: + set -a; source ${ENV_FILE}; set +a; \ + cargo watch -w worker/src -x "run --bin worker -- sign-message --message example --private-key-provider $$PRIVATE_KEY_PROVIDER --private-key-node $$PRIVATE_KEY_NODE" diff --git a/worker/src/cli/command.rs b/worker/src/cli/command.rs index 1d5a12fe..84739d95 100644 --- a/worker/src/cli/command.rs +++ b/worker/src/cli/command.rs @@ -13,6 +13,7 @@ use crate::state::system_state::SystemState; use crate::TaskHandles; use alloy::primitives::U256; use alloy::signers::local::PrivateKeySigner; +use alloy::signers::Signer; use clap::{Parser, Subcommand}; use log::debug; use shared::models::node::Node; @@ -89,6 +90,21 @@ pub enum Commands { /// Generate new wallets for provider and node GenerateWallets {}, + + /// Sign Message + 
SignMessage { + /// Message to sign + #[arg(long)] + message: String, + + /// Private key for the provider + #[arg(long)] + private_key_provider: Option, + + /// Private key for the node + #[arg(long)] + private_key_node: Option, + }, } pub async fn execute_command( @@ -536,5 +552,45 @@ pub async fn execute_command( ); Ok(()) } + Commands::SignMessage { + message, + private_key_provider, + private_key_node, + } => { + let private_key_provider = if let Some(key) = private_key_provider { + key.clone() + } else { + std::env::var("PRIVATE_KEY_PROVIDER").expect("PRIVATE_KEY_PROVIDER must be set") + }; + + let private_key_node = if let Some(key) = private_key_node { + key.clone() + } else { + std::env::var("PRIVATE_KEY_NODE").expect("PRIVATE_KEY_NODE must be set") + }; + + let provider_wallet = Wallet::new( + &private_key_provider, + Url::parse("http://localhost:8545").unwrap(), + ) + .unwrap(); + let node_wallet = Wallet::new( + &private_key_node, + Url::parse("http://localhost:8545").unwrap(), + ) + .unwrap(); + + let message_hash = provider_wallet.signer.sign_message(message.as_bytes()); + let node_signature = node_wallet.signer.sign_message(message.as_bytes()); + + let provider_signature = message_hash.await?; + let node_signature = node_signature.await?; + let combined_signature = + [provider_signature.as_bytes(), node_signature.as_bytes()].concat(); + + println!("\nSignature: {}", hex::encode(combined_signature)); + + Ok(()) + } } } From 7e6facc8917ed4c1087915b95f35146918a56e9f Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Sun, 23 Mar 2025 15:37:45 +0100 Subject: [PATCH 67/85] fix get construction --- validator/src/validators/hardware.rs | 41 +++++----------------------- 1 file changed, 7 insertions(+), 34 deletions(-) diff --git a/validator/src/validators/hardware.rs b/validator/src/validators/hardware.rs index 5b45b6f2..f325897d 100644 --- a/validator/src/validators/hardware.rs +++ b/validator/src/validators/hardware.rs @@ -343,44 +343,16 @@ impl<'a> 
HardwareValidator<'a> { Ok(parsed_response.data) } - async fn worker_get( - &self, - node: &DiscoveryNode, - endpoint: &str, - payload_struct: Option, - ) -> Result { + async fn worker_get(&self, node: &DiscoveryNode, endpoint: &str) -> Result { info!("Sending request to worker node: {}", endpoint); let client = reqwest::Client::new(); let node_url = format!("http://{}:{}", node.node.ip_address, node.node.port); let mut request = client.get(format!("{}{}", node_url, endpoint)); - let payload = payload_struct.map(serde_json::to_value).transpose()?; - - if let Some(payload) = payload { - // For GET requests, add the payload as query parameters instead of JSON body - let query_params = - serde_json::from_value::>(payload) - .map_err(|e| { - anyhow::anyhow!("Failed to convert payload to query params: {}", e) - })?; - - for (key, value) in query_params { - request = request.query(&[(key, value)]); - } - } - - // get the built /endpoint?query=param URL - let request_url = request - .try_clone() - .unwrap() - .build() - .map_err(|e| anyhow::anyhow!("Failed to build request URL: {}", e))?; - let request_url_str = request_url.url().to_string(); - // Add signature and address headers let address = self.wallet.wallet.default_signer().address().to_string(); - let signature = sign_request(&request_url_str, self.wallet, None) + let signature = sign_request(endpoint, self.wallet, None) .await .map_err(|e| anyhow::anyhow!("{}", e))?; @@ -489,11 +461,12 @@ impl<'a> HardwareValidator<'a> { let mut max_attempts = 100; loop { - match self.worker_get(node, status_route, None).await { + match self.worker_get(node, status_route).await { Ok(response_text) => { - let status_response: GpuChallengeStatus = serde_json::from_str(&response_text)?; - info!("Worker status: {}", status_response.status); - if status_response.status == "ready" { + let status_response: ApiResponse = + serde_json::from_str(&response_text)?; + info!("Worker response: {:?}", status_response); + if 
status_response.data.status == "ready" { info!("Worker node is ready for GPU challenge"); break; } From 9cb0eb772cdebfe04aa9f5a086a7280243fbd3c5 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Sun, 23 Mar 2025 14:19:03 -0700 Subject: [PATCH 68/85] fix permission issue with installer (#162) --- worker/scripts/install.sh | 43 +++++++++++++++++++++++++-------------- 1 file changed, 28 insertions(+), 15 deletions(-) diff --git a/worker/scripts/install.sh b/worker/scripts/install.sh index 4e173dfd..f799152f 100755 --- a/worker/scripts/install.sh +++ b/worker/scripts/install.sh @@ -5,17 +5,26 @@ set -e RED='\033[0;31m' GREEN='\033[0;32m' BLUE='\033[0;34m' +YELLOW='\033[0;33m' NC='\033[0m' # No Color # Configuration BINARY_NAME="prime-worker" RELEASE_URL="https://github.com/PrimeIntellect-ai/protocol/releases" BINARY_URL="$RELEASE_URL/latest/download/worker-linux-x86_64" -INSTALL_DIR="/usr/local/bin" + +# Determine install directory - try system dir first, fall back to user dir if needed +if [ "$EUID" -eq 0 ]; then + # Running as root, use system directory + INSTALL_DIR="/usr/local/bin" +else + # Not running as root, use user directory (create if doesn't exist) + INSTALL_DIR="$HOME/.local/bin" + mkdir -p "$INSTALL_DIR" +fi # Check if dev flag is set if [[ "$1" == "--dev" ]]; then - # Get latest dev tag LATEST_DEV_TAG=$(curl -s "$RELEASE_URL" | grep -o 'dev-[0-9]\{8\}-[a-z0-9]\+' | head -1) if [[ -z "$LATEST_DEV_TAG" ]]; then echo -e "${RED}✗ Could not find latest dev release${NC}" @@ -31,6 +40,7 @@ echo "║ ║" echo "║ Prime Intellect Protocol Worker ║" echo "║ ║" echo "╚═══════════════════════════════════════════╝" +echo -e "${NC}" # Check operating system echo -e "${BLUE}→ Checking system compatibility...${NC}" @@ -38,7 +48,6 @@ if [[ "$(uname -s)" != "Linux" ]]; then echo -e "${RED}✗ This installer is for Linux only.${NC}" exit 1 fi - if [[ "$(uname -m)" != "x86_64" ]]; then echo -e "${RED}✗ This installer is for x86_64 architecture only.${NC}" exit 1 @@ -57,22 +66,26 @@ 
echo -e "${GREEN}✓ Download complete${NC}" # Install binary echo -e "${BLUE}→ Installing to $INSTALL_DIR...${NC}" -if [[ -w "$INSTALL_DIR" ]]; then - mv "$TMP_DIR/$BINARY_NAME" "$INSTALL_DIR/$BINARY_NAME" - echo -e "${GREEN}✓ Installation complete${NC}" -else - echo -e "${RED}✗ Cannot write to $INSTALL_DIR${NC}" - echo -e "${BLUE}→ Please run this script with sudo to install to $INSTALL_DIR${NC}" - exit 1 -fi +mv "$TMP_DIR/$BINARY_NAME" "$INSTALL_DIR/$BINARY_NAME" +echo -e "${GREEN}✓ Installation complete${NC}" # Verify installation echo -e "${BLUE}→ Verifying installation...${NC}" -if command -v "$BINARY_NAME" &> /dev/null; then +if command -v "$BINARY_NAME" &> /dev/null || [ -x "$INSTALL_DIR/$BINARY_NAME" ]; then echo -e "${GREEN}✓ Prime Intellect Protocol Worker successfully installed!${NC}" - echo -e "${BLUE}→ Run '$BINARY_NAME --help' to get started${NC}" + echo -e "${BLUE}→ Binary location: $INSTALL_DIR/$BINARY_NAME${NC}" + + # Check if install dir is in PATH + if [[ ":$PATH:" != *":$INSTALL_DIR:"* ]]; then + echo -e "${YELLOW}⚠ $INSTALL_DIR is not in your PATH${NC}" + echo -e "${BLUE}→ Run this command to add it to your PATH:${NC}" + echo -e "${GREEN} echo 'export PATH=\"$INSTALL_DIR:\$PATH\"' >> ~/.bashrc && source ~/.bashrc${NC}" + echo -e "${BLUE}→ Or run the binary directly: $INSTALL_DIR/$BINARY_NAME --help${NC}" + else + echo -e "${BLUE}→ Run '$BINARY_NAME --help' to get started${NC}" + fi else echo -e "${RED}✗ Installation verification failed.${NC}" - echo -e "${BLUE}→ The binary is located at: $INSTALL_DIR/$BINARY_NAME${NC}" fi -echo -e "\n${GREEN}The Prime Intellect Protocol Worker was successfully installed${NC}" + +echo -e "\n${GREEN}The Prime Intellect Protocol Worker was successfully installed${NC}" \ No newline at end of file From 7cefac569d710e271f1ae07320b7e40a4abd705a Mon Sep 17 00:00:00 2001 From: JannikSt Date: Mon, 24 Mar 2025 09:08:31 -0700 Subject: [PATCH 69/85] add balance cmd and improve registration flow (#164) * add balance cmd and 
improve registration flow --- Makefile | 7 ++-- worker/src/cli/command.rs | 53 +++++++++++++++++++++++++++++-- worker/src/operations/provider.rs | 20 ++++++++---- 3 files changed, 69 insertions(+), 11 deletions(-) diff --git a/Makefile b/Makefile index 729d9fee..858616e7 100644 --- a/Makefile +++ b/Makefile @@ -9,7 +9,6 @@ set-min-stake-amount: mint-ai-tokens-to-provider: set -a; source ${ENV_FILE}; set +a; \ cargo run -p dev-utils --example mint_ai_token -- --address $${PROVIDER_ADDRESS} --key $${PRIVATE_KEY_FEDERATOR} --rpc-url $${RPC_URL} - transfer-eth-to-provider: set -a; source ${ENV_FILE}; set +a; \ cargo run -p dev-utils --example transfer_eth -- --address $${PROVIDER_ADDRESS} --key $${PRIVATE_KEY_FEDERATOR} --rpc-url $${RPC_URL} --amount 1000000000000000000 @@ -170,4 +169,8 @@ eject-node: sign-message: set -a; source ${ENV_FILE}; set +a; \ - cargo watch -w worker/src -x "run --bin worker -- sign-message --message example --private-key-provider $$PRIVATE_KEY_PROVIDER --private-key-node $$PRIVATE_KEY_NODE" + cargo watch -w worker/src -x "run --bin worker -- sign-message --message example-content --private-key-provider $$PRIVATE_KEY_PROVIDER --private-key-node $$PRIVATE_KEY_NODE" + +balance: + set -a; source ${ENV_FILE}; set +a; \ + cargo watch -w worker/src -x "run --bin worker -- balance --private-key $$PRIVATE_KEY_PROVIDER --rpc-url $$RPC_URL" diff --git a/worker/src/cli/command.rs b/worker/src/cli/command.rs index 84739d95..b64ee62e 100644 --- a/worker/src/cli/command.rs +++ b/worker/src/cli/command.rs @@ -85,12 +85,27 @@ pub enum Commands { /// Auto accept transactions #[arg(long, default_value = "false")] auto_accept: bool, + + /// Retry count until provider has enough balance to stake (0 for unlimited retries) + #[arg(long, default_value = "0")] + funding_retry_count: u32, }, Check {}, /// Generate new wallets for provider and node GenerateWallets {}, + /// Get balance of provider and node + Balance { + /// Private key for the provider + #[arg(long)] + 
private_key: Option, + + /// RPC URL + #[arg(long, default_value = "http://localhost:8545")] + rpc_url: String, + }, + /// Sign Message SignMessage { /// Message to sign @@ -127,6 +142,7 @@ pub async fn execute_command( private_key_provider, private_key_node, auto_accept, + funding_retry_count, } => { if *disable_state_storing && *auto_recover { Console::error( @@ -365,11 +381,10 @@ pub async fn execute_command( &format!("{}", required_stake / U256::from(10u128.pow(18))), ); - const MAX_REGISTER_PROVIDER_ATTEMPTS: u32 = 200; if let Err(e) = provider_ops .retry_register_provider( required_stake, - MAX_REGISTER_PROVIDER_ATTEMPTS, + *funding_retry_count, cancellation_token.clone(), ) .await @@ -552,6 +567,40 @@ pub async fn execute_command( ); Ok(()) } + + Commands::Balance { + private_key, + rpc_url, + } => { + let private_key = if let Some(key) = private_key { + key.clone() + } else { + std::env::var("PRIVATE_KEY").expect("PRIVATE_KEY must be set") + }; + + let provider_wallet = Wallet::new(&private_key, Url::parse(rpc_url).unwrap()).unwrap(); + + let contracts = Arc::new( + ContractBuilder::new(&provider_wallet) + .with_compute_registry() + .with_ai_token() + .with_prime_network() + .with_compute_pool() + .build() + .unwrap(), + ); + + let provider_balance = contracts + .ai_token + .balance_of(provider_wallet.wallet.default_signer().address()) + .await + .unwrap(); + + let format_balance = format!("{}", provider_balance / U256::from(10u128.pow(18))); + + println!("Provider balance: {}", format_balance); + Ok(()) + } Commands::SignMessage { message, private_key_provider, diff --git a/worker/src/operations/provider.rs b/worker/src/operations/provider.rs index 7b2e858f..b6111fcc 100644 --- a/worker/src/operations/provider.rs +++ b/worker/src/operations/provider.rs @@ -168,15 +168,15 @@ impl ProviderOperations { ) -> Result<(), ProviderError> { Console::title("Registering Provider"); let mut attempts = 0; - while attempts < max_attempts { + while attempts < 
max_attempts || max_attempts == 0 { Console::progress("Registering provider..."); match self.register_provider(stake).await { Ok(_) => { return Ok(()); } - Err(e) => { - if let ProviderError::NotWhitelisted = e { - Console::error("Provider not whitelisted, retrying in 10 seconds..."); + Err(e) => match e { + ProviderError::NotWhitelisted | ProviderError::InsufficientBalance => { + Console::info("Info", "Retrying in 10 seconds..."); tokio::select! { _ = tokio::time::sleep(tokio::time::Duration::from_secs(10)) => {} _ = cancellation_token.cancelled() => { @@ -185,10 +185,9 @@ impl ProviderOperations { } attempts += 1; continue; - } else { - return Err(e); } - } + _ => return Err(e), + }, } } Console::error(&format!( @@ -224,6 +223,10 @@ impl ProviderOperations { "ETH Balance", &format!("{} ETH", eth_balance / U256::from(10u128.pow(18))), ); + if balance < stake { + Console::error("Insufficient AI Token balance for stake"); + return Err(ProviderError::InsufficientBalance); + } Console::progress("Approving AI Token for Stake transaction"); if !self.prompt_user_confirmation(&format!( "Do you want to approve staking {} tokens?", @@ -265,6 +268,7 @@ impl ProviderOperations { Console::success("Provider registered"); if !provider.is_whitelisted { + Console::error("Provider is not whitelisted yet."); return Err(ProviderError::NotWhitelisted); } @@ -336,6 +340,7 @@ pub enum ProviderError { NotWhitelisted, UserCancelled, Other, + InsufficientBalance, } impl fmt::Display for ProviderError { @@ -344,6 +349,7 @@ impl fmt::Display for ProviderError { Self::NotWhitelisted => write!(f, "Provider is not whitelisted"), Self::UserCancelled => write!(f, "Operation cancelled by user"), Self::Other => write!(f, "Provider could not be registered"), + Self::InsufficientBalance => write!(f, "Insufficient AI Token balance for stake"), } } } From 7100b5c7513892c6a5f255e14a0ac84b00a9a2cc Mon Sep 17 00:00:00 2001 From: JannikSt Date: Mon, 24 Mar 2025 16:19:55 -0700 Subject: [PATCH 70/85] improve 
README & general setup info (#165) --- .env.example | 7 +- README.md | 207 ++++++-------------------------------- docs/development-setup.md | 157 +++++++++++++++++++++++++++++ docs/usage-guide.md | 111 ++++++++++++++++++++ 4 files changed, 303 insertions(+), 179 deletions(-) create mode 100644 docs/development-setup.md create mode 100644 docs/usage-guide.md diff --git a/.env.example b/.env.example index 5ba16b6b..6bff58e9 100644 --- a/.env.example +++ b/.env.example @@ -5,12 +5,15 @@ WORKER_COMPUTE_POOL_ID= WORKER_EXTERNAL_IP= # Private keys of privileged accounts +# See deploy.sh files in smart-contracts PRIVATE_KEY_FEDERATOR= FEDERATOR_ADDRESS= PRIVATE_KEY_VALIDATOR= VALIDATOR_ADDRESS= # Provider with their node +# This can simple be a provider that is funded as part of the anvil start cmd. +# You can also generate wallets using prime-worker wallet generate PRIVATE_KEY_PROVIDER= PROVIDER_ADDRESS= PRIVATE_KEY_NODE= @@ -20,7 +23,7 @@ NODE_ADDRESS= POOL_OWNER_PRIVATE_KEY= POOL_OWNER_ADDRESS= -# Contracts +# Contracts - you get these addresses after deploying the contracts PRIME_NETWORK_ADDRESS= AI_TOKEN_ADDRESS= COMPUTE_REGISTRY_ADDRESS= @@ -28,7 +31,7 @@ DOMAIN_REGISTRY_ADDRESS= STAKE_MANAGER_ADDRESS= COMPUTE_POOL_ADDRESS= +# Optional WORK_VALIDATION_CONTRACT= - LEVITICUS_URL= S3_CREDENTIALS= \ No newline at end of file diff --git a/README.md b/README.md index b90e0a42..835df178 100644 --- a/README.md +++ b/README.md @@ -5,22 +5,21 @@

Decentralized Compute Infrastructure for AI

-> ⚠️ **IMPORTANT**: This project is still under active development. Currently, you can only run the protocol locally - connecting to public RPCs is not yet supported. Please check back later for updates. +> ⚠️ **IMPORTANT**: This project is still under active development. Currently, you can only run the protocol locally - connecting to public RPCs is not yet supported. Please check back later for updates. See our [FAQ](#frequently-asked-questions) for details. Prime Network is a peer-to-peer compute and intelligence network that enables decentralized AI development at scale. This repository contains the core infrastructure for contributing compute resources to the network, including workers, validators, and the coordination layer. ## 📚 Table of Contents - [System Architecture](#system-architecture) - [Getting Started](#getting-started) -- [Installation](#installation) -- [Usage](#usage) -- [Development](#development) +- [Documentation](#documentation) +- [Frequently Asked Questions](#frequently-asked-questions) - [Community](#community) - [Contributing](#contributing) - [Security](#security) - [License](#license) -## System Architecture +## System Architecture The Prime Protocol follows a modular architecture designed for decentralized AI compute:
@@ -28,7 +27,6 @@ The Prime Protocol follows a modular architecture designed for decentralized AI
### Component Overview - - **Smart Contracts**: Ethereum-based contracts manage the protocol's economic layer - **Discovery Service**: Enables secure peer discovery and metadata sharing - **Orchestrator**: Coordinates compute jobs across worker nodes @@ -38,190 +36,49 @@ The Prime Protocol follows a modular architecture designed for decentralized AI ## Getting Started ### Prerequisites +- Linux operating system +- CUDA-capable GPU(s) for worker operations +- Docker Desktop and Git installed -Before running Prime Protocol, ensure you have the following requirements: - -#### Hardware -- Linux or macOS operating system -- CUDA-capable GPU(s) for mining operations +For complete setup instructions, refer to our [Development Setup Guide](docs/development-setup.md). -#### Software -- [Docker Desktop](https://www.docker.com/products/docker-desktop/) - Container runtime -- [Git](https://git-scm.com/) - Version control -- [Rust](https://www.rust-lang.org/) - Programming language and toolchain -- [Redis](https://redis.io/) - In-memory data store -- [Foundry](https://book.getfoundry.sh/) - Smart contract development toolkit -- [tmuxinator](https://github.com/tmuxinator/tmuxinator) - Terminal session manager - -## Installation - -### 1. Clone Repository -```bash -git clone https://github.com/PrimeIntellect-ai/protocol.git -cd protocol -git submodule update --init --recursive +### Install Worker CLI: +You can install the latest worker CLI using: ``` - -### 2. 
Install Dependencies -```bash -# Install Foundry -curl -L https://foundry.paradigm.xyz | bash - -# Reload .bashrc (or .bash_profile, depends on the system) -source ~/.bashrc - -foundryup - -# Install Rust -curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh - -# Install cargo-watch -cargo install cargo-watch - -# Install Redis (MacOS) -brew install redis - -# Install Redis (Ubuntu) -# sudo apt-get install redis-server - -# Install Ruby (MacOS) -brew install ruby - -# Install Ruby (Ubuntu) -# sudo apt-get install redis-server - -# Install tmuxinator (do not use brew) -gem install tmuxinator - -# Install Tmux (MacOS) -brew install tmux - -# Install Tmux (Ubuntu) -#sudo apt install tmux -#sudo apt-get install libssl-dev +curl -sSL https://raw.githubusercontent.com/PrimeIntellect-ai/protocol/develop/worker/scripts/install.sh | bash ``` -### 3. Configure Environment -- Enable "Allow the default Docker socket to be used" in Docker Desktop settings (MacOS) -- On Ubuntu, add your user to the docker group: -```bash -sudo usermod -aG docker $USER +For the latest dev build use: ``` -- Create `.env` files in base folder and discovery folder - -## Development - -### Starting the Development Environment - -To start all core services: -```bash -make up +curl -sSL https://raw.githubusercontent.com/PrimeIntellect-ai/protocol/develop/worker/scripts/install.sh | bash -s -- --dev ``` -This will launch: -- Local blockchain node -- Discovery service -- Validator node -- Orchestrator service -- Redis instance -- Supporting infrastructure -### Running in docker compose -You can run all supporting services (chain, validator, discovery, orchestrator) in docker compose. 
+## Documentation +- [Development Setup Guide](docs/development-setup.md) - Detailed installation and environment setup instructions +- [Usage Guide](docs/usage-guide.md) - Instructions for dispatching tasks, monitoring, and system management -1.Start docker compose: -```bash -docker compose up -``` +## Frequently Asked Questions -2. Run Setup: -``` -make setup -``` +#### Q: What is Prime Protocol? +**A:** Prime Protocol is a peer-to-peer compute and intelligence network that enables decentralized AI development at scale. It provides infrastructure for contributing compute resources to the network through workers, validators, and a coordination layer. -3. You can now launch a worker. -- Adjust the .env var `WORKER_EXTERNAL_IP` to: `WORKER_EXTERNAL_IP=host.docker.internal` -- Launch the worker using `make watch-worker` -- whitelist the worker once you see the whitelist alert using: `make whitelist-provider` +#### Q: Is Prime Protocol ready for production use? +**A:** No, Prime Protocol is still under active development. Currently, you can only run the protocol locally. -### Running a Worker Node +#### Q: What environment variables do I need for local development? +**A:** We have provided an .env.example file with the required variables. -Once the core services are running, you can start a worker node in a new terminal: -```bash -make watch-worker -``` +#### Q: How are private keys managed securely in the system? +**A:** We're actively developing our security practices for private key management. -The worker will automatically connect to the discovery service and begin processing tasks. -It takes a couple of seconds until the worker is whitelisted. This is done using a simple loop on the second page of tmux. +#### Q: What are the recommended network isolation strategies for the worker? 
+**A:** We will be providing detailed documentation on how to secure workers with firewalls for ingress / egress -You can find more details on the APIs in the orchestrator and discovery service directory. +#### Q: What are the funding requirements for workers? +**A:** For the current development phase, minimal testnet ETH is sufficient. Detailed information on setting up the worker will follow. -### Deploying a task - -First, you need to create a local worker (after you have all other services running using e.g. `make up`) - -```bash -make watch-worker -``` - -check that the worker has been registered on the orchestrator: - -```bash -curl -X GET http://localhost:8090/nodes -H "Authorization: Bearer admin" ->>> {"nodes":[{"address":"0x66295e2b4a78d1cb57db16ac0260024900a5ba9b","ip_address":"0.0.0.0","port":8091,"status":"Healthy","task_id":null,"task_state":null}],"success":true} -``` - - -then lets create a task - -```bash -curl -X POST http://localhost:8090/tasks -H "Content-Type: application/json" -H "Authorization: Bearer admin" -d '{"name":"sample","image":"ubuntu:latest"}' ->>> {"success":true,"task":"updated_task"}% -``` - -and check that the task is created - -```bash -curl -X GET http://localhost:8090/nodes -H "Authorization: Bearer admin" ->>> {"nodes":[{"address":"0x66295e2b4a78d1cb57db16ac0260024900a5ba9b","ip_address":"0.0.0.0","port":8091,"status":"Healthy","task_id":"29edd356-5c48-4ba6-ab96-73d002daddff","task_state":"RUNNING"}],"success":true}% -``` - -you can also check docker ps to see that the docker is running locally - -```bash -docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -e860c44a9989 ubuntu:latest "sleep infinity" 3 minutes ago Up 3 minutes prime-task-29edd356-5c48-4ba6-ab96-73d002daddff -ef02d23b5c74 redis:alpine "docker-entrypoint.s…" 27 minutes ago Up 27 minutes 0.0.0.0:6380->6379/tcp, [::]:6380->6379/tcp prime-worker-validator-redis-1 -7761ee7b6dcf ghcr.io/foundry-rs/foundry:latest "anvil --host 0.0.0.…" 27 minutes ago 
Up 27 minutes 0.0.0.0:8545->8545/tcp, :::8545->8545/tcp prime-worker-validator-anvil-1 -``` - - -### Stopping Services - -To gracefully shutdown all services: -```bash -make down -``` - -### Remote GPU Development Setup - -https://github.com/user-attachments/assets/8b25ad50-7183-4dd5-add6-f9acf3852b03 - -Start the local development environment: -``` -make up -``` -Set up your remote GPU worker: -1. Provision a GPU instance and ensure Docker is installed -2. Configure environment variables and start the remote worker: -``` -SSH_CONNECTION="ssh your-ssh-conn string" -EXTERNAL_IP="your-external-ip" -make remote-worker -``` ## Community - - [Discord](https://discord.gg/primeintellect) - [X](https://x.com/PrimeIntellect) - [Blog](https://www.primeintellect.ai/blog) @@ -230,8 +87,4 @@ make remote-worker We welcome contributions! Please see our [Contributing Guidelines](CONTRIBUTING.md). ## Security -See [SECURITY.md](SECURITY.md) for security policies and reporting vulnerabilities. - -## Additional Resources - -- [Anvil Testchain Deployment Guide](./docs/testchain-deployment.md) - Commands and troubleshooting for setting up an Anvil testchain +See [SECURITY.md](SECURITY.md) for security policies and reporting vulnerabilities. 
\ No newline at end of file diff --git a/docs/development-setup.md b/docs/development-setup.md new file mode 100644 index 00000000..d817b7ba --- /dev/null +++ b/docs/development-setup.md @@ -0,0 +1,157 @@ +# Prime Protocol Development Setup + +## Table of Contents +- [Prerequisites](#prerequisites) +- [Full Development Setup](#full-development-setup) +- [Docker Compose Setup](#docker-compose-setup) +- [Running a Worker Node](#running-a-worker-node) +- [Remote GPU Development](#remote-gpu-development) +- [Stopping Services](#stopping-services) + +## Prerequisites + +Before running Prime Protocol, ensure you have the following requirements: + +### Software +- [Docker Desktop](https://www.docker.com/products/docker-desktop/) - Container runtime +- [Git](https://git-scm.com/) - Version control +- [Rust](https://www.rust-lang.org/) - Programming language and toolchain +- [Redis](https://redis.io/) - In-memory data store +- [Foundry](https://book.getfoundry.sh/) - Smart contract development toolkit +- [tmuxinator](https://github.com/tmuxinator/tmuxinator) - Terminal session manager + +## Full Development Setup + +### 1. Clone Repository +```bash +git clone https://github.com/PrimeIntellect-ai/protocol.git +cd protocol +git submodule update --init --recursive +``` + +### 2. 
Install Dependencies + +#### Foundry +```bash +# Install Foundry +curl -L https://foundry.paradigm.xyz | bash + +# Reload .bashrc (or .bash_profile, depends on the system) +source ~/.bashrc + +foundryup +``` + +#### Rust +```bash +# Install Rust +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh + +# Install cargo-watch +cargo install cargo-watch +``` + +#### Redis +```bash +# Install Redis (MacOS) +brew install redis + +# Install Redis (Ubuntu) +sudo apt-get install redis-server +``` + +#### Ruby and Tmux +```bash +# Install Ruby (MacOS) +brew install ruby + +# Install Ruby (Ubuntu) +sudo apt-get install ruby + +# Install tmuxinator (do not use brew) +gem install tmuxinator + +# Install Tmux (MacOS) +brew install tmux + +# Install Tmux (Ubuntu) +sudo apt install tmux +sudo apt-get install libssl-dev +``` + +### 3. Configure Environment +- Enable "Allow the default Docker socket to be used" in Docker Desktop settings (MacOS) +- On Ubuntu, add your user to the docker group: + ```bash + sudo usermod -aG docker $USER + ``` +- Create `.env` files in base folder and discovery folder + +### 4. Launch Core Services +```bash +make up +``` + +This will start: +- Local blockchain node +- Discovery service +- Validator node +- Orchestrator service +- Redis instance +- Supporting infrastructure + +## Docker Compose Setup + +You can run all supporting services (chain, validator, discovery, orchestrator) in docker compose. + +1. Start docker compose: + ```bash + docker compose up + ``` + +2. Run Setup: + ```bash + make setup + ``` + +3. 
Launch a worker: + - Adjust the .env var `WORKER_EXTERNAL_IP` to: `WORKER_EXTERNAL_IP=host.docker.internal` + - Launch the worker using `make watch-worker` + - Whitelist the worker once you see the whitelist alert using: `make whitelist-provider` + +## Running a Worker Node + +Once the core services are running, you can start a worker node in a new terminal: +```bash +make watch-worker +``` + +The worker will automatically connect to the discovery service and begin processing tasks. +It takes a couple of seconds until the worker is whitelisted. This is done using a simple loop on the second page of tmux. + +You can find more details on the APIs in the orchestrator and discovery service directory. + +## Remote GPU Development + +https://github.com/user-attachments/assets/8b25ad50-7183-4dd5-add6-f9acf3852b03 + +Start the local development environment: +```bash +make up +``` + +Set up your remote GPU worker: +1. Provision a GPU instance and ensure Docker is installed +2. Configure environment variables and start the remote worker: + ```bash + SSH_CONNECTION="ssh your-ssh-conn string" + EXTERNAL_IP="your-external-ip" + make remote-worker + ``` + +## Stopping Services + +To gracefully shutdown all services: +```bash +make down +``` \ No newline at end of file diff --git a/docs/usage-guide.md b/docs/usage-guide.md new file mode 100644 index 00000000..e4ebc767 --- /dev/null +++ b/docs/usage-guide.md @@ -0,0 +1,111 @@ +# Prime Protocol Usage Guide + +This guide provides instructions for deploying and managing tasks on the Prime Protocol network. + +## Table of Contents +- [Deploying Tasks](#deploying-tasks) +- [Monitoring Tasks](#monitoring-tasks) +- [Task Management](#task-management) + +## Deploying Tasks + +### Prerequisites + +Before deploying tasks, ensure you have: +1. All core services running (`make up`) +2. 
A worker node registered on the network + +### Start a Worker Node + +Start a local worker: + +```bash +make watch-worker +``` + +### Verify Worker Registration + +Check that the worker has been registered with the orchestrator: + +```bash +curl -X GET http://localhost:8090/nodes -H "Authorization: Bearer admin" +``` + +Successful response: +```json +{ + "nodes": [ + { + "address": "0x66295e2b4a78d1cb57db16ac0260024900a5ba9b", + "ip_address": "0.0.0.0", + "port": 8091, + "status": "Healthy", + "task_id": null, + "task_state": null + } + ], + "success": true +} +``` + +### Create a Task + +Deploy a simple task using the API: + +```bash +curl -X POST http://localhost:8090/tasks \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer admin" \ + -d '{"name":"sample","image":"ubuntu:latest"}' +``` + +Successful response: +```json +{ + "success": true, + "task": "updated_task" +} +``` + +## Monitoring Tasks + +### Check Task Status + +Verify that the task is running: + +```bash +curl -X GET http://localhost:8090/nodes -H "Authorization: Bearer admin" +``` + +Successful response: +```json +{ + "nodes": [ + { + "address": "0x66295e2b4a78d1cb57db16ac0260024900a5ba9b", + "ip_address": "0.0.0.0", + "port": 8091, + "status": "Healthy", + "task_id": "29edd356-5c48-4ba6-ab96-73d002daddff", + "task_state": "RUNNING" + } + ], + "success": true +} +``` + +### Verify Container Status + +Check that the Docker container is running: + +```bash +docker ps +``` + +Example output: +``` +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +e860c44a9989 ubuntu:latest "sleep infinity" 3 minutes ago Up 3 minutes prime-task-29edd356-5c48-4ba6-ab96-73d002daddff +ef02d23b5c74 redis:alpine "docker-entrypoint.s…" 27 minutes ago Up 27 minutes 0.0.0.0:6380->6379/tcp, [::]:6380->6379/tcp prime-worker-validator-redis-1 +7761ee7b6dcf ghcr.io/foundry-rs/foundry:latest "anvil --host 0.0.0.…" 27 minutes ago Up 27 minutes 0.0.0.0:8545->8545/tcp, :::8545->8545/tcp 
prime-worker-validator-anvil-1 +``` \ No newline at end of file From 706c2471013026931c7f1ff8d64c6d041aa95ff1 Mon Sep 17 00:00:00 2001 From: Manveer Date: Mon, 24 Mar 2025 17:06:16 -0700 Subject: [PATCH 71/85] Create generate-node-wallet command to skip generating provider wallet if not needed --- .env.example | 2 +- worker/src/cli/command.rs | 15 +++++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/.env.example b/.env.example index 6bff58e9..87514a65 100644 --- a/.env.example +++ b/.env.example @@ -13,7 +13,7 @@ VALIDATOR_ADDRESS= # Provider with their node # This can simple be a provider that is funded as part of the anvil start cmd. -# You can also generate wallets using prime-worker wallet generate +# You can also generate wallets using prime-worker generate-wallets PRIVATE_KEY_PROVIDER= PROVIDER_ADDRESS= PRIVATE_KEY_NODE= diff --git a/worker/src/cli/command.rs b/worker/src/cli/command.rs index b64ee62e..c4f579e6 100644 --- a/worker/src/cli/command.rs +++ b/worker/src/cli/command.rs @@ -94,6 +94,9 @@ pub enum Commands { /// Generate new wallets for provider and node GenerateWallets {}, + + /// Generate new wallet for node only + GenerateNodeWallet {}, /// Get balance of provider and node Balance { @@ -567,6 +570,18 @@ pub async fn execute_command( ); Ok(()) } + + Commands::GenerateNodeWallet {} => { + let node_signer = PrivateKeySigner::random(); + + println!("Node wallet:"); + println!(" Address: {}", node_signer.address()); + println!( + " Private key: {}", + hex::encode(node_signer.credential().to_bytes()) + ); + Ok(()) + } Commands::Balance { private_key, From bf1a9513341c3fa092556d9f74dcb7b6d77e1b93 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Mon, 24 Mar 2025 17:37:18 -0700 Subject: [PATCH 72/85] improve stake approval ux by showing response immediatly (#167) --- worker/src/operations/provider.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/worker/src/operations/provider.rs 
b/worker/src/operations/provider.rs index b6111fcc..a2e0c946 100644 --- a/worker/src/operations/provider.rs +++ b/worker/src/operations/provider.rs @@ -227,7 +227,6 @@ impl ProviderOperations { Console::error("Insufficient AI Token balance for stake"); return Err(ProviderError::InsufficientBalance); } - Console::progress("Approving AI Token for Stake transaction"); if !self.prompt_user_confirmation(&format!( "Do you want to approve staking {} tokens?", stake / U256::from(10u128.pow(18)) @@ -235,12 +234,13 @@ impl ProviderOperations { Console::info("Operation cancelled by user", "Staking approval declined"); return Err(ProviderError::UserCancelled); } + + Console::progress("Approving AI Token for Stake transaction"); self.contracts .ai_token .approve(stake) .await .map_err(|_| ProviderError::Other)?; - Console::progress("Registering Provider"); let register_tx = match self.contracts.prime_network.register_provider(stake).await { Ok(tx) => tx, From c66409117d5b445c1209cb9987e76bb4c79c2fc3 Mon Sep 17 00:00:00 2001 From: JannikSt Date: Mon, 24 Mar 2025 19:03:59 -0700 Subject: [PATCH 73/85] Feature: interconnect check w. 
issue tracker (#166) * basic interconnect test * add issue tracker for software & hardware issues * introduce a new flag to ignore errors --- Cargo.lock | 2 + Makefile | 5 +- worker/Cargo.toml | 3 +- worker/src/checks/hardware/hardware_check.rs | 79 ++++++++++++-- worker/src/checks/hardware/interconnect.rs | 104 ++++++++++++++++++ worker/src/checks/hardware/mod.rs | 3 +- worker/src/checks/issue.rs | 109 +++++++++++++++++++ worker/src/checks/mod.rs | 1 + worker/src/checks/software/docker.rs | 63 ++++++++--- worker/src/checks/software/mod.rs | 1 - worker/src/checks/software/software_check.rs | 25 ++--- worker/src/checks/software/types.rs | 6 - worker/src/cli/command.rs | 60 +++++++--- worker/src/main.rs | 2 - 14 files changed, 396 insertions(+), 67 deletions(-) create mode 100644 worker/src/checks/hardware/interconnect.rs create mode 100644 worker/src/checks/issue.rs delete mode 100644 worker/src/checks/software/types.rs diff --git a/Cargo.lock b/Cargo.lock index 28ee9bf6..56fafefe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4970,6 +4970,7 @@ dependencies = [ "base64 0.22.1", "bytes", "encoding_rs", + "futures-channel", "futures-core", "futures-util", "h2 0.4.7", @@ -6982,6 +6983,7 @@ dependencies = [ "log", "nalgebra", "nvml-wrapper", + "rand 0.9.0", "regex", "reqwest", "serde", diff --git a/Makefile b/Makefile index 858616e7..5b5967de 100644 --- a/Makefile +++ b/Makefile @@ -72,7 +72,10 @@ watch-discovery: watch-worker: set -a; source ${ENV_FILE}; set +a; \ - cargo watch -w worker/src -x "run --bin worker -- run --port 8091 --external-ip $${WORKER_EXTERNAL_IP:-localhost} --compute-pool-id $$WORKER_COMPUTE_POOL_ID --validator-address $$VALIDATOR_ADDRESS" + cargo watch -w worker/src -x "run --bin worker -- run --port 8091 --external-ip $${WORKER_EXTERNAL_IP:-localhost} --compute-pool-id $$WORKER_COMPUTE_POOL_ID --validator-address $$VALIDATOR_ADDRESS --ignore-issues" + +watch-check: + cargo watch -w worker/src -x "run --bin worker -- check" watch-validator: set -a; 
source ${ENV_FILE}; set +a; \ diff --git a/worker/Cargo.toml b/worker/Cargo.toml index 0e071d32..6cfce033 100644 --- a/worker/Cargo.toml +++ b/worker/Cargo.toml @@ -26,7 +26,7 @@ futures-util = "0.3" alloy = { version = "0.9.2", features = ["full"] } url = "2.5.4" serde_json = "1.0.135" -reqwest = "0.12.12" +reqwest = { version = "0.12.12", features = ["blocking"] } hex = "0.4.3" console = "0.15.10" indicatif = "0.17.9" @@ -44,6 +44,7 @@ strip-ansi-escapes = "0.2.1" nalgebra = "0.33.2" sha2 = "0.10.8" unicode-width = "0.2.0" +rand = "0.9.0" [dev-dependencies] tempfile = "=3.14.0" diff --git a/worker/src/checks/hardware/hardware_check.rs b/worker/src/checks/hardware/hardware_check.rs index b137f51e..0d9da8a0 100644 --- a/worker/src/checks/hardware/hardware_check.rs +++ b/worker/src/checks/hardware/hardware_check.rs @@ -1,38 +1,54 @@ use super::{ gpu::detect_gpu, + interconnect::InterconnectCheck, memory::{convert_to_mb, get_memory_info, print_memory_info}, storage::{get_storage_info, BYTES_TO_GB}, }; -use crate::console::Console; +use crate::{ + checks::issue::{IssueReport, IssueType}, + console::Console, +}; use shared::models::node::{ComputeSpecs, CpuSpecs, GpuSpecs, Node}; +use std::sync::Arc; use sysinfo::{self, System}; +use tokio::sync::RwLock; pub struct HardwareChecker { sys: System, + issues: Arc>, } impl HardwareChecker { - pub fn new() -> Self { + pub fn new(issues: Option>>) -> Self { let mut sys = System::new_all(); + sys.refresh_all(); - Self { sys } + Self { + sys, + issues: issues.unwrap_or_else(|| Arc::new(RwLock::new(IssueReport::new()))), + } } - pub fn enrich_node_config( - &self, + pub async fn check_hardware( + &mut self, mut node_config: Node, ) -> Result> { - self.collect_system_info(&mut node_config)?; + self.collect_system_info(&mut node_config).await?; self.print_system_info(&node_config); Ok(node_config) } - fn collect_system_info( - &self, + async fn collect_system_info( + &mut self, node_config: &mut Node, ) -> Result<(), Box> { 
Console::section("Hardware Checks"); + let issue_tracker = self.issues.write().await; if self.sys.cpus().is_empty() { + issue_tracker.add_issue( + IssueType::InsufficientCpu, + "Failed to detect CPU information", + ); return Err(Box::new(std::io::Error::new( std::io::ErrorKind::Other, "Failed to detect CPU information", @@ -43,6 +59,28 @@ impl HardwareChecker { let gpu_specs = self.collect_gpu_specs()?; let (ram_mb, storage_gb) = self.collect_memory_specs()?; + // Check minimum requirements + if cpu_specs.cores.unwrap_or(0) < 4 { + issue_tracker.add_issue(IssueType::InsufficientCpu, "Minimum 4 CPU cores required"); + } + + if ram_mb < 8192 { + // 8GB minimum + issue_tracker.add_issue(IssueType::InsufficientMemory, "Minimum 8GB RAM required"); + } + + if storage_gb < 100 { + // 100GB minimum + issue_tracker.add_issue( + IssueType::InsufficientStorage, + "Minimum 100GB storage required", + ); + } + + if gpu_specs.is_none() { + issue_tracker.add_issue(IssueType::NoGpu, "No GPU detected"); + } + let (storage_path, available_space) = if cfg!(target_os = "linux") { match super::storage::find_largest_storage() { Some(mount_point) => (Some(mount_point.path), Some(mount_point.available_space)), @@ -57,6 +95,30 @@ impl HardwareChecker { None => storage_gb, }; + // Check network speeds + Console::title("Network Speed Test:"); + Console::progress("Starting network speed test..."); + match InterconnectCheck::check_speeds().await { + Ok((download_speed, upload_speed)) => { + Console::info("Download Speed", &format!("{:.2} Mbps", download_speed)); + Console::info("Upload Speed", &format!("{:.2} Mbps", upload_speed)); + + if download_speed < 50.0 || upload_speed < 50.0 { + issue_tracker.add_issue( + IssueType::NetworkConnectivityIssue, + "Network speed below recommended 50Mbps", + ); + } + } + Err(_) => { + issue_tracker.add_issue( + IssueType::NetworkConnectivityIssue, + "Failed to perform network speed test", + ); + Console::warning("Failed to perform network speed test"); + } + 
} + node_config.compute_specs = Some(ComputeSpecs { cpu: Some(cpu_specs), ram_mb: Some(ram_mb), @@ -132,7 +194,6 @@ impl HardwareChecker { gpu.model.as_ref().unwrap_or(&"Unknown".to_string()), ); // Convert memory from MB to GB and round - let memory_gb = if let Some(memory_mb) = gpu.memory_mb { memory_mb as f64 / 1024.0 } else { diff --git a/worker/src/checks/hardware/interconnect.rs b/worker/src/checks/hardware/interconnect.rs new file mode 100644 index 00000000..dcce4b98 --- /dev/null +++ b/worker/src/checks/hardware/interconnect.rs @@ -0,0 +1,104 @@ +use rand::RngCore; +use reqwest::Client; +use std::time::Instant; + +pub struct InterconnectCheck; + +impl InterconnectCheck { + pub async fn check_speeds() -> Result<(f64, f64), Box> { + let client = Client::new(); + + // Download test: Request a 10 MB file using the query parameter. + // Cloudflare's speed test endpoint is not officially documented or guaranteed + // Consider using a more reliable speed test service or implementing our own test server + let download_bytes = 10 * 1024 * 1024; // 10 MB + let download_url = format!( + "https://speed.cloudflare.com/__down?bytes={}", + download_bytes + ); + let start = Instant::now(); + let response = client.get(&download_url).send().await?; + + // Verify we got a successful response + if !response.status().is_success() { + return Err(format!("Speed test failed with status: {}", response.status()).into()); + } + + let data = response.bytes().await?; + + // Verify we got the expected amount of data + if data.len() != download_bytes { + return Err(format!( + "Received {} bytes but expected {} bytes", + data.len(), + download_bytes + ) + .into()); + } + + let elapsed = start.elapsed().as_secs_f64(); + let download_speed_mbps = (data.len() as f64 * 8.0) / (elapsed * 1_000_000.0); + // Upload test: Generate 10 MB of random data. 
+ let upload_url = "https://speed.cloudflare.com/__up"; + let upload_size = 5 * 1024 * 1024; // 5 MB + let mut rng = rand::rng(); + let mut upload_data = vec![0u8; upload_size]; + rng.fill_bytes(&mut upload_data); + + let start = Instant::now(); + let upload_result = tokio::time::timeout( + std::time::Duration::from_secs(30), // 30 second timeout + client + .post(upload_url) + .header("Content-Type", "application/octet-stream") + .body(upload_data) + .send(), + ) + .await; + + let upload_speed_mbps = match upload_result { + Ok(response) => match response { + Ok(_) => { + let elapsed = start.elapsed().as_secs_f64(); + (upload_size as f64 * 8.0) / (elapsed * 1_000_000.0) + } + Err(_) => 0.0, + }, + Err(_) => { + println!("Upload speed test timed out after 30 seconds"); + 0.0 + } + }; + + Ok((download_speed_mbps, upload_speed_mbps)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_check_speeds() { + let result = InterconnectCheck::check_speeds().await; + println!("Test Result: {:?}", result); + + // Verify the result is Ok and contains expected tuple structure + assert!(result.is_ok()); + + let (download_speed, upload_speed) = result.unwrap(); + + // Verify speeds are positive numbers + assert!(download_speed > 0.0, "Download speed should be positive"); + assert!(upload_speed > 0.0, "Upload speed should be positive"); + + // Verify speeds are within reasonable bounds (0.1 Mbps to 10000 Mbps) + assert!(download_speed >= 0.1, "Download speed too low"); + assert!( + download_speed <= 10000.0, + "Download speed unreasonably high" + ); + assert!(upload_speed >= 0.1, "Upload speed too low"); + assert!(upload_speed <= 10000.0, "Upload speed unreasonably high"); + } +} diff --git a/worker/src/checks/hardware/mod.rs b/worker/src/checks/hardware/mod.rs index 10c552fd..732e62c2 100644 --- a/worker/src/checks/hardware/mod.rs +++ b/worker/src/checks/hardware/mod.rs @@ -1,5 +1,6 @@ mod gpu; -mod hardware_check; +pub mod hardware_check; +mod 
interconnect; mod memory; mod storage; pub use hardware_check::HardwareChecker; diff --git a/worker/src/checks/issue.rs b/worker/src/checks/issue.rs new file mode 100644 index 00000000..d7c9e894 --- /dev/null +++ b/worker/src/checks/issue.rs @@ -0,0 +1,109 @@ +use crate::console::Console; +use std::fmt; +use std::sync::{Arc, RwLock}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum Severity { + Warning, + Error, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum IssueType { + NoGpu, // GPU required for compute + DockerNotInstalled, // Docker required for containers + ContainerToolkitNotInstalled, // Container toolkit required for GPU + InsufficientStorage, // Minimum storage needed + InsufficientMemory, // Minimum RAM needed + InsufficientCpu, // Minimum CPU cores needed + NetworkConnectivityIssue, // Network performance issues +} + +impl IssueType { + pub const fn severity(&self) -> Severity { + match self { + Self::NetworkConnectivityIssue => Severity::Warning, + Self::InsufficientCpu => Severity::Warning, + Self::InsufficientMemory => Severity::Warning, + Self::InsufficientStorage => Severity::Warning, + _ => Severity::Error, + } + } +} + +#[derive(Debug, Clone)] +pub struct Issue { + issue_type: IssueType, + message: String, +} + +impl Issue { + pub fn new(issue_type: IssueType, message: impl Into) -> Self { + Self { + issue_type, + message: message.into(), + } + } + + pub const fn severity(&self) -> Severity { + self.issue_type.severity() + } + + pub fn print(&self) { + match self.severity() { + Severity::Error => Console::error(&format!("{}", self)), + Severity::Warning => Console::warning(&format!("{}", self)), + } + } +} + +impl fmt::Display for Issue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}: {}", self.issue_type, self.message) + } +} + +#[derive(Debug, Default, Clone)] +pub struct IssueReport { + issues: Arc>>, +} + +impl IssueReport { + pub fn new() -> Self { + Self::default() + } + + pub fn 
add_issue(&self, issue_type: IssueType, message: impl Into) { + if let Ok(mut issues) = self.issues.write() { + issues.push(Issue::new(issue_type, message)); + } + } + + pub fn print_issues(&self) { + if let Ok(issues) = self.issues.read() { + if issues.is_empty() { + Console::success("No issues found"); + return; + } + + Console::section("System Check Issues"); + + for issue in issues.iter().filter(|i| i.severity() == Severity::Error) { + issue.print(); + } + + for issue in issues.iter().filter(|i| i.severity() == Severity::Warning) { + issue.print(); + } + } + } + + pub fn has_critical_issues(&self) -> bool { + if let Ok(issues) = self.issues.read() { + return issues + .iter() + .any(|issue| matches!(issue.severity(), Severity::Error)); + } + false + } +} diff --git a/worker/src/checks/mod.rs b/worker/src/checks/mod.rs index c79bab55..23bfcd74 100644 --- a/worker/src/checks/mod.rs +++ b/worker/src/checks/mod.rs @@ -1,2 +1,3 @@ pub mod hardware; +pub mod issue; pub mod software; diff --git a/worker/src/checks/software/docker.rs b/worker/src/checks/software/docker.rs index 4bd7e94c..a93f0914 100644 --- a/worker/src/checks/software/docker.rs +++ b/worker/src/checks/software/docker.rs @@ -1,28 +1,45 @@ +use crate::checks::issue::{IssueReport, IssueType}; use crate::console::Console; +use std::sync::Arc; +use tokio::sync::RwLock; -use super::types::SoftwareCheckError; - -pub fn check_docker_installed() -> Result<(), SoftwareCheckError> { +pub async fn check_docker_installed( + issues: &Arc>, +) -> Result<(), Box> { + let issue_tracker = issues.read().await; let docker_path = std::process::Command::new("which") .arg("docker") .output() .map_err(|e| { - SoftwareCheckError::Other(format!("Failed to execute 'which docker': {}", e)) + issue_tracker.add_issue( + IssueType::DockerNotInstalled, + format!("Failed to execute 'which docker': {}", e), + ); + e })?; if !docker_path.status.success() { - return Err(SoftwareCheckError::DockerNotInstalled); + 
issue_tracker.add_issue(IssueType::DockerNotInstalled, "Docker is not installed"); + return Ok(()); } let docker_info = std::process::Command::new("docker").output().map_err(|e| { - SoftwareCheckError::Other(format!( - "Failed to execute 'docker ps': {}. You may need to add your user to the docker group.", - e - )) + issue_tracker.add_issue( + IssueType::DockerNotInstalled, + format!( + "Failed to execute 'docker ps': {}. You may need to add your user to the docker group.", + e + ) + ); + e })?; if !docker_info.status.success() { - return Err(SoftwareCheckError::DockerNotRunning); + issue_tracker.add_issue( + IssueType::DockerNotInstalled, + "Docker daemon is not running", + ); + return Ok(()); } Console::success("Docker ready"); @@ -31,22 +48,40 @@ pub fn check_docker_installed() -> Result<(), SoftwareCheckError> { let nvidia_toolkit = std::process::Command::new("which") .arg("nvidia-ctk") .output() - .map_err(|e| SoftwareCheckError::Other(format!("Failed to check for nvidia-ctk: {}", e)))?; + .map_err(|e| { + issue_tracker.add_issue( + IssueType::ContainerToolkitNotInstalled, + format!("Failed to check for nvidia-ctk: {}", e), + ); + e + })?; if nvidia_toolkit.status.success() { // If which succeeds, check if it's working properly let version_check = std::process::Command::new("nvidia-ctk") .arg("--version") .output() - .map_err(|e| SoftwareCheckError::Other(format!("Failed to run nvidia-ctk: {}", e)))?; + .map_err(|e| { + issue_tracker.add_issue( + IssueType::ContainerToolkitNotInstalled, + format!("Failed to run nvidia-ctk: {}", e), + ); + e + })?; if version_check.status.success() { Console::success("NVIDIA toolkit ready"); } else { - Console::error("NVIDIA toolkit not configured"); + issue_tracker.add_issue( + IssueType::ContainerToolkitNotInstalled, + "NVIDIA toolkit not configured properly", + ); } } else { - Console::error("NVIDIA toolkit not found"); + issue_tracker.add_issue( + IssueType::ContainerToolkitNotInstalled, + "NVIDIA toolkit not found", + ); } 
Ok(()) diff --git a/worker/src/checks/software/mod.rs b/worker/src/checks/software/mod.rs index 270986e7..72c7162e 100644 --- a/worker/src/checks/software/mod.rs +++ b/worker/src/checks/software/mod.rs @@ -1,3 +1,2 @@ pub mod docker; pub mod software_check; -pub mod types; diff --git a/worker/src/checks/software/software_check.rs b/worker/src/checks/software/software_check.rs index 4b8040aa..d1945a64 100644 --- a/worker/src/checks/software/software_check.rs +++ b/worker/src/checks/software/software_check.rs @@ -1,26 +1,17 @@ use super::docker::check_docker_installed; -use super::types::SoftwareCheckError; +use crate::checks::issue::IssueReport; use crate::console::Console; -use std::error::Error; - -impl std::fmt::Display for SoftwareCheckError { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - Self::DockerNotInstalled => write!(f, "Docker is not installed"), - Self::DockerNotRunning => write!(f, "Docker daemon is not running"), - Self::Other(msg) => write!(f, "Software error: {}", msg), - } - } -} - -impl Error for SoftwareCheckError {} - -pub fn run_software_check() -> Result<(), SoftwareCheckError> { +use std::sync::Arc; +use tokio::sync::RwLock; +pub async fn run_software_check( + issues: Option>>, +) -> Result<(), Box> { Console::section("Software Checks"); + let issues = issues.unwrap_or_else(|| Arc::new(RwLock::new(IssueReport::new()))); // Check Docker installation and connectivity Console::title("Docker:"); - check_docker_installed()?; + check_docker_installed(&issues).await?; Ok(()) } diff --git a/worker/src/checks/software/types.rs b/worker/src/checks/software/types.rs deleted file mode 100644 index 3bc13a18..00000000 --- a/worker/src/checks/software/types.rs +++ /dev/null @@ -1,6 +0,0 @@ -#[derive(Debug)] -pub enum SoftwareCheckError { - DockerNotInstalled, - DockerNotRunning, - Other(String), -} diff --git a/worker/src/cli/command.rs b/worker/src/cli/command.rs index b64ee62e..1a2e6fe6 100644 --- 
a/worker/src/cli/command.rs +++ b/worker/src/cli/command.rs @@ -1,5 +1,6 @@ use crate::api::server::start_server; use crate::checks::hardware::HardwareChecker; +use crate::checks::issue::IssueReport; use crate::checks::software::software_check; use crate::console::Console; use crate::docker::taskbridge::TaskBridge; @@ -22,6 +23,7 @@ use shared::web3::contracts::structs::compute_pool::PoolStatus; use shared::web3::wallet::Wallet; use std::sync::Arc; use std::time::Duration; +use tokio::sync::RwLock; use tokio_util::sync::CancellationToken; use url::Url; @@ -89,6 +91,10 @@ pub enum Commands { /// Retry count until provider has enough balance to stake (0 for unlimited retries) #[arg(long, default_value = "0")] funding_retry_count: u32, + + /// Ignore issues + #[arg(long, default_value = "false")] + ignore_issues: bool, }, Check {}, @@ -143,6 +149,7 @@ pub async fn execute_command( private_key_node, auto_accept, funding_retry_count, + ignore_issues, } => { if *disable_state_storing && *auto_recover { Console::error( @@ -257,11 +264,26 @@ pub async fn execute_command( compute_specs: None, compute_pool_id: *compute_pool_id as u32, }; - let hardware_check = HardwareChecker::new(); - let node_config = hardware_check.enrich_node_config(node_config).unwrap(); + let issue_tracker = Arc::new(RwLock::new(IssueReport::new())); + let mut hardware_check = HardwareChecker::new(Some(issue_tracker.clone())); + let node_config = hardware_check.check_hardware(node_config).await.unwrap(); + if let Err(err) = software_check::run_software_check(Some(issue_tracker.clone())).await + { + Console::error(&format!("❌ Software check failed: {}", err)); + std::process::exit(1); + } + + let issues = issue_tracker.read().await; + issues.print_issues(); + if issues.has_critical_issues() { + if !*ignore_issues { + Console::error("❌ Critical issues found. Exiting."); + std::process::exit(1); + } else { + Console::warning("Critical issues found. 
Ignoring and continuing."); + } + } - // TODO: Move to proper check - let _ = software_check::run_software_check(); let has_gpu = match node_config.compute_specs { Some(ref specs) => specs.gpu.is_some(), None => { @@ -524,10 +546,10 @@ pub async fn execute_command( } Commands::Check {} => { Console::section("🔍 PRIME WORKER SYSTEM CHECK"); - Console::info("═", &"═".repeat(50)); + let issues = Arc::new(RwLock::new(IssueReport::new())); // Run hardware checks - let hardware_checker = HardwareChecker::new(); + let mut hardware_checker = HardwareChecker::new(Some(issues.clone())); let node_config = Node { id: String::new(), ip_address: String::new(), @@ -537,16 +559,24 @@ pub async fn execute_command( compute_pool_id: 0, }; - match hardware_checker.enrich_node_config(node_config) { - Ok(_) => { - Console::success("✅ Hardware check passed!"); - } - Err(err) => { - Console::error(&format!("❌ Hardware check failed: {}", err)); - std::process::exit(1); - } + if let Err(err) = hardware_checker.check_hardware(node_config).await { + Console::error(&format!("❌ Hardware check failed: {}", err)); + std::process::exit(1); + } + + if let Err(err) = software_check::run_software_check(Some(issues.clone())).await { + Console::error(&format!("❌ Software check failed: {}", err)); + std::process::exit(1); } - let _ = software_check::run_software_check(); + + let issues = issues.read().await; + issues.print_issues(); + + if issues.has_critical_issues() { + Console::error("❌ Critical issues found. 
Exiting."); + std::process::exit(1); + } + Ok(()) } Commands::GenerateWallets {} => { diff --git a/worker/src/main.rs b/worker/src/main.rs index a33e4f61..888bb2b2 100644 --- a/worker/src/main.rs +++ b/worker/src/main.rs @@ -86,7 +86,5 @@ async fn main() -> Result<(), Box> { Ok(_) => debug!("All tasks cleaned up successfully"), Err(_) => log::warn!("Timeout waiting for tasks to cleanup"), } - - log::info!("Shutdown complete"); Ok(()) } From 4312d393595f608fe0127ca4f011f0503dffc771 Mon Sep 17 00:00:00 2001 From: Manveer Date: Tue, 25 Mar 2025 14:12:49 -0700 Subject: [PATCH 74/85] Run cargo fmt --- worker/src/cli/command.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/worker/src/cli/command.rs b/worker/src/cli/command.rs index c4f579e6..f612cd5d 100644 --- a/worker/src/cli/command.rs +++ b/worker/src/cli/command.rs @@ -94,7 +94,7 @@ pub enum Commands { /// Generate new wallets for provider and node GenerateWallets {}, - + /// Generate new wallet for node only GenerateNodeWallet {}, @@ -570,7 +570,7 @@ pub async fn execute_command( ); Ok(()) } - + Commands::GenerateNodeWallet {} => { let node_signer = PrivateKeySigner::random(); From 0ec478a6dd9ff9887b478b2152e19a494d54244c Mon Sep 17 00:00:00 2001 From: JannikSt Date: Tue, 25 Mar 2025 22:44:33 -0700 Subject: [PATCH 75/85] Preserve folder structure for toploc-validator (#168) * Improve the synthetic data validation code to ensure we preserve file structure * added a temporary s3 access until we're fully decentralized as the validator needs access to the file sha mapping --- .env.example | 3 +- Cargo.lock | 4 + Makefile | 4 +- orchestrator/Cargo.toml | 1 + orchestrator/Dockerfile | 2 + orchestrator/src/api/routes/storage.rs | 20 +- orchestrator/src/api/server.rs | 3 + orchestrator/src/api/tests/helper.rs | 1 + orchestrator/src/main.rs | 7 +- orchestrator/src/utils/google_cloud.rs | 48 -- shared/Cargo.toml | 3 + shared/src/lib.rs | 1 + shared/src/utils/google_cloud.rs | 149 ++++++ {orchestrator 
=> shared}/src/utils/mod.rs | 0 validator/Dockerfile | 6 + validator/src/main.rs | 15 +- validator/src/validators/hardware.rs | 3 +- validator/src/validators/synthetic_data.rs | 492 +++++++++++-------- worker/src/docker/taskbridge/file_handler.rs | 10 +- 19 files changed, 498 insertions(+), 274 deletions(-) delete mode 100644 orchestrator/src/utils/google_cloud.rs create mode 100644 shared/src/utils/google_cloud.rs rename {orchestrator => shared}/src/utils/mod.rs (100%) diff --git a/.env.example b/.env.example index 87514a65..2cc50c15 100644 --- a/.env.example +++ b/.env.example @@ -34,4 +34,5 @@ COMPUTE_POOL_ADDRESS= # Optional WORK_VALIDATION_CONTRACT= LEVITICUS_URL= -S3_CREDENTIALS= \ No newline at end of file +S3_CREDENTIALS= +BUCKET_NAME= \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 56fafefe..93c6b738 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4333,6 +4333,7 @@ dependencies = [ "google-cloud-storage", "hex", "log", + "rand 0.9.0", "redis", "redis-test", "reqwest", @@ -5485,11 +5486,14 @@ dependencies = [ "actix-web", "alloy", "anyhow", + "base64 0.22.1", "dashmap", "futures-util", + "google-cloud-storage", "hex", "log", "nalgebra", + "rand 0.9.0", "redis", "serde", "serde_json", diff --git a/Makefile b/Makefile index 5b5967de..edd93dc5 100644 --- a/Makefile +++ b/Makefile @@ -79,11 +79,11 @@ watch-check: watch-validator: set -a; source ${ENV_FILE}; set +a; \ - cargo watch -w validator/src -x "run --bin validator -- --validator-key $${PRIVATE_KEY_VALIDATOR} --rpc-url $${RPC_URL} --pool-id $${WORKER_COMPUTE_POOL_ID} --work-validation-contract $${WORK_VALIDATION_CONTRACT} --leviticus-url $${LEVITICUS_URL} --leviticus-token $${LEVITICUS_TOKEN}" + cargo watch -w validator/src -x "run --bin validator -- --validator-key $${PRIVATE_KEY_VALIDATOR} --rpc-url $${RPC_URL} --pool-id $${WORKER_COMPUTE_POOL_ID} --work-validation-contract $${WORK_VALIDATION_CONTRACT} --leviticus-url $${LEVITICUS_URL} --leviticus-token $${LEVITICUS_TOKEN} 
--s3-credentials $${S3_CREDENTIALS} --bucket-name $${BUCKET_NAME}" watch-orchestrator: set -a; source ${ENV_FILE}; set +a; \ - cargo watch -w orchestrator/src -x "run --bin orchestrator -- -r $$RPC_URL -k $$POOL_OWNER_PRIVATE_KEY -d 0 -p 8090 -i 10 -u http://localhost:8090 --s3-credentials $$S3_CREDENTIALS --compute-pool-id $$WORKER_COMPUTE_POOL_ID" + cargo watch -w orchestrator/src -x "run --bin orchestrator -- -r $$RPC_URL -k $$POOL_OWNER_PRIVATE_KEY -d 0 -p 8090 -i 10 -u http://localhost:8090 --s3-credentials $$S3_CREDENTIALS --compute-pool-id $$WORKER_COMPUTE_POOL_ID --bucket-name $$BUCKET_NAME" build-worker: cargo build --release --bin worker diff --git a/orchestrator/Cargo.toml b/orchestrator/Cargo.toml index e4d6815e..31c4786a 100644 --- a/orchestrator/Cargo.toml +++ b/orchestrator/Cargo.toml @@ -16,6 +16,7 @@ google-cloud-auth = "0.18.0" google-cloud-storage = "0.24.0" hex = "0.4.3" log = "0.4.25" +rand = "0.9.0" redis = "0.28.1" redis-test = "0.8.0" reqwest = "0.12.12" diff --git a/orchestrator/Dockerfile b/orchestrator/Dockerfile index 2f7e5c09..e348970a 100644 --- a/orchestrator/Dockerfile +++ b/orchestrator/Dockerfile @@ -18,6 +18,7 @@ ENV DISCOVERY_URL="http://localhost:8089" ENV ADMIN_API_KEY="admin" ENV DISABLE_EJECTION="false" ENV S3_CREDENTIALS="" +ENV BUCKET_NAME="" RUN echo '#!/bin/sh\n\ exec /usr/local/bin/orchestrator \ @@ -34,6 +35,7 @@ $([ ! -z "$HOST" ] && echo "--host $HOST") \ --admin-api-key "$ADMIN_API_KEY" \ $([ "$DISABLE_EJECTION" = "true" ] && echo "--disable-ejection") \ $([ ! -z "$S3_CREDENTIALS" ] && echo "--s3-credentials $S3_CREDENTIALS") \ +$([ ! 
-z "$BUCKET_NAME" ] && echo "--bucket-name $BUCKET_NAME") \ "$@"' > /entrypoint.sh && \ chmod +x /entrypoint.sh diff --git a/orchestrator/src/api/routes/storage.rs b/orchestrator/src/api/routes/storage.rs index 4fb23ef7..598fd875 100644 --- a/orchestrator/src/api/routes/storage.rs +++ b/orchestrator/src/api/routes/storage.rs @@ -1,9 +1,9 @@ use crate::api::server::AppState; -use crate::utils::google_cloud::generate_upload_signed_url; use actix_web::{ web::{self, post, Data}, HttpResponse, Scope, }; +use shared::utils::google_cloud::{generate_mapping_file, generate_upload_signed_url}; use std::time::Duration; #[derive(serde::Deserialize)] @@ -11,6 +11,7 @@ pub struct RequestUploadRequest { pub file_name: String, pub file_size: u64, pub file_type: String, + pub sha256: String, } async fn request_upload( @@ -20,7 +21,7 @@ async fn request_upload( let file_name = &request_upload.file_name; let file_size = &request_upload.file_size; let file_type = &request_upload.file_type; - println!("request_upload: {} {} {}", file_name, file_size, file_type); + let sha256 = &request_upload.sha256; // Get credentials from app state let credentials = match &app_state.s3_credentials { @@ -33,6 +34,21 @@ async fn request_upload( } }; + if let Err(e) = generate_mapping_file( + app_state.bucket_name.clone().unwrap().as_str(), + credentials, + sha256, + file_name, + ) + .await + { + log::error!("Failed to generate mapping file: {}", e); + return HttpResponse::InternalServerError().json(serde_json::json!({ + "success": false, + "error": format!("Failed to generate mapping file: {}", e) + })); + } + // Generate signed upload URL match generate_upload_signed_url( "protocol-development-bucket", // TODO: Make configurable diff --git a/orchestrator/src/api/server.rs b/orchestrator/src/api/server.rs index d93588fc..c8746eeb 100644 --- a/orchestrator/src/api/server.rs +++ b/orchestrator/src/api/server.rs @@ -19,6 +19,7 @@ pub struct AppState { pub store_context: Arc, pub wallet: Arc, pub 
s3_credentials: Option, + pub bucket_name: Option, } pub async fn start_server( @@ -28,12 +29,14 @@ pub async fn start_server( wallet: Arc, admin_api_key: String, s3_credentials: Option, + bucket_name: Option, ) -> Result<(), Error> { info!("Starting server at http://{}:{}", host, port); let app_state = Data::new(AppState { store_context, wallet, s3_credentials, + bucket_name, }); let node_store = app_state.store_context.node_store.clone(); let node_store_clone = node_store.clone(); diff --git a/orchestrator/src/api/tests/helper.rs b/orchestrator/src/api/tests/helper.rs index 6ccca2d8..44983b7a 100644 --- a/orchestrator/src/api/tests/helper.rs +++ b/orchestrator/src/api/tests/helper.rs @@ -41,6 +41,7 @@ pub async fn create_test_app_state() -> Data { .unwrap(), ), s3_credentials: None, + bucket_name: None, }) } diff --git a/orchestrator/src/main.rs b/orchestrator/src/main.rs index 6d543a3c..16b18b54 100644 --- a/orchestrator/src/main.rs +++ b/orchestrator/src/main.rs @@ -3,7 +3,6 @@ mod discovery; mod models; mod node; mod store; -mod utils; use crate::api::server::start_server; use crate::discovery::monitor::DiscoveryMonitor; use crate::node::invite::NodeInviter; @@ -73,6 +72,10 @@ struct Args { /// S3 credentials #[arg(long)] s3_credentials: Option, + + /// S3 bucket name + #[arg(long)] + bucket_name: Option, } #[tokio::main] @@ -157,7 +160,7 @@ async fn main() -> Result<()> { let server_store_context = store_context.clone(); tokio::select! 
{ - res = start_server("0.0.0.0", port, server_store_context.clone(), server_wallet, args.admin_api_key, args.s3_credentials) => { + res = start_server("0.0.0.0", port, server_store_context.clone(), server_wallet, args.admin_api_key, args.s3_credentials, args.bucket_name) => { if let Err(e) = res { error!("Server error: {}", e); } diff --git a/orchestrator/src/utils/google_cloud.rs b/orchestrator/src/utils/google_cloud.rs deleted file mode 100644 index b1eb0618..00000000 --- a/orchestrator/src/utils/google_cloud.rs +++ /dev/null @@ -1,48 +0,0 @@ -use anyhow::Result; -use base64::{engine::general_purpose, Engine as _}; -use google_cloud_storage::client::google_cloud_auth::credentials::CredentialsFile; -use google_cloud_storage::client::{Client, ClientConfig}; -use google_cloud_storage::sign::{SignedURLMethod, SignedURLOptions}; -use std::time::Duration; - -pub async fn generate_upload_signed_url( - bucket: &str, - object_path: &str, - credentials_base64: &str, - content_type: Option, - expiration: Duration, - max_bytes: Option, // Maximum file size in bytes -) -> Result { - // Decode base64 to JSON string - let credentials_json = general_purpose::STANDARD.decode(credentials_base64)?; - let credentials_str = String::from_utf8(credentials_json)?; - - // Create client config directly from the JSON string - let credentials = CredentialsFile::new_from_str(&credentials_str) - .await - .unwrap(); - let config = ClientConfig::default() - .with_credentials(credentials) - .await - .unwrap(); - let client = Client::new(config); - - // Set options for the signed URL - let mut options = SignedURLOptions { - method: SignedURLMethod::PUT, - expires: expiration, - content_type, - ..Default::default() - }; - - // Set max bytes if specified - if let Some(bytes) = max_bytes { - options.headers = vec![format!("content-length:{}", bytes)]; - } - - // Generate the signed URL - let signed_url = client - .signed_url(bucket, object_path, None, None, options) - .await?; - Ok(signed_url) -} 
diff --git a/shared/Cargo.toml b/shared/Cargo.toml index b1c0d911..ea078ff8 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -26,3 +26,6 @@ dashmap = "6.1.0" anyhow = "1.0.95" nalgebra = "0.33.2" log = "0.4.26" +rand = "0.9.0" +google-cloud-storage = "0.24.0" +base64 = "0.22.1" diff --git a/shared/src/lib.rs b/shared/src/lib.rs index bc518856..b40d5c60 100644 --- a/shared/src/lib.rs +++ b/shared/src/lib.rs @@ -1,3 +1,4 @@ pub mod models; pub mod security; +pub mod utils; pub mod web3; diff --git a/shared/src/utils/google_cloud.rs b/shared/src/utils/google_cloud.rs new file mode 100644 index 00000000..81a5ecc6 --- /dev/null +++ b/shared/src/utils/google_cloud.rs @@ -0,0 +1,149 @@ +use anyhow::Result; +use base64::{engine::general_purpose, Engine as _}; +use google_cloud_storage::client::google_cloud_auth::credentials::CredentialsFile; +use google_cloud_storage::client::{Client, ClientConfig}; +use google_cloud_storage::http::objects::download::Range; +use google_cloud_storage::http::objects::get::GetObjectRequest; +use google_cloud_storage::http::objects::upload::{Media, UploadObjectRequest, UploadType}; +use google_cloud_storage::sign::{SignedURLMethod, SignedURLOptions}; +use log::debug; +use std::time::Duration; + +/// Creates a GCS client from base64-encoded credentials +async fn create_gcs_client(credentials_base64: &str) -> Result { + // Decode base64 to JSON string + let credentials_json = general_purpose::STANDARD.decode(credentials_base64)?; + let credentials_str = String::from_utf8(credentials_json)?; + + // Create client config directly from the JSON string + let credentials = CredentialsFile::new_from_str(&credentials_str) + .await + .map_err(|e| anyhow::anyhow!("Failed to parse credentials: {}", e))?; + + let config = ClientConfig::default() + .with_credentials(credentials) + .await + .map_err(|e| anyhow::anyhow!("Failed to configure client: {}", e))?; + + Ok(Client::new(config)) +} + +pub async fn generate_mapping_file( + bucket: &str, + 
credentials_base64: &str, + sha256: &str, + file_name: &str, +) -> Result { + let client = create_gcs_client(credentials_base64).await?; + let mapping_path = format!("mapping/{}", sha256); // Use sha256 as filename + let upload_type = UploadType::Simple(Media::new(mapping_path.clone())); + + let file_name = file_name.strip_prefix('/').unwrap_or(file_name); // Adjusted to use strip_prefix + let content = file_name.to_string().into_bytes(); + + let uploaded = client + .upload_object( + &UploadObjectRequest { + bucket: bucket.to_string(), + ..Default::default() + }, + content, + &upload_type, + ) + .await; + + debug!("Uploaded mapping file: {:?}", uploaded); + + Ok(mapping_path) // Return the mapping_path instead +} + +pub async fn resolve_mapping_for_sha( + bucket: &str, + credentials_base64: &str, + sha256: &str, +) -> Result { + let client = create_gcs_client(credentials_base64).await?; + let mapping_path = format!("mapping/{}", sha256); + + // Download the mapping file content + let content = client + .download_object( + &GetObjectRequest { + bucket: bucket.to_string(), + object: mapping_path.clone(), + ..Default::default() + }, + &Range::default(), + ) + .await?; + + // Convert bytes to string + let file_name = String::from_utf8(content)?; + + Ok(file_name) +} + +pub async fn generate_upload_signed_url( + bucket: &str, + object_path: &str, + credentials_base64: &str, + content_type: Option, + expiration: Duration, + max_bytes: Option, +) -> Result { + // Ensure object_path does not start with a / + let object_path = object_path.strip_prefix('/').unwrap_or(object_path); // Adjusted to use strip_prefix + + let client = create_gcs_client(credentials_base64).await?; + + // Set options for the signed URL + let mut options = SignedURLOptions { + method: SignedURLMethod::PUT, + expires: expiration, + content_type, + ..Default::default() + }; + + // Set max bytes if specified + if let Some(bytes) = max_bytes { + options.headers = vec![format!("content-length:{}", 
bytes)]; + } + + // Generate the signed URL + let signed_url = client + .signed_url(bucket, object_path, None, None, options) + .await?; + Ok(signed_url) +} + +#[cfg(test)] +mod tests { + use super::*; + use rand::Rng; + + #[tokio::test] + async fn test_generate_mapping_file() { + let bucket_name = std::env::var("BUCKET_NAME").expect("BUCKET_NAME not set"); + let credentials_base64 = std::env::var("S3_CREDENTIALS").expect("S3_CREDENTIALS not set"); + + let random_sha256: String = rand::rng().random_range(0..=u64::MAX).to_string(); + + let mapping_content: String = generate_mapping_file( + &bucket_name, + &credentials_base64, + &random_sha256, + "run_1/file.parquet", + ) + .await + .unwrap(); + println!("mapping_content: {}", mapping_content); + + let original_file_name = + resolve_mapping_for_sha(&bucket_name, &credentials_base64, &random_sha256) + .await + .unwrap(); + + println!("original_file_name: {}", original_file_name); + assert_eq!(original_file_name, "run_1/file.parquet"); + } +} diff --git a/orchestrator/src/utils/mod.rs b/shared/src/utils/mod.rs similarity index 100% rename from orchestrator/src/utils/mod.rs rename to shared/src/utils/mod.rs diff --git a/validator/Dockerfile b/validator/Dockerfile index 9546666c..ecbc47d8 100644 --- a/validator/Dockerfile +++ b/validator/Dockerfile @@ -12,6 +12,10 @@ ENV POOL_ID="" ENV WORK_VALIDATION_INTERVAL="30" ENV LEVITICUS_URL="" ENV LEVITICUS_TOKEN="" +ENV S3_CREDENTIALS="" +ENV BUCKET_NAME="" +ENV S3_CREDENTIALS="" + RUN echo '#!/bin/sh\n\ exec /usr/local/bin/validator \ @@ -23,6 +27,8 @@ exec /usr/local/bin/validator \ --work-validation-interval "$WORK_VALIDATION_INTERVAL" \ --leviticus-url "$LEVITICUS_URL" \ $([ ! -z "$LEVITICUS_TOKEN" ] && echo "--leviticus-token $LEVITICUS_TOKEN") \ +$([ ! -z "$S3_CREDENTIALS" ] && echo "--s3-credentials $S3_CREDENTIALS") \ +$([ ! 
-z "$BUCKET_NAME" ] && echo "--bucket-name $BUCKET_NAME") \ "$@"' > /entrypoint.sh && \ chmod +x /entrypoint.sh diff --git a/validator/src/main.rs b/validator/src/main.rs index 061f63e5..caa5b166 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -4,7 +4,7 @@ use alloy::primitives::utils::Unit; use alloy::primitives::{Address, U256}; use anyhow::{Context, Result}; use clap::Parser; -use log::LevelFilter; +use log::{debug, LevelFilter}; use log::{error, info}; use serde_json::json; use shared::models::api::ApiResponse; @@ -60,6 +60,14 @@ struct Args { /// Note: This value will be multiplied by 10^18 (1 token = 10^18 wei) #[arg(long, default_value = "1000")] validator_penalty: u64, + + /// Temporary: S3 credentials + #[arg(long, default_value = None)] + s3_credentials: Option, + + /// Temporary: S3 bucket name + #[arg(long, default_value = None)] + bucket_name: Option, } fn main() { let runtime = tokio::runtime::Runtime::new().unwrap(); @@ -118,6 +126,8 @@ fn main() { leviticus_url, args.leviticus_token, penalty, + args.s3_credentials, + args.bucket_name, )) } else { error!("Leviticus URL is not provided"); @@ -173,7 +183,7 @@ fn main() { .context("Failed to create signature header")?, ); - info!("Fetching nodes from: {}{}", discovery_url, discovery_route); + debug!("Fetching nodes from: {}{}", discovery_url, discovery_route); let response = reqwest::Client::new() .get(format!("{}{}", discovery_url, discovery_route)) .headers(headers) @@ -208,6 +218,7 @@ fn main() { error!("Error validating nodes: {:#}", e); } + info!("Validation loop completed"); std::thread::sleep(std::time::Duration::from_secs(10)); } } diff --git a/validator/src/validators/hardware.rs b/validator/src/validators/hardware.rs index dfe271f1..5314a336 100644 --- a/validator/src/validators/hardware.rs +++ b/validator/src/validators/hardware.rs @@ -26,7 +26,7 @@ impl<'a> HardwareValidator<'a> { .filter(|node| !node.is_validated) .collect(); - info!("Non validated nodes: {:?}", 
non_validated_nodes); + log::debug!("Non validated nodes: {:?}", non_validated_nodes); for node in non_validated_nodes { let node_address = match node.id.trim_start_matches("0x").parse::
() { @@ -117,7 +117,6 @@ impl<'a> HardwareValidator<'a> { .await?; let response_text = response.text().await?; - println!("Response text: {}", response_text); let parsed_response: ApiResponse = serde_json::from_str(&response_text)?; if !parsed_response.success { diff --git a/validator/src/validators/synthetic_data.rs b/validator/src/validators/synthetic_data.rs index b53a76f8..7ed19125 100644 --- a/validator/src/validators/synthetic_data.rs +++ b/validator/src/validators/synthetic_data.rs @@ -1,10 +1,11 @@ use alloy::primitives::U256; -use anyhow::{Context, Error, Result}; +use anyhow::{Context, Error}; use directories::ProjectDirs; use hex; use log::debug; use log::{error, info}; use serde::{Deserialize, Serialize}; +use shared::utils::google_cloud::resolve_mapping_for_sha; use shared::web3::contracts::implementations::prime_network_contract::PrimeNetworkContract; use std::fs; use std::path::Path; @@ -29,6 +30,28 @@ struct PersistedWorkState { last_validation_timestamp: U256, } +enum ValidationResult { + Accept, + Reject, + Crashed, + Pending, + Unknown, +} + +#[derive(Debug)] +pub enum ProcessWorkKeyError { + /// Error when resolving the original file name for the work key. + FileNameResolutionError(String), + /// Error when triggering remote toploc validation. + ValidationTriggerError(String), + /// Error when polling for remote toploc validation. + ValidationPollingError(String), + /// Error when invalidating work. + InvalidatingWorkError(String), + /// Error when processing work key. 
+ MaxAttemptsReached(String), +} + pub struct SyntheticDataValidator { pool_id: U256, validator: SyntheticDataWorkValidator, @@ -38,6 +61,8 @@ pub struct SyntheticDataValidator { leviticus_url: String, leviticus_token: Option, penalty: U256, + s3_credentials: Option, + bucket_name: Option, } impl Validator for SyntheticDataValidator { @@ -49,6 +74,7 @@ impl Validator for SyntheticDataValidator { } impl SyntheticDataValidator { + #[allow(clippy::too_many_arguments)] pub fn new( state_dir: Option, pool_id_str: String, @@ -57,6 +83,8 @@ impl SyntheticDataValidator { leviticus_url: String, leviticus_token: Option, penalty: U256, + s3_credentials: Option, + bucket_name: Option, ) -> Self { let pool_id = pool_id_str.parse::().expect("Invalid pool ID"); let default_state_dir = get_default_state_dir(); @@ -105,10 +133,12 @@ impl SyntheticDataValidator { leviticus_url, leviticus_token, penalty, + s3_credentials, + bucket_name, } } - fn save_state(&self) -> Result<()> { + fn save_state(&self) -> Result<(), Error> { // Get values without block_on let state = PersistedWorkState { pool_id: self.pool_id, @@ -125,7 +155,7 @@ impl SyntheticDataValidator { Ok(()) } - fn load_state(state_dir: &Path, pool_id: &str) -> Result> { + fn load_state(state_dir: &Path, pool_id: &str) -> Result, Error> { let state_path = state_dir.join(state_filename(pool_id)); if state_path.exists() { let contents = fs::read_to_string(state_path)?; @@ -135,10 +165,10 @@ impl SyntheticDataValidator { Ok(None) } - pub async fn invalidate_work(&self, work_key: &str) -> Result<()> { + pub async fn invalidate_work(&self, work_key: &str) -> Result<(), Error> { let data = hex::decode(work_key) .map_err(|e| Error::msg(format!("Failed to decode hex work key: {}", e)))?; - println!("Invalidating work: {}", work_key); + info!("Invalidating work: {}", work_key); match self .prime_network .invalidate_work(self.pool_id, self.penalty, data) @@ -152,225 +182,269 @@ impl SyntheticDataValidator { } } - pub async fn 
validate_work(&mut self) -> Result<()> { - info!("Validating work for pool ID: {:?}", self.pool_id); - - // Get all work keys for the pool - let work_keys = self - .validator - .get_work_since(self.pool_id, self.last_validation_timestamp) - .await - .context("Failed to get work keys")?; - - info!("Found {} work keys to validate", work_keys.len()); + async fn trigger_remote_toploc_validation( + &self, + file_name: &str, + sha: &str, + ) -> Result<(), Error> { + let validate_url = format!("{}/validate/{}", self.leviticus_url, file_name); + info!( + "Triggering remote toploc validation for {} {}", + file_name, validate_url + ); - // Process each work key - for work_key in work_keys { - info!("Processing work key: {}", work_key); - match self.validator.get_work_info(self.pool_id, &work_key).await { - Ok(work_info) => { - info!( - "Got work info - Provider: {:?}, Node: {:?}, Timestamp: {}", - work_info.provider, work_info.node_id, work_info.timestamp + let client = reqwest::Client::builder() + .default_headers({ + let mut headers = reqwest::header::HeaderMap::new(); + if let Some(token) = &self.leviticus_token { + headers.insert( + reqwest::header::AUTHORIZATION, + reqwest::header::HeaderValue::from_str(&format!("Bearer {}", token)) + .expect("Invalid token"), ); + } + headers + }) + .build() + .expect("Failed to build HTTP client"); - // Start validation by calling validation endpoint with retries - let validate_url = - format!("{}/validate/{}.parquet", self.leviticus_url, work_key); - let mut client = reqwest::Client::builder(); - - // Add auth token if provided - if let Some(token) = &self.leviticus_token { - client = client.default_headers({ - let mut headers = reqwest::header::HeaderMap::new(); - headers.insert( - reqwest::header::AUTHORIZATION, - reqwest::header::HeaderValue::from_str(&format!( - "Bearer {}", - token - )) - .expect("Invalid token"), - ); - headers - }); - } - - let client = client.build().expect("Failed to build HTTP client"); - - let mut 
validate_attempts = 0; - const MAX_VALIDATE_ATTEMPTS: u32 = 3; - - let validation_result = loop { - let body = serde_json::json!({ - "file_sha": work_key - }); - - match client.post(&validate_url).json(&body).send().await { - Ok(_) => { - info!("Started validation for work key: {}", work_key); - break Ok(()); - } - Err(e) => { - validate_attempts += 1; - error!( - "Attempt {} failed to start validation for {}: {}", - validate_attempts, work_key, e - ); - - if validate_attempts >= MAX_VALIDATE_ATTEMPTS { - break Err(e); - } + let body = serde_json::json!({ + "file_sha": sha + }); - // Exponential backoff - tokio::time::sleep(tokio::time::Duration::from_secs( - 2u64.pow(validate_attempts), - )) - .await; - } - } - }; - - match validation_result { - Ok(_) => { - // Poll status endpoint until we get a proper response - let status_url = - format!("{}/status/{}.parquet", self.leviticus_url, work_key); - let mut status_attempts = 0; - const MAX_STATUS_ATTEMPTS: u32 = 5; - - loop { - tokio::time::sleep(tokio::time::Duration::from_secs(2)).await; - - match client.get(&status_url).send().await { - Ok(response) => { - match response.json::().await { - Ok(status_json) => { - match status_json - .get("status") - .and_then(|s| s.as_str()) - { - Some(status) => { - info!( - "Validation status for {}: {}", - work_key, status - ); - - match status { - "accept" => { - info!( - "Work {} was accepted", - work_key - ); - break; - } - "reject" => { - error!( - "Work {} was rejected", - work_key - ); - if let Err(e) = self - .invalidate_work(&work_key) - .await - { - error!("Failed to invalidate work {}: {}", work_key, e); - } else { - info!("Successfully invalidated work {}", work_key); - } - break; - } - "crashed" => { - error!( - "Validation crashed for {}", - work_key - ); - break; - } - "pending" => { - status_attempts += 1; - if status_attempts - >= MAX_STATUS_ATTEMPTS - { - error!("Max status attempts reached for {}", work_key); - break; - } - } - _ => { - status_attempts += 1; - 
error!( - "Unknown status {} for {}", - status, work_key - ); - if status_attempts - >= MAX_STATUS_ATTEMPTS - { - break; - } - } - } - } - None => { - status_attempts += 1; - error!( - "No status field in response for {}", - work_key - ); - if status_attempts >= MAX_STATUS_ATTEMPTS { - error!("Max status attempts reached for {}", work_key); - break; - } - } - } - } - Err(e) => { - status_attempts += 1; - error!("Attempt {} failed to parse status JSON for {}: {}", status_attempts, work_key, e); - - if status_attempts >= MAX_STATUS_ATTEMPTS { - error!( - "Max status attempts reached for {}", - work_key - ); - break; - } - } - } - } - Err(e) => { - status_attempts += 1; - error!( - "Attempt {} failed to get status for {}: {}", - status_attempts, work_key, e - ); - - if status_attempts >= MAX_STATUS_ATTEMPTS { - error!("Max status attempts reached for {}", work_key); - break; - } + match client.post(&validate_url).json(&body).send().await { + Ok(_) => Ok(()), + Err(e) => { + error!( + "Failed to trigger remote toploc validation for {}: {}", + file_name, e + ); + Err(Error::msg(format!( + "Failed to trigger remote toploc validation: {}", + e + ))) + } + } + } + async fn poll_remote_toploc_validation( + &self, + file_name: &str, + ) -> Result { + let url = format!("{}/status/{}", self.leviticus_url, file_name); + let client = reqwest::Client::builder() + .default_headers({ + let mut headers = reqwest::header::HeaderMap::new(); + if let Some(token) = &self.leviticus_token { + headers.insert( + reqwest::header::AUTHORIZATION, + reqwest::header::HeaderValue::from_str(&format!("Bearer {}", token)) + .expect("Invalid token"), + ); + } + headers + }) + .build() + .expect("Failed to build HTTP client"); + + match client.get(&url).send().await { + Ok(response) => { + if response.status() != reqwest::StatusCode::OK { + error!( + "Unexpected status code {} for {}", + response.status(), + file_name + ); + return Err(Error::msg(format!( + "Unexpected status code: {}", + 
response.status() + ))); + } + let status_json: serde_json::Value = response.json().await.map_err(|e| { + error!("Failed to parse JSON response for {}: {}", file_name, e); + Error::msg(format!("Failed to parse JSON response: {}", e)) + })?; + + if status_json.get("status").is_none() { + error!("No status found for {}", file_name); + Err(Error::msg("No status found")) + } else { + match status_json.get("status").and_then(|s| s.as_str()) { + Some(status) => { + info!("Validation status for {}: {}", file_name, status); + + let validation_result = match status { + "accept" => ValidationResult::Accept, + "reject" => { + if let Err(e) = self.invalidate_work(file_name).await { + error!("Failed to invalidate work {}: {}", file_name, e); + } else { + info!("Successfully invalidated work {}", file_name); } + ValidationResult::Reject } - } + "crashed" => ValidationResult::Crashed, + "pending" => ValidationResult::Pending, + _ => ValidationResult::Unknown, + }; + Ok(validation_result) } - Err(_) => { - error!("Failed all validation attempts for {}", work_key); - continue; + None => { + error!("No status found for {}", file_name); + Err(Error::msg("No status found")) } } } - Err(e) => { - error!("Failed to get work info for key {}: {}", work_key, e); + } + Err(e) => { + error!( + "Failed to poll remote toploc validation for {}: {}", + file_name, e + ); + Err(Error::msg(format!( + "Failed to poll remote toploc validation: {}", + e + ))) + } + } + } + + async fn process_workkey(&mut self, work_key: &str) -> Result<(), ProcessWorkKeyError> { + let original_file_name = resolve_mapping_for_sha( + self.bucket_name.clone().unwrap().as_str(), + &self.s3_credentials.clone().unwrap(), + work_key, + ) + .await + .map_err(|e| ProcessWorkKeyError::FileNameResolutionError(e.to_string()))?; + + if original_file_name.is_empty() { + error!( + "Failed to resolve original file name for work key: {}", + work_key + ); + return Err(ProcessWorkKeyError::FileNameResolutionError(format!( + "Failed to 
resolve original file name for work key: {}", + work_key + ))); + } + + let cleaned_file_name = original_file_name + .strip_prefix('/') + .unwrap_or(original_file_name.as_str()); + + // Trigger remote toploc validation + let mut attempts = 0; + const MAX_ATTEMPTS: u32 = 3; + const BACKOFF_FACTOR: u64 = 2; + + while attempts < MAX_ATTEMPTS { + if let Err(e) = self + .trigger_remote_toploc_validation(cleaned_file_name, work_key) + .await + { + attempts += 1; + let backoff_time = BACKOFF_FACTOR.pow(attempts) * 100; // Exponential backoff in milliseconds + error!("Failed to trigger remote toploc validation for {}: {}. Attempt {}/{}. Retrying in {} ms...", cleaned_file_name, e, attempts, MAX_ATTEMPTS, backoff_time); + tokio::time::sleep(std::time::Duration::from_millis(backoff_time)).await; + continue; + } + break; // Success, exit the loop + } + + if attempts == MAX_ATTEMPTS { + return Err(ProcessWorkKeyError::ValidationTriggerError(format!( + "Failed to trigger remote toploc validation for {}", + cleaned_file_name + ))); + } + + let max_attempts = 5; + let mut attempts = 0; + while attempts < max_attempts { + if attempts > 0 { + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + } + let result = self.poll_remote_toploc_validation(cleaned_file_name).await; + if let Err(e) = result { + error!( + "Failed to poll remote toploc validation for {}: {}", + cleaned_file_name, e + ); + if attempts == max_attempts { + return Err(ProcessWorkKeyError::ValidationPollingError(format!( + "Failed to poll remote toploc validation for {}", + cleaned_file_name + ))); + } + attempts += 1; + continue; + } + + let validation_result = result.unwrap(); + match validation_result { + ValidationResult::Accept => { + info!("Validation accepted for {}", cleaned_file_name); + return Ok(()); + } + ValidationResult::Reject => { + info!("Validation rejected for {}", cleaned_file_name); + if let Err(e) = self.invalidate_work(work_key).await { + error!("Failed to invalidate work {}: {}", 
work_key, e); + attempts += 1; + if attempts == max_attempts { + return Err(ProcessWorkKeyError::InvalidatingWorkError(format!( + "Failed to invalidate work {}", + work_key + ))); + } + continue; + } + return Ok(()); + } + _ => { + attempts += 1; continue; } } } + Err(ProcessWorkKeyError::MaxAttemptsReached(format!( + "Failed to poll remote toploc validation for {}", + cleaned_file_name + ))) + } - // Update last validation timestamp to current time - // TODO: We should only set this once we are sure that we have validated all keys - self.last_validation_timestamp = U256::from( - std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .context("Failed to get current timestamp")? - .as_secs(), - ); + pub async fn validate_work(&mut self) -> Result<(), Error> { + debug!("Validating work for pool ID: {:?}", self.pool_id); + + // Get all work keys for the pool + let work_keys = self + .validator + .get_work_since(self.pool_id, self.last_validation_timestamp) + .await + .context("Failed to get work keys")?; + + debug!("Found {} work keys to validate", work_keys.len()); + + let mut completed_all_validations = true; + + // Process each work key + for work_key in work_keys { + info!("Processing work key: {}", work_key); + if let Err(e) = self.process_workkey(&work_key).await { + // TODO: We have an error now - should we actually skip this validation then? + error!("Work Validation error: {:?}", e); + completed_all_validations = false; + } + } + + if completed_all_validations { + self.last_validation_timestamp = U256::from( + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .context("Failed to get current timestamp")? 
+ .as_secs(), + ); + } self.save_state()?; diff --git a/worker/src/docker/taskbridge/file_handler.rs b/worker/src/docker/taskbridge/file_handler.rs index 994603ab..1b1dd9b4 100644 --- a/worker/src/docker/taskbridge/file_handler.rs +++ b/worker/src/docker/taskbridge/file_handler.rs @@ -17,6 +17,7 @@ pub struct RequestUploadRequest { pub file_name: String, pub file_size: u64, pub file_type: String, + pub sha256: String, } /// Handles a file upload request @@ -72,13 +73,10 @@ pub async fn handle_file_upload( // Create upload request let client = Client::new(); let request = RequestUploadRequest { - file_name: if clean_file_name.ends_with(".parquet") { - format!("{}.parquet", file_sha) - } else { - file_sha - }, + file_name: file_name.to_string(), file_size, file_type: "application/json".to_string(), // Assume JSON + sha256: file_sha.clone(), }; // Sign request @@ -110,7 +108,7 @@ pub async fn handle_file_upload( let json = response.json::().await?; if let Some(signed_url) = json["signed_url"].as_str() { - info!("Got signed URL for upload: {}", signed_url); + debug!("Got signed URL for upload: {}", signed_url); // Read file contents let file_contents = tokio::fs::read(&file).await?; From b1042e8732130b7725f1fbbd8ecd2f45c17a2038 Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Wed, 26 Mar 2025 13:29:26 +0100 Subject: [PATCH 76/85] make sure port bindings are passed even to non-gpu dockers --- worker/src/docker/docker_manager.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/worker/src/docker/docker_manager.rs b/worker/src/docker/docker_manager.rs index c5273c0d..510da966 100644 --- a/worker/src/docker/docker_manager.rs +++ b/worker/src/docker/docker_manager.rs @@ -210,6 +210,7 @@ impl DockerManager { Some(HostConfig { extra_hosts: Some(vec!["host.docker.internal:host-gateway".into()]), binds: volume_binds, + port_bindings, ..Default::default() }) }; From d84fcd8450c2d936ff8f88493188fe78d441e9cf Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Wed, 26 
Mar 2025 17:16:39 +0100 Subject: [PATCH 77/85] remove misc race conditions, add session id for row proofs request --- shared/src/models/gpu_challenge.rs | 6 +++ validator/src/validators/hardware.rs | 16 ++---- worker/Cargo.toml | 2 +- worker/src/api/routes/gpu_challenge.rs | 67 +++++++++++++++++++++----- 4 files changed, 66 insertions(+), 25 deletions(-) diff --git a/shared/src/models/gpu_challenge.rs b/shared/src/models/gpu_challenge.rs index a25055da..8aff4ce2 100644 --- a/shared/src/models/gpu_challenge.rs +++ b/shared/src/models/gpu_challenge.rs @@ -13,6 +13,12 @@ pub struct GpuSetABResponse { pub status: String, } +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuSetABRequest { + pub seed: String, + pub n: u64, +} + // Response containing the commitment root #[derive(Debug, Serialize, Deserialize)] pub struct GpuCommitmentResponse { diff --git a/validator/src/validators/hardware.rs b/validator/src/validators/hardware.rs index f325897d..9d8a00b3 100644 --- a/validator/src/validators/hardware.rs +++ b/validator/src/validators/hardware.rs @@ -67,9 +67,9 @@ impl<'a> HardwareValidator<'a> { } pub async fn validate_nodes(&self, nodes: Vec) -> Result<()> { - let non_validated_nodes: Vec = nodes - .into_iter() - .filter(|node| !node.is_validated) + let non_validated_nodes: Vec = nodes + .into_iter() + .filter(|node| !node.is_validated) .collect(); for node in non_validated_nodes { @@ -333,12 +333,8 @@ impl<'a> HardwareValidator<'a> { // Clone the response so we can read the body twice let response_text = response.text().await?; - info!("Worker response body: {}", response_text); - let parsed_response: ApiResponse = serde_json::from_str(&response_text)?; - info!("Parsed worker response: {:?}", parsed_response.data); - // process this json into T struct Ok(parsed_response.data) } @@ -369,7 +365,6 @@ impl<'a> HardwareValidator<'a> { Err(anyhow::anyhow!("Worker request failed: {}", error_text)) } else { let response_text = response.text().await?; - info!("Worker 
response body: {}", response_text); Ok(response_text) } } @@ -447,8 +442,6 @@ impl<'a> HardwareValidator<'a> { } }; - info!("Worker response: {:?}", response); - if response.status != "initializing" { return Err(anyhow::anyhow!( "Failed to start GPU challenge on worker node" @@ -559,7 +552,8 @@ impl<'a> HardwareValidator<'a> { // STEP 7: Get row proofs from worker let row_proofs_route = "/gpu-challenge/compute-row-proofs"; - let row_proofs_payload = GpuRowProofsRequest { + let row_proofs_payload = GpuChallengeWorkerComputeRowProofs { + session_id: init_data.session_id.clone(), row_idxs: cr_result.spot_rows, }; diff --git a/worker/Cargo.toml b/worker/Cargo.toml index 0e071d32..8bb13c0d 100644 --- a/worker/Cargo.toml +++ b/worker/Cargo.toml @@ -26,7 +26,7 @@ futures-util = "0.3" alloy = { version = "0.9.2", features = ["full"] } url = "2.5.4" serde_json = "1.0.135" -reqwest = "0.12.12" +reqwest = {version = "0.12.12", features = ["json"] } hex = "0.4.3" console = "0.15.10" indicatif = "0.17.9" diff --git a/worker/src/api/routes/gpu_challenge.rs b/worker/src/api/routes/gpu_challenge.rs index 0fac9c3d..8dc49ef7 100644 --- a/worker/src/api/routes/gpu_challenge.rs +++ b/worker/src/api/routes/gpu_challenge.rs @@ -152,7 +152,7 @@ pub async fn start_task_via_service(app_state: Data) { .state .set_current_task(Some(task_clone)) .await; - // sleep for a minute + // sleep for a bit tokio::time::sleep(tokio::time::Duration::from_secs(15)).await; // Spawn a background task to wait for the container to start tokio::spawn(async move { @@ -187,8 +187,20 @@ pub async fn start_task_via_manager(app_state: Data) -> anyhow::Result // before trying to start a new container, check that there isn't a stale one already running match manager.get_container_details(PROVER_CONTAINER_ID).await { Ok(container_details) => { - manager.remove_container(&container_details.id).await?; - info!("Stopped stale GPU challenge container."); + match manager.remove_container(&container_details.id).await { + 
Ok(_) => { + info!("Stopped stale GPU challenge container."); + // sleep for 5 seconds to prevent race condition in docker state + tokio::time::sleep(tokio::time::Duration::from_secs(5)).await; + } + Err(e) => { + info!("Failed to stop stale GPU challenge container: {:?}", e); + return Err(anyhow::anyhow!( + "Failed to stop stale GPU challenge container: {:?}", + e + )); + } + } } Err(_) => { info!("No stale containers, we're safe to proceed."); @@ -197,7 +209,7 @@ pub async fn start_task_via_manager(app_state: Data) -> anyhow::Result let image = IMAGE_NAME.to_string(); let container_task_id = PROVER_CONTAINER_ID.to_string(); - let has_gpu = true; + let has_gpu = false; let mut env_vars: HashMap = HashMap::new(); env_vars.insert("PORT".to_string(), "12121".to_string()); let cmd = vec!["/usr/bin/python3".to_string(), "prover.py".to_string()]; @@ -283,14 +295,42 @@ async fn prover_send( endpoint: &str, payload: Option, ) -> anyhow::Result { - let client = reqwest::Client::new(); - let mut builder = client.post(format!("http://localhost:20000{}", endpoint)); + let prover_url = format!("http://{}:{}{}", "localhost", 20000, endpoint); + let mut builder = reqwest::Client::builder().http1_only().build().unwrap().post(&prover_url); if let Some(json) = payload { builder = builder.json(&json); } - let response = builder.send().await?; + info!("Sending request to prover ... 
(prover_send)"); + let response = match builder.send().await { + Ok(response) => response, + Err(e) => { + return Err(anyhow::anyhow!("Failed to send prover request: {:?}", e)); + } + }; + + if !response.status().is_success() { + let error_text = response.text().await?; + return Err(anyhow::anyhow!("Prover request failed: {}", error_text)); + } + + response.json::().await.map_err(anyhow::Error::from) +} + +async fn prover_get( + endpoint: &str, +) -> anyhow::Result { + let prover_url = format!("http://{}:{}{}", "localhost", 20000, endpoint); + let builder = reqwest::Client::builder().http1_only().build().unwrap().get(&prover_url); + + info!("Sending request to prover ... (prover_get)"); + let response = match builder.send().await { + Ok(response) => response, + Err(e) => { + return Err(anyhow::anyhow!("Failed to send prover request: {:?}", e)); + } + }; if !response.status().is_success() { let error_text = response.text().await?; @@ -348,6 +388,9 @@ pub async fn init_container( if container_details.status.unwrap() == ContainerStateStatusEnum::RUNNING { + info!("GPU challenge container is now running."); + // sleep for 10 seconds to allow the prover service to become ready + tokio::time::sleep(tokio::time::Duration::from_secs(10)).await; let mut state = CURRENT_CHALLENGE.lock().await; state.set_status("ready"); break; @@ -376,7 +419,7 @@ pub async fn init_container( } } - info!("Started GPU challenge container."); + info!("Sent start command to docker for GPU challenge container."); // Return success with the session ID HttpResponse::Ok().json(ApiResponse::new( @@ -389,11 +432,9 @@ pub async fn init_container( } // Get challenge status -pub async fn get_status(req: HttpRequest) -> HttpResponse { - info!("Received status request: {:?}", req); - +pub async fn get_status(_req: HttpRequest) -> HttpResponse { let state = CURRENT_CHALLENGE.lock().await; - info!("Current session ID: {:?}", state.get_session_id()); + info!("Current session ID, status: {:?}, {:?}", 
state.get_session_id(), state.get_status()); if let Some(status) = &state.get_status() { let response = GpuChallengeStatus { @@ -437,7 +478,7 @@ pub async fn compute_commitment( { Ok(_) => { // Get commitment from prover - match prover_send::("/getCommitment", None).await { + match prover_get::("/getCommitment").await { Ok(commitment_data) => { let commitment_root = commitment_data.commitment_root; From fc9e3278c68f252964cc486b0dd014aa9ece74d7 Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Wed, 26 Mar 2025 17:24:01 +0100 Subject: [PATCH 78/85] fmt --- validator/src/validators/hardware.rs | 6 +++--- worker/src/api/routes/gpu_challenge.rs | 22 ++++++++++++++++------ 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/validator/src/validators/hardware.rs b/validator/src/validators/hardware.rs index 779ef5cb..0ec6e110 100644 --- a/validator/src/validators/hardware.rs +++ b/validator/src/validators/hardware.rs @@ -67,9 +67,9 @@ impl<'a> HardwareValidator<'a> { } pub async fn validate_nodes(&self, nodes: Vec) -> Result<()> { - let non_validated_nodes: Vec = nodes - .into_iter() - .filter(|node| !node.is_validated) + let non_validated_nodes: Vec = nodes + .into_iter() + .filter(|node| !node.is_validated) .collect(); log::debug!("Non validated nodes: {:?}", non_validated_nodes); diff --git a/worker/src/api/routes/gpu_challenge.rs b/worker/src/api/routes/gpu_challenge.rs index 8dc49ef7..ce723a40 100644 --- a/worker/src/api/routes/gpu_challenge.rs +++ b/worker/src/api/routes/gpu_challenge.rs @@ -296,7 +296,11 @@ async fn prover_send( payload: Option, ) -> anyhow::Result { let prover_url = format!("http://{}:{}{}", "localhost", 20000, endpoint); - let mut builder = reqwest::Client::builder().http1_only().build().unwrap().post(&prover_url); + let mut builder = reqwest::Client::builder() + .http1_only() + .build() + .unwrap() + .post(&prover_url); if let Some(json) = payload { builder = builder.json(&json); @@ -318,11 +322,13 @@ async fn prover_send( 
response.json::().await.map_err(anyhow::Error::from) } -async fn prover_get( - endpoint: &str, -) -> anyhow::Result { +async fn prover_get(endpoint: &str) -> anyhow::Result { let prover_url = format!("http://{}:{}{}", "localhost", 20000, endpoint); - let builder = reqwest::Client::builder().http1_only().build().unwrap().get(&prover_url); + let builder = reqwest::Client::builder() + .http1_only() + .build() + .unwrap() + .get(&prover_url); info!("Sending request to prover ... (prover_get)"); let response = match builder.send().await { @@ -434,7 +440,11 @@ pub async fn init_container( // Get challenge status pub async fn get_status(_req: HttpRequest) -> HttpResponse { let state = CURRENT_CHALLENGE.lock().await; - info!("Current session ID, status: {:?}, {:?}", state.get_session_id(), state.get_status()); + info!( + "Current session ID, status: {:?}, {:?}", + state.get_session_id(), + state.get_status() + ); if let Some(status) = &state.get_status() { let response = GpuChallengeStatus { From 1f210eb8d3051687610dfc3debea77a09e9ebb3d Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Wed, 26 Mar 2025 17:47:26 +0100 Subject: [PATCH 79/85] stop container once challenge has completed --- worker/src/api/routes/gpu_challenge.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/worker/src/api/routes/gpu_challenge.rs b/worker/src/api/routes/gpu_challenge.rs index ce723a40..70041ca0 100644 --- a/worker/src/api/routes/gpu_challenge.rs +++ b/worker/src/api/routes/gpu_challenge.rs @@ -575,7 +575,7 @@ pub async fn compute_cr( pub async fn compute_row_proofs( challenge_req: web::Json, - // app_state: Data, + app_state: Data, ) -> HttpResponse { let session_id = &challenge_req.session_id; let row_indices = challenge_req.row_idxs.clone(); @@ -591,7 +591,7 @@ pub async fn compute_row_proofs( } } - match prover_send::( + let response = match prover_send::( "/getRowProofs", Some(json!({ "row_idxs": row_indices @@ -607,6 +607,7 @@ pub async fn 
compute_row_proofs( if let Some(result) = state.mut_result() { result.row_proofs_json = proofs_json.clone(); } + state.set_status("completed"); HttpResponse::Ok().json(ApiResponse::new(true, proof_data)) } @@ -615,7 +616,12 @@ pub async fn compute_row_proofs( state.set_error(e.to_string()); HttpResponse::InternalServerError().json(ApiResponse::new(false, e.to_string())) } - } + }; + + // we are finished and no longer need the container + stop_task_via_manager(app_state).await.unwrap(); + + response } // Register the routes From a908dbbf1a061b9148bb434e0c5c2f2eee815c82 Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Fri, 28 Mar 2025 11:52:12 +0100 Subject: [PATCH 80/85] address review comments --- validator/src/validators/hardware.rs | 41 +++++- worker/src/api/routes/gpu_challenge.rs | 196 ++++++++++--------------- worker/src/api/server.rs | 3 +- worker/src/cli/command.rs | 1 + worker/src/docker/service.rs | 2 +- 5 files changed, 116 insertions(+), 127 deletions(-) diff --git a/validator/src/validators/hardware.rs b/validator/src/validators/hardware.rs index 0ec6e110..fe12f978 100644 --- a/validator/src/validators/hardware.rs +++ b/validator/src/validators/hardware.rs @@ -17,9 +17,10 @@ use tokio::sync::Mutex; const VALIDATION_TIMEOUT: u64 = 600; const ERROR_TIME_BUFFER: u64 = 30; -const MATRIX_CHALLENGE_SIZE: u64 = 8192; +const MATRIX_CHALLENGE_SIZE_DEFAULT: u64 = 8192; const MAX_CHALLENGE_ATTEMPTS: u64 = 3; const H100_TIME_CUTOFF: u64 = 150; +const MINIMUM_MEMORY: u32 = 80000; #[derive(Debug, Clone, PartialEq)] pub enum NodeChallengeStatus { @@ -156,8 +157,36 @@ impl<'a> HardwareValidator<'a> { } } + // Check if debug mode is enabled via environment variable + let debug_mode = env::var("GPU_VALIDATOR_DEBUG") + .unwrap_or_else(|_| "false".to_string()) + .to_lowercase() + == "true"; + + if debug_mode { + info!("Running GPU challenge in debug mode for node {}", node.id); + } + + let matrix_size = node + .compute_specs + .as_ref() + .and_then(|specs| 
specs.gpu.as_ref()) + .and_then(|gpu| gpu.memory_mb) + .and_then(|memory| { + if memory >= MINIMUM_MEMORY { + Some(81920) + } else { + None + } + }); + + if matrix_size.is_none() && !debug_mode { + error!("Node {} does not meet minimum requirements", node.id); + return Err(anyhow::anyhow!("Node does not meet minimum requirements")); + } + // Then perform the GPU challenge - let gpu_challenge_result = self.gpu_challenge_node(&node).await; + let gpu_challenge_result = self.gpu_challenge_node(&node, matrix_size).await; if gpu_challenge_result.is_err() { error!( "Node {} failed GPU challenge: {:?}", @@ -370,7 +399,11 @@ impl<'a> HardwareValidator<'a> { } } - async fn gpu_challenge_node(&self, node: &DiscoveryNode) -> Result<(), Error> { + async fn gpu_challenge_node( + &self, + node: &DiscoveryNode, + matrix_size: Option, + ) -> Result<(), Error> { // check if node is already running a challenge session { // Create a separate scope for the lock @@ -401,7 +434,7 @@ impl<'a> HardwareValidator<'a> { // STEP 1: Initialize a new challenge with the verifier service let init_request = GpuChallengeInitRequest { - n: MATRIX_CHALLENGE_SIZE, // Default matrix size + n: matrix_size.unwrap_or(MATRIX_CHALLENGE_SIZE_DEFAULT), // Default matrix size }; let init_data: GpuChallengeResponse = self diff --git a/worker/src/api/routes/gpu_challenge.rs b/worker/src/api/routes/gpu_challenge.rs index 70041ca0..ed4de047 100644 --- a/worker/src/api/routes/gpu_challenge.rs +++ b/worker/src/api/routes/gpu_challenge.rs @@ -11,14 +11,14 @@ use serde::{Deserialize, Serialize}; use serde_json::{self, json}; use shared::models::api::ApiResponse; use shared::models::gpu_challenge::*; -use shared::models::task::{Task, TaskRequest}; use std::collections::HashMap; use std::sync::Arc; -use tokio::sync::Mutex; +use tokio::select; +use tokio::sync::RwLock; +use tokio_util::sync::CancellationToken; const PROVER_CONTAINER_ID: &str = "prime-core-gpu-challenge"; -const TASK_NAME: &str = "gpu-challenge"; -const 
IMAGE_NAME: &str = "matrix-prover"; +const IMAGE_NAME: &str = "primeintellect/gpu-challenge:latest"; // Store raw responses from the prover as strings #[derive(Debug, Serialize, Deserialize, Clone)] @@ -107,78 +107,9 @@ impl ChallengeStateStorage { // Thread-safe state storage lazy_static::lazy_static! { - static ref CURRENT_CHALLENGE: Arc> = Arc::new(Mutex::new(ChallengeStateStorage::new())); -} - -// allow unused -#[allow(dead_code)] -pub async fn start_task_via_service(app_state: Data) { - // Set environment variables for container - let mut env_vars = HashMap::new(); - env_vars.insert("PORT".to_string(), "12121".to_string()); - - // Launch Docker container with GPU support - let task: Task = TaskRequest { - image: IMAGE_NAME.to_string(), - name: TASK_NAME.to_string(), - env_vars: Some(env_vars), - command: Some("/usr/bin/python3".to_string()), - args: Some(vec!["prover.py".to_string()]), - ports: Some(vec!["12121/tcp".to_string()]), - } - .into(); - let task_clone = task.clone(); - let docker_service = app_state.docker_service.clone(); - - // sleep for 2 minutes - tokio::time::sleep(tokio::time::Duration::from_secs(120)).await; - - while !docker_service.state.get_is_running().await { - tokio::time::sleep(tokio::time::Duration::from_secs(5)).await; - info!("Waiting for Docker service to start"); - } - - // sleep - tokio::time::sleep(tokio::time::Duration::from_secs(10)).await; - loop { - // don't run it while there's a pending task - if docker_service.state.get_current_task().await.is_none() { - break; - } - tokio::time::sleep(tokio::time::Duration::from_secs(5)).await; - info!("Waiting for previous task to finish"); - } - docker_service - .state - .set_current_task(Some(task_clone)) - .await; - // sleep for a bit - tokio::time::sleep(tokio::time::Duration::from_secs(15)).await; - // Spawn a background task to wait for the container to start - tokio::spawn(async move { - let mut retries = 0; - while retries < 30 { - if let Some(current_task) = 
docker_service.state.get_current_task().await { - if current_task.state == shared::models::task::TaskState::RUNNING - && current_task.name == TASK_NAME - { - let mut state = CURRENT_CHALLENGE.lock().await; - state.set_status("ready"); - break; - } - info!("Current task: {:?}", current_task); - } - tokio::time::sleep(tokio::time::Duration::from_secs(10)).await; - retries += 1; - } - if retries >= 30 { - let mut state = CURRENT_CHALLENGE.lock().await; - state.wipe(); - info!("Failed to start GPU challenge container"); - // shut down docker - docker_service.state.set_current_task(None).await; - } - }); + static ref CURRENT_CHALLENGE: Arc> = + Arc::new(RwLock::new(ChallengeStateStorage::new())); + static ref CANCEL_TOKEN: Arc> = Arc::new(RwLock::new(CancellationToken::new())); } pub async fn start_task_via_manager(app_state: Data) -> anyhow::Result<()> { @@ -207,9 +138,10 @@ pub async fn start_task_via_manager(app_state: Data) -> anyhow::Result } } + let has_gpu = app_state.docker_service.has_gpu; + let image = IMAGE_NAME.to_string(); let container_task_id = PROVER_CONTAINER_ID.to_string(); - let has_gpu = false; let mut env_vars: HashMap = HashMap::new(); env_vars.insert("PORT".to_string(), "12121".to_string()); let cmd = vec!["/usr/bin/python3".to_string(), "prover.py".to_string()]; @@ -247,15 +179,15 @@ pub async fn start_task_via_manager(app_state: Data) -> anyhow::Result { Ok(container_id) => { info!("Started GPU challenge container."); - let mut state: tokio::sync::MutexGuard<'_, ChallengeStateStorage> = - CURRENT_CHALLENGE.lock().await; + let mut state: tokio::sync::RwLockWriteGuard<'_, ChallengeStateStorage> = + CURRENT_CHALLENGE.write().await; state.set_status("initializing"); state.set_container_id(&container_id); Ok(()) } Err(e) => { info!("Failed to start GPU challenge container: {:?}", e); - let mut state = CURRENT_CHALLENGE.lock().await; + let mut state = CURRENT_CHALLENGE.write().await; state.wipe(); Err(anyhow::anyhow!("Failed to start GPU challenge 
container")) } @@ -265,7 +197,7 @@ pub async fn start_task_via_manager(app_state: Data) -> anyhow::Result pub async fn stop_task_via_manager(app_state: Data) -> anyhow::Result<()> { let manager = app_state.docker_service.docker_manager.clone(); - let state = CURRENT_CHALLENGE.lock().await; + let state = CURRENT_CHALLENGE.read().await; match state.get_container_id() { Some(container_is) => { info!("Stopping GPU challenge container."); @@ -285,7 +217,7 @@ pub async fn get_container_status( app_state: Data, ) -> Result { let manager = app_state.docker_service.docker_manager.clone(); - let state = CURRENT_CHALLENGE.lock().await; + let state = CURRENT_CHALLENGE.read().await; return manager .get_container_details(&state.get_container_id().unwrap()) .await; @@ -360,7 +292,7 @@ pub async fn init_container( // if state is anything other than empty, skip { - let mut state = CURRENT_CHALLENGE.lock().await; + let mut state = CURRENT_CHALLENGE.write().await; info!("Challenge state: {:?}", state.state); if state.get_status().is_some() { return HttpResponse::Ok().json(ApiResponse::new( @@ -385,37 +317,56 @@ pub async fn init_container( match start_task_via_manager(app_state.clone()).await { Ok(_) => { // Spawn a background task to wait for the container to start - tokio::spawn(async move { - let mut retries = 0; - while retries < 30 { - { - match get_container_status(app_state.clone()).await { - Ok(container_details) => { - if container_details.status.unwrap() - == ContainerStateStatusEnum::RUNNING - { - info!("GPU challenge container is now running."); - // sleep for 10 seconds to allow the prover service to become ready - tokio::time::sleep(tokio::time::Duration::from_secs(10)).await; - let mut state = CURRENT_CHALLENGE.lock().await; - state.set_status("ready"); + tokio::spawn({ + let local_app_state = app_state.clone(); + let local_token = CANCEL_TOKEN.read().await.clone(); + + async move { + let mut retries = 0; + loop { + select! 
{ + // If we get cancelled, do cleanup and bail out: + _ = local_token.cancelled() => { + info!("Container startup task cancelled."); + let mut state = CURRENT_CHALLENGE.write().await; + // Possibly stop the container, reset state, etc. + state.wipe(); + break; + } + + // Otherwise, proceed with the status check after sleeping. + _ = tokio::time::sleep(std::time::Duration::from_secs(10)) => { + retries += 1; + match get_container_status(local_app_state.clone()).await { + Ok(container_details) => { + if container_details.status.unwrap() + == ContainerStateStatusEnum::RUNNING + { + info!("GPU challenge container is now running."); + // sleep for 10 seconds to allow the prover service to become ready + tokio::time::sleep(std::time::Duration::from_secs(10)).await; + let mut state = CURRENT_CHALLENGE.write().await; + state.set_status("ready"); + break; + } + } + Err(e) => { + info!("Failed to get container status: {:?}", e); + } + } + if retries >= 30 { + info!("Failed to start GPU challenge container"); + // remove the container + if let Err(e) = stop_task_via_manager(local_app_state.clone()).await { + info!("Failed to remove container via manager: {:?}", e); + } + let mut state = CURRENT_CHALLENGE.write().await; + state.wipe(); break; } } - Err(e) => { - info!("Failed to get container status: {:?}", e); - } } } - tokio::time::sleep(tokio::time::Duration::from_secs(10)).await; - retries += 1; - } - if retries >= 30 { - let mut state = CURRENT_CHALLENGE.lock().await; - info!("Failed to start GPU challenge container"); - // shut down docker - stop_task_via_manager(app_state.clone()).await.unwrap(); - state.wipe(); } }); } @@ -439,7 +390,7 @@ pub async fn init_container( // Get challenge status pub async fn get_status(_req: HttpRequest) -> HttpResponse { - let state = CURRENT_CHALLENGE.lock().await; + let state = CURRENT_CHALLENGE.read().await; info!( "Current session ID, status: {:?}, {:?}", state.get_session_id(), @@ -467,7 +418,7 @@ pub async fn compute_commitment( { 
// check that session ID matches - let state = CURRENT_CHALLENGE.lock().await; + let state = CURRENT_CHALLENGE.read().await; if session_id != state.get_session_id().as_deref().unwrap_or("") { return HttpResponse::NotFound().json(ApiResponse::new( false, @@ -493,7 +444,7 @@ pub async fn compute_commitment( let commitment_root = commitment_data.commitment_root; // Store result - let mut state = CURRENT_CHALLENGE.lock().await; + let mut state = CURRENT_CHALLENGE.write().await; state.set_result(GpuChallengeResult { session_id: session_id.clone(), commitment_root: commitment_root.clone(), @@ -509,14 +460,14 @@ pub async fn compute_commitment( )) } Err(e) => { - let mut state = CURRENT_CHALLENGE.lock().await; + let mut state = CURRENT_CHALLENGE.write().await; state.set_error(e.to_string()); HttpResponse::InternalServerError().json(ApiResponse::new(false, e.to_string())) } } } Err(e) => { - let mut state = CURRENT_CHALLENGE.lock().await; + let mut state = CURRENT_CHALLENGE.write().await; state.set_error(e.to_string()); HttpResponse::InternalServerError().json(ApiResponse::new(false, e.to_string())) } @@ -532,7 +483,7 @@ pub async fn compute_cr( { // check that session ID matches - let state = CURRENT_CHALLENGE.lock().await; + let state = CURRENT_CHALLENGE.read().await; if session_id != state.get_session_id().as_deref().unwrap_or("") { return HttpResponse::NotFound().json(ApiResponse::new( false, @@ -553,7 +504,7 @@ pub async fn compute_cr( let cr = cr_data.Cr; // Store result - let mut state = CURRENT_CHALLENGE.lock().await; + let mut state = CURRENT_CHALLENGE.write().await; if let Some(result) = state.mut_result() { result.cr_result_json = cr.clone(); } @@ -566,7 +517,7 @@ pub async fn compute_cr( )) } Err(e) => { - let mut state = CURRENT_CHALLENGE.lock().await; + let mut state = CURRENT_CHALLENGE.write().await; state.set_error(e.to_string()); HttpResponse::InternalServerError().json(ApiResponse::new(false, e.to_string())) } @@ -582,7 +533,7 @@ pub async fn 
compute_row_proofs( { // check that session ID matches - let state = CURRENT_CHALLENGE.lock().await; + let state = CURRENT_CHALLENGE.read().await; if session_id != state.get_session_id().as_deref().unwrap_or("") { return HttpResponse::NotFound().json(ApiResponse::new( false, @@ -603,7 +554,7 @@ pub async fn compute_row_proofs( let proofs_json = serde_json::to_string(&proof_data).unwrap_or_default(); // Store result - let mut state = CURRENT_CHALLENGE.lock().await; + let mut state = CURRENT_CHALLENGE.write().await; if let Some(result) = state.mut_result() { result.row_proofs_json = proofs_json.clone(); } @@ -612,7 +563,7 @@ pub async fn compute_row_proofs( HttpResponse::Ok().json(ApiResponse::new(true, proof_data)) } Err(e) => { - let mut state = CURRENT_CHALLENGE.lock().await; + let mut state = CURRENT_CHALLENGE.write().await; state.set_error(e.to_string()); HttpResponse::InternalServerError().json(ApiResponse::new(false, e.to_string())) } @@ -625,7 +576,10 @@ pub async fn compute_row_proofs( } // Register the routes -pub fn gpu_challenge_routes() -> Scope { +pub fn gpu_challenge_routes(cancellation_token: CancellationToken) -> Scope { + // Update the cancellation token without async/await + let mut token = CANCEL_TOKEN.blocking_write(); + *token = cancellation_token.clone(); web::scope("/gpu-challenge") .route("/init-container", post().to(init_container)) .route("/status", get().to(get_status)) diff --git a/worker/src/api/server.rs b/worker/src/api/server.rs index fefe46fd..80116bf7 100644 --- a/worker/src/api/server.rs +++ b/worker/src/api/server.rs @@ -24,6 +24,7 @@ pub struct AppState { #[allow(clippy::too_many_arguments)] pub async fn start_server( + cancellation_token: tokio_util::sync::CancellationToken, host: &str, port: u16, contracts: Arc, @@ -55,7 +56,7 @@ pub async fn start_server( .service(invite_routes()) .service(task_routes()) .service(challenge_routes()) - .service(gpu_challenge_routes()) + .service(gpu_challenge_routes(cancellation_token.clone())) 
}) .bind((host, port))? .run() diff --git a/worker/src/cli/command.rs b/worker/src/cli/command.rs index ec7383cf..72d2c90b 100644 --- a/worker/src/cli/command.rs +++ b/worker/src/cli/command.rs @@ -531,6 +531,7 @@ pub async fn execute_command( } start_server( + cancellation_token.clone(), "0.0.0.0", *port, contracts.clone(), diff --git a/worker/src/docker/service.rs b/worker/src/docker/service.rs index fe249579..8dfbdeaf 100644 --- a/worker/src/docker/service.rs +++ b/worker/src/docker/service.rs @@ -18,7 +18,7 @@ pub struct DockerService { pub docker_manager: Arc, cancellation_token: CancellationToken, pub state: Arc, - has_gpu: bool, + pub has_gpu: bool, system_memory_mb: Option, task_bridge_socket_path: String, } From dc9d46b7a40c3c95e3113d5cb9d584f15a6ac5a2 Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Fri, 28 Mar 2025 12:02:00 +0100 Subject: [PATCH 81/85] fix panic --- worker/src/api/routes/gpu_challenge.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/worker/src/api/routes/gpu_challenge.rs b/worker/src/api/routes/gpu_challenge.rs index ed4de047..b077f4bc 100644 --- a/worker/src/api/routes/gpu_challenge.rs +++ b/worker/src/api/routes/gpu_challenge.rs @@ -577,9 +577,12 @@ pub async fn compute_row_proofs( // Register the routes pub fn gpu_challenge_routes(cancellation_token: CancellationToken) -> Scope { - // Update the cancellation token without async/await - let mut token = CANCEL_TOKEN.blocking_write(); - *token = cancellation_token.clone(); + // Update the cancellation token + tokio::spawn(async move { + let mut token = CANCEL_TOKEN.write().await; + *token = cancellation_token; + }); + web::scope("/gpu-challenge") .route("/init-container", post().to(init_container)) .route("/status", get().to(get_status)) From cc60c5f4284d0dc32e7af14936c59f1a6d603d60 Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Fri, 28 Mar 2025 15:17:30 +0100 Subject: [PATCH 82/85] make requirements env parametrizable --- 
validator/src/validators/hardware.rs | 35 ++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 5 deletions(-) diff --git a/validator/src/validators/hardware.rs b/validator/src/validators/hardware.rs index fe12f978..ff796e9a 100644 --- a/validator/src/validators/hardware.rs +++ b/validator/src/validators/hardware.rs @@ -19,8 +19,6 @@ const VALIDATION_TIMEOUT: u64 = 600; const ERROR_TIME_BUFFER: u64 = 30; const MATRIX_CHALLENGE_SIZE_DEFAULT: u64 = 8192; const MAX_CHALLENGE_ATTEMPTS: u64 = 3; -const H100_TIME_CUTOFF: u64 = 150; -const MINIMUM_MEMORY: u32 = 80000; #[derive(Debug, Clone, PartialEq)] pub enum NodeChallengeStatus { @@ -47,23 +45,50 @@ fn get_time_as_secs() -> u64 { .as_secs() } +pub struct HardwareReqs { + pub memory: u32, + pub count: u32, + pub benchmark: u64, +} + pub struct HardwareValidator<'a> { wallet: &'a Wallet, contracts: Arc, verifier_service_url: String, node_sessions: Arc>>, + reqs: HardwareReqs, } impl<'a> HardwareValidator<'a> { pub fn new(wallet: &'a Wallet, contracts: Arc) -> Self { - let verifier_url = env::var("VERIFIER_SERVICE_URL") + let verifier_url = env::var("GPU_VERIFIER_SERVICE_URL") .unwrap_or_else(|_| "http://localhost:14141".to_string()); + let benchmark: u64 = std::env::var("GPU_VALIDATOR_BENCHMARK_TARGET") + .ok() + .and_then(|v| v.parse().ok()) + .unwrap_or(150); + + let memory: u32 = std::env::var("GPU_VALIDATOR_MIN_VRAM_MB") + .ok() + .and_then(|v| v.parse().ok()) + .unwrap_or(80000); + + let count: u32 = std::env::var("GPU_VALIDATOR_MIN_GPUS") + .ok() + .and_then(|v| v.parse().ok()) + .unwrap_or(1); + Self { wallet, contracts, verifier_service_url: verifier_url, node_sessions: Arc::new(Mutex::new(std::collections::HashMap::new())), + reqs: HardwareReqs { + memory, + count, + benchmark, + }, } } @@ -173,7 +198,7 @@ impl<'a> HardwareValidator<'a> { .and_then(|specs| specs.gpu.as_ref()) .and_then(|gpu| gpu.memory_mb) .and_then(|memory| { - if memory >= MINIMUM_MEMORY { + if memory >= self.reqs.memory { Some(81920) 
} else { None @@ -201,7 +226,7 @@ impl<'a> HardwareValidator<'a> { } else { let mut sessions = self.node_sessions.lock().await; let session = sessions.get_mut(&node.id).unwrap(); - if session.commitment_time != 0 && session.commitment_time < H100_TIME_CUTOFF { + if session.commitment_time != 0 && session.commitment_time < self.reqs.benchmark { info!( "Node {} is validated as having H100 level performance", node.id From c7f57439fa8aa2615a5e366540801a17ad3e7dc3 Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Fri, 28 Mar 2025 15:41:36 +0100 Subject: [PATCH 83/85] make challenge autoscale to memory requirement --- .env.example | 8 ++++++++ validator/src/validators/hardware.rs | 18 ++++++++++++++---- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/.env.example b/.env.example index 2cc50c15..c9fcc054 100644 --- a/.env.example +++ b/.env.example @@ -31,6 +31,14 @@ DOMAIN_REGISTRY_ADDRESS= STAKE_MANAGER_ADDRESS= COMPUTE_POOL_ADDRESS= +# GPU validator config +GPU_VERIFIER_SERVICE_URL= +GPU_VALIDATOR_DEBUG= +GPU_VALIDATOR_BENCHMARK_TARGET= +GPU_VALIDATOR_MIN_VRAM_MB= +GPU_VALIDATOR_MIN_GPUS= + + # Optional WORK_VALIDATION_CONTRACT= LEVITICUS_URL= diff --git a/validator/src/validators/hardware.rs b/validator/src/validators/hardware.rs index ff796e9a..ef23c3f6 100644 --- a/validator/src/validators/hardware.rs +++ b/validator/src/validators/hardware.rs @@ -196,10 +196,20 @@ impl<'a> HardwareValidator<'a> { .compute_specs .as_ref() .and_then(|specs| specs.gpu.as_ref()) - .and_then(|gpu| gpu.memory_mb) - .and_then(|memory| { - if memory >= self.reqs.memory { - Some(81920) + .and_then(|gpu| { + if let (Some(memory), Some(count)) = (gpu.memory_mb, gpu.count) { + if memory >= self.reqs.memory && count >= self.reqs.count { + // saturate memory: fp32 square matrix, and 3 matrices required + let mem_per_matrix = + ((self.reqs.memory * 1024 * 1024) as f64) / 3.0 / 4.0; + // multiply by 0.9 to leave some room for overhead + let n = (mem_per_matrix * 0.9).sqrt(); + 
// clip to nearest multiple of 4096 + let matrix_size = (n / 4096.0).floor() * 4096.0; + Some(matrix_size as u64) + } else { + None + } } else { None } From 03f161aaa0a9e7d2f5f01ab7bf37de706df19b9a Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Fri, 28 Mar 2025 18:51:48 +0100 Subject: [PATCH 84/85] add support for verifier /clear endpoint --- shared/src/models/gpu_challenge.rs | 10 ++++++++++ validator/src/validators/hardware.rs | 21 +++++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/shared/src/models/gpu_challenge.rs b/shared/src/models/gpu_challenge.rs index 8aff4ce2..4da7d0eb 100644 --- a/shared/src/models/gpu_challenge.rs +++ b/shared/src/models/gpu_challenge.rs @@ -106,6 +106,16 @@ pub struct GpuRowProofsRequest { pub row_idxs: Vec, } +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuClearRequest { + pub session_id: String, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct GpuClearResponse { + pub status: String, +} + // Request to check multiple rows #[derive(Debug, Serialize, Deserialize)] pub struct GpuMultiRowCheckRequest { diff --git a/validator/src/validators/hardware.rs b/validator/src/validators/hardware.rs index ef23c3f6..d7219744 100644 --- a/validator/src/validators/hardware.rs +++ b/validator/src/validators/hardware.rs @@ -231,6 +231,27 @@ impl<'a> HardwareValidator<'a> { if let Some(session) = sessions.get_mut(&node.id) { session.status = NodeChallengeStatus::Failed; session.timestamp = get_time_as_secs(); + if let Some(session_id) = session.session_id.clone() { + info!("Clearing session ID: {}", session_id); + let response: GpuClearResponse = self + .verifier_send( + "/clear", + Some(json!(GpuClearRequest { + session_id: session_id.clone(), + })), + ) + .await + .unwrap_or_else(|e| { + error!("Failed to clear session {}: {}", session_id, e); + GpuClearResponse { + status: "error".to_string(), + } + }); + if response.status == "ok" { + info!("Session {} cleared successfully", session_id); + } + 
session.session_id = None; + } } continue; } else { From 4e4e9ff58fcdcc99a1d192b891f73f7e8bbf4570 Mon Sep 17 00:00:00 2001 From: Matthew Di Ferrante Date: Fri, 28 Mar 2025 23:30:33 +0100 Subject: [PATCH 85/85] fix overflow in ram calculation --- validator/src/validators/hardware.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/validator/src/validators/hardware.rs b/validator/src/validators/hardware.rs index d7219744..eb2993d5 100644 --- a/validator/src/validators/hardware.rs +++ b/validator/src/validators/hardware.rs @@ -46,7 +46,7 @@ fn get_time_as_secs() -> u64 { } pub struct HardwareReqs { - pub memory: u32, + pub memory: u64, pub count: u32, pub benchmark: u64, } @@ -69,7 +69,7 @@ impl<'a> HardwareValidator<'a> { .and_then(|v| v.parse().ok()) .unwrap_or(150); - let memory: u32 = std::env::var("GPU_VALIDATOR_MIN_VRAM_MB") + let memory: u64 = std::env::var("GPU_VALIDATOR_MIN_VRAM_MB") .ok() .and_then(|v| v.parse().ok()) .unwrap_or(80000); @@ -198,7 +198,7 @@ impl<'a> HardwareValidator<'a> { .and_then(|specs| specs.gpu.as_ref()) .and_then(|gpu| { if let (Some(memory), Some(count)) = (gpu.memory_mb, gpu.count) { - if memory >= self.reqs.memory && count >= self.reqs.count { + if (memory as u64) >= self.reqs.memory && count >= self.reqs.count { // saturate memory: fp32 square matrix, and 3 matrices required let mem_per_matrix = ((self.reqs.memory * 1024 * 1024) as f64) / 3.0 / 4.0;