From 49812cacb6edc0e9f978d1708f9a915ce83e2e33 Mon Sep 17 00:00:00 2001 From: brown Date: Wed, 25 Mar 2026 12:17:46 +0100 Subject: [PATCH 1/2] improve breakpoint --- examples/test_unbounded_iteration.rs | 73 +- extensions/vscode/src/cli/debuggerProcess.ts | 30 + extensions/vscode/src/dap/adapter.ts | 53 +- .../vscode/src/dap/sourceBreakpoints.ts | 20 +- src/analyzer/security.rs | 255 +++--- src/batch.rs | 123 +-- src/benchmarks.rs | 49 +- src/bin/bench-regression.rs | 1 - src/cli/commands.rs | 814 +++++++++--------- src/debugger/mod.rs | 2 +- src/debugger/source_map.rs | 396 ++++++++- src/history/mod.rs | 6 +- src/server/debug_server.rs | 40 + src/server/protocol.rs | 14 + tests/arithmetic_rule.rs | 5 +- tests/source_breakpoint_resolution_tests.rs | 170 ++++ tests/unbounded_iteration_tests.rs | 80 +- 17 files changed, 1447 insertions(+), 684 deletions(-) create mode 100644 tests/source_breakpoint_resolution_tests.rs diff --git a/examples/test_unbounded_iteration.rs b/examples/test_unbounded_iteration.rs index d4faf435..65b93a4e 100644 --- a/examples/test_unbounded_iteration.rs +++ b/examples/test_unbounded_iteration.rs @@ -1,5 +1,5 @@ //! Example demonstrating the improved unbounded iteration detection -//! +//! //! This example shows how the enhanced security analyzer can detect //! storage-driven loops with improved precision and confidence scoring. @@ -7,42 +7,48 @@ use soroban_debugger::analyzer::security::SecurityAnalyzer; fn main() { println!("Testing improved unbounded iteration detection..."); - + // Create a simple WASM module with storage calls in loops let wasm_with_storage_loop = create_wasm_with_storage_loop(); - + let analyzer = SecurityAnalyzer::new(); match analyzer.analyze(&wasm_with_storage_loop, None, None) { Ok(report) => { - println!("Analysis complete. Found {} security issues.", report.findings.len()); - + println!( + "Analysis complete. 
Found {} security issues.", + report.findings.len() + ); + for finding in &report.findings { if finding.rule_id == "unbounded-iteration" { println!("🔍 Unbounded Iteration Finding:"); println!(" Severity: {:?}", finding.severity); println!(" Description: {}", finding.description); - + if let Some(confidence) = &finding.confidence { println!(" Confidence: {:?}", confidence.level); println!(" Rationale: {}", confidence.rationale); } - + if let Some(context) = &finding.context { if let Some(depth) = context.loop_nesting_depth { println!(" Loop Nesting Depth: {}", depth); } - + if let Some(pattern) = &context.storage_call_pattern { println!(" Storage Calls in Loops: {}", pattern.calls_in_loops); - println!(" Storage Calls Outside Loops: {}", pattern.calls_outside_loops); + println!( + " Storage Calls Outside Loops: {}", + pattern.calls_outside_loops + ); } - + if let Some(cf_info) = &context.control_flow_info { println!(" Loop Types: {:?}", cf_info.loop_types); println!(" Conditional Branches: {}", cf_info.conditional_branches); } } - + println!(" Remediation: {}", finding.remediation); println!(); } @@ -57,15 +63,15 @@ fn main() { fn create_wasm_with_storage_loop() -> Vec { // This is a minimal WASM module that imports a storage function and calls it in a loop // For demonstration purposes, we'll create a simple pattern - + let mut module = vec![ 0x00, 0x61, 0x73, 0x6D, // WASM magic 0x01, 0x00, 0x00, 0x00, // WASM version ]; - + // Type section (one function type: () -> ()) module.extend_from_slice(&[0x01, 0x60, 0x00, 0x00]); - + // Import section (import storage_get from env) module.extend_from_slice(&[ 0x02, // Import section id @@ -76,16 +82,16 @@ fn create_wasm_with_storage_loop() -> Vec { module.extend_from_slice(&[0x0B]); // Length of "storage_get" module.extend_from_slice(b"storage_get"); module.extend_from_slice(&[0x00, 0x00]); // Import kind: function, type index 0 - + // Function section (one local function) module.extend_from_slice(&[0x03, 0x01, 0x00]); - + 
// Export section (export the function) module.extend_from_slice(&[0x07, 0x01]); module.extend_from_slice(&[0x09]); // Length of "test_func" module.extend_from_slice(b"test_func"); module.extend_from_slice(&[0x00, 0x01]); // Export kind: function, function index 1 - + // Code section (function body with loop and storage call) module.extend_from_slice(&[0x0A, 0x01, 0x09]); // Code section, 1 function, body size 9 module.extend_from_slice(&[ @@ -96,37 +102,46 @@ fn create_wasm_with_storage_loop() -> Vec { 0x0B, // End loop 0x0B, // End function ]); - + module } #[cfg(test)] mod tests { use super::*; - + #[test] fn test_unbounded_iteration_detection() { let wasm = create_wasm_with_storage_loop(); let analyzer = SecurityAnalyzer::new(); - - let report = analyzer.analyze(&wasm, None, None).expect("Analysis should succeed"); - + + let report = analyzer + .analyze(&wasm, None, None) + .expect("Analysis should succeed"); + // Should find the unbounded iteration issue - let unbounded_findings: Vec<_> = report.findings + let unbounded_findings: Vec<_> = report + .findings .iter() .filter(|f| f.rule_id == "unbounded-iteration") .collect(); - - assert!(!unbounded_findings.is_empty(), "Should detect unbounded iteration"); - + + assert!( + !unbounded_findings.is_empty(), + "Should detect unbounded iteration" + ); + let finding = unbounded_findings[0]; - assert_eq!(finding.severity, soroban_debugger::analyzer::security::Severity::High); - + assert_eq!( + finding.severity, + soroban_debugger::analyzer::security::Severity::High + ); + // Should have confidence metadata assert!(finding.confidence.is_some()); let confidence = finding.confidence.as_ref().unwrap(); assert!(!confidence.rationale.is_empty()); - + // Should have context metadata assert!(finding.context.is_some()); let context = finding.context.as_ref().unwrap(); diff --git a/extensions/vscode/src/cli/debuggerProcess.ts b/extensions/vscode/src/cli/debuggerProcess.ts index 1952aa41..f38c4da7 100644 --- 
a/extensions/vscode/src/cli/debuggerProcess.ts +++ b/extensions/vscode/src/cli/debuggerProcess.ts @@ -53,6 +53,7 @@ type DebugRequest = | { type: 'GetStorage' } | { type: 'SetBreakpoint'; function: string } | { type: 'ClearBreakpoint'; function: string } + | { type: 'ResolveSourceBreakpoints'; source_path: string; lines: number[]; exported_functions: string[] } | { type: 'Evaluate'; expression: string; frame_id?: number } | { type: 'Ping' } | { type: 'Disconnect' } @@ -69,6 +70,7 @@ type DebugResponse = | { type: 'SnapshotLoaded'; summary: string } | { type: 'BreakpointSet'; function: string } | { type: 'BreakpointCleared'; function: string } + | { type: 'SourceBreakpointsResolved'; breakpoints: Array<{ requested_line: number; line: number; verified: boolean; function?: string; reason_code: string; message: string }> } | { type: 'EvaluateResult'; result: string; result_type?: string; variables_reference: number } | { type: 'Pong' } | { type: 'Disconnected' } @@ -321,6 +323,34 @@ export class DebuggerProcess { return functions; } + async resolveSourceBreakpoints( + sourcePath: string, + lines: number[], + exportedFunctions: Set, + options?: RequestOptions + ): Promise> { + const response = await this.sendRequest( + { + type: 'ResolveSourceBreakpoints', + source_path: sourcePath, + lines, + exported_functions: Array.from(exportedFunctions) + }, + options + ); + + this.expectResponse(response, 'SourceBreakpointsResolved'); + + return response.breakpoints.map((bp) => ({ + requestedLine: bp.requested_line, + line: bp.line, + verified: bp.verified, + functionName: bp.function, + reasonCode: bp.reason_code, + message: bp.message + })); + } + async stop(): Promise { try { if (this.socket && !this.socket.destroyed) { diff --git a/extensions/vscode/src/dap/adapter.ts b/extensions/vscode/src/dap/adapter.ts index 0c79b88a..063d5036 100644 --- a/extensions/vscode/src/dap/adapter.ts +++ b/extensions/vscode/src/dap/adapter.ts @@ -95,19 +95,50 @@ export class SorobanDebugSession 
extends DebugSession { const lines = breakpoints.map((bp) => bp.line); try { - const resolved: ResolvedBreakpoint[] = this.debuggerProcess && source - ? resolveSourceBreakpoints(source, lines, this.exportedFunctions) - : lines.map((line) => ({ - line, - verified: false, - message: 'Debugger is not launched or source path is unavailable' + let resolved: ResolvedBreakpoint[]; + if (!this.debuggerProcess || !source) { + resolved = lines.map((line) => ({ + requestedLine: line, + line, + verified: false, + reasonCode: 'NO_DEBUGGER', + setBreakpoint: false, + message: 'Debugger is not launched or source path is unavailable' + })); + } else { + let serverResolved: Array<{ requestedLine: number; line: number; verified: boolean; functionName?: string; reasonCode: string; message: string }> | null = null; + try { + serverResolved = await this.debuggerProcess.resolveSourceBreakpoints(source, lines, this.exportedFunctions); + } catch { + serverResolved = null; + } + + const shouldFallbackHeuristic = serverResolved + ? 
serverResolved.every((bp) => ['NO_DEBUG_INFO', 'FILE_NOT_IN_DEBUG_INFO', 'WASM_PARSE_ERROR'].includes(bp.reasonCode)) + : false; + + if (serverResolved && shouldFallbackHeuristic) { + resolved = resolveSourceBreakpoints(source, lines, this.exportedFunctions); + } else if (serverResolved) { + resolved = serverResolved.map((bp) => ({ + requestedLine: bp.requestedLine, + line: bp.line, + verified: bp.verified, + functionName: bp.functionName, + reasonCode: bp.reasonCode, + message: bp.message, + setBreakpoint: bp.verified && Boolean(bp.functionName) })); + } else { + resolved = resolveSourceBreakpoints(source, lines, this.exportedFunctions); + } + } await this.syncFunctionBreakpoints( source, new Set( resolved - .filter((bp) => bp.verified && bp.functionName) + .filter((bp) => bp.setBreakpoint && bp.functionName) .map((bp) => bp.functionName as string) ) ); @@ -122,10 +153,10 @@ export class SorobanDebugSession extends DebugSession { response.body = { breakpoints: breakpoints.map((bp) => { - const match = resolved.find((resolvedBreakpoint) => resolvedBreakpoint.line === bp.line); + const match = resolved.find((resolvedBreakpoint) => resolvedBreakpoint.requestedLine === bp.line); return { verified: match?.verified ?? false, - line: bp.line, + line: match?.line ?? 
bp.line, column: bp.column, source: args.source, message: match?.message @@ -539,13 +570,13 @@ export class SorobanDebugSession extends DebugSession { return; } - this.state.callStack = inspection.callStack.map((frame, index) => { + this.state.callStack = inspection.callStack.map((frame, index) => { let sourcePath = frame; let line = 1; // Try to find the range for the function to resolve the actual source line for (const [sourceFilePath, sourceBpSet] of this.sourceFunctionBreakpoints.entries()) { - if (sourceBpSet.has(frame) || sourceFilePath) { + if (sourceBpSet.has(frame)) { sourcePath = sourceFilePath; try { const { parseFunctionRanges } = require('./sourceBreakpoints'); diff --git a/extensions/vscode/src/dap/sourceBreakpoints.ts b/extensions/vscode/src/dap/sourceBreakpoints.ts index b7f610ad..79bd666a 100644 --- a/extensions/vscode/src/dap/sourceBreakpoints.ts +++ b/extensions/vscode/src/dap/sourceBreakpoints.ts @@ -7,10 +7,17 @@ export interface FunctionRange { } export interface ResolvedBreakpoint { + requestedLine: number; line: number; verified: boolean; functionName?: string; + reasonCode?: string; message?: string; + /** + * Whether to set a runtime function breakpoint for this source breakpoint. + * Source breakpoints can be unverified but still mapped to a function as a best-effort. 
+ */ + setBreakpoint?: boolean; } const FUNCTION_DECL = /^\s*(?:pub\s+)?fn\s+([A-Za-z_][A-Za-z0-9_]*)\s*\(/; @@ -69,26 +76,35 @@ export function resolveSourceBreakpoints( const range = ranges.find((candidate) => line >= candidate.startLine && line <= candidate.endLine); if (!range) { return { + requestedLine: line, line, verified: false, + reasonCode: 'HEURISTIC_NO_FUNCTION', + setBreakpoint: false, message: 'Line is not inside a detectable Rust function' }; } if (!exportedFunctions.has(range.name)) { return { + requestedLine: line, line, verified: false, functionName: range.name, + reasonCode: 'HEURISTIC_NOT_EXPORTED', + setBreakpoint: false, message: `Rust function '${range.name}' is not an exported contract entrypoint` }; } return { + requestedLine: line, line, - verified: true, + verified: false, functionName: range.name, - message: `Mapped to contract function '${range.name}' entry breakpoint` + reasonCode: 'HEURISTIC_NO_DWARF', + setBreakpoint: true, + message: `Heuristic mapping to contract entrypoint '${range.name}' (DWARF source map unavailable)` }; }); } diff --git a/src/analyzer/security.rs b/src/analyzer/security.rs index e0216679..a9673f61 100644 --- a/src/analyzer/security.rs +++ b/src/analyzer/security.rs @@ -1,10 +1,10 @@ use crate::runtime::executor::ContractExecutor; -use crate::server::protocol::{ DynamicTraceEvent, DynamicTraceEventKind }; -use crate::utils::wasm::{ parse_instructions, WasmInstruction }; +use crate::server::protocol::{DynamicTraceEvent, DynamicTraceEventKind}; +use crate::utils::wasm::{parse_instructions, WasmInstruction}; use crate::Result; -use serde::{ Deserialize, Serialize }; -use std::collections::{ HashMap, HashSet }; -use wasmparser::{ Operator, Parser, Payload }; +use serde::{Deserialize, Serialize}; +use std::collections::{HashMap, HashSet}; +use wasmparser::{Operator, Parser, Payload}; #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub enum Severity { @@ -72,7 +72,7 @@ pub trait SecurityRule { fn 
analyze_dynamic( &self, _executor: Option<&ContractExecutor>, - _trace: &[DynamicTraceEvent] + _trace: &[DynamicTraceEvent], ) -> Result> { Ok(vec![]) } @@ -91,7 +91,7 @@ impl SecurityAnalyzer { Box::new(AuthorizationCheckRule), Box::new(ReentrancyPatternRule), Box::new(CrossContractImportRule), - Box::new(UnboundedIterationRule) + Box::new(UnboundedIterationRule), ], } } @@ -100,7 +100,7 @@ impl SecurityAnalyzer { &self, wasm_bytes: &[u8], executor: Option<&ContractExecutor>, - trace: Option<&[DynamicTraceEvent]> + trace: Option<&[DynamicTraceEvent]>, ) -> Result { let mut report = SecurityReport::default(); @@ -135,7 +135,11 @@ fn strkey_crc16(data: &[u8]) -> u16 { for &byte in data { crc ^= (byte as u16) << 8; for _ in 0..8 { - crc = if (crc & 0x8000) != 0 { (crc << 1) ^ 0x1021 } else { crc << 1 }; + crc = if (crc & 0x8000) != 0 { + (crc << 1) ^ 0x1021 + } else { + crc << 1 + }; } } crc @@ -230,18 +234,19 @@ impl SecurityRule for HardcodedAddressRule { // Without guard 2, arbitrary 56-char constants such as error // message fragments or base64 blobs that happen to start with // 'G' or 'C' would be mis-classified as addresses. - if - (word.starts_with('G') || word.starts_with('C')) && - word.len() == 56 && - is_valid_strkey(word) + if (word.starts_with('G') || word.starts_with('C')) + && word.len() == 56 + && is_valid_strkey(word) { findings.push(SecurityFinding { rule_id: self.name().to_string(), severity: Severity::Medium, location: "Data Section".to_string(), description: format!("Found potential hardcoded address: {}", word), - remediation: "Use Address::from_str from configuration or function \ - arguments instead of hardcoding.".to_string(), + remediation: + "Use Address::from_str from configuration or function \ + arguments instead of hardcoding." 
+ .to_string(), confidence: None, context: None, }); @@ -289,12 +294,12 @@ impl ArithmeticCheckRule { fn is_arithmetic(instr: &WasmInstruction) -> bool { matches!( instr, - WasmInstruction::I32Add | - WasmInstruction::I32Sub | - WasmInstruction::I32Mul | - WasmInstruction::I64Add | - WasmInstruction::I64Sub | - WasmInstruction::I64Mul + WasmInstruction::I32Add + | WasmInstruction::I32Sub + | WasmInstruction::I32Mul + | WasmInstruction::I64Add + | WasmInstruction::I64Sub + | WasmInstruction::I64Mul ) } @@ -337,7 +342,7 @@ impl SecurityRule for AuthorizationCheckRule { fn analyze_dynamic( &self, _executor: Option<&ContractExecutor>, - trace: &[DynamicTraceEvent] + trace: &[DynamicTraceEvent], ) -> Result> { let mut findings = Vec::new(); let mut auth_seen = false; @@ -380,7 +385,7 @@ impl SecurityRule for ReentrancyPatternRule { fn analyze_dynamic( &self, _executor: Option<&ContractExecutor>, - trace: &[DynamicTraceEvent] + trace: &[DynamicTraceEvent], ) -> Result> { Ok(analyze_reentrancy_dynamic(trace)) } @@ -464,20 +469,19 @@ impl SecurityRule for CrossContractImportRule { return Ok(Vec::new()); } - Ok( - vec![SecurityFinding { - rule_id: self.name().to_string(), - severity: Severity::Low, - location: "Import Section".to_string(), - description: format!( - "Cross-contract host imports detected: {}", - matches.join(", ") - ), - remediation: "Review external call sites for reentrancy and authorization checks.".to_string(), - confidence: None, - context: None, - }] - ) + Ok(vec![SecurityFinding { + rule_id: self.name().to_string(), + severity: Severity::Low, + location: "Import Section".to_string(), + description: format!( + "Cross-contract host imports detected: {}", + matches.join(", ") + ), + remediation: "Review external call sites for reentrancy and authorization checks." 
+ .to_string(), + confidence: None, + context: None, + }]) } } @@ -573,8 +577,7 @@ impl SecurityRule for UnboundedIterationRule { if depth > 1 { finding.description = format!( "{} Loop nesting depth: {} (increased complexity).", - finding.description, - depth + finding.description, depth ); } } @@ -586,17 +589,15 @@ impl SecurityRule for UnboundedIterationRule { fn analyze_dynamic( &self, _executor: Option<&ContractExecutor>, - trace: &[DynamicTraceEvent] + trace: &[DynamicTraceEvent], ) -> Result> { - Ok( - analyze_unbounded_iteration_dynamic(trace) - .into_iter() - .map(|mut finding| { - finding.rule_id = self.name().to_string(); - finding - }) - .collect() - ) + Ok(analyze_unbounded_iteration_dynamic(trace) + .into_iter() + .map(|mut finding| { + finding.rule_id = self.name().to_string(); + finding + }) + .collect()) } } @@ -612,9 +613,7 @@ struct UnboundedStaticSignal { #[derive(Debug, Clone)] enum ControlFlowFrame { - Loop { - loop_type: String, - }, + Loop { loop_type: String }, Block, If, } @@ -672,25 +671,21 @@ fn analyze_unbounded_iteration_static(wasm_bytes: &[u8]) -> UnboundedStaticSigna match op { Operator::Loop { .. } => { - let current_depth = control_flow_stack - .iter() - .filter(|f| f.is_loop()) - .count(); - let loop_type = ( - if current_depth > 0 { - "nested_loop" - } else { - "top_level_loop" - } - ).to_string(); + let current_depth = + control_flow_stack.iter().filter(|f| f.is_loop()).count(); + let loop_type = (if current_depth > 0 { + "nested_loop" + } else { + "top_level_loop" + }) + .to_string(); loop_types_seen.insert(loop_type.clone()); control_flow_stack.push(ControlFlowFrame::Loop { loop_type: loop_type.clone(), }); - signal.max_nesting_depth = signal.max_nesting_depth.max( - current_depth + 1 - ); + signal.max_nesting_depth = + signal.max_nesting_depth.max(current_depth + 1); } Operator::Block { .. 
} => { control_flow_stack.push(ControlFlowFrame::Block); @@ -705,19 +700,14 @@ fn analyze_unbounded_iteration_static(wasm_bytes: &[u8]) -> UnboundedStaticSigna } Operator::Call { function_index } => { let is_storage_call = storage_import_indices.contains(&function_index); - let current_loop_depth = control_flow_stack - .iter() - .filter(|f| f.is_loop()) - .count(); + let current_loop_depth = + control_flow_stack.iter().filter(|f| f.is_loop()).count(); if is_storage_call { if current_loop_depth > 0 { storage_calls_in_loops += 1; - if - let Some(loop_frame) = control_flow_stack - .iter() - .rev() - .find(|f| f.is_loop()) + if let Some(loop_frame) = + control_flow_stack.iter().rev().find(|f| f.is_loop()) { if let Some(loop_type) = loop_frame.loop_type() { loop_types_with_calls.insert(loop_type.to_string()); @@ -757,9 +747,7 @@ fn analyze_unbounded_iteration_static(wasm_bytes: &[u8]) -> UnboundedStaticSigna let confidence_rationale = format!( "Storage calls in loops: {}, max nesting depth: {}, loop types with calls: {:?}", - storage_calls_in_loops, - signal.max_nesting_depth, - loop_types_with_calls + storage_calls_in_loops, signal.max_nesting_depth, loop_types_with_calls ); signal.confidence = Some(FindingConfidence { @@ -842,9 +830,8 @@ fn analyze_unbounded_iteration_dynamic(trace: &[DynamicTraceEvent]) -> Option= 64 && - (unique_keys <= total_reads / 4 || max_reads_for_one_key >= 32 || total_reads >= 128); + let likely_unbounded = total_reads >= 64 + && (unique_keys <= total_reads / 4 || max_reads_for_one_key >= 32 || total_reads >= 128); if !likely_unbounded { return None; @@ -917,16 +904,28 @@ mod tests { #[test] fn strkey_accepts_well_formed_g_address() { let addr = build_strkey(6 << 3, &[0u8; 32]); - assert!(addr.starts_with('G'), "sanity: version 0x30 encodes to 'G' prefix"); - assert!(is_valid_strkey(&addr), "well-formed G address must be accepted"); + assert!( + addr.starts_with('G'), + "sanity: version 0x30 encodes to 'G' prefix" + ); + assert!( + 
is_valid_strkey(&addr), + "well-formed G address must be accepted" + ); } /// Same for the contract ('C') variant. #[test] fn strkey_accepts_well_formed_c_address() { let addr = build_strkey(2 << 3, &[0u8; 32]); - assert!(addr.starts_with('C'), "sanity: version 0x10 encodes to 'C' prefix"); - assert!(is_valid_strkey(&addr), "well-formed C address must be accepted"); + assert!( + addr.starts_with('C'), + "sanity: version 0x10 encodes to 'C' prefix" + ); + assert!( + is_valid_strkey(&addr), + "well-formed C address must be accepted" + ); } /// 56 uppercase-ASCII chars starting with 'G' but with all-'A' payload have @@ -937,7 +936,10 @@ mod tests { // It has a valid prefix/length but an invalid payload+CRC combination. let fake = format!("G{}", "A".repeat(55)); assert_eq!(fake.len(), 56); - assert!(!is_valid_strkey(&fake), "all-A token must be rejected (bad CRC)"); + assert!( + !is_valid_strkey(&fake), + "all-A token must be rejected (bad CRC)" + ); } /// A string that is 56 chars, starts with 'G', but contains characters @@ -948,18 +950,25 @@ mod tests { // Contains '0', '1', and lower-case letters — all outside A-Z/2-7. let bad_chars = "G0001111abcdefghABCDEFGHIJKLMNOPQRSTUVWXYZ234567ABCDE"; assert_eq!(bad_chars.len(), 53); // not 56, show next case is the real one - // Craft exactly 56 chars with an invalid char ('0') at position 1. + // Craft exactly 56 chars with an invalid char ('0') at position 1. let with_zero = "G0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"; assert_eq!(with_zero.len(), 56); - assert!(!is_valid_strkey(with_zero), "token with '0' must be rejected"); + assert!( + !is_valid_strkey(with_zero), + "token with '0' must be rejected" + ); } /// Strings shorter or longer than 56 characters must always be rejected, /// regardless of prefix. 
#[test] fn strkey_rejects_wrong_length() { - assert!(!is_valid_strkey("GAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")); // 55 - assert!(!is_valid_strkey("GAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")); // 57 + assert!(!is_valid_strkey( + "GAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + )); // 55 + assert!(!is_valid_strkey( + "GAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + )); // 57 assert!(!is_valid_strkey("")); // empty } @@ -1000,16 +1009,10 @@ mod tests { assert!(data_len < 128, "test helper only handles short payloads"); let mut wasm = vec![ - 0x00, - 0x61, - 0x73, - 0x6d, // magic: \0asm - 0x01, - 0x00, - 0x00, - 0x00, // version: 1 + 0x00, 0x61, 0x73, 0x6d, // magic: \0asm + 0x01, 0x00, 0x00, 0x00, // version: 1 // Data section (id = 11) - 0x0b + 0x0b, ]; // Section content = segment-count(1) + segment @@ -1039,7 +1042,7 @@ mod tests { // valid base32 characters, yet none carries a correct CRC-16 checksum. let fake_tokens = [ "GAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", // 57 → trim - "CBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB", // 55 → skip + "CBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB", // 55 → skip // Exactly 56 chars, valid base32, but wrong CRC: "GABCDEFGHIJKLMNOPQRSTUVWXYZ234567ABCDEFGHIJKLMNOPQRSTUVW", "CABCDEFGHIJKLMNOPQRSTUVWXYZ234567ABCDEFGHIJKLMNOPQRSTUVW", @@ -1054,7 +1057,9 @@ mod tests { continue; // shorter than 56 — would not be picked up anyway } let wasm = wasm_with_data_string(&t); - let findings = rule.analyze_static(&wasm).expect("analyze_static should not error"); + let findings = rule + .analyze_static(&wasm) + .expect("analyze_static should not error"); assert!( findings.is_empty(), "token '{}' must not produce a finding (not a valid StrKey): {:?}", @@ -1074,9 +1079,15 @@ mod tests { let wasm = wasm_with_data_string(&valid_addr); let rule = HardcodedAddressRule; - let findings = rule.analyze_static(&wasm).expect("analyze_static should not error"); 
+ let findings = rule + .analyze_static(&wasm) + .expect("analyze_static should not error"); - assert_eq!(findings.len(), 1, "exactly one finding expected for a valid hardcoded address"); + assert_eq!( + findings.len(), + 1, + "exactly one finding expected for a valid hardcoded address" + ); assert_eq!(findings[0].rule_id, "hardcoded-address"); assert!( findings[0].description.contains(&valid_addr), @@ -1089,13 +1100,17 @@ mod tests { #[test] fn hardcoded_address_rule_mixed_tokens() { let valid_addr = build_strkey(2 << 3, &[0x11u8; 32]); // C-prefix contract address - // Pad the two strings with a space so they end up as separate tokens. - let payload = - format!("{} GABCDEFGHIJKLMNOPQRSTUVWXYZ234567ABCDEFGHIJKLMNOPQRSTUV", valid_addr); + // Pad the two strings with a space so they end up as separate tokens. + let payload = format!( + "{} GABCDEFGHIJKLMNOPQRSTUVWXYZ234567ABCDEFGHIJKLMNOPQRSTUV", + valid_addr + ); let wasm = wasm_with_data_string(&payload); let rule = HardcodedAddressRule; - let findings = rule.analyze_static(&wasm).expect("analyze_static should not error"); + let findings = rule + .analyze_static(&wasm) + .expect("analyze_static should not error"); assert_eq!( findings.len(), @@ -1138,7 +1153,7 @@ mod tests { let instrs = vec![ WasmInstruction::I32Add, WasmInstruction::Unknown(0x41), - WasmInstruction::BrIf + WasmInstruction::BrIf, ]; assert!(ArithmeticCheckRule::is_guarded(&instrs, 0)); } @@ -1150,11 +1165,11 @@ mod tests { // idx=0, window covers idx+1..idx+4 (indices 1, 2, 3). // BrIf is at index 4, which is outside the window. 
let instrs = vec![ - WasmInstruction::I32Add, // idx 0 + WasmInstruction::I32Add, // idx 0 WasmInstruction::Unknown(0x41), // idx 1 WasmInstruction::Unknown(0x41), // idx 2 WasmInstruction::Unknown(0x41), // idx 3 - WasmInstruction::BrIf // idx 4 — outside window + WasmInstruction::BrIf, // idx 4 — outside window ]; assert!(!ArithmeticCheckRule::is_guarded(&instrs, 0)); } @@ -1184,7 +1199,11 @@ mod tests { assert!(!ArithmeticCheckRule::is_guarded(&after, 0)); // Call on both sides: - let both = vec![WasmInstruction::Call, WasmInstruction::I32Mul, WasmInstruction::Call]; + let both = vec![ + WasmInstruction::Call, + WasmInstruction::I32Mul, + WasmInstruction::Call, + ]; assert!(!ArithmeticCheckRule::is_guarded(&both, 1)); } @@ -1193,7 +1212,11 @@ mod tests { #[test] fn is_guarded_true_when_brif_follows_call_after_arithmetic() { // i32.add -> call (side-effect) -> br_if (checks result) - let instrs = vec![WasmInstruction::I32Add, WasmInstruction::Call, WasmInstruction::BrIf]; + let instrs = vec![ + WasmInstruction::I32Add, + WasmInstruction::Call, + WasmInstruction::BrIf, + ]; assert!(ArithmeticCheckRule::is_guarded(&instrs, 0)); } @@ -1227,7 +1250,7 @@ mod tests { fn reentrancy_no_finding_for_write_in_callee_frame() { let trace = vec![ make_event(0, DynamicTraceEventKind::CrossContractCall, 0), - make_event(1, DynamicTraceEventKind::StorageWrite, 1) + make_event(1, DynamicTraceEventKind::StorageWrite, 1), ]; assert!( analyze_reentrancy_dynamic(&trace).is_empty(), @@ -1244,7 +1267,7 @@ mod tests { make_event(0, DynamicTraceEventKind::CrossContractCall, 0), make_event(1, DynamicTraceEventKind::StorageWrite, 1), make_event(2, DynamicTraceEventKind::CrossContractReturn, 0), - make_event(3, DynamicTraceEventKind::StorageWrite, 0) + make_event(3, DynamicTraceEventKind::StorageWrite, 0), ]; assert!( analyze_reentrancy_dynamic(&trace).is_empty(), @@ -1260,7 +1283,7 @@ mod tests { make_event(0, DynamicTraceEventKind::CrossContractCall, 0), make_event(1, 
DynamicTraceEventKind::StorageWrite, 1), make_event(2, DynamicTraceEventKind::CrossContractReturn, 0), - make_event(3, DynamicTraceEventKind::StorageWrite, 0) + make_event(3, DynamicTraceEventKind::StorageWrite, 0), ]; assert!( analyze_reentrancy_dynamic(&trace).is_empty(), @@ -1274,7 +1297,7 @@ mod tests { fn reentrancy_finding_for_write_in_same_frame_after_cross_call() { let trace = vec![ make_event(0, DynamicTraceEventKind::CrossContractCall, 0), - make_event(1, DynamicTraceEventKind::StorageWrite, 0) + make_event(1, DynamicTraceEventKind::StorageWrite, 0), ]; let findings = analyze_reentrancy_dynamic(&trace); assert_eq!( diff --git a/src/batch.rs b/src/batch.rs index e76ab39f..9323e81c 100644 --- a/src/batch.rs +++ b/src/batch.rs @@ -2,14 +2,14 @@ use crate::runtime::executor::ContractExecutor; use crate::DebuggerError; use crate::Result; use rayon::prelude::*; -use serde::{ Deserialize, Serialize }; +use serde::{Deserialize, Serialize}; use serde_json::Value; +use std::cell::RefCell; use std::fs; use std::path::Path; use std::sync::Arc; -use std::time::Instant; -use std::cell::RefCell; use std::thread_local; +use std::time::Instant; /// A single batch execution item with arguments and optional expected result #[derive(Debug, Clone, Serialize, Deserialize)] @@ -88,23 +88,26 @@ impl BatchExecutor { /// Load batch items from a JSON file pub fn load_batch_file>(path: P) -> Result> { - let content = fs - ::read_to_string(path.as_ref()) - .map_err(|e| { - DebuggerError::FileError( - format!("Failed to read batch file {:?}: {}", path.as_ref(), e) - ) - })?; - - let parsed: Vec = serde_json - ::from_str(&content) - .map_err(|e| { - DebuggerError::FileError( - format!("Failed to parse batch file as JSON array {:?}: {}", path.as_ref(), e) - ) - })?; - - let items = parsed.into_iter().map(BatchItem::from).collect::>(); + let content = fs::read_to_string(path.as_ref()).map_err(|e| { + DebuggerError::FileError(format!( + "Failed to read batch file {:?}: {}", + 
path.as_ref(), + e + )) + })?; + + let parsed: Vec = serde_json::from_str(&content).map_err(|e| { + DebuggerError::FileError(format!( + "Failed to parse batch file as JSON array {:?}: {}", + path.as_ref(), + e + )) + })?; + + let items = parsed + .into_iter() + .map(BatchItem::from) + .collect::>(); Ok(items) } @@ -178,22 +181,10 @@ impl BatchExecutor { /// Generate summary from batch results pub fn summarize(results: &[BatchResult]) -> BatchSummary { let total = results.len(); - let passed = results - .iter() - .filter(|r| r.passed) - .count(); - let failed = results - .iter() - .filter(|r| r.success && !r.passed) - .count(); - let errors = results - .iter() - .filter(|r| !r.success) - .count(); - let total_duration_ms = results - .iter() - .map(|r| r.duration_ms) - .sum(); + let passed = results.iter().filter(|r| r.passed).count(); + let failed = results.iter().filter(|r| r.success && !r.passed).count(); + let errors = results.iter().filter(|r| !r.success).count(); + let total_duration_ms = results.iter().map(|r| r.duration_ms).sum(); BatchSummary { total, @@ -226,22 +217,22 @@ impl BatchExecutor { let label = result.label.as_deref().unwrap_or(&default_label); crate::logging::log_display( format!("\n{} {}", status, label), - crate::logging::LogLevel::Info + crate::logging::LogLevel::Info, ); crate::logging::log_display( format!(" Args: {}", result.args), - crate::logging::LogLevel::Info + crate::logging::LogLevel::Info, ); if result.success { crate::logging::log_display( format!(" Result: {}", result.result), - crate::logging::LogLevel::Info + crate::logging::LogLevel::Info, ); if let Some(expected) = &result.expected { crate::logging::log_display( format!(" Expected: {}", expected), - crate::logging::LogLevel::Info + crate::logging::LogLevel::Info, ); if !result.passed { crate::logging::log_display( @@ -249,20 +240,20 @@ impl BatchExecutor { " {}", Formatter::warning("Result does not match expected value") ), - crate::logging::LogLevel::Warn + 
crate::logging::LogLevel::Warn, ); } } } else if let Some(error) = &result.error { crate::logging::log_display( format!(" Error: {}", Formatter::error(error)), - crate::logging::LogLevel::Error + crate::logging::LogLevel::Error, ); } crate::logging::log_display( format!(" Duration: {}ms", result.duration_ms), - crate::logging::LogLevel::Info + crate::logging::LogLevel::Info, ); } @@ -272,30 +263,39 @@ impl BatchExecutor { crate::logging::log_display("=".repeat(80), crate::logging::LogLevel::Info); crate::logging::log_display( format!(" Total: {}", summary.total), - crate::logging::LogLevel::Info + crate::logging::LogLevel::Info, ); crate::logging::log_display( - format!(" {}", Formatter::success(format!("Passed: {}", summary.passed))), - crate::logging::LogLevel::Info + format!( + " {}", + Formatter::success(format!("Passed: {}", summary.passed)) + ), + crate::logging::LogLevel::Info, ); if summary.failed > 0 { crate::logging::log_display( - format!(" {}", Formatter::warning(format!("Failed: {}", summary.failed))), - crate::logging::LogLevel::Warn + format!( + " {}", + Formatter::warning(format!("Failed: {}", summary.failed)) + ), + crate::logging::LogLevel::Warn, ); } if summary.errors > 0 { crate::logging::log_display( - format!(" {}", Formatter::error(format!("Errors: {}", summary.errors))), - crate::logging::LogLevel::Error + format!( + " {}", + Formatter::error(format!("Errors: {}", summary.errors)) + ), + crate::logging::LogLevel::Error, ); } crate::logging::log_display( format!(" Duration: {}ms", summary.total_duration_ms), - crate::logging::LogLevel::Info + crate::logging::LogLevel::Info, ); crate::logging::log_display("=".repeat(80), crate::logging::LogLevel::Info); } @@ -334,10 +334,7 @@ fn json_values_equal(a: &Value, b: &Value) -> bool { match (a, b) { (Value::Number(n1), Value::Number(n2)) => n1.as_f64() == n2.as_f64(), (Value::Array(a), Value::Array(b)) => { - a.len() == b.len() - && a.iter() - .zip(b.iter()) - .all(|(x, y)| json_values_equal(x, y)) + 
a.len() == b.len() && a.iter().zip(b.iter()).all(|(x, y)| json_values_equal(x, y)) } (Value::Object(a), Value::Object(b)) => { a.len() == b.len() @@ -385,7 +382,10 @@ fn truncate_for_table(value: &str, limit: usize) -> String { return value.to_string(); } - let mut truncated = value.chars().take(limit.saturating_sub(1)).collect::(); + let mut truncated = value + .chars() + .take(limit.saturating_sub(1)) + .collect::(); truncated.push('…'); truncated } @@ -396,8 +396,7 @@ mod tests { #[test] fn test_batch_item_deserialization() { - let json = - r#"[ + let json = r#"[ {"args": "[1, 2]", "expected": "3", "label": "Add 1+2"}, {"args": "[5, 10]"} ]"#; @@ -414,7 +413,11 @@ mod tests { #[test] fn test_values_match_loose_json() { // Different whitespace / key order still matches in loose mode - assert!(values_match(r#"{"a":1,"b":2}"#, r#"{ "b": 2, "a": 1 }"#, false)); + assert!(values_match( + r#"{"a":1,"b":2}"#, + r#"{ "b": 2, "a": 1 }"#, + false + )); assert!(values_match("42", "42", false)); // Equivalent numeric representations assert!(values_match("1", "1.0", false)); @@ -472,7 +475,7 @@ mod tests { expected: Some("ok".to_string()), passed: true, duration_ms: 10, - } + }, ]; let summary = BatchExecutor::summarize(&results); diff --git a/src/benchmarks.rs b/src/benchmarks.rs index 2bbffbe6..fe59f7db 100644 --- a/src/benchmarks.rs +++ b/src/benchmarks.rs @@ -47,11 +47,7 @@ pub fn load_baseline_json(path: impl AsRef) -> Result { DebuggerError::FileError(format!("Failed to read baseline JSON {:?}: {e}", path)) })?; serde_json::from_slice(&bytes).map_err(|e| { - DebuggerError::FileError(format!( - "Failed to parse baseline JSON {:?}: {e}", - path - )) - .into() + DebuggerError::FileError(format!("Failed to parse baseline JSON {:?}: {e}", path)).into() }) } @@ -94,10 +90,11 @@ fn collect_estimates_files(root: &Path, out: &mut Vec) -> Result<()> { let dir = match fs::read_dir(root) { Ok(dir) => dir, Err(e) => { - return Err( - DebuggerError::FileError(format!("Failed to read 
directory {:?}: {e}", root)) - .into(), - ) + return Err(DebuggerError::FileError(format!( + "Failed to read directory {:?}: {e}", + root + )) + .into()) } }; @@ -106,9 +103,9 @@ fn collect_estimates_files(root: &Path, out: &mut Vec) -> Result<()> { DebuggerError::FileError(format!("Failed to read directory entry in {:?}: {e}", root)) })?; let path = entry.path(); - let file_type = entry.file_type().map_err(|e| { - DebuggerError::FileError(format!("Failed to stat {:?}: {e}", path)) - })?; + let file_type = entry + .file_type() + .map_err(|e| DebuggerError::FileError(format!("Failed to stat {:?}: {e}", path)))?; if file_type.is_dir() { collect_estimates_files(&path, out)?; @@ -117,7 +114,10 @@ fn collect_estimates_files(root: &Path, out: &mut Vec) -> Result<()> { if file_type.is_file() { if path.file_name().and_then(|s| s.to_str()) == Some("estimates.json") - && path.parent().and_then(|p| p.file_name()).and_then(|s| s.to_str()) + && path + .parent() + .and_then(|p| p.file_name()) + .and_then(|s| s.to_str()) == Some("new") { out.push(path); @@ -128,9 +128,15 @@ fn collect_estimates_files(root: &Path, out: &mut Vec) -> Result<()> { Ok(()) } -fn parse_estimates_mean_ns(criterion_dir: &Path, estimates_path: &Path) -> Result> { +fn parse_estimates_mean_ns( + criterion_dir: &Path, + estimates_path: &Path, +) -> Result> { let bytes = fs::read(estimates_path).map_err(|e| { - DebuggerError::FileError(format!("Failed to read estimates file {:?}: {e}", estimates_path)) + DebuggerError::FileError(format!( + "Failed to read estimates file {:?}: {e}", + estimates_path + )) })?; let json: serde_json::Value = serde_json::from_slice(&bytes).map_err(|e| { @@ -210,7 +216,11 @@ pub fn compare_baselines( } // Largest regressions first, then improvements. 
- deltas.sort_by(|a, b| b.delta_pct.partial_cmp(&a.delta_pct).unwrap_or(std::cmp::Ordering::Equal)); + deltas.sort_by(|a, b| { + b.delta_pct + .partial_cmp(&a.delta_pct) + .unwrap_or(std::cmp::Ordering::Equal) + }); deltas } @@ -255,7 +265,11 @@ pub fn render_markdown_report( } if deltas.len() > max_rows.max(1) { - out.push_str(&format!("\nShowing top {} of {} benchmarks.\n", max_rows, deltas.len())); + out.push_str(&format!( + "\nShowing top {} of {} benchmarks.\n", + max_rows, + deltas.len() + )); } out @@ -345,4 +359,3 @@ mod tests { assert_eq!(overall_status(&deltas2), RegressionStatus::Fail); } } - diff --git a/src/bin/bench-regression.rs b/src/bin/bench-regression.rs index 9f94eea6..62dd76a9 100644 --- a/src/bin/bench-regression.rs +++ b/src/bin/bench-regression.rs @@ -118,4 +118,3 @@ fn run(cli: Cli) -> soroban_debugger::Result<()> { Ok(()) } - diff --git a/src/cli/commands.rs b/src/cli/commands.rs index 28cbfc19..8dbbd1f6 100644 --- a/src/cli/commands.rs +++ b/src/cli/commands.rs @@ -1,27 +1,14 @@ -use crate::analyzer::upgrade::{ CompatibilityReport, ExecutionDiff, UpgradeAnalyzer }; -use crate::analyzer::{ security::SecurityAnalyzer, symbolic::SymbolicAnalyzer }; +use crate::analyzer::upgrade::{CompatibilityReport, ExecutionDiff, UpgradeAnalyzer}; +use crate::analyzer::{security::SecurityAnalyzer, symbolic::SymbolicAnalyzer}; use crate::cli::args::{ - AnalyzeArgs, - CompareArgs, - InspectArgs, - InteractiveArgs, - OptimizeArgs, - ProfileArgs, - RemoteArgs, - ReplArgs, - ReplayArgs, - RunArgs, - ScenarioArgs, - ServerArgs, - SymbolicArgs, - TuiArgs, - UpgradeCheckArgs, - Verbosity, + AnalyzeArgs, CompareArgs, InspectArgs, InteractiveArgs, OptimizeArgs, ProfileArgs, RemoteArgs, + ReplArgs, ReplayArgs, RunArgs, ScenarioArgs, ServerArgs, SymbolicArgs, TuiArgs, + UpgradeCheckArgs, Verbosity, }; use crate::debugger::engine::DebuggerEngine; use crate::debugger::instruction_pointer::StepMode; -use crate::history::{ HistoryManager, RunHistory }; -use 
crate::inspector::events::{ ContractEvent, EventInspector }; +use crate::history::{HistoryManager, RunHistory}; +use crate::inspector::events::{ContractEvent, EventInspector}; use crate::logging; use crate::output::OutputWriter; use crate::repeat::RepeatRunner; @@ -29,8 +16,8 @@ use crate::repl::ReplConfig; use crate::runtime::executor::ContractExecutor; use crate::simulator::SnapshotLoader; use crate::ui::formatter::Formatter; -use crate::ui::{ run_dashboard, DebuggerUI }; -use crate::{ DebuggerError, Result }; +use crate::ui::{run_dashboard, DebuggerUI}; +use crate::{DebuggerError, Result}; use miette::WrapErr; use std::fs; @@ -67,13 +54,12 @@ fn print_verbose(message: impl AsRef) { } fn budget_trend_stats_or_err(records: &[RunHistory]) -> Result { - crate::history - ::budget_trend_stats(records) - .ok_or_else(|| { - DebuggerError::ExecutionError( - "Failed to compute budget trend statistics for the selected dataset".to_string() - ).into() - }) + crate::history::budget_trend_stats(records).ok_or_else(|| { + DebuggerError::ExecutionError( + "Failed to compute budget trend statistics for the selected dataset".to_string(), + ) + .into() + }) } #[derive(serde::Serialize)] @@ -95,7 +81,7 @@ fn render_symbolic_report(report: &crate::analyzer::symbolic::SymbolicReport) -> let mut lines = vec![ format!("Function: {}", report.function), format!("Paths explored: {}", report.paths_explored), - format!("Panics found: {}", report.panics_found) + format!("Panics found: {}", report.panics_found), ]; if report.paths.is_empty() { @@ -112,7 +98,12 @@ fn render_symbolic_report(report: &crate::analyzer::symbolic::SymbolicReport) -> (_, Some(panic)) => format!("panic {}", panic), _ => "unknown".to_string(), }; - lines.push(format!(" {}. inputs={} -> {}", idx + 1, path.inputs, outcome)); + lines.push(format!( + " {}. 
inputs={} -> {}", + idx + 1, + path.inputs, + outcome + )); } lines.join("\n") @@ -129,7 +120,10 @@ fn render_security_report(output: &AnalyzeCommandOutput) -> String { if let Some(result) = &dynamic.result { lines.push(format!("Dynamic execution result: {}", result)); } - lines.push(format!("Dynamic trace entries captured: {}", dynamic.trace_entries)); + lines.push(format!( + "Dynamic trace entries captured: {}", + dynamic.trace_entries + )); lines.push(String::new()); } @@ -148,15 +142,13 @@ fn render_security_report(output: &AnalyzeCommandOutput) -> String { lines.push(format!("Findings: {}", output.findings.len())); for (idx, finding) in output.findings.iter().enumerate() { - lines.push( - format!( - " {}. [{:?}] {} at {}", - idx + 1, - finding.severity, - finding.rule_id, - finding.location - ) - ); + lines.push(format!( + " {}. [{:?}] {} at {}", + idx + 1, + finding.severity, + finding.rule_id, + finding.location + )); lines.push(format!(" {}", finding.description)); lines.push(format!(" Remediation: {}", finding.remediation)); } @@ -168,7 +160,7 @@ fn render_security_report(output: &AnalyzeCommandOutput) -> String { fn run_instruction_stepping( _engine: &mut DebuggerEngine, _function: &str, - _args: Option<&str> + _args: Option<&str>, ) -> Result<()> { print_info("Instruction stepping is not yet fully implemented"); Ok(()) @@ -193,20 +185,18 @@ fn display_mock_call_log(calls: &[crate::runtime::executor::MockCallEntry]) { print_info("\n--- Mock Contract Calls ---"); for (i, entry) in calls.iter().enumerate() { let status = if entry.mocked { "MOCKED" } else { "REAL" }; - print_info( - format!( - "{}. {} {} (args: {}) -> {}", - i + 1, - status, - entry.function, - entry.args_count, - if entry.returned.is_some() { - "returned" - } else { - "pending" - } - ) - ); + print_info(format!( + "{}. 
{} {} (args: {}) -> {}", + i + 1, + status, + entry.function, + entry.args_count, + if entry.returned.is_some() { + "returned" + } else { + "pending" + } + )); } } @@ -224,15 +214,14 @@ fn run_batch(args: &RunArgs, batch_file: &std::path::Path) -> Result<()> { print_info(format!("Loading contract: {:?}", contract)); logging::log_loading_contract(&contract.to_string_lossy()); - let wasm_bytes = fs - ::read(contract) - .map_err(|e| { - DebuggerError::WasmLoadError( - format!("Failed to read WASM file at {:?}: {}", contract, e) - ) - })?; + let wasm_bytes = fs::read(contract).map_err(|e| { + DebuggerError::WasmLoadError(format!("Failed to read WASM file at {:?}: {}", contract, e)) + })?; - print_success(format!("Contract loaded successfully ({} bytes)", wasm_bytes.len())); + print_success(format!( + "Contract loaded successfully ({} bytes)", + wasm_bytes.len() + )); logging::log_contract_loaded(wasm_bytes.len()); print_info(format!("Loading batch file: {:?}", batch_file)); @@ -247,13 +236,11 @@ fn run_batch(args: &RunArgs, batch_file: &std::path::Path) -> Result<()> { logging::log_display(loaded_snapshot.format_summary(), logging::LogLevel::Info); } - print_info( - format!( - "\nExecuting {} test cases in parallel for function: {}", - batch_items.len(), - function - ) - ); + print_info(format!( + "\nExecuting {} test cases in parallel for function: {}", + batch_items.len(), + function + )); logging::log_execution_start(function, None); let executor = crate::batch::BatchExecutor::new(wasm_bytes, function.clone())?; @@ -263,33 +250,26 @@ fn run_batch(args: &RunArgs, batch_file: &std::path::Path) -> Result<()> { crate::batch::BatchExecutor::display_results(&results, &summary); if args.is_json_output() { - let output = - serde_json::json!({ + let output = serde_json::json!({ "results": results, "summary": summary, }); logging::log_display( - serde_json - ::to_string_pretty(&output) - .map_err(|e| { - DebuggerError::FileError(format!("Failed to serialize output: {}", e)) - 
})?, - logging::LogLevel::Info + serde_json::to_string_pretty(&output).map_err(|e| { + DebuggerError::FileError(format!("Failed to serialize output: {}", e)) + })?, + logging::LogLevel::Info, ); } logging::log_execution_complete(&format!("{}/{} passed", summary.passed, summary.total)); if summary.failed > 0 || summary.errors > 0 { - return Err( - DebuggerError::ExecutionError( - format!( - "Batch execution completed with failures: {} failed, {} errors", - summary.failed, - summary.errors - ) - ).into() - ); + return Err(DebuggerError::ExecutionError(format!( + "Batch execution completed with failures: {} failed, {} errors", + summary.failed, summary.errors + )) + .into()); } Ok(()) @@ -333,25 +313,29 @@ pub fn run(args: RunArgs, verbosity: Verbosity) -> Result<()> { output_writer.write(&format!("Loading contract: {:?}", contract))?; logging::log_loading_contract(&contract.to_string_lossy()); - let wasm_file = crate::utils::wasm - ::load_wasm(contract) + let wasm_file = crate::utils::wasm::load_wasm(contract) .with_context(|| format!("Failed to read WASM file: {:?}", contract))?; let wasm_bytes = wasm_file.bytes; let wasm_hash = wasm_file.sha256_hash; if let Some(expected) = &args.expected_hash { if expected.to_lowercase() != wasm_hash { - return Err( - (crate::DebuggerError::ChecksumMismatch { - expected: expected.clone(), - actual: wasm_hash.clone(), - }).into() - ); + return Err((crate::DebuggerError::ChecksumMismatch { + expected: expected.clone(), + actual: wasm_hash.clone(), + }) + .into()); } } - print_success(format!("Contract loaded successfully ({} bytes)", wasm_bytes.len())); - output_writer.write(&format!("Contract loaded successfully ({} bytes)", wasm_bytes.len()))?; + print_success(format!( + "Contract loaded successfully ({} bytes)", + wasm_bytes.len() + )); + output_writer.write(&format!( + "Contract loaded successfully ({} bytes)", + wasm_bytes.len() + ))?; if args.verbose || verbosity == Verbosity::Verbose { print_verbose(format!("SHA-256: {}", 
wasm_hash)); @@ -391,15 +375,9 @@ pub fn run(args: RunArgs, verbosity: Verbosity) -> Result<()> { print_info(format!("Importing storage from: {:?}", import_path)); let imported = crate::inspector::storage::StorageState::import_from_file(import_path)?; print_success(format!("Imported {} storage entries", imported.len())); - initial_storage = Some( - serde_json - ::to_string(&imported) - .map_err(|e| { - DebuggerError::StorageError( - format!("Failed to serialize imported storage: {}", e) - ) - })? - ); + initial_storage = Some(serde_json::to_string(&imported).map_err(|e| { + DebuggerError::StorageError(format!("Failed to serialize imported storage: {}", e)) + })?); } if let Some(n) = args.repeat { @@ -436,11 +414,10 @@ pub fn run(args: RunArgs, verbosity: Verbosity) -> Result<()> { // Remote mode is not yet implemented if args.remote.is_some() { - return Err( - DebuggerError::ExecutionError( - "Remote mode not yet implemented in run command".to_string() - ).into() - ); + return Err(DebuggerError::ExecutionError( + "Remote mode not yet implemented in run command".to_string(), + ) + .into()); } // Execute locally with debugging @@ -453,7 +430,10 @@ pub fn run(args: RunArgs, verbosity: Verbosity) -> Result<()> { if args.step_instructions { let step_mode = parse_step_mode(&args.step_mode); - print_info(format!("Starting instruction stepping in '{}' mode", args.step_mode)); + print_info(format!( + "Starting instruction stepping in '{}' mode", + args.step_mode + )); engine.start_instruction_stepping(step_mode)?; run_instruction_stepping(&mut engine, function, parsed_args.as_deref())?; return Ok(()); @@ -477,7 +457,10 @@ pub fn run(args: RunArgs, verbosity: Verbosity) -> Result<()> { print_info(format!("\nGenerating unit test: {:?}", test_path)); let test_code = crate::codegen::TestGenerator::generate(record, contract)?; crate::codegen::TestGenerator::write_to_file(test_path, &test_code, args.overwrite)?; - print_success(format!("Unit test generated successfully at {:?}", 
test_path)); + print_success(format!( + "Unit test generated successfully at {:?}", + test_path + )); } else { print_warning("No execution record found to generate test."); } @@ -486,7 +469,7 @@ pub fn run(args: RunArgs, verbosity: Verbosity) -> Result<()> { let storage_diff = crate::inspector::storage::StorageInspector::compute_diff( &storage_before, &storage_after, - &args.alert_on_change + &args.alert_on_change, ); if !storage_diff.is_empty() || !args.alert_on_change.is_empty() { print_info("\n--- Storage Changes ---"); @@ -522,7 +505,10 @@ pub fn run(args: RunArgs, verbosity: Verbosity) -> Result<()> { print_info(format!("Exporting storage to: {:?}", export_path)); let storage_snapshot = engine.executor().get_storage_snapshot()?; crate::inspector::storage::StorageState::export_to_file(&storage_snapshot, export_path)?; - print_success(format!("Exported {} storage entries", storage_snapshot.len())); + print_success(format!( + "Exported {} storage entries", + storage_snapshot.len() + )); } let mut json_events = None; @@ -534,27 +520,27 @@ pub fn run(args: RunArgs, verbosity: Verbosity) -> Result<()> { // Convert runtime event objects into our inspector::events::ContractEvent via serde translation. // This is a generic, safe conversion as long as runtime events are serializable with sensible fields. - let converted_events: Vec = match - serde_json::to_value(&raw_events).and_then(serde_json::from_value) - { - Ok(evts) => evts, - Err(e) => { - // If conversion fails, fall back to attempting to stringify each raw event for display. 
- print_warning( - format!("Failed to convert runtime events for structured display: {}", e) - ); - // Fallback: attempt a best-effort stringification - let fallback: Vec = raw_events - .into_iter() - .map(|r| ContractEvent { - contract_id: None, - topics: vec![], - data: format!("{:?}", r), - }) - .collect(); - fallback - } - }; + let converted_events: Vec = + match serde_json::to_value(&raw_events).and_then(serde_json::from_value) { + Ok(evts) => evts, + Err(e) => { + // If conversion fails, fall back to attempting to stringify each raw event for display. + print_warning(format!( + "Failed to convert runtime events for structured display: {}", + e + )); + // Fallback: attempt a best-effort stringification + let fallback: Vec = raw_events + .into_iter() + .map(|r| ContractEvent { + contract_id: None, + topics: vec![], + data: format!("{:?}", r), + }) + .collect(); + fallback + } + }; // Determine filter: prefer repeatable --event-filter, fallback to legacy --filter-topic let filter_opt = if !args.event_filter.is_empty() { @@ -583,14 +569,12 @@ pub fn run(args: RunArgs, verbosity: Verbosity) -> Result<()> { } if !args.storage_filter.is_empty() { - let storage_filter = crate::inspector::storage::StorageFilter - ::new(&args.storage_filter) + let storage_filter = crate::inspector::storage::StorageFilter::new(&args.storage_filter) .map_err(|e| DebuggerError::StorageError(format!("Invalid storage filter: {}", e)))?; print_info("\n--- Storage ---"); - let inspector = crate::inspector::storage::StorageInspector::with_state( - storage_after.clone() - ); + let inspector = + crate::inspector::storage::StorageInspector::with_state(storage_after.clone()); inspector.display_filtered(&storage_filter); } @@ -626,20 +610,17 @@ pub fn run(args: RunArgs, verbosity: Verbosity) -> Result<()> { if let Some(access_type) = footprint_map.get(key) { if let Some((entry, ttl)) = val_opt { let key_str = format!("{:?}", **key); - let storage_type = if - key_str.contains("Temporary") || - 
key_str.contains("temporary") - { - crate::inspector::ledger::StorageType::Temporary - } else if - key_str.contains("Instance") || - key_str.contains("instance") || - key_str.contains("LedgerKeyContractInstance") - { - crate::inspector::ledger::StorageType::Instance - } else { - crate::inspector::ledger::StorageType::Persistent - }; + let storage_type = + if key_str.contains("Temporary") || key_str.contains("temporary") { + crate::inspector::ledger::StorageType::Temporary + } else if key_str.contains("Instance") + || key_str.contains("instance") + || key_str.contains("LedgerKeyContractInstance") + { + crate::inspector::ledger::StorageType::Instance + } else { + crate::inspector::ledger::StorageType::Persistent + }; use soroban_env_host::storage::AccessType; let is_read = true; // Everything in the footprint is at least read @@ -651,7 +632,7 @@ pub fn run(args: RunArgs, verbosity: Verbosity) -> Result<()> { storage_type, ttl.unwrap_or(0), is_read, - is_write + is_write, ); } } @@ -668,8 +649,7 @@ pub fn run(args: RunArgs, verbosity: Verbosity) -> Result<()> { } if args.is_json_output() { - let mut output = - serde_json::json!({ + let mut output = serde_json::json!({ "status": "success", "result": result, "sha256": wasm_hash, @@ -699,7 +679,7 @@ pub fn run(args: RunArgs, verbosity: Verbosity) -> Result<()> { "returned": entry.returned, }) }) - .collect() + .collect(), ); } if let Some(ref ledger) = json_ledger { @@ -709,8 +689,7 @@ pub fn run(args: RunArgs, verbosity: Verbosity) -> Result<()> { match serde_json::to_string_pretty(&output) { Ok(json) => println!("{}", json), Err(e) => { - let err_output = - serde_json::json!({ + let err_output = serde_json::json!({ "status": "error", "errors": [format!("Failed to serialize output: {}", e)] }); @@ -724,11 +703,12 @@ pub fn run(args: RunArgs, verbosity: Verbosity) -> Result<()> { if let Some(trace_path) = &args.trace_output { print_info(format!("\nExporting execution trace to: {:?}", trace_path)); - let args_str = 
parsed_args.as_ref().map(|a| serde_json::to_string(a).unwrap_or_default()); + let args_str = parsed_args + .as_ref() + .map(|a| serde_json::to_string(a).unwrap_or_default()); - let trace_events = json_events.unwrap_or_else(|| - engine.executor().get_events().unwrap_or_default() - ); + let trace_events = + json_events.unwrap_or_else(|| engine.executor().get_events().unwrap_or_default()); let trace = build_execution_trace( function, @@ -739,7 +719,7 @@ pub fn run(args: RunArgs, verbosity: Verbosity) -> Result<()> { budget, engine.executor(), &trace_events, - usize::MAX + usize::MAX, ); if let Ok(json) = trace.to_json() { @@ -764,7 +744,7 @@ fn build_execution_trace( budget: crate::inspector::budget::BudgetInfo, executor: &ContractExecutor, events: &[crate::inspector::events::ContractEvent], - replay_until: usize + replay_until: usize, ) -> crate::compare::ExecutionTrace { let mut trace_storage = std::collections::BTreeMap::new(); for (k, v) in storage_after { @@ -775,8 +755,7 @@ fn build_execution_trace( } } - let return_val = serde_json - ::from_str(result) + let return_val = serde_json::from_str(result) .unwrap_or_else(|_| serde_json::Value::String(result.to_string())); let mut call_sequence = Vec::new(); @@ -796,9 +775,8 @@ fn build_execution_trace( } let event_str = format!("{:?}", event); - if - event_str.contains("ContractCall") || - (event_str.contains("call") && event.contract_id.is_some()) + if event_str.contains("ContractCall") + || (event_str.contains("call") && event.contract_id.is_some()) { depth += 1; call_sequence.push(crate::compare::trace::CallEntry { @@ -806,9 +784,8 @@ fn build_execution_trace( args: None, depth, }); - } else if - (event_str.contains("ContractReturn") || event_str.contains("return")) && - depth > 0 + } else if (event_str.contains("ContractReturn") || event_str.contains("return")) + && depth > 0 { depth -= 1; } @@ -850,24 +827,25 @@ fn run_dry_run(args: &RunArgs) -> Result<()> { .expect("contract is required for dry-run"); 
print_info(format!("[DRY RUN] Loading contract: {:?}", contract)); - let wasm_file = crate::utils::wasm - ::load_wasm(contract) + let wasm_file = crate::utils::wasm::load_wasm(contract) .with_context(|| format!("Failed to read WASM file: {:?}", contract))?; let wasm_bytes = wasm_file.bytes; let wasm_hash = wasm_file.sha256_hash; if let Some(expected) = &args.expected_hash { if expected.to_lowercase() != wasm_hash { - return Err( - (crate::DebuggerError::ChecksumMismatch { - expected: expected.clone(), - actual: wasm_hash.clone(), - }).into() - ); + return Err((crate::DebuggerError::ChecksumMismatch { + expected: expected.clone(), + actual: wasm_hash.clone(), + }) + .into()); } } - print_success(format!("[DRY RUN] Contract loaded successfully ({} bytes)", wasm_bytes.len())); + print_success(format!( + "[DRY RUN] Contract loaded successfully ({} bytes)", + wasm_bytes.len() + )); if args.verbose { print_verbose(format!("[DRY RUN] SHA-256: {}", wasm_hash)); @@ -884,7 +862,7 @@ fn run_dry_run(args: &RunArgs) -> Result<()> { /// Get instruction counts from the debugger engine #[allow(dead_code)] fn get_instruction_counts( - engine: &DebuggerEngine + engine: &DebuggerEngine, ) -> Option { // Try to get instruction counts from the executor engine.executor().get_instruction_counts().ok() @@ -900,20 +878,27 @@ fn display_instruction_counts(counts: &crate::runtime::executor::InstructionCoun print_info("\n--- Instruction Count per Function ---"); // Calculate percentages - let percentages: Vec = counts.function_counts + let percentages: Vec = counts + .function_counts .iter() .map(|(_, count)| { - if counts.total > 0 { ((*count as f64) / (counts.total as f64)) * 100.0 } else { 0.0 } + if counts.total > 0 { + ((*count as f64) / (counts.total as f64)) * 100.0 + } else { + 0.0 + } }) .collect(); // Find max widths for alignment - let max_func_width = counts.function_counts + let max_func_width = counts + .function_counts .iter() .map(|(name, _)| name.len()) .max() .unwrap_or(20); 
- let max_count_width = counts.function_counts + let max_count_width = counts + .function_counts .iter() .map(|(_, count)| count.to_string().len()) .max() @@ -949,13 +934,11 @@ fn display_instruction_counts(counts: &crate::runtime::executor::InstructionCoun /// Execute the upgrade-check command pub fn upgrade_check(args: UpgradeCheckArgs) -> Result<()> { println!("Loading old contract: {:?}", args.old); - let old_wasm = fs - ::read(&args.old) + let old_wasm = fs::read(&args.old) .map_err(|e| miette::miette!("Failed to read old WASM file {:?}: {}", args.old, e))?; println!("Loading new contract: {:?}", args.new); - let new_wasm = fs - ::read(&args.new) + let new_wasm = fs::read(&args.new) .map_err(|e| miette::miette!("Failed to read new WASM file {:?}: {}", args.new, e))?; // Optionally run test inputs against both versions @@ -968,25 +951,17 @@ pub fn upgrade_check(args: UpgradeCheckArgs) -> Result<()> { let old_path = args.old.to_string_lossy().to_string(); let new_path = args.new.to_string_lossy().to_string(); - let report = UpgradeAnalyzer::analyze( - &old_wasm, - &new_wasm, - &old_path, - &new_path, - execution_diffs - )?; + let report = + UpgradeAnalyzer::analyze(&old_wasm, &new_wasm, &old_path, &new_path, execution_diffs)?; let output = match args.output.as_str() { - "json" => - serde_json - ::to_string_pretty(&report) - .map_err(|e| miette::miette!("Failed to serialize report: {}", e))?, + "json" => serde_json::to_string_pretty(&report) + .map_err(|e| miette::miette!("Failed to serialize report: {}", e))?, _ => format_text_report(&report), }; if let Some(out_file) = &args.output_file { - fs - ::write(out_file, &output) + fs::write(out_file, &output) .map_err(|e| miette::miette!("Failed to write report to {:?}: {}", out_file, e))?; println!("Report written to {:?}", out_file); } else { @@ -994,12 +969,10 @@ pub fn upgrade_check(args: UpgradeCheckArgs) -> Result<()> { } if !report.is_compatible { - return Err( - miette::miette!( - "Contracts are not 
compatible: {} breaking change(s) detected", - report.breaking_changes.len() - ) - ); + return Err(miette::miette!( + "Contracts are not compatible: {} breaking change(s) detected", + report.breaking_changes.len() + )); } Ok(()) @@ -1009,7 +982,7 @@ pub fn upgrade_check(args: UpgradeCheckArgs) -> Result<()> { fn run_test_inputs( inputs_json: &str, old_wasm: &[u8], - new_wasm: &[u8] + new_wasm: &[u8], ) -> Result> { let inputs: serde_json::Map = serde_json ::from_str(inputs_json) @@ -1047,7 +1020,11 @@ fn invoke_wasm(wasm: &[u8], function: &str, args: &str) -> String { Err(e) => format!("Err(executor: {})", e), Ok(executor) => { let mut engine = DebuggerEngine::new(executor, vec![]); - let parsed = if args == "null" || args == "[]" { None } else { Some(args.to_string()) }; + let parsed = if args == "null" || args == "[]" { + None + } else { + Some(args.to_string()) + }; match engine.execute(function, parsed.as_deref()) { Ok(val) => format!("Ok({:?})", val), Err(e) => format!("Err({})", e), @@ -1066,11 +1043,18 @@ fn format_text_report(report: &CompatibilityReport) -> String { out.push_str(&format!("New: {}\n", report.new_wasm_path)); out.push('\n'); - let status = if report.is_compatible { "COMPATIBLE" } else { "INCOMPATIBLE" }; + let status = if report.is_compatible { + "COMPATIBLE" + } else { + "INCOMPATIBLE" + }; out.push_str(&format!("Status: {}\n", status)); out.push('\n'); - out.push_str(&format!("Breaking Changes ({}):\n", report.breaking_changes.len())); + out.push_str(&format!( + "Breaking Changes ({}):\n", + report.breaking_changes.len() + )); if report.breaking_changes.is_empty() { out.push_str(" (none)\n"); } else { @@ -1080,7 +1064,10 @@ fn format_text_report(report: &CompatibilityReport) -> String { } out.push('\n'); - out.push_str(&format!("Non-Breaking Changes ({}):\n", report.non_breaking_changes.len())); + out.push_str(&format!( + "Non-Breaking Changes ({}):\n", + report.non_breaking_changes.len() + )); if report.non_breaking_changes.is_empty() { 
out.push_str(" (none)\n"); } else { @@ -1091,46 +1078,56 @@ fn format_text_report(report: &CompatibilityReport) -> String { if !report.execution_diffs.is_empty() { out.push('\n'); - out.push_str(&format!("Execution Diffs ({}):\n", report.execution_diffs.len())); + out.push_str(&format!( + "Execution Diffs ({}):\n", + report.execution_diffs.len() + )); for diff in &report.execution_diffs { - let match_str = if diff.outputs_match { "MATCH" } else { "MISMATCH" }; - out.push_str( - &format!( - " {} args={} OLD={} NEW={} [{}]\n", - diff.function, - diff.args, - diff.old_result, - diff.new_result, - match_str - ) - ); + let match_str = if diff.outputs_match { + "MATCH" + } else { + "MISMATCH" + }; + out.push_str(&format!( + " {} args={} OLD={} NEW={} [{}]\n", + diff.function, diff.args, diff.old_result, diff.new_result, match_str + )); } } out.push('\n'); - let old_names: Vec<&str> = report.old_functions + let old_names: Vec<&str> = report + .old_functions .iter() .map(|f| f.name.as_str()) .collect(); - let new_names: Vec<&str> = report.new_functions + let new_names: Vec<&str> = report + .new_functions .iter() .map(|f| f.name.as_str()) .collect(); - out.push_str(&format!("Old Functions ({}): {}\n", old_names.len(), old_names.join(", "))); - out.push_str(&format!("New Functions ({}): {}\n", new_names.len(), new_names.join(", "))); + out.push_str(&format!( + "Old Functions ({}): {}\n", + old_names.len(), + old_names.join(", ") + )); + out.push_str(&format!( + "New Functions ({}): {}\n", + new_names.len(), + new_names.join(", ") + )); out } /// Parse JSON arguments with validation. pub fn parse_args(json: &str) -> Result { - let value = serde_json - ::from_str::(json) - .map_err(|e| { - DebuggerError::InvalidArguments( - format!("Failed to parse JSON arguments: {}. Error: {}", json, e) - ) - })?; + let value = serde_json::from_str::(json).map_err(|e| { + DebuggerError::InvalidArguments(format!( + "Failed to parse JSON arguments: {}. 
Error: {}", + json, e + )) + })?; match value { serde_json::Value::Array(ref arr) => { @@ -1149,39 +1146,42 @@ pub fn parse_args(json: &str) -> Result { /// Parse JSON storage. pub fn parse_storage(json: &str) -> Result { - serde_json - ::from_str::(json) - .map_err(|e| { - DebuggerError::StorageError( - format!("Failed to parse JSON storage: {}. Error: {}", json, e) - ) - })?; + serde_json::from_str::(json).map_err(|e| { + DebuggerError::StorageError(format!( + "Failed to parse JSON storage: {}. Error: {}", + json, e + )) + })?; Ok(json.to_string()) } /// Execute the optimize command. pub fn optimize(args: OptimizeArgs, _verbosity: Verbosity) -> Result<()> { - print_info(format!("Analyzing contract for gas optimization: {:?}", args.contract)); + print_info(format!( + "Analyzing contract for gas optimization: {:?}", + args.contract + )); logging::log_loading_contract(&args.contract.to_string_lossy()); - let wasm_file = crate::utils::wasm - ::load_wasm(&args.contract) + let wasm_file = crate::utils::wasm::load_wasm(&args.contract) .with_context(|| format!("Failed to read WASM file: {:?}", args.contract))?; let wasm_bytes = wasm_file.bytes; let wasm_hash = wasm_file.sha256_hash; if let Some(expected) = &args.expected_hash { if expected.to_lowercase() != wasm_hash { - return Err( - (crate::DebuggerError::ChecksumMismatch { - expected: expected.clone(), - actual: wasm_hash.clone(), - }).into() - ); + return Err((crate::DebuggerError::ChecksumMismatch { + expected: expected.clone(), + actual: wasm_hash.clone(), + }) + .into()); } } - print_success(format!("Contract loaded successfully ({} bytes)", wasm_bytes.len())); + print_success(format!( + "Contract loaded successfully ({} bytes)", + wasm_bytes.len() + )); if _verbosity == Verbosity::Verbose { print_verbose(format!("SHA-256: {}", wasm_hash)); @@ -1215,7 +1215,10 @@ pub fn optimize(args: OptimizeArgs, _verbosity: Verbosity) -> Result<()> { let mut optimizer = crate::profiler::analyzer::GasOptimizer::new(executor); - 
print_info(format!("\nAnalyzing {} function(s)...", functions_to_analyze.len())); + print_info(format!( + "\nAnalyzing {} function(s)...", + functions_to_analyze.len() + )); logging::log_analysis_start("gas optimization"); for function_name in &functions_to_analyze { @@ -1225,24 +1228,20 @@ pub fn optimize(args: OptimizeArgs, _verbosity: Verbosity) -> Result<()> { logging::log_display( format!( " CPU: {} instructions, Memory: {} bytes, Time: {} ms", - profile.total_cpu, - profile.total_memory, - profile.wall_time_ms + profile.total_cpu, profile.total_memory, profile.wall_time_ms ), - logging::LogLevel::Info - ); - print_success( - format!( - " CPU: {} instructions, Memory: {} bytes", - profile.total_cpu, - profile.total_memory - ) + logging::LogLevel::Info, ); + print_success(format!( + " CPU: {} instructions, Memory: {} bytes", + profile.total_cpu, profile.total_memory + )); } Err(e) => { - print_warning( - format!(" Warning: Failed to analyze function {}: {}", function_name, e) - ); + print_warning(format!( + " Warning: Failed to analyze function {}: {}", + function_name, e + )); tracing::warn!(function = function_name, error = %e, "Failed to analyze function"); } } @@ -1254,14 +1253,16 @@ pub fn optimize(args: OptimizeArgs, _verbosity: Verbosity) -> Result<()> { let markdown = optimizer.generate_markdown_report(&report); if let Some(output_path) = &args.output { - fs - ::write(output_path, &markdown) - .map_err(|e| { - DebuggerError::FileError( - format!("Failed to write report to {:?}: {}", output_path, e) - ) - })?; - print_success(format!("\nOptimization report written to: {:?}", output_path)); + fs::write(output_path, &markdown).map_err(|e| { + DebuggerError::FileError(format!( + "Failed to write report to {:?}: {}", + output_path, e + )) + })?; + print_success(format!( + "\nOptimization report written to: {:?}", + output_path + )); logging::log_optimization_report(&output_path.to_string_lossy()); } else { logging::log_display(&markdown, 
logging::LogLevel::Info); @@ -1274,29 +1275,27 @@ pub fn optimize(args: OptimizeArgs, _verbosity: Verbosity) -> Result<()> { pub fn profile(args: ProfileArgs) -> Result<()> { logging::log_display( format!("Profiling contract execution: {:?}", args.contract), - logging::LogLevel::Info + logging::LogLevel::Info, ); - let wasm_file = crate::utils::wasm - ::load_wasm(&args.contract) + let wasm_file = crate::utils::wasm::load_wasm(&args.contract) .with_context(|| format!("Failed to read WASM file: {:?}", args.contract))?; let wasm_bytes = wasm_file.bytes; let wasm_hash = wasm_file.sha256_hash; if let Some(expected) = &args.expected_hash { if expected.to_lowercase() != wasm_hash { - return Err( - (crate::DebuggerError::ChecksumMismatch { - expected: expected.clone(), - actual: wasm_hash.clone(), - }).into() - ); + return Err((crate::DebuggerError::ChecksumMismatch { + expected: expected.clone(), + actual: wasm_hash.clone(), + }) + .into()); } } logging::log_display( format!("Contract loaded successfully ({} bytes)", wasm_bytes.len()), - logging::LogLevel::Info + logging::LogLevel::Info, ); // Parse args (optional) @@ -1318,7 +1317,10 @@ pub fn profile(args: ProfileArgs) -> Result<()> { // Analyze exactly one function (this command focuses on execution hotspots) let mut optimizer = crate::profiler::analyzer::GasOptimizer::new(executor); - logging::log_display(format!("\nRunning function: {}", args.function), logging::LogLevel::Info); + logging::log_display( + format!("\nRunning function: {}", args.function), + logging::LogLevel::Info, + ); if let Some(ref a) = parsed_args { logging::log_display(format!("Args: {}", a), logging::LogLevel::Info); } @@ -1329,22 +1331,24 @@ pub fn profile(args: ProfileArgs) -> Result<()> { let report = optimizer.generate_report(&contract_path_str); // Hotspot summary first - logging::log_display(format!("\n{}", report.format_hotspots()), logging::LogLevel::Info); + logging::log_display( + format!("\n{}", report.format_hotspots()), + 
logging::LogLevel::Info, + ); // Then detailed suggestions (markdown format) let markdown = optimizer.generate_markdown_report(&report); if let Some(output_path) = &args.output { - fs - ::write(output_path, &markdown) - .map_err(|e| { - DebuggerError::FileError( - format!("Failed to write report to {:?}: {}", output_path, e) - ) - })?; + fs::write(output_path, &markdown).map_err(|e| { + DebuggerError::FileError(format!( + "Failed to write report to {:?}: {}", + output_path, e + )) + })?; logging::log_display( format!("\nProfile report written to: {:?}", output_path), - logging::LogLevel::Info + logging::LogLevel::Info, ); } else { logging::log_display(format!("\n{}", markdown), logging::LogLevel::Info); @@ -1366,13 +1370,12 @@ pub fn compare(args: CompareArgs) -> Result<()> { let rendered = crate::compare::CompareEngine::render_report(&report); if let Some(output_path) = &args.output { - fs - ::write(output_path, &rendered) - .map_err(|e| { - DebuggerError::FileError( - format!("Failed to write report to {:?}: {}", output_path, e) - ) - })?; + fs::write(output_path, &rendered).map_err(|e| { + DebuggerError::FileError(format!( + "Failed to write report to {:?}: {}", + output_path, e + )) + })?; print_success(format!("Comparison report written to: {:?}", output_path)); } else { logging::log_display(rendered, logging::LogLevel::Info); @@ -1392,30 +1395,29 @@ pub fn replay(args: ReplayArgs, verbosity: Verbosity) -> Result<()> { } else if let Some(contract_str) = &original_trace.contract { std::path::PathBuf::from(contract_str) } else { - return Err( - DebuggerError::ExecutionError( - "No contract path specified and trace file does not contain contract path".to_string() - ).into() - ); + return Err(DebuggerError::ExecutionError( + "No contract path specified and trace file does not contain contract path".to_string(), + ) + .into()); }; print_info(format!("Loading contract: {:?}", contract_path)); - let wasm_bytes = fs - ::read(&contract_path) - .map_err(|e| { - 
DebuggerError::WasmLoadError( - format!("Failed to read WASM file at {:?}: {}", contract_path, e) - ) - })?; - - print_success(format!("Contract loaded successfully ({} bytes)", wasm_bytes.len())); + let wasm_bytes = fs::read(&contract_path).map_err(|e| { + DebuggerError::WasmLoadError(format!( + "Failed to read WASM file at {:?}: {}", + contract_path, e + )) + })?; + + print_success(format!( + "Contract loaded successfully ({} bytes)", + wasm_bytes.len() + )); // Extract function and args from trace - let function = original_trace.function - .as_ref() - .ok_or_else(|| { - DebuggerError::ExecutionError("Trace file does not contain function name".to_string()) - })?; + let function = original_trace.function.as_ref().ok_or_else(|| { + DebuggerError::ExecutionError("Trace file does not contain function name".to_string()) + })?; let args_str = original_trace.args.as_deref(); @@ -1436,11 +1438,9 @@ pub fn replay(args: ReplayArgs, verbosity: Verbosity) -> Result<()> { // Set up initial storage from trace let initial_storage = if !original_trace.storage.is_empty() { - let storage_json = serde_json - ::to_string(&original_trace.storage) - .map_err(|e| { - DebuggerError::StorageError(format!("Failed to serialize trace storage: {}", e)) - })?; + let storage_json = serde_json::to_string(&original_trace.storage).map_err(|e| { + DebuggerError::StorageError(format!("Failed to serialize trace storage: {}", e)) + })?; Some(storage_json) } else { None @@ -1477,7 +1477,7 @@ pub fn replay(args: ReplayArgs, verbosity: Verbosity) -> Result<()> { budget, engine.executor(), &trace_events, - replay_steps + replay_steps, ); // Truncate original_trace's call_sequence if needed to match replay_until @@ -1492,13 +1492,12 @@ pub fn replay(args: ReplayArgs, verbosity: Verbosity) -> Result<()> { let rendered = crate::compare::CompareEngine::render_report(&report); if let Some(output_path) = &args.output { - std::fs - ::write(output_path, &rendered) - .map_err(|e| { - DebuggerError::FileError( - 
format!("Failed to write report to {:?}: {}", output_path, e) - ) - })?; + std::fs::write(output_path, &rendered).map_err(|e| { + DebuggerError::FileError(format!( + "Failed to write report to {:?}: {}", + output_path, e + )) + })?; print_success(format!("\nReplay report written to: {:?}", output_path)); } else { logging::log_display(rendered, logging::LogLevel::Info); @@ -1526,7 +1525,10 @@ pub fn replay(args: ReplayArgs, verbosity: Verbosity) -> Result<()> { /// Start debug server for remote connections pub fn server(args: ServerArgs) -> Result<()> { - print_info(format!("Starting remote debug server on port {}", args.port)); + print_info(format!( + "Starting remote debug server on port {}", + args.port + )); if args.token.is_some() { print_info("Token authentication enabled"); } else { @@ -1539,11 +1541,10 @@ pub fn server(args: ServerArgs) -> Result<()> { let server = crate::server::DebugServer::new( args.token.clone(), args.tls_cert.as_deref(), - args.tls_key.as_deref() + args.tls_key.as_deref(), )?; - tokio::runtime::Runtime - ::new() + tokio::runtime::Runtime::new() .map_err(|e: std::io::Error| miette::miette!(e)) .and_then(|rt| rt.block_on(server.run(args.port))) } @@ -1575,24 +1576,25 @@ pub fn interactive(args: InteractiveArgs, _verbosity: Verbosity) -> Result<()> { print_info(format!("Loading contract: {:?}", args.contract)); logging::log_loading_contract(&args.contract.to_string_lossy()); - let wasm_file = crate::utils::wasm - ::load_wasm(&args.contract) + let wasm_file = crate::utils::wasm::load_wasm(&args.contract) .with_context(|| format!("Failed to read WASM file: {:?}", args.contract))?; let wasm_bytes = wasm_file.bytes; let wasm_hash = wasm_file.sha256_hash; if let Some(expected) = &args.expected_hash { if expected.to_lowercase() != wasm_hash { - return Err( - (crate::DebuggerError::ChecksumMismatch { - expected: expected.clone(), - actual: wasm_hash.clone(), - }).into() - ); + return Err((crate::DebuggerError::ChecksumMismatch { + expected: 
expected.clone(), + actual: wasm_hash.clone(), + }) + .into()); } } - print_success(format!("Contract loaded successfully ({} bytes)", wasm_bytes.len())); + print_success(format!( + "Contract loaded successfully ({} bytes)", + wasm_bytes.len() + )); if let Some(snapshot_path) = &args.network_snapshot { print_info(format!("Loading network snapshot: {:?}", snapshot_path)); @@ -1618,15 +1620,9 @@ pub fn interactive(args: InteractiveArgs, _verbosity: Verbosity) -> Result<()> { print_info(format!("Importing storage from: {:?}", import_path)); let imported = crate::inspector::storage::StorageState::import_from_file(import_path)?; print_success(format!("Imported {} storage entries", imported.len())); - initial_storage = Some( - serde_json - ::to_string(&imported) - .map_err(|e| { - DebuggerError::StorageError( - format!("Failed to serialize imported storage: {}", e) - ) - })? - ); + initial_storage = Some(serde_json::to_string(&imported).map_err(|e| { + DebuggerError::StorageError(format!("Failed to serialize imported storage: {}", e)) + })?); } let mut executor = ContractExecutor::new(wasm_bytes.clone())?; @@ -1660,12 +1656,14 @@ pub fn interactive(args: InteractiveArgs, _verbosity: Verbosity) -> Result<()> { /// Launch TUI debugger pub fn tui(args: TuiArgs, _verbosity: Verbosity) -> Result<()> { print_info(format!("Loading contract: {:?}", args.contract)); - let wasm_file = crate::utils::wasm - ::load_wasm(&args.contract) + let wasm_file = crate::utils::wasm::load_wasm(&args.contract) .with_context(|| format!("Failed to read WASM file: {:?}", args.contract))?; let wasm_bytes = wasm_file.bytes; - print_success(format!("Contract loaded successfully ({} bytes)", wasm_bytes.len())); + print_success(format!( + "Contract loaded successfully ({} bytes)", + wasm_bytes.len() + )); if let Some(snapshot_path) = &args.network_snapshot { print_info(format!("Loading network snapshot: {:?}", snapshot_path)); @@ -1701,8 +1699,7 @@ pub fn tui(args: TuiArgs, _verbosity: Verbosity) -> 
Result<()> { /// Inspect a WASM contract pub fn inspect(args: InspectArgs, _verbosity: Verbosity) -> Result<()> { - let bytes = fs - ::read(&args.contract) + let bytes = fs::read(&args.contract) .map_err(|e| miette::miette!("Failed to read contract {:?}: {}", args.contract, e))?; let info = crate::utils::wasm::get_module_info(&bytes)?; println!("Contract: {:?}", args.contract); @@ -1714,7 +1711,8 @@ pub fn inspect(args: InspectArgs, _verbosity: Verbosity) -> Result<()> { let sigs = crate::utils::wasm::parse_function_signatures(&bytes)?; println!("Exported functions:"); for sig in &sigs { - let params: Vec = sig.params + let params: Vec = sig + .params .iter() .map(|p| format!("{}: {}", p.name, p.type_name)) .collect(); @@ -1728,8 +1726,7 @@ pub fn inspect(args: InspectArgs, _verbosity: Verbosity) -> Result<()> { /// Run symbolic execution analysis pub fn symbolic(args: SymbolicArgs, _verbosity: Verbosity) -> Result<()> { print_info(format!("Loading contract: {:?}", args.contract)); - let wasm_file = crate::utils::wasm - ::load_wasm(&args.contract) + let wasm_file = crate::utils::wasm::load_wasm(&args.contract) .with_context(|| format!("Failed to read WASM file: {:?}", args.contract))?; let analyzer = SymbolicAnalyzer::new(); @@ -1739,13 +1736,12 @@ pub fn symbolic(args: SymbolicArgs, _verbosity: Verbosity) -> Result<()> { if let Some(output_path) = &args.output { let scenario_toml = analyzer.generate_scenario_toml(&report); - fs - ::write(output_path, scenario_toml) - .map_err(|e| { - DebuggerError::FileError( - format!("Failed to write symbolic scenario to {:?}: {}", output_path, e) - ) - })?; + fs::write(output_path, scenario_toml).map_err(|e| { + DebuggerError::FileError(format!( + "Failed to write symbolic scenario to {:?}: {}", + output_path, e + )) + })?; print_success(format!("Scenario TOML written to: {:?}", output_path)); } @@ -1755,8 +1751,7 @@ pub fn symbolic(args: SymbolicArgs, _verbosity: Verbosity) -> Result<()> { /// Analyze a contract pub fn 
analyze(args: AnalyzeArgs, _verbosity: Verbosity) -> Result<()> { print_info(format!("Loading contract: {:?}", args.contract)); - let wasm_file = crate::utils::wasm - ::load_wasm(&args.contract) + let wasm_file = crate::utils::wasm::load_wasm(&args.contract) .with_context(|| format!("Failed to read WASM file: {:?}", args.contract))?; let mut dynamic_analysis = None; @@ -1793,15 +1788,20 @@ pub fn analyze(args: AnalyzeArgs, _verbosity: Verbosity) -> Result<()> { executor = Some(dynamic_executor); } Err(err) => { - warnings.push( - format!("Dynamic analysis for function '{}' failed: {}", function, err) - ); + warnings.push(format!( + "Dynamic analysis for function '{}' failed: {}", + function, err + )); } } } let analyzer = SecurityAnalyzer::new(); - let report = analyzer.analyze(&wasm_file.bytes, executor.as_ref(), trace_entries.as_deref())?; + let report = analyzer.analyze( + &wasm_file.bytes, + executor.as_ref(), + trace_entries.as_deref(), + )?; let output = AnalyzeCommandOutput { findings: report.findings, dynamic_analysis, @@ -1810,23 +1810,18 @@ pub fn analyze(args: AnalyzeArgs, _verbosity: Verbosity) -> Result<()> { match args.format.to_lowercase().as_str() { "text" => println!("{}", render_security_report(&output)), - "json" => - println!( - "{}", - serde_json - ::to_string_pretty(&output) - .map_err(|e| { - DebuggerError::FileError( - format!("Failed to serialize analysis output: {}", e) - ) - })? - ), + "json" => println!( + "{}", + serde_json::to_string_pretty(&output).map_err(|e| { + DebuggerError::FileError(format!("Failed to serialize analysis output: {}", e)) + })? + ), other => { - return Err( - DebuggerError::InvalidArguments( - format!("Unsupported --format '{}'. Use 'text' or 'json'.", other) - ).into() - ); + return Err(DebuggerError::InvalidArguments(format!( + "Unsupported --format '{}'. 
Use 'text' or 'json'.", + other + )) + .into()); } } @@ -1841,8 +1836,7 @@ pub fn scenario(args: ScenarioArgs, _verbosity: Verbosity) -> Result<()> { /// Launch the REPL pub async fn repl(args: ReplArgs) -> Result<()> { print_info(format!("Loading contract: {:?}", args.contract)); - let wasm_file = crate::utils::wasm - ::load_wasm(&args.contract) + let wasm_file = crate::utils::wasm::load_wasm(&args.contract) .with_context(|| format!("Failed to read WASM file: {:?}", args.contract))?; crate::utils::wasm::verify_wasm_hash(&wasm_file.sha256_hash, args.expected_hash.as_ref())?; @@ -1854,7 +1848,8 @@ pub async fn repl(args: ReplArgs) -> Result<()> { contract_path: args.contract, network_snapshot: args.network_snapshot, storage: args.storage, - }).await + }) + .await } /// Show budget trend chart @@ -1883,14 +1878,8 @@ pub fn show_budget_trend( } let stats = budget_trend_stats_or_err(&records)?; - let cpu_values: Vec = records - .iter() - .map(|r| r.cpu_used) - .collect(); - let mem_values: Vec = records - .iter() - .map(|r| r.memory_used) - .collect(); + let cpu_values: Vec = records.iter().map(|r| r.cpu_used).collect(); + let mem_values: Vec = records.iter().map(|r| r.memory_used).collect(); if !Formatter::is_quiet() { println!("Budget Trend"); @@ -1903,7 +1892,10 @@ pub fn show_budget_trend( "Regression params: threshold>{:.1}% lookback={} smoothing={}", regression.threshold_pct, regression.lookback, regression.smoothing_window ); - println!("Runs: {} Range: {} -> {}", stats.count, stats.first_date, stats.last_date); + println!( + "Runs: {} Range: {} -> {}", + stats.count, stats.first_date, stats.last_date + ); println!( "CPU insns: last={} avg={} min={} max={}", crate::inspector::budget::BudgetInspector::format_cpu_insns(stats.last_cpu), diff --git a/src/debugger/mod.rs b/src/debugger/mod.rs index 9be1eab4..f467b6a1 100644 --- a/src/debugger/mod.rs +++ b/src/debugger/mod.rs @@ -11,6 +11,6 @@ pub use breakpoint::BreakpointManager; pub use engine::DebuggerEngine; pub 
use error_db::{ErrorDatabase, ErrorExplanation}; pub use instruction_pointer::{InstructionPointer, StepMode}; -pub use source_map::{SourceLocation, SourceMap}; +pub use source_map::{SourceBreakpointResolution, SourceLocation, SourceMap}; pub use state::DebugState; pub use stepper::Stepper; diff --git a/src/debugger/source_map.rs b/src/debugger/source_map.rs index 5ffa4d6a..b0c717c4 100644 --- a/src/debugger/source_map.rs +++ b/src/debugger/source_map.rs @@ -1,6 +1,6 @@ use crate::{DebuggerError, Result}; use gimli::{Dwarf, EndianSlice, RunTimeEndian}; -use std::collections::{BTreeMap, HashMap}; +use std::collections::{BTreeMap, HashMap, HashSet}; use std::fs; use std::path::{Path, PathBuf}; use wasmparser::{Parser, Payload}; @@ -23,6 +23,27 @@ pub struct SourceMap { code_section_range: Option>, } +/// Result of resolving a source breakpoint (file + line) to a concrete contract entrypoint breakpoint. +/// +/// The debugger currently supports function-level breakpoints, so source breakpoints resolve to a +/// single exported function name (entrypoint) when possible. +#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +pub struct SourceBreakpointResolution { + /// The requested 1-based source line. + pub requested_line: u32, + /// The resolved 1-based source line (may be adjusted to the next executable line). + pub line: u32, + /// Whether the breakpoint binding is considered exact/high-confidence. + pub verified: bool, + /// Exported function (entrypoint) name to bind a runtime breakpoint to. + #[serde(skip_serializing_if = "Option::is_none")] + pub function: Option, + /// Stable reason code when `verified` is false. + pub reason_code: String, + /// Human readable explanation for UI. 
+ pub message: String, +} + impl Default for SourceMap { fn default() -> Self { Self::new() @@ -206,4 +227,377 @@ impl SourceMap { pub fn clear_cache(&mut self) { self.source_cache.clear(); } + + /// Resolve source breakpoints for a source file into exported contract functions using DWARF line mappings. + /// + /// This relies on: + /// - DWARF line program mappings (already loaded into this `SourceMap`) + /// - WASM code section entry ranges (offset -> function index) + /// - WASM export section (function index -> exported names) + /// - The provided `exported_functions` allowlist, usually derived from `inspect --functions`. + pub fn resolve_source_breakpoints( + &self, + wasm_bytes: &[u8], + source_path: &Path, + requested_lines: &[u32], + exported_functions: &HashSet, + ) -> Vec { + const MAX_FORWARD_LINE_ADJUST: u32 = 20; + + if requested_lines.is_empty() { + return Vec::new(); + } + + if self.is_empty() { + return requested_lines + .iter() + .map(|line| SourceBreakpointResolution { + requested_line: *line, + line: *line, + verified: false, + function: None, + reason_code: "NO_DEBUG_INFO".to_string(), + message: "[NO_DEBUG_INFO] Contract is missing DWARF source mappings; rebuild with debug info to bind source breakpoints accurately.".to_string(), + }) + .collect(); + } + + let wasm_index = match WasmIndex::parse(wasm_bytes) { + Ok(index) => index, + Err(e) => { + return requested_lines + .iter() + .map(|line| SourceBreakpointResolution { + requested_line: *line, + line: *line, + verified: false, + function: None, + reason_code: "WASM_PARSE_ERROR".to_string(), + message: format!( + "[WASM_PARSE_ERROR] Failed to parse WASM for breakpoint resolution: {}", + e + ), + }) + .collect(); + } + }; + + let requested_norm = normalize_path_for_match(source_path); + let mut line_to_offsets: BTreeMap> = BTreeMap::new(); + let mut file_match_count = 0usize; + + // Build a file-specific line->offset index. 
+        for (offset, loc) in self.offsets.iter() {
+            if loc.line == 0 {
+                continue;
+            }
+
+            if !paths_match_normalized(&normalize_path_for_match(&loc.file), &requested_norm) {
+                continue;
+            }
+
+            file_match_count += 1;
+            line_to_offsets.entry(loc.line).or_default().push(*offset);
+        }
+
+        if file_match_count == 0 {
+            return requested_lines
+                .iter()
+                .map(|line| SourceBreakpointResolution {
+                    requested_line: *line,
+                    line: *line,
+                    verified: false,
+                    function: None,
+                    reason_code: "FILE_NOT_IN_DEBUG_INFO".to_string(),
+                    message: format!(
+                        "[FILE_NOT_IN_DEBUG_INFO] Source file '{}' is not present in contract debug info (DWARF).",
+                        source_path.to_string_lossy()
+                    ),
+                })
+                .collect();
+        }
+
+        // Pre-compute per-function line spans for this file (for disambiguation).
+        let mut function_spans: HashMap<u32, (u32, u32)> = HashMap::new();
+        for (line, offsets) in line_to_offsets.iter() {
+            for offset in offsets {
+                if let Some(function_index) = wasm_index.function_index_for_offset(*offset) {
+                    let entry = function_spans
+                        .entry(function_index)
+                        .or_insert((*line, *line));
+                    entry.0 = entry.0.min(*line);
+                    entry.1 = entry.1.max(*line);
+                }
+            }
+        }
+
+        requested_lines
+            .iter()
+            .map(|requested_line| {
+                let mut resolved_line = *requested_line;
+                let mut adjusted = false;
+
+                let offsets = if let Some(offsets) = line_to_offsets.get(requested_line) {
+                    offsets.as_slice()
+                } else {
+                    let mut found: Option<(u32, &Vec<usize>)> = None;
+                    if let Some((next_line, offsets)) =
+                        line_to_offsets.range(*requested_line..).next()
+                    {
+                        if next_line.saturating_sub(*requested_line) <= MAX_FORWARD_LINE_ADJUST {
+                            found = Some((*next_line, offsets));
+                        }
+                    }
+
+                    if let Some((next_line, offsets)) = found {
+                        adjusted = true;
+                        resolved_line = next_line;
+                        offsets.as_slice()
+                    } else {
+                        return SourceBreakpointResolution {
+                            requested_line: *requested_line,
+                            line: *requested_line,
+                            verified: false,
+                            function: None,
+                            reason_code: "NO_CODE_AT_LINE".to_string(),
+                            message: "[NO_CODE_AT_LINE] No executable code found at or near this line in contract debug info.".to_string(),
+                        };
+                    }
+                };
+
+                let mut candidate_entrypoints: HashSet<String> = HashSet::new();
+                let mut non_exported_function_indices: HashSet<u32> = HashSet::new();
+
+                for offset in offsets {
+                    let Some(function_index) = wasm_index.function_index_for_offset(*offset) else {
+                        continue;
+                    };
+
+                    let Some(export_names) = wasm_index.export_names_for_function(function_index)
+                    else {
+                        non_exported_function_indices.insert(function_index);
+                        continue;
+                    };
+
+                    let mut any_allowed = false;
+                    for name in export_names {
+                        if exported_functions.contains(name) {
+                            any_allowed = true;
+                            candidate_entrypoints.insert(name.clone());
+                        }
+                    }
+
+                    if !any_allowed {
+                        non_exported_function_indices.insert(function_index);
+                    }
+                }
+
+                if candidate_entrypoints.is_empty() {
+                    if !non_exported_function_indices.is_empty() {
+                        let mut indices: Vec<u32> = non_exported_function_indices.into_iter().collect();
+                        indices.sort_unstable();
+                        indices.truncate(5);
+                        return SourceBreakpointResolution {
+                            requested_line: *requested_line,
+                            line: resolved_line,
+                            verified: false,
+                            function: None,
+                            reason_code: "NOT_EXPORTED".to_string(),
+                            message: format!(
+                                "[NOT_EXPORTED] Line maps to non-entrypoint WASM function(s) {:?}; only exported contract entrypoints can be targeted.",
+                                indices
+                            ),
+                        };
+                    }
+
+                    return SourceBreakpointResolution {
+                        requested_line: *requested_line,
+                        line: resolved_line,
+                        verified: false,
+                        function: None,
+                        reason_code: "UNMAPPABLE".to_string(),
+                        message: "[UNMAPPABLE] Unable to map line to an exported contract entrypoint.".to_string(),
+                    };
+                }
+
+                let mut candidates: Vec<String> = candidate_entrypoints.into_iter().collect();
+                candidates.sort();
+
+                let chosen = if candidates.len() == 1 {
+                    Some(candidates[0].clone())
+                } else {
+                    // Disambiguate using per-function line spans within this file.
+                    let mut matching: Vec<String> = Vec::new();
+                    for candidate in candidates.iter() {
+                        if let Some(function_index) =
+                            wasm_index.function_index_for_export(candidate)
+                        {
+                            if let Some((min_line, max_line)) = function_spans.get(&function_index)
+                            {
+                                if *requested_line >= *min_line && *requested_line <= *max_line {
+                                    matching.push(candidate.clone());
+                                }
+                            }
+                        }
+                    }
+
+                    if matching.len() == 1 {
+                        Some(matching.remove(0))
+                    } else {
+                        None
+                    }
+                };
+
+                let Some(function) = chosen else {
+                    return SourceBreakpointResolution {
+                        requested_line: *requested_line,
+                        line: resolved_line,
+                        verified: false,
+                        function: None,
+                        reason_code: "AMBIGUOUS".to_string(),
+                        message: format!(
+                            "[AMBIGUOUS] Source line could map to multiple entrypoints {:?}.",
+                            candidates
+                        ),
+                    };
+                };
+
+                SourceBreakpointResolution {
+                    requested_line: *requested_line,
+                    line: resolved_line,
+                    verified: true,
+                    function: Some(function.clone()),
+                    reason_code: if adjusted {
+                        "ADJUSTED".to_string()
+                    } else {
+                        "OK".to_string()
+                    },
+                    message: if adjusted {
+                        format!("Adjusted to line {} and mapped to entrypoint '{}'.", resolved_line, function)
+                    } else {
+                        format!("Mapped to entrypoint '{}'.", function)
+                    },
+                }
+            })
+            .collect()
+    }
+}
+
+#[derive(Debug, Clone)]
+struct WasmIndex {
+    function_bodies: Vec<(std::ops::Range<usize>, u32)>,
+    exports_by_function: HashMap<u32, Vec<String>>,
+    function_by_export: HashMap<String, u32>,
+}
+
+impl WasmIndex {
+    fn parse(wasm_bytes: &[u8]) -> Result<Self> {
+        let mut imported_func_count = 0u32;
+        let mut local_function_index = 0u32;
+        let mut function_bodies: Vec<(std::ops::Range<usize>, u32)> = Vec::new();
+        let mut exports_by_function: HashMap<u32, Vec<String>> = HashMap::new();
+        let mut function_by_export: HashMap<String, u32> = HashMap::new();
+
+        for payload in Parser::new(0).parse_all(wasm_bytes) {
+            let payload = payload.map_err(|e| {
+                DebuggerError::WasmLoadError(format!("Failed to parse WASM: {}", e))
+            })?;
+
+            match payload {
+                Payload::ImportSection(reader) => {
+                    for import in reader {
+                        let import = import.map_err(|e| {
+                            DebuggerError::WasmLoadError(format!("Failed to read import: {}", e))
+                        })?;
+                        if matches!(import.ty, wasmparser::TypeRef::Func(_)) {
+                            imported_func_count = imported_func_count.saturating_add(1);
+                        }
+                    }
+                }
+                Payload::ExportSection(reader) => {
+                    for export in reader {
+                        let export = export.map_err(|e| {
+                            DebuggerError::WasmLoadError(format!("Failed to read export: {}", e))
+                        })?;
+                        if matches!(export.kind, wasmparser::ExternalKind::Func) {
+                            let func_index = export.index;
+                            exports_by_function
+                                .entry(func_index)
+                                .or_default()
+                                .push(export.name.to_string());
+                            // Prefer first name if multiple exports point at same index.
+                            function_by_export
+                                .entry(export.name.to_string())
+                                .or_insert(func_index);
+                        }
+                    }
+                }
+                Payload::CodeSectionEntry(reader) => {
+                    let function_index = imported_func_count.saturating_add(local_function_index);
+                    local_function_index = local_function_index.saturating_add(1);
+                    function_bodies.push((reader.range(), function_index));
+                }
+                _ => {}
+            }
+        }
+
+        // WASM parser yields code entries in module order; sort by start for binary search safety.
+        function_bodies.sort_by_key(|(range, _)| range.start);
+
+        Ok(Self {
+            function_bodies,
+            exports_by_function,
+            function_by_export,
+        })
+    }
+
+    fn function_index_for_export(&self, export_name: &str) -> Option<u32> {
+        self.function_by_export.get(export_name).copied()
+    }
+
+    fn export_names_for_function(&self, function_index: u32) -> Option<&Vec<String>> {
+        self.exports_by_function.get(&function_index)
+    }
+
+    fn function_index_for_offset(&self, offset: usize) -> Option<u32> {
+        let bodies = self.function_bodies.as_slice();
+        if bodies.is_empty() {
+            return None;
+        }
+
+        // Find rightmost body with start <= offset.
+        let idx = match bodies.binary_search_by_key(&offset, |(range, _)| range.start) {
+            Ok(i) => i,
+            Err(0) => return None,
+            Err(i) => i - 1,
+        };
+
+        let (range, function_index) = &bodies[idx];
+        if offset >= range.start && offset < range.end {
+            Some(*function_index)
+        } else {
+            None
+        }
+    }
+}
+
+fn normalize_path_for_match(path: &Path) -> String {
+    path.to_string_lossy()
+        .replace('\\', "/")
+        .trim()
+        .to_ascii_lowercase()
+}
+
+fn paths_match_normalized(a: &str, b: &str) -> bool {
+    if a == b {
+        return true;
+    }
+
+    if a.ends_with(b) || b.ends_with(a) {
+        return true;
+    }
+
+    let a_file = a.rsplit('/').next().unwrap_or(a);
+    let b_file = b.rsplit('/').next().unwrap_or(b);
+    a_file == b_file
+}
diff --git a/src/history/mod.rs b/src/history/mod.rs
index f6154fb9..fd064626 100644
--- a/src/history/mod.rs
+++ b/src/history/mod.rs
@@ -185,9 +185,11 @@ impl HistoryManager {
             \x20 1. Inspect the file with `cat \"{}\"` and fix any JSON syntax errors.\n\
             \x20 2. Back up and remove the file (`mv \"{}\" \"{}.bak\"`) to start fresh.\n\
             \x20 3. Restore from a previous backup if one exists.",
-                    self.file_path.display(), e, self.file_path.display(),
-                    self.file_path.display(), self.file_path.display(),
+                    e,
+                    self.file_path.display(),
+                    self.file_path.display(),
+                    self.file_path.display(),
                 ))
             })?;
         Ok(history)
     }
diff --git a/src/server/debug_server.rs b/src/server/debug_server.rs
index 9a390114..a08ac5db 100644
--- a/src/server/debug_server.rs
+++ b/src/server/debug_server.rs
@@ -3,6 +3,7 @@ use crate::inspector::budget::BudgetInspector;
 use crate::server::protocol::{DebugMessage, DebugRequest, DebugResponse};
 use crate::simulator::SnapshotLoader;
 use crate::Result;
+use std::collections::HashSet;
 use std::fs;
 use std::io::BufReader as StdBufReader;
 use std::path::Path;
@@ -18,6 +19,7 @@ pub struct DebugServer {
     token: Option<String>,
     tls_config: Option<TlsConfig>,
     pending_execution: Option<PendingExecution>,
+    contract_wasm: Option<Vec<u8>>,
 }
 
 struct PendingExecution {
@@ -42,6 +44,7 @@ impl DebugServer {
             token,
             tls_config,
             pending_execution: None,
+            contract_wasm: None,
         })
     }
 
@@ -161,6 +164,7 @@ impl DebugServer {
                     let _ = engine.enable_instruction_debug(&bytes);
                     self.engine = Some(engine);
                     self.pending_execution = None;
+                    self.contract_wasm = Some(bytes);
                     DebugResponse::ContractLoaded {
                         size: fs::metadata(&contract_path)
                             .map(|m| m.len() as usize)
@@ -176,6 +180,42 @@ impl DebugServer {
                     message: format!("Failed to read contract {:?}: {}", contract_path, e),
                 },
             },
+            DebugRequest::ResolveSourceBreakpoints {
+                source_path,
+                lines,
+                exported_functions,
+            } => match (self.engine.as_ref(), self.contract_wasm.as_deref()) {
+                (Some(engine), Some(wasm_bytes)) => {
+                    if let Some(source_map) = engine.source_map() {
+                        let exported: HashSet<String> =
+                            exported_functions.into_iter().collect();
+                        let breakpoints = source_map.resolve_source_breakpoints(
+                            wasm_bytes,
+                            Path::new(&source_path),
+                            &lines,
+                            &exported,
+                        );
+                        DebugResponse::SourceBreakpointsResolved { breakpoints }
+                    } else {
+                        let breakpoints = lines
+                            .into_iter()
+                            .map(|line| crate::debugger::SourceBreakpointResolution {
+                                requested_line: line,
+                                line,
+                                verified: false,
+                                function: None,
+                                reason_code: "NO_DEBUG_INFO".to_string(),
+                                message:
+                                    "[NO_DEBUG_INFO] Contract is missing DWARF source mappings; rebuild with debug info to bind source breakpoints accurately.".to_string(),
+                            })
+                            .collect();
+                        DebugResponse::SourceBreakpointsResolved { breakpoints }
+                    }
+                }
+                _ => DebugResponse::Error {
+                    message: "No contract loaded".to_string(),
+                },
+            },
             DebugRequest::Execute { function, args } => match self.engine.as_mut() {
                 Some(engine) if engine.breakpoints().should_break(&function) => {
                     engine.prepare_breakpoint_stop(&function, args.as_deref());
diff --git a/src/server/protocol.rs b/src/server/protocol.rs
index 844c7842..2c5c3420 100644
--- a/src/server/protocol.rs
+++ b/src/server/protocol.rs
@@ -1,5 +1,7 @@
 use serde::{Deserialize, Serialize};
 
+use crate::debugger::SourceBreakpointResolution;
+
 /// Structured event category used by dynamic security analysis.
 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]
 pub enum DynamicTraceEventKind {
@@ -89,6 +91,13 @@ pub enum DebugRequest {
     /// List all breakpoints
     ListBreakpoints,
 
+    /// Resolve source breakpoints (file + line) into concrete exported function breakpoints.
+    ResolveSourceBreakpoints {
+        source_path: String,
+        lines: Vec<u32>,
+        exported_functions: Vec<String>,
+    },
+
     /// Set initial storage
     SetStorage { storage_json: String },
 
@@ -176,6 +185,11 @@ pub enum DebugResponse {
     /// List of breakpoints
     BreakpointsList { breakpoints: Vec<String> },
 
+    /// Resolved source breakpoints.
+    SourceBreakpointsResolved {
+        breakpoints: Vec<SourceBreakpointResolution>,
+    },
+
     /// Snapshot loaded
     SnapshotLoaded { summary: String },
 
diff --git a/tests/arithmetic_rule.rs b/tests/arithmetic_rule.rs
index 3003c6c3..19e59900 100644
--- a/tests/arithmetic_rule.rs
+++ b/tests/arithmetic_rule.rs
@@ -79,7 +79,10 @@ fn test_ignores_call_guarded_arithmetic() {
         .iter()
         .filter(|f| f.rule_id == "arithmetic-overflow")
         .collect();
-    assert!(!arithmetic_findings.is_empty(), "Call should not suppress arithmetic finding");
+    assert!(
+        !arithmetic_findings.is_empty(),
+        "Call should not suppress arithmetic finding"
+    );
 }
 
 #[test]
diff --git a/tests/source_breakpoint_resolution_tests.rs b/tests/source_breakpoint_resolution_tests.rs
new file mode 100644
index 00000000..21ef3ea4
--- /dev/null
+++ b/tests/source_breakpoint_resolution_tests.rs
@@ -0,0 +1,170 @@
+use soroban_debugger::debugger::source_map::{SourceLocation, SourceMap};
+use std::collections::HashSet;
+use std::path::Path;
+
+fn uleb(mut value: u32) -> Vec<u8> {
+    let mut out = Vec::new();
+    loop {
+        let mut byte = (value & 0x7F) as u8;
+        value >>= 7;
+        if value != 0 {
+            byte |= 0x80;
+        }
+        out.push(byte);
+        if value == 0 {
+            break;
+        }
+    }
+    out
+}
+
+fn section(id: u8, payload: Vec<u8>) -> Vec<u8> {
+    let mut out = vec![id];
+    out.extend(uleb(payload.len() as u32));
+    out.extend(payload);
+    out
+}
+
+fn minimal_two_export_wasm() -> Vec<u8> {
+    let mut module = vec![0x00, 0x61, 0x73, 0x6D, 0x01, 0x00, 0x00, 0x00];
+
+    // Type section: 1 type: (func) -> ()
+    let mut type_payload = Vec::new();
+    type_payload.extend(uleb(1));
+    type_payload.push(0x60);
+    type_payload.extend(uleb(0));
+    type_payload.extend(uleb(0));
+    module.extend(section(1, type_payload));
+
+    // Function section: 2 functions, both type 0
+    let mut func_payload = Vec::new();
+    func_payload.extend(uleb(2));
+    func_payload.extend(uleb(0));
+    func_payload.extend(uleb(0));
+    module.extend(section(3, func_payload));
+
+    // Export section: export func 0 as "foo", func 1 as "bar"
+    let mut export_payload = Vec::new();
+    export_payload.extend(uleb(2));
+    export_payload.extend(uleb(3));
+    export_payload.extend(b"foo");
+    export_payload.push(0x00);
+    export_payload.extend(uleb(0));
+    export_payload.extend(uleb(3));
+    export_payload.extend(b"bar");
+    export_payload.push(0x00);
+    export_payload.extend(uleb(1));
+    module.extend(section(7, export_payload));
+
+    // Code section: 2 bodies, each: locals=0, end
+    let body = vec![0x00, 0x0B];
+    let mut code_payload = Vec::new();
+    code_payload.extend(uleb(2));
+    code_payload.extend(uleb(body.len() as u32));
+    code_payload.extend(body.iter().copied());
+    code_payload.extend(uleb(body.len() as u32));
+    code_payload.extend(body);
+    module.extend(section(10, code_payload));
+
+    module
+}
+
+fn code_entry_ranges(wasm: &[u8]) -> Vec<std::ops::Range<usize>> {
+    let mut ranges = Vec::new();
+    for payload in wasmparser::Parser::new(0).parse_all(wasm) {
+        let payload = payload.expect("wasm should parse");
+        if let wasmparser::Payload::CodeSectionEntry(body) = payload {
+            ranges.push(body.range());
+        }
+    }
+    ranges
+}
+
+#[test]
+fn resolves_ambiguous_multi_function_line_as_unverified() {
+    let wasm = minimal_two_export_wasm();
+    let ranges = code_entry_ranges(&wasm);
+    assert_eq!(ranges.len(), 2);
+
+    let mut sm = SourceMap::new();
+    sm.add_mapping(
+        ranges[0].start,
+        SourceLocation {
+            file: "src/contract.rs".into(),
+            line: 10,
+            column: None,
+        },
+    );
+    sm.add_mapping(
+        ranges[1].start,
+        SourceLocation {
+            file: "src/contract.rs".into(),
+            line: 10,
+            column: None,
+        },
+    );
+
+    let exported: HashSet<String> = ["foo".to_string(), "bar".to_string()].into_iter().collect();
+    let resolved =
+        sm.resolve_source_breakpoints(&wasm, Path::new("src/contract.rs"), &[10], &exported);
+
+    assert_eq!(resolved.len(), 1);
+    assert!(!resolved[0].verified);
+    assert_eq!(resolved[0].reason_code, "AMBIGUOUS");
+    assert!(resolved[0].function.is_none());
+}
+
+#[test]
+fn resolves_non_entrypoint_line_as_unverified_not_exported() {
+    let wasm = minimal_two_export_wasm();
+    let ranges = code_entry_ranges(&wasm);
+    assert_eq!(ranges.len(), 2);
+
+    let mut sm = SourceMap::new();
+    // Map to the second function (bar) but only allow "foo" entrypoint.
+    sm.add_mapping(
+        ranges[1].start,
+        SourceLocation {
+            file: "src/contract.rs".into(),
+            line: 20,
+            column: None,
+        },
+    );
+
+    let exported: HashSet<String> = ["foo".to_string()].into_iter().collect();
+    let resolved =
+        sm.resolve_source_breakpoints(&wasm, Path::new("src/contract.rs"), &[20], &exported);
+
+    assert_eq!(resolved.len(), 1);
+    assert!(!resolved[0].verified);
+    assert_eq!(resolved[0].reason_code, "NOT_EXPORTED");
+}
+
+#[test]
+fn resolves_to_next_executable_line_when_requested_line_has_no_code() {
+    let wasm = minimal_two_export_wasm();
+    let ranges = code_entry_ranges(&wasm);
+    assert_eq!(ranges.len(), 2);
+
+    let mut sm = SourceMap::new();
+    // Only line 31 has code, but user requests 30.
+    sm.add_mapping(
+        ranges[0].start,
+        SourceLocation {
+            file: "src/contract.rs".into(),
+            line: 31,
+            column: None,
+        },
+    );
+
+    let exported: HashSet<String> = ["foo".to_string(), "bar".to_string()].into_iter().collect();
+    let resolved =
+        sm.resolve_source_breakpoints(&wasm, Path::new("src/contract.rs"), &[30], &exported);
+
+    assert_eq!(resolved.len(), 1);
+    assert!(resolved[0].verified);
+    assert_eq!(resolved[0].reason_code, "ADJUSTED");
+    assert_eq!(resolved[0].requested_line, 30);
+    assert_eq!(resolved[0].line, 31);
+    assert_eq!(resolved[0].function.as_deref(), Some("foo"));
+}
diff --git a/tests/unbounded_iteration_tests.rs b/tests/unbounded_iteration_tests.rs
index 72eea474..2c4f4347 100644
--- a/tests/unbounded_iteration_tests.rs
+++ b/tests/unbounded_iteration_tests.rs
@@ -1,5 +1,5 @@
-use soroban_debugger::analyzer::security::{ SecurityAnalyzer, ConfidenceLevel };
-use soroban_debugger::server::protocol::{ DynamicTraceEvent, DynamicTraceEventKind };
+use soroban_debugger::analyzer::security::{ConfidenceLevel, SecurityAnalyzer};
+use 
soroban_debugger::server::protocol::{DynamicTraceEvent, DynamicTraceEventKind}; use std::default::Default; fn uleb128(mut value: usize) -> Vec { @@ -70,10 +70,9 @@ fn make_wasm_with_storage_in_loop(storage_import_name: &str) -> Vec { 0x00, // no locals 0x03, // loop 0x40, // empty block type - 0x10, - 0x00, // call imported function index 0 (storage) + 0x10, 0x00, // call imported function index 0 (storage) 0x0b, // end loop - 0x0b // end function + 0x0b, // end function ]; code.extend_from_slice(&uleb128(body.len())); code.extend_from_slice(&body); @@ -125,15 +124,12 @@ fn make_wasm_with_nested_storage_loops() -> Vec { 0x40, // empty block type 0x03, // inner loop 0x40, // empty block type - 0x10, - 0x00, // call storage in inner loop - 0x10, - 0x00, // another call storage in inner loop + 0x10, 0x00, // call storage in inner loop + 0x10, 0x00, // another call storage in inner loop 0x0b, // end inner loop - 0x10, - 0x00, // call storage in outer loop + 0x10, 0x00, // call storage in outer loop 0x0b, // end outer loop - 0x0b // end function + 0x0b, // end function ]; code.extend_from_slice(&uleb128(body.len())); code.extend_from_slice(&body); @@ -181,17 +177,14 @@ fn make_wasm_with_storage_outside_loop() -> Vec { code.extend_from_slice(&uleb128(1)); let body = vec![ 0x00, // no locals - 0x10, - 0x00, // call storage outside loop + 0x10, 0x00, // call storage outside loop 0x03, // loop 0x40, // empty block type - 0x41, - 0x01, // const 1 - 0x41, - 0x01, // const 1 + 0x41, 0x01, // const 1 + 0x41, 0x01, // const 1 0x6a, // i32.add 0x0b, // end loop - 0x0b // end function + 0x0b, // end function ]; code.extend_from_slice(&uleb128(body.len())); code.extend_from_slice(&body); @@ -203,15 +196,21 @@ fn make_wasm_with_storage_outside_loop() -> Vec { fn has_unbounded_iteration_finding(wasm: &[u8]) -> bool { let analyzer = SecurityAnalyzer::new(); let report = analyzer.analyze(wasm, None, None).expect("analysis failed"); - report.findings.iter().any(|f| f.rule_id == 
"unbounded-iteration") + report + .findings + .iter() + .any(|f| f.rule_id == "unbounded-iteration") } fn get_unbounded_iteration_finding( - wasm: &[u8] + wasm: &[u8], ) -> Option { let analyzer = SecurityAnalyzer::new(); let report = analyzer.analyze(wasm, None, None).expect("analysis failed"); - report.findings.into_iter().find(|f| f.rule_id == "unbounded-iteration") + report + .findings + .into_iter() + .find(|f| f.rule_id == "unbounded-iteration") } #[test] @@ -220,7 +219,10 @@ fn detects_storage_call_in_simple_loop() { assert!(has_unbounded_iteration_finding(&wasm)); let finding = get_unbounded_iteration_finding(&wasm).unwrap(); - assert_eq!(finding.severity, soroban_debugger::analyzer::security::Severity::High); + assert_eq!( + finding.severity, + soroban_debugger::analyzer::security::Severity::High + ); // Check confidence level let confidence = finding.confidence.as_ref().unwrap(); @@ -311,8 +313,12 @@ fn provides_rich_context_in_findings() { let pattern = context.storage_call_pattern.as_ref().unwrap(); assert_eq!(pattern.calls_in_loops, 3); assert!( - pattern.loop_types_with_calls.contains(&"top_level_loop".to_string()) || - pattern.loop_types_with_calls.contains(&"nested_loop".to_string()) + pattern + .loop_types_with_calls + .contains(&"top_level_loop".to_string()) + || pattern + .loop_types_with_calls + .contains(&"nested_loop".to_string()) ); // Check confidence rationale @@ -336,14 +342,20 @@ fn dynamic_analysis_detects_high_storage_pressure() { } let analyzer = SecurityAnalyzer::new(); - let report = analyzer.analyze(&[], None, Some(&trace)).expect("analysis failed"); + let report = analyzer + .analyze(&[], None, Some(&trace)) + .expect("analysis failed"); - let unbounded_findings: Vec<_> = report.findings + let unbounded_findings: Vec<_> = report + .findings .iter() .filter(|f| f.rule_id == "unbounded-iteration") .collect(); - assert!(!unbounded_findings.is_empty(), "Should detect high storage pressure in dynamic trace"); + assert!( + 
!unbounded_findings.is_empty(), + "Should detect high storage pressure in dynamic trace" + ); let finding = &unbounded_findings[0]; assert!(finding.description.contains("high storage-read pressure")); @@ -364,12 +376,18 @@ fn dynamic_analysis_ignores_reasonable_storage_access() { } let analyzer = SecurityAnalyzer::new(); - let report = analyzer.analyze(&[], None, Some(&trace)).expect("analysis failed"); + let report = analyzer + .analyze(&[], None, Some(&trace)) + .expect("analysis failed"); - let unbounded_findings: Vec<_> = report.findings + let unbounded_findings: Vec<_> = report + .findings .iter() .filter(|f| f.rule_id == "unbounded-iteration") .collect(); - assert!(unbounded_findings.is_empty(), "Should not flag reasonable storage access"); + assert!( + unbounded_findings.is_empty(), + "Should not flag reasonable storage access" + ); } From 79537d94ada1887621e326dee44ff32cc5b97d5e Mon Sep 17 00:00:00 2001 From: brown Date: Wed, 25 Mar 2026 12:56:39 +0100 Subject: [PATCH 2/2] create explicit quality gate checklist --- .github/workflows/release.yml | 106 ++++++++++++++++++++++++++ CONTRIBUTING.md | 9 +++ docs/getting-started.md | 8 ++ docs/release-checklist.md | 90 ++++++++++++++++++++++ extensions/vscode/src/test/runTest.ts | 3 +- 5 files changed, 215 insertions(+), 1 deletion(-) create mode 100644 docs/release-checklist.md diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 6560a6be..929ba0d6 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -12,9 +12,115 @@ env: CARGO_TERM_COLOR: always jobs: + gates: + name: Release Gates (Rust + Extension + Analyzer) + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Ensure checklist exists + run: test -f docs/release-checklist.md + + - name: Install Rust + uses: dtolnay/rust-toolchain@stable + with: + components: clippy, rustfmt + + - name: Rust Cache + uses: Swatinem/rust-cache@v2 + + - name: Check Formatting + run: cargo fmt --all -- 
--check + + - name: Run Clippy (deny warnings) + run: cargo clippy --workspace --all-targets --all-features -- -D warnings + + - name: Run Tests + run: cargo test --workspace --all-features + + - name: Build Debug Binary (for extension smoke tests) + run: cargo build + + - name: Security Analyzer Sanity (static) + run: cargo run --quiet --bin soroban-debug -- analyze --contract tests/fixtures/wasm/echo.wasm --format json + + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: 20 + cache: npm + cache-dependency-path: extensions/vscode/package-lock.json + + - name: VS Code Extension Install + working-directory: extensions/vscode + run: npm ci + + - name: VS Code Extension Compile + working-directory: extensions/vscode + run: npm run -s compile + + - name: VS Code Extension Tests + working-directory: extensions/vscode + env: + SOROBAN_DEBUG_BIN: ${{ github.workspace }}/target/debug/soroban-debug + run: npm test + + - name: Checklist link + run: | + echo "Release checklist: docs/release-checklist.md" >> "$GITHUB_STEP_SUMMARY" + + bench: + name: Benchmark Sanity (thresholded) + runs-on: ubuntu-latest + needs: gates + env: + BENCH_WARN_PCT: 10 + BENCH_FAIL_PCT: 20 + BENCH_SAMPLE_SIZE: 20 + BENCH_MEASUREMENT_TIME: 5 + BENCH_WARMUP_TIME: 2 + steps: + - uses: actions/checkout@v4 + + - uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + + - uses: Swatinem/rust-cache@v2 + + - name: Restore benchmark baseline (from cache) + uses: actions/cache/restore@v4 + with: + path: .bench/baseline.json + key: bench-baseline-${{ runner.os }}-${{ github.sha }} + restore-keys: | + bench-baseline-${{ runner.os }}- + + - name: Run benchmarks (current) + run: cargo bench --benches -- --noplot --sample-size $BENCH_SAMPLE_SIZE --measurement-time $BENCH_MEASUREMENT_TIME --warm-up-time $BENCH_WARMUP_TIME + + - name: Record current results (JSON) + run: cargo run --quiet --bin bench-regression -- record --criterion target/criterion --out .bench/current.json + + - 
name: Compare against baseline (pass/warn/fail) + shell: bash + run: | + set -e + report="$(cargo run --quiet --bin bench-regression -- compare \ + --baseline .bench/baseline.json \ + --current .bench/current.json \ + --warn-pct "$BENCH_WARN_PCT" \ + --fail-pct "$BENCH_FAIL_PCT" \ + --annotate-top 20 \ + --max-rows 50 \ + --require-baseline false)" + echo "$report" + echo "$report" >> "$GITHUB_STEP_SUMMARY" + build: name: Build (${{ matrix.target }}) runs-on: ${{ matrix.os }} + needs: [gates, bench] strategy: fail-fast: false matrix: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index bfc1403c..ae19163c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -19,6 +19,7 @@ Thank you for your interest in contributing to the **Soroban Debugger** project! 11. [Project Structure](#project-structure) 12. [Code of Conduct](#code-of-conduct) 13. [Communication](#communication) +14. [Release Process](#release-process) --- @@ -362,3 +363,11 @@ We are committed to providing a welcoming and inclusive environment for everyone --- Thank you for helping make Soroban Debugger better! + +--- + +## Release Process + +Releases are gated by a single unified checklist that covers Rust/CLI, analyzers, VS Code extension checks, and benchmark thresholds: + +- `docs/release-checklist.md` diff --git a/docs/getting-started.md b/docs/getting-started.md index 525ec5a8..dbf8bb61 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -85,3 +85,11 @@ Inside the interactive shell, you can use commands like: - Explore [Source-Level Debugging](source-level-debugging.md) to map WASM back to your Rust code. - Learn about [Time-Travel Debugging](remote-debugging.md) to step backward through execution. - Check the [FAQ](faq.md) for troubleshooting common issues. 
+ +--- + +## Maintainers + +For release readiness gates (CLI + extension + analyzer + benchmarks), follow: + +- `docs/release-checklist.md` diff --git a/docs/release-checklist.md b/docs/release-checklist.md new file mode 100644 index 00000000..d5659b3a --- /dev/null +++ b/docs/release-checklist.md @@ -0,0 +1,90 @@ +# Release Checklist + +This checklist is the single release gate for the Soroban Debugger repo (CLI + analyzers + VS Code extension + benchmarks). + +Use this for: +- Tag releases (`vX.Y.Z`) and crates.io publishes +- Hotfix releases + +## Roles / Owners + +- **Release Manager:** owns the go/no-go decision and waiver sign-off +- **Rust/CLI Owner:** owns core build/lint/test and packaging +- **VS Code Extension Owner:** owns extension build/test + DAP/protocol compatibility +- **Security/Analyzer Owner:** owns `analyze` sanity and any security-facing changes +- **Performance Owner:** owns benchmark sanity gates + +## Required Gates (no waivers by default) + +### Rust (workspace) + +- Format check: `cargo fmt --all -- --check` + - Pass criteria: exit code 0 +- Clippy: `cargo clippy --workspace --all-targets --all-features -- -D warnings` + - Pass criteria: exit code 0 (no warnings) +- Tests: `cargo test --workspace --all-features` + - Pass criteria: exit code 0 + +### Security analyzer sanity + +- Static analysis: `cargo run --quiet --bin soroban-debug -- analyze --contract tests/fixtures/wasm/echo.wasm --format json` + - Pass criteria: exit code 0 +- Optional dynamic analysis (when touching runtime/debug server): + `cargo run --quiet --bin soroban-debug -- analyze --contract tests/fixtures/wasm/echo.wasm --function echo --args '[7]' --timeout 30 --format json` + - Pass criteria: exit code 0 + +### VS Code extension + +From `extensions/vscode`: + +- Install deps: `npm ci` + - Pass criteria: exit code 0 +- Compile: `npm run -s compile` + - Pass criteria: exit code 0 +- Tests: `npm test` + - Pass criteria: exit code 0 + - Notes: + - For best coverage, set 
`SOROBAN_DEBUG_BIN` to a locally-built debug binary path (e.g. `target/debug/soroban-debug`) so the smoke test exercises the real debugger server. + +### Benchmarks (sanity thresholds) + +Benchmarks must not regress beyond the configured thresholds: + +- Thresholds (CI defaults): + - Warn: 10% + - Fail: 20% +- Command (CI-style): + - `cargo bench --benches -- --noplot --sample-size 20 --measurement-time 5 --warm-up-time 2` + - `cargo run --quiet --bin bench-regression -- record --criterion target/criterion --out .bench/current.json` + - `cargo run --quiet --bin bench-regression -- compare --baseline .bench/baseline.json --current .bench/current.json --warn-pct 10 --fail-pct 20` + - Pass criteria: compare exits 0 and reports no FAIL-level regressions + +## Release Metadata Gates + +- Version consistency: + - Tag is `vX.Y.Z` + - `Cargo.toml` version equals `X.Y.Z` + - `extensions/vscode/package.json` version equals `X.Y.Z` (if publishing the extension as part of the release) +- Changelog: + - `CHANGELOG.md` updated for `X.Y.Z` + +## Waiver process (when absolutely necessary) + +If any required gate is waived, the release must include a waiver record and explicit sign-off: + +1. Create an issue or PR comment titled `Release waiver: vX.Y.Z`. +2. Include: + - Which gate was waived + - Why it failed / why it is safe to proceed + - Scope/impact + - Mitigation and follow-up owner + deadline +3. Release Manager signs off by linking the waiver record in the release notes under a `Waivers` section. 
+ +## Sign-off (fill before tagging) + +- [ ] Release Manager: @____ (link to waiver record(s) if any) +- [ ] Rust/CLI Owner: @____ +- [ ] VS Code Extension Owner: @____ +- [ ] Security/Analyzer Owner: @____ +- [ ] Performance Owner: @____ + diff --git a/extensions/vscode/src/test/runTest.ts b/extensions/vscode/src/test/runTest.ts index 6dcab555..4993f332 100644 --- a/extensions/vscode/src/test/runTest.ts +++ b/extensions/vscode/src/test/runTest.ts @@ -228,8 +228,9 @@ async function main(): Promise { const sourcePath = path.join(repoRoot, 'tests', 'fixtures', 'contracts', 'echo', 'src', 'lib.rs'); const exportedFunctions = await debuggerProcess.getContractFunctions(); const resolvedBreakpoints = resolveSourceBreakpoints(sourcePath, [10], exportedFunctions); - assert.equal(resolvedBreakpoints[0].verified, true, 'Expected echo breakpoint to resolve'); + assert.equal(resolvedBreakpoints[0].verified, false, 'Expected heuristic source mapping to be unverified'); assert.equal(resolvedBreakpoints[0].functionName, 'echo'); + assert.equal(resolvedBreakpoints[0].setBreakpoint, true, 'Expected heuristic mapping to still set a function breakpoint'); await debuggerProcess.setBreakpoint('echo'); const paused = await debuggerProcess.execute();