diff --git a/src/bin/semcode-mcp.rs b/src/bin/semcode-mcp.rs
index 8ef5ead..86b2246 100644
--- a/src/bin/semcode-mcp.rs
+++ b/src/bin/semcode-mcp.rs
@@ -2090,6 +2090,7 @@ impl McpServer {
             "initialize" => self.handle_initialize(params).await,
             "tools/list" => self.handle_list_tools().await,
             "tools/call" => self.handle_tool_call(params).await,
+            "ping" => json!({}),
             _ => json!({
                 "error": {
                     "code": -32601,
@@ -5086,20 +5087,38 @@ async fn run_stdio_server(server: Arc<McpServer>) -> Result<()> {

         match serde_json::from_str::<serde_json::Value>(&line) {
             Ok(request) => {
-                let response = server.handle_request(request).await;
-                if let Ok(response_str) = serde_json::to_string(&response) {
-                    let mut stdout_guard = stdout.lock().await;
-                    if let Err(e) = stdout_guard.write_all(response_str.as_bytes()).await {
-                        eprintln!("Failed to write response: {e}");
-                        break;
-                    }
-                    if let Err(e) = stdout_guard.write_all(b"\n").await {
-                        eprintln!("Failed to write newline: {e}");
-                        break;
+                // Check if this is a notification (no id) or a request (has id)
+                if request.get("id").is_some() {
+                    // It's a request, send a response
+                    let response = server.handle_request(request).await;
+                    if let Ok(response_str) = serde_json::to_string(&response) {
+                        let mut stdout_guard = stdout.lock().await;
+                        if let Err(e) = stdout_guard.write_all(response_str.as_bytes()).await {
+                            eprintln!("Failed to write response: {e}");
+                            break;
+                        }
+                        if let Err(e) = stdout_guard.write_all(b"\n").await {
+                            eprintln!("Failed to write newline: {e}");
+                            break;
+                        }
+                        if let Err(e) = stdout_guard.flush().await {
+                            eprintln!("Failed to flush stdout: {e}");
+                            break;
+                        }
+                    }
                     }
-                    if let Err(e) = stdout_guard.flush().await {
-                        eprintln!("Failed to flush stdout: {e}");
-                        break;
+                } else {
+                    // It's a notification, handle without response
+                    let method = request["method"].as_str().unwrap_or("");
+                    match method {
+                        "notifications/initialized" => {
+                            eprintln!("Client initialized");
+                        }
+                        "notifications/cancelled" => {
+                            eprintln!("Request cancelled (notification received)");
+                        }
+                        _ => {
+                            eprintln!("Received notification: {}", method);
+                        }
                     }
                 }
             }
@@ -5159,9 +5178,6 @@ async fn main() -> Result<()> {
     // Create MCP server
     let server = Arc::new(McpServer::new(&database_path, &args.git_repo, args.model_path).await?);

-    // Ensure tables exist
-    server.db.create_tables().await?;
-
     // Spawn background task to index current commit if needed
     eprintln!("[Background] Spawning background indexing task");
     let db_for_indexing = server.db.clone();
@@ -5169,6 +5185,11 @@
     let indexing_state_for_bg = server.indexing_state.clone();
     let notification_tx_for_bg = server.notification_tx.clone();
     let _indexing_handle = tokio::spawn(async move {
+        // Ensure tables exist before indexing
+        if let Err(e) = db_for_indexing.create_tables().await {
+            eprintln!("[Background] Error creating/verifying tables: {}", e);
+        }
+
         index_current_commit_background(
             db_for_indexing,
             git_repo_for_indexing,
diff --git a/src/database/connection.rs b/src/database/connection.rs
index eccc360..409bc5d 100644
--- a/src/database/connection.rs
+++ b/src/database/connection.rs
@@ -1966,7 +1966,7 @@ impl DatabaseManager {
     pub async fn optimize_database(&self) -> Result<()> {
         use colored::Colorize;

-        println!(
+        tracing::info!(
             "\n{}",
             "═══ DATABASE OPTIMIZATION STARTED ═══".yellow().bold()
         );
@@ -1974,22 +1974,22 @@
         tracing::info!("Running database optimization...");

         // Rebuild all scalar indices to ensure they're optimal
-        println!("{}", " → Rebuilding scalar indices...".cyan());
+        tracing::info!("{}", " → Rebuilding scalar indices...".cyan());
         self.rebuild_indices().await?;
-        println!("{}", " ✓ Scalar indices rebuilt".green());
+        tracing::info!("{}", " ✓ Scalar indices rebuilt".green());

         // Run table optimization
-        println!("{}", " → Optimizing tables...".cyan());
+        tracing::info!("{}", " → Optimizing tables...".cyan());
         self.optimize_tables().await?;
-        println!("{}", " ✓ Tables optimized".green());
+        tracing::info!("{}", " ✓ Tables optimized".green());

         // Compact and cleanup (triggers compression)
-        println!("{}", " → Compacting and pruning old versions...".cyan());
+        tracing::info!("{}", " → Compacting and pruning old versions...".cyan());
         self.compact_and_cleanup().await?;
-        println!("{}", " ✓ Compaction complete".green());
+        tracing::info!("{}", " ✓ Compaction complete".green());

         let elapsed = start_time.elapsed();

-        println!(
+        tracing::info!(
             "{}",
             format!(
                 "═══ DATABASE OPTIMIZATION COMPLETE ({:.1}s) ═══\n",
diff --git a/src/git_range.rs b/src/git_range.rs
index 988393a..06a0aae 100644
--- a/src/git_range.rs
+++ b/src/git_range.rs
@@ -749,7 +749,7 @@ pub async fn process_git_range(
     let commit_count = commit_shas.len();

     if !commit_shas.is_empty() {
-        println!(
+        info!(
             "Checking for {} commits already in database...",
             commit_count
         );
@@ -768,13 +768,13 @@
         let already_indexed = commit_count - new_commit_shas.len();
         if already_indexed > 0 {
-            println!(
+            info!(
                 "{} commits already indexed, processing {} new commits",
                 already_indexed,
                 new_commit_shas.len()
             );
         } else {
-            println!("Processing all {} new commits", new_commit_shas.len());
+            info!("Processing all {} new commits", new_commit_shas.len());
         }

         if !new_commit_shas.is_empty() {
@@ -799,7 +799,7 @@
                 commit_extraction_start.elapsed().as_secs_f64()
             );
         } else {
-            println!("All commits in range are already indexed!");
+            info!("All commits in range are already indexed!");
         }
     } else {
         info!("No commits found in range");
     }
@@ -843,24 +843,24 @@
     let total_time = start_time.elapsed();

-    println!("\n=== Git Range Pipeline Complete ===");
-    println!("Total time: {:.1}s", total_time.as_secs_f64());
-    println!("Commits indexed: {commit_count}");
-    println!("Files processed: {}", stats.files_processed);
-    println!("Functions indexed: {}", stats.functions_count);
-    println!("Types indexed: {}", stats.types_count);
+    info!("\n=== Git Range Pipeline Complete ===");
+    info!("Total time: {:.1}s", total_time.as_secs_f64());
+    info!("Commits indexed: {commit_count}");
+    info!("Files processed: {}", stats.files_processed);
+    info!("Functions indexed: {}", stats.functions_count);
+    info!("Types indexed: {}", stats.types_count);

     // Check if optimization is needed after git range indexing
     match db_manager.check_optimization_health().await {
         Ok((needs_optimization, message)) => {
             if needs_optimization {
-                println!("\n{}", message);
+                info!("\n{}", message);
                 match db_manager.optimize_database().await {
-                    Ok(_) => println!("Database optimization completed successfully"),
+                    Ok(_) => info!("Database optimization completed successfully"),
                     Err(e) => error!("Failed to optimize database: {}", e),
                 }
             } else {
-                println!("\n{}", message);
+                info!("\n{}", message);
             }
         }
         }
         Err(e) => {
diff --git a/src/indexer.rs b/src/indexer.rs
index f0ecb9e..266498f 100644
--- a/src/indexer.rs
+++ b/src/indexer.rs
@@ -53,14 +53,14 @@ pub async fn check_and_optimize_if_needed(
     match db_manager.check_optimization_health().await {
         Ok((needs_optimization, message)) => {
             if needs_optimization {
-                println!("{}", message);
-                println!(
+                info!("{}", message);
+                info!(
                     "\n{} Fragment threshold exceeded during indexing, running optimization...",
                     "⚠️".yellow()
                 );
                 match db_manager.optimize_database().await {
                     Ok(_) => {
-                        println!("{} In-progress optimization completed", "✓".green());
+                        info!("{} In-progress optimization completed", "✓".green());
                     }
                     Err(e) => {
                         warn!("In-progress optimization failed: {}", e);
@@ -1027,11 +1027,11 @@ pub async fn index_git_commits(
     let commit_count = commit_shas.len();

     if commit_shas.is_empty() {
-        println!("No commits found in range: {}", git_range);
+        info!("No commits found in range: {}", git_range);
         return Ok(0);
     }

-    println!(
+    info!(
         "Checking for {} commits already in database...",
         commit_count
     );
@@ -1050,17 +1050,17 @@
     let already_indexed = commit_count - new_commit_shas.len();
     if already_indexed > 0 {
-        println!(
+        info!(
             "{} commits already indexed, processing {} new commits",
             already_indexed,
             new_commit_shas.len()
         );
     } else {
-        println!("Processing all {} new commits", new_commit_shas.len());
+        info!("Processing all {} new commits", new_commit_shas.len());
     }

     if new_commit_shas.is_empty() {
-        println!("All commits in range are already indexed!");
+        info!("All commits in range are already indexed!");
         return Ok(commit_count);
     }
@@ -1083,9 +1083,9 @@
     let total_time = start_time.elapsed();

-    println!("\n=== Commit Indexing Complete ===");
-    println!("Total time: {:.1}s", total_time.as_secs_f64());
-    println!("Commits indexed: {}", commit_count);
+    info!("\n=== Commit Indexing Complete ===");
+    info!("Total time: {:.1}s", total_time.as_secs_f64());
+    info!("Commits indexed: {}", commit_count);

     Ok(commit_count)
 }
diff --git a/src/lib.rs b/src/lib.rs
index 38db987..d3804ad 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -81,6 +81,9 @@ pub mod logging {
                 .add_directive("DatasetRecordBatchStream=error".parse().unwrap());
         }

-        tracing_subscriber::fmt().with_env_filter(env_filter).init();
+        tracing_subscriber::fmt()
+            .with_env_filter(env_filter)
+            .with_writer(std::io::stderr)
+            .init();
     }
 }
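Aside (not part of the patch): the sketch below is a minimal stand-alone illustration of the request-versus-notification rule the stdio loop above now follows, assuming only the serde_json crate. A JSON-RPC message carrying an "id" is a request and gets a reply on stdout; a message without an "id" is a notification and is only logged to stderr, since stdout is reserved for protocol responses. The `dispatch` helper and the stub result it builds are hypothetical names chosen for the example, not code from semcode-mcp.

use serde_json::{json, Value};

// Hypothetical helper (illustration only): messages with an "id" are requests
// and produce a response value; messages without one are notifications and
// must not produce any output on stdout.
fn dispatch(line: &str) -> Option<Value> {
    let msg: Value = serde_json::from_str(line).ok()?;
    if msg.get("id").is_some() {
        // Request: reply with a stub result, echoing the request id.
        Some(json!({ "jsonrpc": "2.0", "id": msg["id"].clone(), "result": {} }))
    } else {
        // Notification: log to stderr only.
        eprintln!("notification: {}", msg["method"].as_str().unwrap_or(""));
        None
    }
}

fn main() {
    assert!(dispatch(r#"{"jsonrpc":"2.0","id":1,"method":"ping"}"#).is_some());
    assert!(dispatch(r#"{"jsonrpc":"2.0","method":"notifications/initialized"}"#).is_none());
}

Routing all diagnostics to stderr, as the tracing_subscriber::fmt().with_writer(std::io::stderr) change and the println!-to-tracing/info! conversions in this patch do, follows the same principle of keeping stdout clean for the protocol stream.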