diff --git a/.clinerules b/.clinerules new file mode 100644 index 000000000..c93aeb944 --- /dev/null +++ b/.clinerules @@ -0,0 +1,541 @@ +# LARP .clinerules + +This file provides guidance for AI assistants working with the LARP codebase. It contains repository-specific patterns, conventions, and development processes to ensure generated code integrates seamlessly with the existing codebase. + +## Repository Structure + +LARP is organized as a Rust workspace with multiple crates, each with a specific responsibility: + +``` +larp/ # Main crate with core functionality +llm_client/ # LLM communication and provider integration +llm_prompts/ # Prompt generation and formatting +logging/ # Logging utilities +``` + +When working with this repository: +- Keep functionality in its appropriate crate +- Maintain the existing module structure +- Follow the established dependency hierarchy + +## Module Architecture + +### larp (Main Crate) + +The main crate contains these key modules: + +``` +larp/src/ +├── agent/ # Base agent functionality +├── agentic/ # Advanced agentic system +│ ├── memory/ # Memory management for agents +│ ├── symbol/ # Symbol management and tracking +│ └── tool/ # Tool implementations +├── application/ # Application core and configuration +├── chunking/ # Code parsing and chunking +├── git/ # Git integration +├── mcts/ # Monte Carlo Tree Search decision engine +├── repo/ # Repository management +├── repomap/ # Repository mapping and analysis +├── webserver/ # API endpoints +``` + +### llm_client (LLM Communication) + +``` +llm_client/src/ +├── clients/ # Provider-specific clients +├── format/ # Request/response formatting +├── tokenizer/ # Token counting and management +``` + +### llm_prompts (Prompt Generation) + +``` +llm_prompts/src/ +├── chat/ # Chat prompt generation +├── fim/ # Fill-in-middle prompt generation +├── in_line_edit/ # Inline editing prompt generation +├── reranking/ # Result reranking prompts +``` + +## Coding Patterns + +### 1. 
Broker Pattern + +The codebase uses a broker pattern extensively for managing multiple implementations: + +```rust +// Example broker pattern +pub struct LLMBroker { + clients: Arc<DashMap<LLMType, Box<dyn LLMClient>>>, +} + +impl LLMBroker { + pub async fn new() -> anyhow::Result<Self> { + let clients = Arc::new(DashMap::new()); + // Register clients + Ok(Self { clients }) + } + + pub fn get_client(&self, llm_type: LLMType) -> Option<Box<dyn LLMClient>> { + // Get appropriate client + } +} +``` + +When adding new functionality that requires multiple implementations: +- Create a trait defining the interface +- Implement the trait for each specific case +- Create a broker that manages instances and routes requests + +### 2. Error Handling + +The project uses a combination of `anyhow` for general error handling and `thiserror` for defining specific error types: + +```rust +// For library functions that need to define error types: +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum MyError { + #[error("IO error: {0}")] + Io(#[from] std::io::Error), + + #[error("Invalid value: {0}")] + InvalidValue(String), +} + +// For application code, use anyhow: +use anyhow::{Result, Context}; + +fn do_something() -> Result<()> { + let file = std::fs::File::open("file.txt") + .context("Failed to open file.txt")?; + // More code... + Ok(()) +} +``` + +### 3. Asynchronous Programming + +The codebase uses Tokio for asynchronous operations: + +```rust +// Async function pattern +pub async fn process_request(&self, request: Request) -> Result<Response> { + // Asynchronous operations + let result = self.llm_client.generate_text(request.prompt).await?; + // Process result + Ok(Response { result }) +} +``` + +When implementing new functionality: +- Use `async`/`await` for I/O operations +- Leverage Tokio's utilities for concurrent processing +- Consider using `tokio::spawn` for background tasks + +### 4. 
Arc/Clone Pattern + +The codebase extensively uses `Arc` for shared ownership: + +```rust +// Arc pattern for shared components +pub struct Application { + config: Arc<Configuration>, + llm_broker: Arc<LLMBroker>, + // Other fields +} + +impl Application { + pub async fn new() -> anyhow::Result<Self> { + let config = Arc::new(Configuration::default()); + let llm_broker = Arc::new(LLMBroker::new().await?); + Ok(Self { config, llm_broker }) + } +} +``` + +When designing new components: +- Use `Arc` for shared ownership across threads +- Implement `Clone` for types that need to be shared +- Consider using interior mutability patterns like `Mutex` or `RwLock` when shared mutable access is needed + +## LLM Integration + +### Adding a New LLM Provider + +1. Create a new client file in `llm_client/src/clients/` +2. Implement the `LLMClient` trait +3. Add the provider to the `LLMType` enum in `llm_client/src/clients/types.rs` +4. Register the provider in `LLMBroker::new()` + +Example pattern: + +```rust +// In llm_client/src/clients/new_provider.rs +pub struct NewProviderClient { + api_key: String, + client: reqwest::Client, +} + +impl NewProviderClient { + pub fn new(api_key: String) -> Self { + Self { + api_key, + client: reqwest::Client::new(), + } + } +} + +#[async_trait] +impl LLMClient for NewProviderClient { + async fn generate_text(&self, prompt: &str) -> Result<String> { + // Implementation + } + + async fn generate_chat_completion(&self, messages: &[ChatMessage]) -> Result<String> { + // Implementation + } +} + +// In llm_client/src/clients/types.rs +pub enum LLMType { + // Existing types + NewProvider, +} + +// In llm_client/src/broker.rs +impl LLMBroker { + pub async fn new() -> Result<Self> { + let clients = Arc::new(DashMap::new()); + + // Register existing clients + + // Register new client + clients.insert( + LLMType::NewProvider, + Box::new(NewProviderClient::new(get_api_key()?)) as Box<dyn LLMClient>, + ); + + Ok(Self { clients }) + } +} +``` + +## Tree-sitter Integration + +### Adding a New Language Parser + +1. 
Add the tree-sitter grammar dependency to `larp/Cargo.toml` +2. Create a new file in `larp/src/chunking/` for your language +3. Implement the parsing logic for your language +4. Register your language in `larp/src/chunking/languages.rs` + +Example pattern: + +```rust +// In larp/src/chunking/new_language.rs +pub fn parse_new_language(source: &str) -> Result<Vec<CodeChunk>> { + let mut parser = Parser::new(); + let language = tree_sitter_new_language(); + parser.set_language(language)?; + + let tree = parser.parse(source, None).ok_or_else(|| anyhow::anyhow!("failed to parse source"))?; + let root_node = tree.root_node(); + + let mut chunks = Vec::new(); + extract_chunks(root_node, source, &mut chunks)?; + + Ok(chunks) +} + +// In larp/src/chunking/languages.rs +pub fn get_parser_for_language(language: &str) -> Option<fn(&str) -> Result<Vec<CodeChunk>>> { + match language { + // Existing languages + "new_language" => Some(parse_new_language), + _ => None, + } +} +``` + +## Agentic System + +### Adding a New Tool + +1. Create a new file in `larp/src/agentic/tool/` for your tool +2. Implement the `Tool` trait for your tool +3. 
Register your tool in the `ToolBox::new()` method + +Example pattern: + +```rust +// In larp/src/agentic/tool/new_tool.rs +pub struct NewTool { + // Tool state +} + +impl NewTool { + pub fn new() -> Self { + Self { /* initialize state */ } + } +} + +#[async_trait] +impl Tool for NewTool { + async fn execute(&self, params: ToolParams) -> Result<ToolResult> { + // Tool implementation + Ok(ToolResult::new(/* result data */)) + } + + fn name(&self) -> &'static str { + "new_tool" + } + + fn description(&self) -> &'static str { + "Description of the new tool" + } +} + +// In larp/src/agentic/tool_box.rs +impl ToolBox { + pub fn new( + tool_broker: Arc<ToolBroker>, + symbol_tracker: Arc<SymbolTrackerInline>, + editor_parsing: Arc<EditorParsing>, + ) -> Self { + let mut tools = HashMap::new(); + + // Register existing tools + + // Register new tool + tools.insert( + "new_tool".to_string(), + Box::new(NewTool::new()) as Box<dyn Tool>, + ); + + Self { tools, tool_broker, symbol_tracker, editor_parsing } + } +} +``` + +## Testing Guidelines + +### Unit Tests + +Write unit tests in the same file as the code they're testing: + +```rust +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_functionality() { + // Test code + assert_eq!(function_under_test(input), expected_output); + } +} +``` + +### Integration Tests + +For integration tests, create files in the `tests/` directory: + +```rust +// In tests/integration_test.rs +use larp::{Component, AnotherComponent}; + +#[tokio::test] +async fn test_component_interaction() { + let component = Component::new(); + let another = AnotherComponent::new(); + + let result = component.interact_with(another).await; + assert!(result.is_ok()); +} +``` + +## Documentation + +### Function Documentation + +Document public functions with rustdoc comments: + +```rust +/// This function processes a request and returns a response. 
+/// +/// # Arguments +/// +/// * `request` - The request to process +/// +/// # Returns +/// +/// A result containing the response or an error +/// +/// # Errors +/// +/// Returns an error if the request is invalid or processing fails +/// +/// # Examples +/// +/// ``` +/// let response = process_request(request).await?; +/// ``` +pub async fn process_request(request: Request) -> Result<Response> { + // Implementation +} +``` + +### Module Documentation + +Document modules with module-level comments: + +```rust +//! This module provides functionality for processing requests. +//! +//! It includes functions for validating, transforming, and responding to requests. + +pub mod validation; +pub mod transformation; +pub mod response; +``` + +## Common Development Tasks + +### Adding a New Feature + +1. Create a new branch: `git checkout -b feature/feature-name` +2. Implement the feature +3. Add tests +4. Update documentation +5. Submit a pull request + +### Debugging + +Use the tracing macros for debugging: + +```rust +use tracing::{trace, debug, info, warn, error}; + +// Levels from most to least verbose +trace!("Very detailed information"); +debug!("Useful for debugging"); +info!("General information"); +warn!("Warning that might need attention"); +error!("Error that needs immediate attention"); +``` + +Set the log level using the `RUST_LOG` environment variable: + +```bash +RUST_LOG=debug cargo run --bin webserver +``` + +## Performance Considerations + +### Token Optimization + +When working with LLMs, optimize token usage: + +- Remove unnecessary whitespace and formatting +- Use targeted context pruning +- Cache frequently used prompts and responses + +### Caching + +Use caching where appropriate: + +```rust +// Example of a simple cache +struct Cache<K, V> { + data: Arc<DashMap<K, (V, Instant)>>, + ttl: Duration, +} + +impl<K: Eq + std::hash::Hash, V: Clone> Cache<K, V> { + fn new(ttl: Duration) -> Self { + Self { + data: Arc::new(DashMap::new()), + ttl, + } + } + + fn get(&self, key: &K) -> Option<V> { + if let Some(entry) = self.data.get(key) { 
+ let (value, timestamp) = entry.value(); + if timestamp.elapsed() < self.ttl { + return Some(value.clone()); + } + } + None + } + + fn insert(&self, key: K, value: V) { + self.data.insert(key, (value, Instant::now())); + } +} +``` + +### Parallel Processing + +Use Rayon for CPU-bound parallel processing: + +```rust +use rayon::prelude::*; + +// Sequential processing +let results: Vec<_> = items.iter().map(|item| process_item(item)).collect(); + +// Parallel processing +let results: Vec<_> = items.par_iter().map(|item| process_item(item)).collect(); +``` + +## Git Workflow + +### Commit Messages + +Format commit messages with a clear title and detailed description: + +``` +feat: Add support for new LLM provider + +- Implement client for new provider +- Add provider to LLMType enum +- Register provider in LLMBroker +- Add tests for the new provider +``` + +Use prefixes like: +- `feat:` for new features +- `fix:` for bug fixes +- `docs:` for documentation changes +- `refactor:` for code refactoring +- `test:` for adding or updating tests +- `chore:` for maintenance tasks + +### Branch Naming + +Name branches according to their purpose: +- `feature/feature-name` for new features +- `bugfix/issue-description` for bug fixes +- `refactor/component-name` for code refactoring +- `docs/documentation-description` for documentation updates + +## Security Considerations + +### API Key Management + +Handle API keys securely: +- Never hardcode API keys +- Use environment variables or secure configuration files +- Log errors without exposing sensitive information + +### Input Validation + +Validate all inputs, especially those from external sources: +- Check for malicious input patterns +- Validate file paths to prevent path traversal +- Sanitize inputs before using them in commands or queries diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index a4d6e1f87..2dc2c7c6b 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ 
-1,4 +1,4 @@ -name: Build & release sidecar +name: Build & release larp on: workflow_dispatch: @@ -166,9 +166,9 @@ jobs: # Create zip file Push-Location staging if ($env:RUNNER_OS -eq "Windows") { - Compress-Archive -Path * -DestinationPath ../sidecar.zip -Force + Compress-Archive -Path * -DestinationPath ../larp.zip -Force } else { - zip -r ../sidecar.zip . + zip -r ../larp.zip . } Pop-Location @@ -211,16 +211,16 @@ jobs: echo "ARCH: ${ARCH}" echo "CARGO_PKG_VERSION: ${CARGO_PKG_VERSION}" - SPECIFIC_VERSION_PATH="${CARGO_PKG_VERSION}/${OS_NAME}/${ARCH}/sidecar.zip" - LATEST_VERSION_PATH="latest/${OS_NAME}/${ARCH}/sidecar.zip" + SPECIFIC_VERSION_PATH="${CARGO_PKG_VERSION}/${OS_NAME}/${ARCH}/larp.zip" + LATEST_VERSION_PATH="latest/${OS_NAME}/${ARCH}/larp.zip" echo "Paths:" echo "SPECIFIC_VERSION_PATH: ${SPECIFIC_VERSION_PATH}" echo "LATEST_VERSION_PATH: ${LATEST_VERSION_PATH}" # Verify file exists - ls -l "./sidecar.zip" + ls -l "./larp.zip" # Copy with verbose flag - gsutil cp "./sidecar.zip" "gs://sidecar-bin/${SPECIFIC_VERSION_PATH}" - gsutil cp "./sidecar.zip" "gs://sidecar-bin/${LATEST_VERSION_PATH}" + gsutil cp "./larp.zip" "gs://larp-bin/${SPECIFIC_VERSION_PATH}" + gsutil cp "./larp.zip" "gs://larp-bin/${LATEST_VERSION_PATH}" diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md new file mode 100644 index 000000000..3b5868cb2 --- /dev/null +++ b/ARCHITECTURE.md @@ -0,0 +1,374 @@ +# Sidecar Architecture + +This document provides a detailed explanation of the Sidecar architecture, including component interactions, data flow, and design decisions. + +## Overview + +Sidecar is built as a Rust workspace with multiple crates that work together to provide AI-powered code assistance. The architecture follows a modular design with clear separation of concerns. 
+ +```mermaid +flowchart TD + A[Aide Editor] <--> B[Webserver API] + B <--> C[Application Core] + C <--> D[LLM Client] + C <--> E[Repository Analysis] + C <--> F[Agentic Tools] + C <--> G[MCTS Decision Engine] + C <--> H[Code Chunking] + D <--> I[External LLM Providers] + E <--> J[Git Repository] + F <--> K[Symbol Management] + H <--> L[Language-Specific Parsers] +``` + +## Core Components + +### 1. Application Core + +The Application Core is the central component that coordinates all other parts of the system. It initializes and manages the various subsystems, handles configuration, and provides a unified interface for the webserver. + +**Key Responsibilities:** +- System initialization and configuration +- Component lifecycle management +- Resource allocation and management +- Cross-component coordination + +**Implementation Details:** +- Located in `sidecar/src/application/` +- Main class: `Application` in `application.rs` +- Configuration handled by `Configuration` in `config/configuration.rs` + +### 2. Webserver API + +The Webserver API provides HTTP endpoints for the Aide editor to communicate with Sidecar. It handles request routing, authentication, and response formatting. + +**Key Endpoints:** +- `/api/agentic/*`: Endpoints for AI agent operations +- `/api/tree_sitter/*`: Endpoints for code parsing and analysis +- `/api/file/*`: Endpoints for file operations +- `/api/plan/*`: Endpoints for planning operations + +**Implementation Details:** +- Located in `sidecar/src/webserver/` +- Main entry point: `webserver.rs` in `sidecar/src/bin/` +- Uses Axum for HTTP routing and handling + +### 3. LLM Client + +The LLM Client handles communication with various Large Language Model providers. It manages API keys, formats requests according to provider specifications, and processes responses. + +**Supported Providers:** +- OpenAI (GPT models) +- Anthropic (Claude models) +- Google AI (Gemini models) +- Various open-source models via Ollama, LM Studio, etc. 
+ +**Implementation Details:** +- Located in `llm_client/src/` +- Main broker: `LLMBroker` in `broker.rs` +- Provider-specific clients in `clients/` + +### 4. Repository Analysis + +The Repository Analysis component analyzes code repositories to build a graph representation of the codebase. It uses PageRank-like algorithms to identify important symbols and relationships. + +**Key Features:** +- File and directory scanning +- Symbol extraction and relationship mapping +- Importance scoring using PageRank +- Context retrieval for relevant code sections + +**Implementation Details:** +- Located in `sidecar/src/repomap/` +- Main analyzer: `Analyser` in `analyser.rs` +- Graph implementation in `graph.rs` + +### 5. Agentic Tools + +The Agentic Tools component provides a collection of tools that AI agents can use to perform complex code operations. It includes tools for code editing, symbol analysis, and context gathering. + +**Key Tools:** +- Code editing tools +- Symbol analysis tools +- Repository search tools +- Context gathering tools + +**Implementation Details:** +- Located in `sidecar/src/agentic/` +- Tool management in `tool/` +- Symbol management in `symbol/` +- Memory management in `memory/` + +### 6. MCTS Decision Engine + +The Monte Carlo Tree Search (MCTS) Decision Engine explores possible code changes and selects the most promising ones. It uses a tree-based search algorithm to evaluate different actions and their potential outcomes. + +**Key Components:** +- Action node representation +- Selection strategies +- Value functions for evaluating changes +- Execution planning + +**Implementation Details:** +- Located in `sidecar/src/mcts/` +- Action node implementation in `action_node.rs` +- Selection strategies in `selector/` +- Execution handling in `execution/` + +### 7. Code Chunking + +The Code Chunking component parses and chunks code into meaningful segments for better understanding by LLMs. 
It uses language-specific parsers to extract symbols, relationships, and context. + +**Supported Languages:** +- Rust +- Python +- JavaScript/TypeScript +- Go + +**Implementation Details:** +- Located in `sidecar/src/chunking/` +- Language-specific parsers in language-named files (e.g., `rust.rs`) +- Common parsing utilities in `helpers.rs` +- Text document management in `text_document.rs` + +## Data Flow + +### Request Processing Flow + +```mermaid +sequenceDiagram + participant Editor as Aide Editor + participant API as Webserver API + participant App as Application Core + participant LLM as LLM Client + participant Repo as Repository Analysis + participant Agent as Agentic Tools + + Editor->>API: Request (code context, query) + API->>App: Process request + App->>Repo: Analyze repository context + Repo-->>App: Repository context + App->>Agent: Select appropriate tools + Agent->>LLM: Generate prompt with context + LLM-->>Agent: LLM response + Agent->>App: Process LLM response + App->>API: Formatted response + API->>Editor: Display results to user +``` + +### Code Editing Flow + +```mermaid +sequenceDiagram + participant Editor as Aide Editor + participant API as Webserver API + participant Agent as Agentic System + participant MCTS as MCTS Engine + participant LLM as LLM Client + participant FS as File System + + Editor->>API: Edit request with code context + API->>Agent: Process edit request + Agent->>MCTS: Generate possible edits + + loop Action Selection + MCTS->>LLM: Generate candidate actions + LLM-->>MCTS: Candidate actions + MCTS->>MCTS: Evaluate actions + MCTS->>MCTS: Select best action + end + + MCTS-->>Agent: Best edit action + Agent->>FS: Apply edit to file + FS-->>Agent: Edit result + Agent->>API: Edit response + API->>Editor: Updated code +``` + +## Component Interactions + +### Application Initialization + +```mermaid +sequenceDiagram + participant Main as Main + participant App as Application + participant Config as Configuration + participant 
Logging as Logging + participant LLM as LLM Broker + participant Repo as Repository Pool + participant Lang as Language Parsing + participant Tools as Tool Box + participant Symbols as Symbol Manager + + Main->>Config: Parse configuration + Main->>Logging: Install logging + Main->>App: Initialize application + App->>Repo: Initialize repository pool + App->>Lang: Initialize language parsing + App->>LLM: Initialize LLM broker + App->>Tools: Initialize tool box + App->>Symbols: Initialize symbol manager + Main->>Main: Start webserver +``` + +### Tool Execution + +```mermaid +sequenceDiagram + participant API as API Endpoint + participant Agent as Agent + participant ToolBox as Tool Box + participant Tool as Specific Tool + participant LLM as LLM Client + participant FS as File System + + API->>Agent: Tool use request + Agent->>ToolBox: Select appropriate tool + ToolBox->>Tool: Execute tool with parameters + Tool->>LLM: Generate content (if needed) + LLM-->>Tool: Generated content + Tool->>FS: Perform file operations (if needed) + FS-->>Tool: Operation result + Tool-->>ToolBox: Tool execution result + ToolBox-->>Agent: Processed result + Agent-->>API: Tool use response +``` + +## Design Decisions + +### Modular Architecture + +Sidecar uses a modular architecture with clear separation of concerns. This allows for easier maintenance, testing, and extension of the codebase. + +**Benefits:** +- Components can be developed and tested independently +- New features can be added without affecting existing functionality +- Different teams can work on different components simultaneously + +### Rust Workspace + +The project is organized as a Rust workspace with multiple crates. This provides better dependency management and compilation times. 
+ +**Benefits:** +- Clear separation between major components +- Independent versioning of components +- Faster incremental compilation +- Better dependency management + +### Asynchronous Processing + +Sidecar uses asynchronous processing extensively to handle concurrent requests and long-running operations. + +**Benefits:** +- Better resource utilization +- Improved responsiveness +- Ability to handle multiple requests simultaneously +- Support for streaming responses + +### LLM Provider Abstraction + +The LLM client uses an abstraction layer to support multiple LLM providers. This allows for easy switching between providers and support for new providers. + +**Benefits:** +- Support for multiple LLM providers +- Easy addition of new providers +- Ability to switch providers at runtime +- Consistent interface for all providers + +### Tree-sitter Integration + +Sidecar uses Tree-sitter for code parsing and analysis. This provides accurate and efficient parsing for multiple programming languages. + +**Benefits:** +- Support for multiple programming languages +- Accurate parsing and symbol extraction +- Efficient incremental parsing +- Rich query capabilities + +## Performance Considerations + +### Token Usage Optimization + +Sidecar optimizes token usage to reduce costs and improve performance when communicating with LLM providers. + +**Strategies:** +- Context pruning to remove irrelevant information +- Chunking large files into smaller segments +- Caching frequently used prompts and responses +- Using embeddings for efficient similarity search + +### Caching + +Sidecar uses caching at multiple levels to improve performance and reduce redundant operations. + +**Cache Types:** +- Repository analysis cache +- LLM response cache +- Parsed code cache +- Symbol information cache + +### Parallel Processing + +Sidecar uses parallel processing for computationally intensive operations like repository analysis and code parsing. 
+ +**Parallel Operations:** +- File scanning and parsing +- Repository graph construction +- PageRank calculation +- MCTS exploration + +## Security Considerations + +### API Key Management + +Sidecar handles API keys for various LLM providers. These keys are sensitive and must be protected. + +**Security Measures:** +- Keys are never logged or exposed in responses +- Keys can be provided via environment variables +- Support for key rotation and management + +### Code Execution + +Sidecar does not execute user code directly, but it does perform file operations that could potentially be exploited. + +**Security Measures:** +- Strict validation of file paths +- Limiting operations to the repository directory +- Careful handling of user input + +### Authentication + +The webserver API can be configured to require authentication for sensitive operations. + +**Security Measures:** +- Support for token-based authentication +- Role-based access control +- Secure token validation + +## Extensibility + +### Plugin System + +Sidecar is designed to be extensible through a plugin system. This allows for adding new functionality without modifying the core codebase. + +**Extension Points:** +- New language support +- Additional tools +- Custom LLM providers +- Alternative repository analysis methods + +### Configuration Options + +Sidecar provides extensive configuration options to customize its behavior for different environments and use cases. + +**Configurable Aspects:** +- LLM provider selection and parameters +- Repository analysis settings +- Caching behavior +- Performance tuning + +## Conclusion + +The Sidecar architecture is designed to be modular, extensible, and performant. It provides a solid foundation for AI-powered code assistance and can be extended to support new features and use cases. 
\ No newline at end of file diff --git a/CONCEPTS.md b/CONCEPTS.md new file mode 100644 index 000000000..a8e1d40bc --- /dev/null +++ b/CONCEPTS.md @@ -0,0 +1,238 @@ +# Sidecar Key Concepts and Terminology + +This document explains the key concepts and terminology used in the Sidecar project. Understanding these concepts will help you navigate the codebase and contribute effectively. + +## Table of Contents + +- [AI Concepts](#ai-concepts) +- [Code Understanding](#code-understanding) +- [System Architecture](#system-architecture) +- [Development Concepts](#development-concepts) +- [Integration Concepts](#integration-concepts) + +## AI Concepts + +### Large Language Models (LLMs) + +**Definition**: Large Language Models are AI models trained on vast amounts of text data that can generate human-like text, understand context, and perform various language tasks. + +**In Sidecar**: LLMs are used for code understanding, generation, and editing. Sidecar supports multiple LLM providers, including OpenAI, Anthropic, and Google AI. + +**Key Components**: +- `llm_client` crate: Handles communication with LLM providers +- `LLMBroker`: Manages different LLM clients and routes requests +- `LLMClient` trait: Interface for different LLM providers + +### Agentic System + +**Definition**: An agentic system is an AI system that can take actions autonomously to achieve specific goals. It can use tools, make decisions, and interact with its environment. + +**In Sidecar**: The agentic system allows AI to perform complex code operations, such as editing, refactoring, and analyzing code. 
+ +**Key Components**: +- `agentic` module: Contains the core agentic functionality +- `ToolBox`: Collection of tools available to the agent +- `SymbolManager`: Manages code symbols and their relationships +- `Memory`: Stores context and information for the agent + +### Monte Carlo Tree Search (MCTS) + +**Definition**: MCTS is a heuristic search algorithm for decision processes, particularly useful for complex decision spaces where evaluating all possibilities is impractical. + +**In Sidecar**: MCTS is used to explore possible code changes and select the most promising ones for implementation. + +**Key Components**: +- `mcts` module: Contains the MCTS implementation +- `ActionNode`: Represents a node in the MCTS tree +- `Selector`: Selects nodes for exploration +- `ValueFunction`: Evaluates the value of nodes + +### Tool-Based Approach + +**Definition**: A tool-based approach allows an AI agent to use specific tools to interact with its environment and accomplish tasks. + +**In Sidecar**: Tools are used by the agent to perform specific operations, such as editing code, searching repositories, and analyzing symbols. + +**Key Components**: +- `tool` module: Contains tool implementations +- `Tool` trait: Interface for different tools +- `ToolBroker`: Manages tool selection and execution + +## Code Understanding + +### Symbol + +**Definition**: A symbol is a named entity in code, such as a function, class, variable, or module. + +**In Sidecar**: Symbols are the basic units of code understanding. Sidecar extracts symbols from code and analyzes their relationships. + +**Key Components**: +- `symbol` module: Contains symbol-related functionality +- `SymbolManager`: Manages symbols and their relationships +- `SymbolTrackerInline`: Tracks symbols in editor sessions + +### Repository Mapping + +**Definition**: Repository mapping is the process of analyzing a code repository to understand its structure, dependencies, and important components. 
+ +**In Sidecar**: Repository mapping is used to build a graph representation of the codebase, which helps in understanding context and relationships. + +**Key Components**: +- `repomap` module: Contains repository mapping functionality +- `Analyser`: Analyzes repositories and builds graphs +- `Graph`: Represents the repository as a graph +- `PageRank`: Calculates importance scores for symbols + +### Code Chunking + +**Definition**: Code chunking is the process of breaking down code into meaningful segments for analysis and understanding. + +**In Sidecar**: Code chunking is used to parse and analyze code in a structured way, extracting symbols and their relationships. + +**Key Components**: +- `chunking` module: Contains code chunking functionality +- `TSLanguageParsing`: Uses Tree-sitter for language parsing +- `EditorParsing`: Parses code in editor sessions +- Language-specific parsers (e.g., `rust.rs`, `python.rs`) + +### Tree-sitter + +**Definition**: Tree-sitter is a parser generator tool and incremental parsing library that can build a concrete syntax tree for source code. + +**In Sidecar**: Tree-sitter is used for accurate and efficient parsing of multiple programming languages. + +**Key Components**: +- `tree-sitter` dependency: The core parsing library +- Language-specific grammars (e.g., `tree-sitter-rust`, `tree-sitter-python`) +- `TSLanguageParsing`: Wrapper for Tree-sitter functionality + +## System Architecture + +### Application Core + +**Definition**: The application core is the central component that coordinates all other parts of the system. + +**In Sidecar**: The application core initializes and manages the various subsystems, handles configuration, and provides a unified interface for the webserver. 
+ +**Key Components**: +- `application` module: Contains the application core +- `Application` struct: Main application class +- `Configuration` struct: Application configuration + +### Webserver + +**Definition**: The webserver provides HTTP endpoints for external systems to communicate with Sidecar. + +**In Sidecar**: The webserver handles requests from the Aide editor, routes them to the appropriate components, and returns responses. + +**Key Components**: +- `webserver` module: Contains the webserver implementation +- `webserver.rs` binary: Main entry point for the webserver +- Various router functions (e.g., `agentic_router`, `tree_sitter_router`) + +### Repository Pool + +**Definition**: The repository pool manages access to code repositories and their state. + +**In Sidecar**: The repository pool provides a unified interface for accessing and manipulating repositories. + +**Key Components**: +- `repo` module: Contains repository-related functionality +- `RepositoryPool` struct: Manages repository access +- `state` module: Manages repository state + +### LLM Broker + +**Definition**: The LLM broker manages communication with different LLM providers and routes requests to the appropriate client. + +**In Sidecar**: The LLM broker provides a unified interface for generating text, chat completions, and other LLM operations. + +**Key Components**: +- `LLMBroker` struct: Main broker class +- `LLMClient` trait: Interface for LLM providers +- Provider-specific clients (e.g., `OpenAIClient`, `AnthropicClient`) + +## Development Concepts + +### Rust Workspace + +**Definition**: A Rust workspace is a collection of packages that share dependencies and configuration. + +**In Sidecar**: Sidecar is organized as a Rust workspace with multiple crates, each responsible for a specific aspect of functionality. 
+ +**Key Components**: +- `Cargo.toml` at the root: Defines the workspace +- Individual crates: `sidecar`, `llm_client`, `llm_prompts`, `logging` +- Shared dependencies and configuration + +### Asynchronous Programming + +**Definition**: Asynchronous programming allows operations to run concurrently without blocking the main thread. + +**In Sidecar**: Asynchronous programming is used extensively for handling concurrent requests, I/O operations, and long-running tasks. + +**Key Components**: +- `tokio` dependency: Asynchronous runtime +- `async`/`await` syntax: Used for asynchronous functions +- `Future` trait: Represents asynchronous computations + +### Error Handling + +**Definition**: Error handling is the process of managing and responding to error conditions in a program. + +**In Sidecar**: Error handling is done using the `anyhow` and `thiserror` crates, which provide flexible and ergonomic error handling. + +**Key Components**: +- `anyhow` dependency: For general error handling +- `thiserror` dependency: For defining specific error types +- `Result` type: Used for functions that can fail + +### Tracing and Logging + +**Definition**: Tracing and logging are techniques for recording information about a program's execution for debugging and monitoring. + +**In Sidecar**: Tracing and logging are used to record information about the system's operation, errors, and performance. + +**Key Components**: +- `tracing` dependency: For structured logging and tracing +- `logging` crate: Custom logging utilities +- `tracing_subscriber`: For configuring tracing output + +## Integration Concepts + +### Aide Editor Integration + +**Definition**: Aide is a code editor that integrates with Sidecar for AI-powered code assistance. + +**In Sidecar**: Sidecar provides API endpoints for the Aide editor to request AI assistance and receive responses. 
+ +**Key Components**: +- Webserver API endpoints +- Request and response formats +- Streaming response support + +### LLM Provider Integration + +**Definition**: LLM providers are services that offer access to large language models through APIs. + +**In Sidecar**: Sidecar integrates with multiple LLM providers to leverage their models for code understanding and generation. + +**Key Components**: +- Provider-specific clients +- API key management +- Request and response formatting + +### Git Integration + +**Definition**: Git is a distributed version control system used for tracking changes in source code. + +**In Sidecar**: Sidecar integrates with Git to understand repository history, changes, and structure. + +**Key Components**: +- `git` module: Contains Git-related functionality +- `gix` dependency: Git implementation in Rust +- Repository analysis based on Git history + +## Conclusion + +Understanding these key concepts and terminology will help you navigate the Sidecar codebase and contribute effectively to the project. If you encounter terms or concepts that are not explained here, please consider adding them to this document to help future contributors. \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ef5592049..2ebbffbd4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,12 +1,12 @@ -# Contributing to Sidecar +# Contributing to LARP -Welcome, and thank you for your interest in contributing to Sidecar! +Welcome, and thank you for your interest in contributing to LARP! There are several ways in which you can contribute, beyond writing code. The goal of this document is to provide a high-level overview of how you can get involved. ## Contributing Fixes -If you are interested in writing code to fix issues, please see [How to Contribute](https://github.com/codestoryai/sidecar/blob/main/HOW_TO_CONTRIBUTE.md.md). 
+If you are interested in writing code to fix issues, please see [How to Contribute](https://github.com/codestoryai/larp/blob/main/HOW_TO_CONTRIBUTE.md).

## Asking Questions

@@ -17,7 +17,7 @@ Have a question? The [Aide Discord](https://discord.gg/mtgrhXM5Xf) is a communit

Your comments and feedback are welcome, and the development team is available via a handful of different channels.

### GitHub issues
-[GitHub issues](https://github.com/codestoryai/sidecar/issues) should be used for bugs and feature requests. How to submit good bugs and feature requests is described in [How to Contribute](https://github.com/codestoryai/sidecar/blob/main/HOW_TO_CONTRIBUTE.md) and how we track issues is described in [[Issue Tracking]].
+[GitHub issues](https://github.com/codestoryai/larp/issues) should be used for bugs and feature requests. How to submit good bugs and feature requests is described in [How to Contribute](https://github.com/codestoryai/larp/blob/main/HOW_TO_CONTRIBUTE.md) and how we track issues is described in [[Issue Tracking]].

### Discord
As mentioned above, the [Aide Discord](https://discord.gg/mtgrhXM5Xf) has the development team available to look at your feedback. If there is an action to be tracked, an issue will be created on GitHub for providing visibility into the status of the feedback.

@@ -39,7 +39,7 @@ The Aide project is distributed across multiple repositories. Try to file the is

|Component|Repository|
|---|---|
|The Aide code editor|[aide](https://github.com/codestoryai/aide)|
-|AI sidecar|[sidecar](https://github.com/codestoryai/sidecar)|
+|AI LARP|[larp](https://github.com/codestoryai/larp)|

#### Maintained by the VSCode team
We regularly pull changes from the VSCode project into Aide, so issues reported here when fixed will automatically be included in Aide. But if the fix is urgent and important, just file them under the [aide](https://github.com/codestoryai/aide) repository and we will follow up as required. 
@@ -99,7 +99,7 @@ Please include the following with each issue:

### Creating Pull Requests

-* Please refer to the article on [creating pull requests](https://github.com/codestoryai/sidecar/blob/main/HOW_TO_CONTRIBUTE.md.md#pull-requests) and contributing to this project.
+* Please refer to the article on [creating pull requests](https://github.com/codestoryai/larp/blob/main/HOW_TO_CONTRIBUTE.md#pull-requests) and contributing to this project.

### Final Checklist

@@ -113,4 +113,4 @@ Don't feel bad if the developers can't reproduce the issue right away. They will

## Thank You

-Your contributions to open source, large or small, make projects like this possible. Thank you for taking the time to contribute.
\ No newline at end of file
+Your contributions to open source, large or small, make projects like this possible. Thank you for taking the time to contribute.
diff --git a/Cargo.toml b/Cargo.toml
index b508edef5..85349705a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [workspace]
 members = [
-    "sidecar",
+    "larp",
     "llm_client",
     "llm_prompts",
     "logging",
@@ -8,4 +8,4 @@ members = [
 resolver = "2"
 
 [profile.release]
-lto = "thin"
\ No newline at end of file
+lto = "thin"
diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md
new file mode 100644
index 000000000..6cde4ec50
--- /dev/null
+++ b/DEVELOPMENT.md
@@ -0,0 +1,386 @@
+# Sidecar Development Guide
+
+This document provides a comprehensive guide for developers who want to contribute to the Sidecar project. It covers setup, development workflows, testing, and best practices. 
+ +## Table of Contents + +- [Development Environment Setup](#development-environment-setup) +- [Project Structure](#project-structure) +- [Development Workflow](#development-workflow) +- [Testing](#testing) +- [Debugging](#debugging) +- [Code Style and Guidelines](#code-style-and-guidelines) +- [Documentation](#documentation) +- [Common Tasks](#common-tasks) +- [Troubleshooting](#troubleshooting) + +## Development Environment Setup + +### Prerequisites + +- **Rust**: Version 1.79 or later +- **Cargo**: Latest version compatible with your Rust installation +- **Git**: For version control +- **SQLite**: For database operations +- **Tree-sitter**: For code parsing (installed automatically via Cargo) + +### Setup Steps + +1. **Clone the repository**: + ```bash + git clone https://github.com/codestoryai/sidecar.git + cd sidecar + ``` + +2. **Install Rust dependencies**: + ```bash + rustup update + rustup component add rustfmt + ``` + +3. **Build the project**: + ```bash + cargo build + ``` + +4. **Run the webserver**: + ```bash + cargo run --bin webserver + ``` + +### Environment Variables + +Sidecar uses environment variables for configuration. Here are the most important ones: + +- `OPENAI_API_KEY`: Your OpenAI API key +- `ANTHROPIC_API_KEY`: Your Anthropic API key +- `GOOGLE_AI_API_KEY`: Your Google AI API key +- `SIDECAR_PORT`: Port for the webserver (default: 3000) +- `SIDECAR_HOST`: Host for the webserver (default: 127.0.0.1) +- `SIDECAR_LOG_LEVEL`: Log level (default: info) + +You can set these variables in your shell or create a `.env` file in the project root. 
+ +## Project Structure + +Sidecar is organized as a Rust workspace with multiple crates: + +``` +sidecar/ # Main crate with core functionality +├── src/ # Source code +│ ├── agentic/ # AI agent system +│ ├── agent/ # Agent implementation +│ ├── application/ # Application core +│ ├── bin/ # Binary entry points +│ ├── chunking/ # Code chunking and parsing +│ ├── git/ # Git integration +│ ├── llm/ # LLM integration +│ ├── mcts/ # Monte Carlo Tree Search +│ ├── repo/ # Repository management +│ ├── repomap/ # Repository mapping +│ ├── webserver/ # Web API +│ └── lib.rs # Library entry point +├── Cargo.toml # Crate manifest +└── build.rs # Build script + +llm_client/ # LLM client crate +├── src/ # Source code +│ ├── clients/ # LLM provider clients +│ ├── format/ # Request/response formatting +│ ├── tokenizer/ # Token counting and management +│ └── lib.rs # Library entry point +└── Cargo.toml # Crate manifest + +llm_prompts/ # LLM prompt generation crate +├── src/ # Source code +│ ├── chat/ # Chat prompt generation +│ ├── fim/ # Fill-in-middle prompt generation +│ ├── in_line_edit/ # Inline editing prompt generation +│ └── lib.rs # Library entry point +└── Cargo.toml # Crate manifest + +logging/ # Logging utilities crate +├── src/ # Source code +│ └── lib.rs # Library entry point +└── Cargo.toml # Crate manifest +``` + +## Development Workflow + +### Feature Development + +1. **Create a new branch**: + ```bash + git checkout -b feature/your-feature-name + ``` + +2. **Implement your changes**: + - Make the necessary code changes + - Add tests for your changes + - Update documentation as needed + +3. **Run tests**: + ```bash + cargo test + ``` + +4. **Format your code**: + ```bash + cargo fmt + ``` + +5. **Commit your changes**: + ```bash + git add . + git commit -m "Add your feature description" + ``` + +6. **Push your branch**: + ```bash + git push origin feature/your-feature-name + ``` + +7. 
**Create a pull request**: + - Go to the GitHub repository + - Click on "Pull Requests" and then "New Pull Request" + - Select your branch and provide a description of your changes + +### Code Review Process + +1. **Automated checks**: + - CI will run tests and linting on your PR + - Address any issues reported by CI + +2. **Peer review**: + - A maintainer will review your code + - Address any feedback from the review + +3. **Approval and merge**: + - Once approved, your PR will be merged + - The branch will be deleted after merging + +## Testing + +### Running Tests + +```bash +# Run all tests +cargo test + +# Run tests for a specific crate +cargo test -p sidecar + +# Run tests for a specific module +cargo test -p sidecar -- agentic::symbol + +# Run a specific test +cargo test -p sidecar -- agentic::symbol::test_symbol_manager +``` + +### Writing Tests + +Tests should be placed in the same file as the code they're testing, using the `#[cfg(test)]` attribute: + +```rust +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_something() { + // Test code here + assert_eq!(2 + 2, 4); + } +} +``` + +For integration tests, create files in the `tests/` directory of the respective crate. + +### Test Coverage + +You can generate test coverage reports using `cargo-tarpaulin`: + +```bash +cargo install cargo-tarpaulin +cargo tarpaulin -p sidecar +``` + +## Debugging + +### Logging + +Sidecar uses the `tracing` crate for logging. You can control the log level using the `RUST_LOG` environment variable: + +```bash +RUST_LOG=debug cargo run --bin webserver +``` + +Log levels from most to least verbose: `trace`, `debug`, `info`, `warn`, `error`. + +### Debugging with VS Code + +1. Install the [Rust Analyzer](https://marketplace.visualstudio.com/items?itemName=rust-lang.rust-analyzer) extension +2. 
Create a `.vscode/launch.json` file with the following content:
+
+```json
+{
+    "version": "0.2.0",
+    "configurations": [
+        {
+            "type": "lldb",
+            "request": "launch",
+            "name": "Debug webserver",
+            "cargo": {
+                "args": ["build", "--bin=webserver", "--package=sidecar"],
+                "filter": {
+                    "name": "webserver",
+                    "kind": "bin"
+                }
+            },
+            "args": [],
+            "cwd": "${workspaceFolder}"
+        }
+    ]
+}
+```
+
+3. Set breakpoints in your code
+4. Press F5 to start debugging
+
+## Code Style and Guidelines
+
+### Formatting
+
+Sidecar uses `rustfmt` for code formatting. Format your code before committing:
+
+```bash
+cargo fmt
+```
+
+### Naming Conventions
+
+- **Types** (structs, enums, traits): `PascalCase`
+- **Variables and functions**: `snake_case`
+- **Constants**: `SCREAMING_SNAKE_CASE`
+- **Modules**: `snake_case`
+
+### Documentation
+
+All public items should be documented using rustdoc comments:
+
+```rust
+/// This function does something useful.
+///
+/// # Arguments
+///
+/// * `arg1` - The first argument
+/// * `arg2` - The second argument
+///
+/// # Returns
+///
+/// A result containing the output or an error
+///
+/// # Examples
+///
+/// ```
+/// let result = do_something(1, 2);
+/// assert_eq!(result, Ok(3));
+/// ```
+pub fn do_something(arg1: i32, arg2: i32) -> Result<i32> {
+    // Implementation
+}
+```
+
+### Error Handling
+
+Use the `anyhow` crate for error handling in most cases. For library code that needs to define specific error types, use `thiserror`.
+
+```rust
+// Using anyhow
+use anyhow::{Result, Context};
+
+fn do_something() -> Result<()> {
+    let file = std::fs::File::open("file.txt")
+        .context("Failed to open file.txt")?;
+    // More code... 
+ Ok(()) +} + +// Using thiserror +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum MyError { + #[error("IO error: {0}")] + Io(#[from] std::io::Error), + + #[error("Invalid value: {0}")] + InvalidValue(String), +} +``` + +## Documentation + +### Generating Documentation + +You can generate and view the API documentation locally: + +```bash +cargo doc --open +``` + +### Writing Documentation + +- Document all public items (functions, structs, enums, traits) +- Include examples where appropriate +- Explain the purpose and behavior of each item +- Document error conditions and return values + +## Common Tasks + +### Adding a New LLM Provider + +1. Create a new file in `llm_client/src/clients/` for your provider +2. Implement the `LLMClient` trait for your provider +3. Add your provider to the `LLMType` enum in `llm_client/src/clients/types.rs` +4. Register your provider in the `LLMBroker::new()` method in `llm_client/src/broker.rs` + +### Adding a New Language Parser + +1. Add the tree-sitter grammar dependency to `sidecar/Cargo.toml` +2. Create a new file in `sidecar/src/chunking/` for your language +3. Implement the parsing logic for your language +4. Register your language in `sidecar/src/chunking/languages.rs` + +### Adding a New Tool + +1. Create a new file in `sidecar/src/agentic/tool/` for your tool +2. Implement the `Tool` trait for your tool +3. 
Register your tool in the `ToolBox::new()` method in `sidecar/src/agentic/symbol/tool_box.rs` + +## Troubleshooting + +### Common Issues + +#### Build Failures + +- **Missing dependencies**: Make sure you have all required system dependencies installed +- **Incompatible Rust version**: Ensure you're using Rust 1.79 or later +- **Cargo.lock conflicts**: Try running `cargo clean` and then `cargo build` + +#### Runtime Errors + +- **API key issues**: Check that you've set the required API keys as environment variables +- **Port conflicts**: If the port is already in use, change it using the `SIDECAR_PORT` environment variable +- **Database errors**: Check that SQLite is installed and working correctly + +### Getting Help + +If you're stuck, you can get help from the community: + +- **GitHub Issues**: Search existing issues or create a new one +- **Discord**: Join our [Discord server](https://discord.gg/mtgrhXM5Xf) for real-time help + +## Conclusion + +This development guide should help you get started with contributing to Sidecar. If you have any questions or suggestions for improving this guide, please open an issue or pull request. \ No newline at end of file diff --git a/FEATURES.md b/FEATURES.md new file mode 100644 index 000000000..82e0e3b46 --- /dev/null +++ b/FEATURES.md @@ -0,0 +1,340 @@ +# Sidecar Features and Capabilities + +This document provides a comprehensive overview of the features and capabilities of Sidecar, the AI intelligence engine that powers the Aide code editor. 
+ +## Table of Contents + +- [AI-Powered Code Assistance](#ai-powered-code-assistance) +- [Code Understanding](#code-understanding) +- [Repository Analysis](#repository-analysis) +- [Language Support](#language-support) +- [LLM Integration](#llm-integration) +- [Agentic Tools](#agentic-tools) +- [Decision Making](#decision-making) +- [User Interaction](#user-interaction) +- [Performance Optimizations](#performance-optimizations) +- [Security Features](#security-features) + +## AI-Powered Code Assistance + +### Code Editing + +Sidecar provides AI-powered code editing capabilities that help developers write, modify, and refactor code more efficiently. + +**Key Features:** +- **Smart Code Completion**: Context-aware code completion that understands the codebase +- **Code Refactoring**: Intelligent refactoring suggestions and implementations +- **Bug Fixing**: Identification and correction of bugs and issues +- **Code Generation**: Generation of new code based on natural language descriptions + +### Code Explanation + +Sidecar can explain code to help developers understand complex or unfamiliar code more quickly. + +**Key Features:** +- **Function Explanation**: Detailed explanations of function behavior and purpose +- **Algorithm Explanation**: Descriptions of algorithms and their implementation +- **Code Flow Analysis**: Analysis of code execution flow and logic +- **Documentation Generation**: Automatic generation of documentation from code + +### Code Review + +Sidecar can assist with code reviews by identifying issues, suggesting improvements, and providing feedback. 
+ +**Key Features:** +- **Issue Detection**: Identification of potential bugs, performance issues, and security vulnerabilities +- **Style Checking**: Verification of code style and consistency +- **Best Practice Suggestions**: Recommendations for following best practices +- **Improvement Suggestions**: Ideas for improving code quality and maintainability + +## Code Understanding + +### Symbol Analysis + +Sidecar analyzes code symbols (functions, classes, variables, etc.) to understand their purpose, behavior, and relationships. + +**Key Features:** +- **Symbol Extraction**: Identification and extraction of symbols from code +- **Symbol Relationship Mapping**: Analysis of relationships between symbols +- **Symbol Usage Analysis**: Tracking of symbol usage throughout the codebase +- **Symbol Documentation**: Generation of documentation for symbols + +### Semantic Analysis + +Sidecar performs semantic analysis to understand the meaning and intent of code beyond its syntax. + +**Key Features:** +- **Type Inference**: Determination of variable and expression types +- **Control Flow Analysis**: Analysis of code execution paths +- **Data Flow Analysis**: Tracking of data movement through the code +- **Intent Recognition**: Understanding the purpose and intent of code sections + +### Context Awareness + +Sidecar maintains context awareness to provide more relevant and accurate assistance. + +**Key Features:** +- **File Context**: Understanding of the current file and its purpose +- **Project Context**: Awareness of the overall project structure and goals +- **User Context**: Adaptation to user preferences and work patterns +- **Historical Context**: Consideration of previous interactions and changes + +## Repository Analysis + +### Repository Mapping + +Sidecar maps the structure and relationships within a code repository to provide better context for AI operations. 
+ +**Key Features:** +- **File Relationship Analysis**: Identification of relationships between files +- **Dependency Mapping**: Analysis of import and dependency relationships +- **Module Structure Analysis**: Understanding of the module and package structure +- **Architecture Visualization**: Visual representation of the codebase architecture + +### PageRank-Based Importance Scoring + +Sidecar uses a PageRank-like algorithm to identify important symbols and files in the codebase. + +**Key Features:** +- **Symbol Importance Scoring**: Ranking of symbols by their importance in the codebase +- **File Importance Scoring**: Ranking of files by their importance in the codebase +- **Relevance Determination**: Identification of code relevant to specific queries or tasks +- **Focus Prioritization**: Prioritization of important code sections for analysis + +### Git Integration + +Sidecar integrates with Git to understand repository history and changes. + +**Key Features:** +- **Commit History Analysis**: Analysis of commit history and patterns +- **Change Tracking**: Tracking of changes to specific files and symbols +- **Author Attribution**: Identification of code authors and contributors +- **Branch Analysis**: Understanding of branch structure and purpose + +## Language Support + +### Multi-Language Parsing + +Sidecar supports parsing and understanding of multiple programming languages. + +**Supported Languages:** +- **Rust**: Full support for Rust syntax and semantics +- **Python**: Comprehensive Python language support +- **JavaScript/TypeScript**: Support for JavaScript and TypeScript +- **Go**: Go language parsing and analysis + +### Language-Specific Features + +Sidecar provides language-specific features tailored to the characteristics and idioms of each supported language. 
+ +**Key Features:** +- **Rust**: Ownership and borrowing analysis, trait understanding +- **Python**: Type hint analysis, decorator understanding +- **JavaScript/TypeScript**: Type inference, async/await analysis +- **Go**: Interface implementation checking, goroutine analysis + +### Extensible Language Support + +Sidecar's language support is designed to be extensible, allowing for the addition of new languages. + +**Key Features:** +- **Language Parser Interface**: Common interface for language parsers +- **Tree-sitter Integration**: Use of Tree-sitter for efficient parsing +- **Language Configuration**: Configurable language settings +- **Custom Language Rules**: Support for custom language-specific rules + +## LLM Integration + +### Multi-Provider Support + +Sidecar integrates with multiple LLM providers to leverage their models for code understanding and generation. + +**Supported Providers:** +- **OpenAI**: Integration with GPT models +- **Anthropic**: Support for Claude models +- **Google AI**: Integration with Gemini models +- **Open-Source Models**: Support for various open-source models via Ollama, LM Studio, etc. + +### Provider-Specific Optimizations + +Sidecar includes optimizations for specific LLM providers to maximize their effectiveness. + +**Key Features:** +- **Prompt Engineering**: Provider-specific prompt templates and strategies +- **Token Optimization**: Efficient use of tokens for each provider +- **Model Selection**: Intelligent selection of appropriate models +- **Parameter Tuning**: Optimization of request parameters + +### Fallback Mechanisms + +Sidecar includes fallback mechanisms to handle provider failures or limitations. 
+ +**Key Features:** +- **Provider Failover**: Automatic switching to alternative providers +- **Graceful Degradation**: Reduced functionality when optimal providers are unavailable +- **Error Recovery**: Recovery from provider errors and limitations +- **Offline Capabilities**: Basic functionality without LLM access + +## Agentic Tools + +### Tool Selection + +Sidecar's agentic system can select appropriate tools for specific tasks. + +**Key Features:** +- **Task Analysis**: Analysis of tasks to determine required tools +- **Tool Matching**: Matching of tasks to appropriate tools +- **Tool Composition**: Combination of multiple tools for complex tasks +- **Tool Adaptation**: Adaptation of tools to specific contexts + +### Tool Execution + +Sidecar can execute tools to perform specific operations. + +**Key Features:** +- **Parameter Determination**: Intelligent determination of tool parameters +- **Execution Monitoring**: Monitoring of tool execution +- **Result Processing**: Processing and interpretation of tool results +- **Error Handling**: Handling of tool execution errors + +### Available Tools + +Sidecar includes a variety of tools for different operations. + +**Key Tools:** +- **Code Editing Tools**: Tools for modifying code +- **Symbol Analysis Tools**: Tools for analyzing code symbols +- **Repository Search Tools**: Tools for searching the repository +- **Context Gathering Tools**: Tools for gathering relevant context + +## Decision Making + +### Monte Carlo Tree Search + +Sidecar uses Monte Carlo Tree Search (MCTS) to explore possible code changes and select the most promising ones. + +**Key Features:** +- **Action Space Exploration**: Exploration of possible actions +- **Value Estimation**: Estimation of action values +- **Selection Strategy**: Intelligent selection of actions to explore +- **Execution Planning**: Planning of action execution + +### Feedback-Based Learning + +Sidecar can learn from feedback to improve its decision making. 
+ +**Key Features:** +- **User Feedback Processing**: Processing of explicit user feedback +- **Implicit Feedback Analysis**: Analysis of implicit feedback from user actions +- **Preference Learning**: Learning of user preferences +- **Adaptation**: Adaptation to user preferences and patterns + +### Multi-Step Planning + +Sidecar can plan and execute multi-step operations. + +**Key Features:** +- **Task Decomposition**: Breaking down complex tasks into steps +- **Step Sequencing**: Determining the optimal sequence of steps +- **Dependency Management**: Handling dependencies between steps +- **Progress Tracking**: Tracking progress through multi-step plans + +## User Interaction + +### Natural Language Understanding + +Sidecar can understand and process natural language queries and instructions. + +**Key Features:** +- **Query Parsing**: Parsing and understanding of user queries +- **Intent Recognition**: Identification of user intent +- **Context Incorporation**: Incorporation of context into query understanding +- **Ambiguity Resolution**: Resolution of ambiguous queries + +### Response Generation + +Sidecar generates natural language responses to user queries. + +**Key Features:** +- **Clear Explanations**: Generation of clear and concise explanations +- **Code Examples**: Inclusion of relevant code examples +- **Contextual References**: References to relevant code and documentation +- **Follow-up Suggestions**: Suggestions for follow-up queries or actions + +### Interactive Sessions + +Sidecar supports interactive sessions for ongoing user interaction. + +**Key Features:** +- **Session State Management**: Maintenance of session state +- **Context Retention**: Retention of context across interactions +- **Conversation History**: Tracking of conversation history +- **Session Persistence**: Persistence of sessions across restarts + +## Performance Optimizations + +### Token Usage Optimization + +Sidecar optimizes token usage to reduce costs and improve performance. 
+ +**Key Features:** +- **Context Pruning**: Removal of irrelevant context +- **Chunking**: Breaking large content into manageable chunks +- **Compression**: Compression of context information +- **Prioritization**: Prioritization of important content + +### Caching + +Sidecar uses caching to improve performance and reduce redundant operations. + +**Key Features:** +- **Response Caching**: Caching of LLM responses +- **Analysis Caching**: Caching of code analysis results +- **Repository Caching**: Caching of repository information +- **Invalidation Strategies**: Intelligent cache invalidation + +### Parallel Processing + +Sidecar uses parallel processing for computationally intensive operations. + +**Key Features:** +- **Multi-threading**: Use of multiple threads for parallel operations +- **Asynchronous Processing**: Non-blocking asynchronous operations +- **Work Distribution**: Intelligent distribution of work +- **Resource Management**: Efficient management of system resources + +## Security Features + +### API Key Management + +Sidecar securely manages API keys for various services. + +**Key Features:** +- **Secure Storage**: Secure storage of API keys +- **Access Control**: Controlled access to API keys +- **Key Rotation**: Support for key rotation +- **Minimal Exposure**: Minimization of key exposure + +### Code Safety + +Sidecar includes features to ensure the safety of code operations. + +**Key Features:** +- **Path Validation**: Validation of file paths +- **Operation Limits**: Limits on potentially dangerous operations +- **Input Sanitization**: Sanitization of user input +- **Permission Checking**: Checking of operation permissions + +### Privacy Protection + +Sidecar includes features to protect user privacy. 
+ +**Key Features:** +- **Data Minimization**: Minimization of data sent to external services +- **Local Processing**: Local processing when possible +- **Anonymization**: Anonymization of sensitive information +- **Transparency**: Transparency about data usage + +## Conclusion + +Sidecar provides a comprehensive set of features and capabilities for AI-powered code assistance. Its modular architecture, extensible design, and focus on performance and security make it a powerful tool for developers working with the Aide editor. \ No newline at end of file diff --git a/HOW_TO_CONTRIBUTE.md b/HOW_TO_CONTRIBUTE.md index 0576ec859..d363db2fb 100644 --- a/HOW_TO_CONTRIBUTE.md +++ b/HOW_TO_CONTRIBUTE.md @@ -1,12 +1,12 @@ -# Contributing to Sidecar -There are many ways to contribute to Sidecar: logging bugs, submitting pull requests, reporting issues, and creating suggestions. +# Contributing to LARP +There are many ways to contribute to LARP: logging bugs, submitting pull requests, reporting issues, and creating suggestions. -After cloning and building the repo, check out the [issues list](https://github.com/codestoryai/sidecar/issues?utf8=%E2%9C%93&q=is%3Aopen+is%3Aissue). Issues labeled [`help wanted`](https://github.com/codestoryai/sidecar/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) are good issues to submit a PR for. Issues labeled [`good first issue`](https://github.com/codestoryai/sidecar/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) are great candidates to pick up if you are in the code for the first time. If you are contributing significant changes, or if the issue is already assigned to a specific month milestone, please discuss with the assignee of the issue first before starting to work on the issue. +After cloning and building the repo, check out the [issues list](https://github.com/codestoryai/larp/issues?utf8=%E2%9C%93&q=is%3Aopen+is%3Aissue). 
Issues labeled [`help wanted`](https://github.com/codestoryai/larp/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) are good issues to submit a PR for. Issues labeled [`good first issue`](https://github.com/codestoryai/larp/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) are great candidates to pick up if you are in the code for the first time. If you are contributing significant changes, or if the issue is already assigned to a specific month milestone, please discuss with the assignee of the issue first before starting to work on the issue.

## How to build locally
1. Ensure you are using Rust 1.73
2. Build the binary: `cargo build --bin webserver`
-3. Run the binary: `./target/debug/webserver --qdrant-binary-directory ./sidecar/qdrant --dylib-directory ./sidecar/onnxruntime/ --model-dir ./sidecar/models/all-MiniLM-L6-v2/ --qdrant-url http://127.0.0.1:6334`
+3. Run the binary: `./target/debug/webserver --qdrant-binary-directory ./larp/qdrant --dylib-directory ./larp/onnxruntime/ --model-dir ./larp/models/all-MiniLM-L6-v2/ --qdrant-url http://127.0.0.1:6334`
4. Profit!

## Your own ideas
@@ -15,7 +15,7 @@ If you want a new feature or want to change something, please reach out to us on

## Debugging
The best way to debug is cowboy style, put print statments and check if your code is hitting the right branch and doing the right things.
-Since you will be working on the debug build of the sidecar, iteration cycles are fast, just run `cargo buid --bin webserver` and you should see the log spam on stdout.
+Since you will be working on the debug build of the larp, iteration cycles are fast, just run `cargo build --bin webserver` and you should see the log spam on stdout.

## Pull Requests
-We use the [GitHub flow](https://guides.github.com/introduction/flow/) for pull requests. This means that you should fork the repo, create a branch, make your changes, and then create a pull request. We will review your PR and provide feedback. 
Once the PR is approved, we will merge it into the main branch. \ No newline at end of file +We use the [GitHub flow](https://guides.github.com/introduction/flow/) for pull requests. This means that you should fork the repo, create a branch, make your changes, and then create a pull request. We will review your PR and provide feedback. Once the PR is approved, we will merge it into the main branch. diff --git a/KNOWLEDGE_GRAPH.md b/KNOWLEDGE_GRAPH.md new file mode 100644 index 000000000..3f3fbc097 --- /dev/null +++ b/KNOWLEDGE_GRAPH.md @@ -0,0 +1,469 @@ +# LARP Knowledge Graph + +This document provides a comprehensive knowledge graph of the LARP codebase, showing the relationships between different components and modules. + +## Repository Structure + +```mermaid +graph TD + Root["/ (Root)"] --> LARP["larp/"] + Root --> LLMClient["llm_client/"] + Root --> LLMPrompts["llm_prompts/"] + Root --> Logging["logging/"] + + LARP --> SrcLARP["src/"] + SrcLARP --> Webserver["webserver/"] + SrcLARP --> MCTS["mcts/"] + SrcLARP --> Agentic["agentic/"] + SrcLARP --> Repomap["repomap/"] + SrcLARP --> LLM["llm/"] + SrcLARP --> Repo["repo/"] + SrcLARP --> Chunking["chunking/"] + SrcLARP --> Agent["agent/"] + SrcLARP --> Git["git/"] + SrcLARP --> Bin["bin/"] + + LLMClient --> SrcLLM["src/"] + SrcLLM --> Clients["clients/"] + SrcLLM --> Format["format/"] + SrcLLM --> Tokenizer["tokenizer/"] + + LLMPrompts --> SrcPrompts["src/"] + SrcPrompts --> FIM["fim/"] + SrcPrompts --> Chat["chat/"] + SrcPrompts --> InLineEdit["in_line_edit/"] + + Logging --> SrcLogging["src/"] +``` + +## Crate Dependencies + +```mermaid +graph TD + LARP["larp"] --> LLMClient["llm_client"] + LARP --> LLMPrompts["llm_prompts"] + LARP --> Logging["logging"] + LLMPrompts --> LLMClient + Logging --> LLMClient +``` + +## Core Components and Their Relationships + +```mermaid +flowchart TD + subgraph Application + App["Application Core"] --> Config["Configuration"] + App --> RepoPool["Repository Pool"] + App --> 
LangParsing["Language Parsing"] + App --> LLMBroker["LLM Broker"] + App --> SymbolMgr["Symbol Manager"] + App --> ToolBox["Tool Box"] + end + + subgraph WebServer + Server["Web Server"] --> AgenticRouter["Agentic Router"] + Server --> PlanRouter["Plan Router"] + Server --> TreeSitterRouter["Tree Sitter Router"] + Server --> FileOpsRouter["File Operations Router"] + end + + subgraph Agentic + SymbolMgr --> ToolBroker["Tool Broker"] + ToolBox --> ToolBroker + ToolBroker --> CodeEditBroker["Code Edit Broker"] + SymbolMgr --> SymbolTracker["Symbol Tracker"] + ToolBox --> SymbolTracker + SymbolMgr --> EditorParsing["Editor Parsing"] + ToolBox --> EditorParsing + end + + subgraph LLM + LLMBroker --> Clients["LLM Clients"] + Clients --> OpenAI["OpenAI"] + Clients --> Anthropic["Anthropic"] + Clients --> GoogleAI["Google AI"] + Clients --> Ollama["Ollama"] + Clients --> Others["Other Providers"] + end + + App --> Server + SymbolMgr --> LLMBroker + ToolBroker --> LLMBroker +``` + +## Detailed Module Relationships + +### Webserver Module + +```mermaid +classDiagram + class Webserver { + +start(app: Application) + +run(app: Application) + } + + class AgenticRouter { + +probe_request_stop() + +code_sculpting() + +push_diagnostics() + +agent_session_chat() + +agent_session_edit_anchored() + +agent_session_edit_agentic() + +agent_session_plan() + +agent_tool_use() + } + + class TreeSitterRouter { + +extract_documentation_strings() + +extract_diagnostics_range() + +tree_sitter_node_check() + +check_valid_xml() + } + + class FileOperationsRouter { + +file_edit() + } + + Webserver --> AgenticRouter + Webserver --> TreeSitterRouter + Webserver --> FileOperationsRouter +``` + +### Agentic Module + +```mermaid +classDiagram + class SymbolManager { + +tool_broker: ToolBroker + +symbol_tracker: SymbolTrackerInline + +editor_parsing: EditorParsing + +llm_properties: LLMProperties + } + + class ToolBox { + +tool_broker: ToolBroker + +symbol_tracker: SymbolTrackerInline + +editor_parsing: 
EditorParsing + } + + class ToolBroker { + +llm_broker: LLMBroker + +code_edit_broker: CodeEditBroker + +symbol_tracker: SymbolTrackerInline + +language_parsing: TSLanguageParsing + +configuration: ToolBrokerConfiguration + +llm_properties: LLMProperties + } + + class Memory { + +store(key, value) + +retrieve(key) + +list_keys() + } + + SymbolManager --> ToolBroker + ToolBox --> ToolBroker + SymbolManager --> Memory +``` + +### MCTS (Monte Carlo Tree Search) Module + +```mermaid +classDiagram + class ActionNode { + +id: String + +parent_id: Option + +children: Vec + +action: Action + +state: ActionState + +visits: u64 + +value: f64 + } + + class Selector { + +select_node(nodes: &[ActionNode]) + } + + class Decider { + +decide(nodes: &[ActionNode]) + } + + class Execution { + +execute(action: Action) + } + + class Feedback { + +evaluate(action: Action, result: ActionResult) + } + + class ValueFunction { + +calculate(node: &ActionNode) + } + + ActionNode --> Selector + Selector --> Decider + Decider --> Execution + Execution --> Feedback + Feedback --> ValueFunction + ValueFunction --> ActionNode +``` + +### Repository Mapping Module + +```mermaid +classDiagram + class Analyser { + +analyze_repository(repo_path: &Path) + } + + class Graph { + +add_node(node: Node) + +add_edge(from: NodeId, to: NodeId) + +calculate_page_rank() + } + + class TreeWalker { + +walk(root: &Path) + } + + class Files { + +list_files(root: &Path) + +filter_files(files: &[PathBuf], ignore_patterns: &[String]) + } + + class TreeContext { + +get_context(node: &Node, depth: usize) + } + + Analyser --> Graph + Analyser --> TreeWalker + TreeWalker --> Files + Graph --> TreeContext +``` + +### LLM Client Module + +```mermaid +classDiagram + class LLMBroker { + +clients: Map + +get_client(llm_type: LLMType) + +generate_completion(prompt: String, llm_type: LLMType) + +generate_chat_completion(messages: Vec, llm_type: LLMType) + } + + class LLMClient { + +generate_completion(prompt: String) + 
+generate_chat_completion(messages: Vec) + +stream_chat_completion(messages: Vec) + } + + class OpenAIClient { + +api_key: String + +model: String + } + + class AnthropicClient { + +api_key: String + +model: String + } + + class GoogleAIClient { + +api_key: String + +model: String + } + + class LLMTokenizer { + +count_tokens(text: &str, model: &str) + +truncate_text(text: &str, max_tokens: usize, model: &str) + } + + LLMBroker --> LLMClient + LLMClient <|-- OpenAIClient + LLMClient <|-- AnthropicClient + LLMClient <|-- GoogleAIClient + LLMBroker --> LLMTokenizer +``` + +### Code Chunking Module + +```mermaid +classDiagram + class TSLanguageParsing { + +parse_file(file_path: &Path, language: Language) + +get_symbols(parsed: &Tree) + } + + class EditorParsing { + +parse_text(text: &str, language: Language) + +get_symbols_at_position(parsed: &Tree, position: Position) + } + + class FileContent { + +path: PathBuf + +content: String + +language: Language + } + + class TextDocument { + +uri: String + +version: i32 + +content: String + +language_id: String + } + + class ScopeGraph { + +add_scope(scope: Scope) + +connect_scopes(parent: ScopeId, child: ScopeId) + +get_scope_at_position(position: Position) + } + + TSLanguageParsing --> FileContent + EditorParsing --> TextDocument + TSLanguageParsing --> ScopeGraph + EditorParsing --> ScopeGraph +``` + +## Data Flow Diagrams + +### Request Processing Flow + +```mermaid +sequenceDiagram + participant Editor as Aide Editor + participant API as Webserver API + participant App as Application Core + participant LLM as LLM Client + participant Repo as Repository Analysis + participant Agent as Agentic Tools + + Editor->>API: Request (code context, query) + API->>App: Process request + App->>Repo: Analyze repository context + Repo-->>App: Repository context + App->>Agent: Select appropriate tools + Agent->>LLM: Generate prompt with context + LLM-->>Agent: LLM response + Agent->>App: Process LLM response + App->>API: Formatted 
response + API->>Editor: Display results to user +``` + +### Code Editing Flow + +```mermaid +sequenceDiagram + participant Editor as Aide Editor + participant API as Webserver API + participant Agent as Agentic System + participant MCTS as MCTS Engine + participant LLM as LLM Client + participant FS as File System + + Editor->>API: Edit request with code context + API->>Agent: Process edit request + Agent->>MCTS: Generate possible edits + + loop Action Selection + MCTS->>LLM: Generate candidate actions + LLM-->>MCTS: Candidate actions + MCTS->>MCTS: Evaluate actions + MCTS->>MCTS: Select best action + end + + MCTS-->>Agent: Best edit action + Agent->>FS: Apply edit to file + FS-->>Agent: Edit result + Agent->>API: Edit response + API->>Editor: Updated code +``` + +### Symbol Analysis Flow + +```mermaid +sequenceDiagram + participant Editor as Aide Editor + participant API as Webserver API + participant SymbolMgr as Symbol Manager + participant Parser as Language Parser + participant LLM as LLM Client + + Editor->>API: Symbol analysis request + API->>SymbolMgr: Process symbol request + SymbolMgr->>Parser: Parse code for symbols + Parser-->>SymbolMgr: Symbol tree + SymbolMgr->>LLM: Generate symbol analysis + LLM-->>SymbolMgr: Symbol insights + SymbolMgr->>API: Symbol analysis response + API->>Editor: Display symbol insights +``` + +## Key Concepts and Abstractions + +```mermaid +mindmap + root((LARP)) + Application + Configuration + Repository Pool + Language Parsing + AI Components + LLM Integration + OpenAI + Anthropic + Google AI + Others + Agentic System + Tool Box + Symbol Manager + Memory + MCTS + Action Nodes + Selection Strategy + Execution + Code Understanding + Repository Mapping + Page Rank + Symbol Graph + Code Chunking + Language Parsers + Symbol Extraction + Tree-sitter Integration + Web Interface + API Endpoints + Request Handling + Response Streaming +``` + +## Feature Dependency Graph + +```mermaid +graph TD + CodeEditing["Code Editing"] --> 
SymbolAnalysis["Symbol Analysis"] + CodeEditing --> LanguageParsing["Language Parsing"] + CodeEditing --> LLMIntegration["LLM Integration"] + + SymbolAnalysis --> RepositoryMapping["Repository Mapping"] + SymbolAnalysis --> TreeSitter["Tree-sitter Parsing"] + + AgentChat["Agent Chat"] --> LLMIntegration + AgentChat --> ContextGathering["Context Gathering"] + + ContextGathering --> RepositoryMapping + ContextGathering --> FileAnalysis["File Analysis"] + + MCTSDecision["MCTS Decision Making"] --> LLMIntegration + MCTSDecision --> ToolExecution["Tool Execution"] + + ToolExecution --> FileSystem["File System Access"] + ToolExecution --> GitIntegration["Git Integration"] + + InlineCompletion["Inline Completion"] --> LanguageParsing + InlineCompletion --> LLMIntegration + + LanguageParsing --> TreeSitter +``` + +## Conclusion + +This knowledge graph provides a comprehensive view of the LARP codebase structure and relationships between different components. It should help developers understand how the various parts of the system interact and how data flows through the application. 
diff --git a/README.md b/README.md index 42e8b63b8..fc66db557 100644 --- a/README.md +++ b/README.md @@ -1,53 +1,454 @@ ``` - - ██████╗ ██████╗ ██████╗ ███████╗███████╗████████╗ ██████╗ ██████╗ ██╗ ██╗ -██╔════╝██╔═══██╗██╔══██╗██╔════╝██╔════╝╚══██╔══╝██╔═══██╗██╔══██╗╚██╗ ██╔╝ -██║ ██║ ██║██║ ██║█████╗ ███████╗ ██║ ██║ ██║██████╔╝ ╚████╔╝ -██║ ██║ ██║██║ ██║██╔══╝ ╚════██║ ██║ ██║ ██║██╔══██╗ ╚██╔╝ -╚██████╗╚██████╔╝██████╔╝███████╗███████║ ██║ ╚██████╔╝██║ ██║ ██║ - ╚═════╝ ╚═════╝ ╚═════╝ ╚══════╝╚══════╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝ - +▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▓▓▓▓▒▒▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓ +▓▓▓▓▓▒▓▒▓▒▒▓▓▓▓█████▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▓ +▓▓▓▒▒▒▒▒▒▓▓███████████▓▓██▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▒ +▓▒▒▓▒▒▒▓██████▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒ +▓▒▒▒▒▒▓██▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒ +▓▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒ +▓▒▒▒▓▓▓▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒ +▓▒▒▒▓▓▓▒▓▓▓▓▓▓▓▓▓▓▓███▓▓▓▒▒▓▓▓███▓▓▓▓▓▒▒▒▒ +▓▒▒▒▓▓▒▒▓▓▓▓▓▓▓▓▓█▒▒░░░░░░░░░░░░░▒█▒▓▓▒▒▒▒ +▓▒▒▒▓▓▒▒▓██▓▓▓▓█▒░░░░░░░░░░░░░▓██▓░░░▒▒▒▒▒ +▓▒▒▓▓▓▒▓▓▓▓▓░░░░░▓▓█▓░░░░░░░░▒█░██▓░░░▒░▒▒ +▓▒▒▓▓▒▓▓▓▒▒░░░░░██▓███▓░░░░░░█████░░░░░░░▒ +▓▒▒▓▓░░░░░░░░░░▓▒█████▓░░░░░░▒███▒░░░░░░░▒ +██▓▓▓▒░░░░░░░░░░░▓███▓▒░░░░░░░░░▒░░░▒▒▒▒▒▒ +███▓▓▓▓▒░░░░░░░░░░▒█▒░░░░░░░░░░░░░▒▓▒▓▓▓▒▒ +████▓▓▓▓██▓▒░░░░░░░░░░░░░░░▒░░░░░▒▓▒▓▓▓▓▓▓ +███████▓▓██████▓▒░░░░░░░░░░░░░░▒▓▓████████ +████████▓▓▓████████▓▒░░░░░░▒▓▓▓▓▓▓████████ +███████████▓▓██████▓▓▓▓▓▓▓▓███████████████ +███████████████▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█████████ +██████████████▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓████████ +█████████████▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓████████ +█████████████▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓███████ + + + ``` -