
API Reference

Complete reference for the SentinelAI Python API.

SentinelEngine

The main entry point for running security scans.

SentinelEngine

class SentinelEngine:
    def __init__(self, config: dict | None = None) -> None: ...

Parameters:

| Name   | Type           | Description                                   |
| ------ | -------------- | --------------------------------------------- |
| config | `dict \| None` | Configuration dictionary. See Configuration.  |

Class methods:

@classmethod
def from_config(cls, path: str | Path) -> SentinelEngine:
    """Create an engine from a YAML configuration file."""

SentinelEngine.scan

def scan(
    self,
    target: str | Path,
    *,
    scanners: list[str] | None = None,
    severity_threshold: str | Severity | None = None,
) -> ScanResults:
    """
    Scan a file or directory for security vulnerabilities.

    Args:
        target: Path to a file or directory to scan.
        scanners: Override the configured scanner list for this scan.
        severity_threshold: Override the minimum severity for this scan.

    Returns:
        ScanResults containing all findings.

    Raises:
        FileNotFoundError: If target does not exist.
        SentinelAIError: If a scanner encounters an unrecoverable error.
    """

ScanResults

class ScanResults:
    findings: list[Finding]
    files_scanned: int
    total_findings: int
    duration_ms: float
    scanners_used: list[str]
    timestamp: datetime

    def filter(
        self,
        *,
        min_severity: Severity | None = None,
        category: str | None = None,
        rule_id: str | None = None,
    ) -> list[Finding]:
        """Filter findings by severity, category, or rule ID."""

    def to_dict(self) -> dict:
        """Serialize results to a dictionary."""

    def to_json(self, path: str | Path) -> None:
        """Write results to a JSON file."""

    def to_sarif(self, path: str | Path) -> None:
        """Write results in SARIF format."""

Finding

class Finding:
    rule_id: str
    severity: Severity
    confidence: Confidence
    category: str
    message: str
    file_path: str
    line_number: int
    column: int | None
    code_snippet: str | None
    cwe_id: str | None
    remediation: str | None

Severity

class Severity(Enum):
    CRITICAL = "critical"
    HIGH = "high"
    MEDIUM = "medium"
    LOW = "low"
    INFO = "info"

Confidence

class Confidence(Enum):
    HIGH = "high"
    MEDIUM = "medium"
    LOW = "low"

LLMFirewall

Protects LLM applications against prompt injection, PII leakage, and token abuse.

LLMFirewall

class LLMFirewall:
    def __init__(self, config: dict | None = None) -> None: ...

LLMFirewall.analyze_input

def analyze_input(
    self,
    text: str,
    *,
    context: dict | None = None,
) -> InputAnalysisResult:
    """
    Analyze input text for prompt injection attacks.

    Args:
        text: The user input to analyze.
        context: Optional context (user_id, session_id, etc.).

    Returns:
        InputAnalysisResult with verdict and details.
    """

InputAnalysisResult

class InputAnalysisResult:
    verdict: AnalysisVerdict
    is_safe: bool
    injection_score: float          # 0.0 (safe) to 1.0 (definite injection)
    matched_patterns: list[str]
    explanation: str | None
    latency_ms: float

LLMFirewall.analyze_output

def analyze_output(
    self,
    text: str,
    *,
    pii_action: str | None = None,
) -> OutputAnalysisResult:
    """
    Analyze LLM output for PII leakage.

    Args:
        text: The LLM-generated text to analyze.
        pii_action: Override the configured PII action (redact|block|warn).

    Returns:
        OutputAnalysisResult with PII findings and sanitized text.
    """

OutputAnalysisResult

class OutputAnalysisResult:
    pii_detected: bool
    pii_entities: list[PIIEntity]
    sanitized_text: str | None
    verdict: AnalysisVerdict

PIIEntity

class PIIEntity:
    entity_type: str                # email, phone, ssn, credit_card, etc.
    start: int                      # start offset in text
    end: int                        # end offset in text
    original_value: str
    redacted_value: str
    confidence: float

LLMFirewall.record_token_usage

def record_token_usage(
    self,
    *,
    model: str,
    input_tokens: int,
    output_tokens: int,
    request_id: str | None = None,
) -> None:
    """Record token consumption for budget tracking."""

LLMFirewall.get_stats

def get_stats(self) -> FirewallStats:
    """Get aggregated firewall statistics."""

FirewallStats

class FirewallStats:
    total_requests: int
    blocked_injection: int
    allowed_requests: int
    pii_detections: int
    total_tokens: int
    tokens_by_model: dict[str, int]
    budget_utilization_pct: float

AnalysisVerdict

class AnalysisVerdict(Enum):
    ALLOWED = "allowed"
    BLOCKED = "blocked"
    WARNED = "warned"

AgentMonitor

Observes and controls autonomous AI agent behavior.

AgentMonitor

class AgentMonitor:
    def __init__(self, config: dict | None = None) -> None: ...

AgentMonitor.start

def start(self) -> None:
    """Start the monitoring session."""

AgentMonitor.stop

def stop(self) -> None:
    """Stop the monitoring session and finalize data."""

AgentMonitor.record_action

def record_action(
    self,
    *,
    agent_id: str,
    action_type: ActionType,
    detail: str,
    tool_name: str | None = None,
    input_tokens: int = 0,
    output_tokens: int = 0,
    metadata: dict | None = None,
    timestamp: datetime | None = None,
) -> None:
    """
    Record an agent action.

    Args:
        agent_id: Unique identifier for the agent.
        action_type: The type of action performed.
        detail: Human-readable description of the action.
        tool_name: Name of the tool invoked (if applicable).
        input_tokens: Number of input tokens consumed.
        output_tokens: Number of output tokens generated.
        metadata: Arbitrary metadata to attach to the event.
        timestamp: Override the event timestamp (default: now).
    """

AgentMonitor.get_report

def get_report(
    self,
    *,
    agent_id: str | None = None,
) -> AgentReport:
    """
    Get a monitoring report.

    Args:
        agent_id: Filter report to a specific agent. If None, returns
                  an aggregate report across all agents.

    Returns:
        AgentReport with action counts, token usage, and risk assessment.
    """

AgentMonitor.on

def on(
    self,
    event: str,
    callback: Callable[[Event], None] | Callable[[Event], Awaitable[None]],
) -> None:
    """
    Register a callback for a monitoring event.

    Args:
        event: Event name (see Agent Monitor docs for available events).
        callback: Sync or async function to invoke when the event fires.
    """

AgentReport

class AgentReport:
    agent_id: str | None
    total_actions: int
    llm_calls: int
    tool_calls: int
    high_risk_count: int
    total_input_tokens: int
    total_output_tokens: int
    overall_risk: RiskLevel
    duration: timedelta
    actions_per_minute: float
    warnings: list[str]
    risk_breakdown: dict[RiskLevel, int]

    def count_by_risk(self, level: RiskLevel) -> int:
        """Count actions at the specified risk level."""

    def to_dict(self) -> dict:
        """Serialize report to a dictionary."""

    def to_json(self, path: str | Path) -> None:
        """Write report to a JSON file."""

ActionType

class ActionType(Enum):
    LLM_CALL = "llm_call"
    TOOL_CALL = "tool_call"
    FILE_READ = "file_read"
    FILE_WRITE = "file_write"
    FILE_DELETE = "file_delete"
    SHELL_COMMAND = "shell_command"
    NETWORK_REQUEST = "network_request"
    DATABASE_QUERY = "database_query"
    CREDENTIAL_ACCESS = "credential_access"
    USER_INTERACTION = "user_interaction"

RiskLevel

class RiskLevel(Enum):
    CRITICAL = "critical"
    HIGH = "high"
    MEDIUM = "medium"
    LOW = "low"
    NONE = "none"

ReportGenerator

Generates formatted reports from scan results.

ReportGenerator

class ReportGenerator:
    def __init__(self, results: ScanResults) -> None: ...

ReportGenerator.to_text

def to_text(self, *, group_by: str = "file", colorize: bool = True) -> str:
    """Generate a human-readable text report."""

ReportGenerator.to_json

def to_json(self, path: str | Path | None = None) -> str | None:
    """
    Generate a JSON report.

    Args:
        path: File path to write to. If None, returns the JSON string.
    """

ReportGenerator.to_sarif

def to_sarif(self, path: str | Path | None = None) -> str | None:
    """Generate a SARIF 2.1.0 report for integration with code analysis tools."""

ReportGenerator.to_html

def to_html(self, path: str | Path) -> None:
    """Generate a self-contained HTML report with interactive filtering."""

ReportGenerator.to_markdown

def to_markdown(self, path: str | Path | None = None) -> str | None:
    """Generate a Markdown report."""

Exceptions

class SentinelAIError(Exception):
    """Base exception for all SentinelAI errors."""

class ConfigurationError(SentinelAIError):
    """Raised when configuration is invalid."""

class ScanError(SentinelAIError):
    """Raised when a scan encounters an unrecoverable error."""

class FirewallError(SentinelAIError):
    """Raised when the firewall encounters an error."""

class MonitorError(SentinelAIError):
    """Raised when the monitor encounters an error."""