Complete reference for the SentinelAI Python API.
The main entry point for running security scans.
class SentinelEngine:
def __init__(self, config: dict | None = None) -> None: ...

Parameters:
| Name | Type | Description |
|---|---|---|
| config | dict \| None | Configuration dictionary. See Configuration. |
Class methods:
@classmethod
def from_config(cls, path: str | Path) -> SentinelEngine:
"""Create an engine from a YAML configuration file."""def scan(
self,
target: str | Path,
*,
scanners: list[str] | None = None,
severity_threshold: str | Severity | None = None,
) -> ScanResults:
"""
Scan a file or directory for security vulnerabilities.
Args:
target: Path to a file or directory to scan.
scanners: Override the configured scanner list for this scan.
severity_threshold: Override the minimum severity for this scan.
Returns:
ScanResults containing all findings.
Raises:
FileNotFoundError: If target does not exist.
SentinelAIError: If a scanner encounters an unrecoverable error.
"""class ScanResults:
findings: list[Finding]
files_scanned: int
total_findings: int
duration_ms: float
scanners_used: list[str]
timestamp: datetime
def filter(
self,
*,
min_severity: Severity | None = None,
category: str | None = None,
rule_id: str | None = None,
) -> list[Finding]:
"""Filter findings by severity, category, or rule ID."""
def to_dict(self) -> dict:
"""Serialize results to a dictionary."""
def to_json(self, path: str | Path) -> None:
"""Write results to a JSON file."""
def to_sarif(self, path: str | Path) -> None:
"""Write results in SARIF format."""class Finding:
rule_id: str
severity: Severity
confidence: Confidence
category: str
message: str
file_path: str
line_number: int
column: int | None
code_snippet: str | None
cwe_id: str | None
remediation: str | None

class Severity(Enum):
CRITICAL = "critical"
HIGH = "high"
MEDIUM = "medium"
LOW = "low"
INFO = "info"class Confidence(Enum):
HIGH = "high"
MEDIUM = "medium"
LOW = "low"Protects LLM applications against prompt injection, PII leakage, and token abuse.
class LLMFirewall:
def __init__(self, config: dict | None = None) -> None: ...

def analyze_input(
self,
text: str,
*,
context: dict | None = None,
) -> InputAnalysisResult:
"""
Analyze input text for prompt injection attacks.
Args:
text: The user input to analyze.
context: Optional context (user_id, session_id, etc.).
Returns:
InputAnalysisResult with verdict and details.
"""class InputAnalysisResult:
verdict: AnalysisVerdict
is_safe: bool
injection_score: float # 0.0 (safe) to 1.0 (definite injection)
matched_patterns: list[str]
explanation: str | None
latency_ms: float

def analyze_output(
self,
text: str,
*,
pii_action: str | None = None,
) -> OutputAnalysisResult:
"""
Analyze LLM output for PII leakage.
Args:
text: The LLM-generated text to analyze.
pii_action: Override the configured PII action (redact|block|warn).
Returns:
OutputAnalysisResult with PII findings and sanitized text.
"""class OutputAnalysisResult:
pii_detected: bool
pii_entities: list[PIIEntity]
sanitized_text: str | None
verdict: AnalysisVerdict

class PIIEntity:
entity_type: str # email, phone, ssn, credit_card, etc.
start: int # start offset in text
end: int # end offset in text
original_value: str
redacted_value: str
confidence: float

def record_token_usage(
self,
*,
model: str,
input_tokens: int,
output_tokens: int,
request_id: str | None = None,
) -> None:
"""Record token consumption for budget tracking."""def get_stats(self) -> FirewallStats:
"""Get aggregated firewall statistics."""class FirewallStats:
total_requests: int
blocked_injection: int
allowed_requests: int
pii_detections: int
total_tokens: int
tokens_by_model: dict[str, int]
budget_utilization_pct: float

class AnalysisVerdict(Enum):
ALLOWED = "allowed"
BLOCKED = "blocked"
WARNED = "warned"Observes and controls autonomous AI agent behavior.
class AgentMonitor:
def __init__(self, config: dict | None = None) -> None: ...

def start(self) -> None:
"""Start the monitoring session."""def stop(self) -> None:
"""Stop the monitoring session and finalize data."""def record_action(
self,
*,
agent_id: str,
action_type: ActionType,
detail: str,
tool_name: str | None = None,
input_tokens: int = 0,
output_tokens: int = 0,
metadata: dict | None = None,
timestamp: datetime | None = None,
) -> None:
"""
Record an agent action.
Args:
agent_id: Unique identifier for the agent.
action_type: The type of action performed.
detail: Human-readable description of the action.
tool_name: Name of the tool invoked (if applicable).
input_tokens: Number of input tokens consumed.
output_tokens: Number of output tokens generated.
metadata: Arbitrary metadata to attach to the event.
timestamp: Override the event timestamp (default: now).
"""def get_report(
self,
*,
agent_id: str | None = None,
) -> AgentReport:
"""
Get a monitoring report.
Args:
agent_id: Filter report to a specific agent. If None, returns
an aggregate report across all agents.
Returns:
AgentReport with action counts, token usage, and risk assessment.
"""def on(
self,
event: str,
callback: Callable[[Event], None] | Callable[[Event], Awaitable[None]],
) -> None:
"""
Register a callback for a monitoring event.
Args:
event: Event name (see Agent Monitor docs for available events).
callback: Sync or async function to invoke when the event fires.
"""class AgentReport:
agent_id: str | None
total_actions: int
llm_calls: int
tool_calls: int
high_risk_count: int
total_input_tokens: int
total_output_tokens: int
overall_risk: RiskLevel
duration: timedelta
actions_per_minute: float
warnings: list[str]
risk_breakdown: dict[RiskLevel, int]
def count_by_risk(self, level: RiskLevel) -> int:
"""Count actions at the specified risk level."""
def to_dict(self) -> dict:
"""Serialize report to a dictionary."""
def to_json(self, path: str | Path) -> None:
"""Write report to a JSON file."""class ActionType(Enum):
LLM_CALL = "llm_call"
TOOL_CALL = "tool_call"
FILE_READ = "file_read"
FILE_WRITE = "file_write"
FILE_DELETE = "file_delete"
SHELL_COMMAND = "shell_command"
NETWORK_REQUEST = "network_request"
DATABASE_QUERY = "database_query"
CREDENTIAL_ACCESS = "credential_access"
USER_INTERACTION = "user_interaction"class RiskLevel(Enum):
CRITICAL = "critical"
HIGH = "high"
MEDIUM = "medium"
LOW = "low"
NONE = "none"Generates formatted reports from scan results.
class ReportGenerator:
def __init__(self, results: ScanResults) -> None: ...

def to_text(self, *, group_by: str = "file", colorize: bool = True) -> str:
"""Generate a human-readable text report."""def to_json(self, path: str | Path | None = None) -> str | None:
"""
Generate a JSON report.
Args:
path: File path to write to. If None, returns the JSON string.
"""def to_sarif(self, path: str | Path | None = None) -> str | None:
"""Generate a SARIF 2.1.0 report for integration with code analysis tools."""def to_html(self, path: str | Path) -> None:
"""Generate a self-contained HTML report with interactive filtering."""def to_markdown(self, path: str | Path | None = None) -> str | None:
"""Generate a Markdown report."""class SentinelAIError(Exception):
"""Base exception for all SentinelAI errors."""
class ConfigurationError(SentinelAIError):
"""Raised when configuration is invalid."""
class ScanError(SentinelAIError):
"""Raised when a scan encounters an unrecoverable error."""
class FirewallError(SentinelAIError):
"""Raised when the firewall encounters an error."""
class MonitorError(SentinelAIError):
"""Raised when the monitor encounters an error."""