diff --git a/CHANGELOG.md b/CHANGELOG.md
index a3aad58..9d910d1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,12 @@
 ### Features
 
 * Replace sqlite3 library with [sqlean](https://antonz.org/sqlean/). It's a drop-in replacement for sqlite3.
+* The `llm` library is now a default dependency instead of being installed on demand.
+* The `\llm` command now has three modes: Succinct, Regular, and Verbose.
+
+  Succinct = `\llm-` - Returns only the SQL query, with no explanation.
+  Regular = `\llm` - Returns the SQL query along with an explanation.
+  Verbose = `\llm+` - Prints the prompt sent to the LLM, the SQL query, and the explanation.
 
 ### Bug Fixes
 
diff --git a/litecli/packages/special/llm.py b/litecli/packages/special/llm.py
index 956d659..c6294d5 100644
--- a/litecli/packages/special/llm.py
+++ b/litecli/packages/special/llm.py
@@ -2,34 +2,29 @@
 import io
 import logging
 import os
+import pprint
 import re
 import shlex
 import sys
 from runpy import run_module
-from typing import Optional, Tuple
 from time import time
+from typing import Optional, Tuple
 
 import click
-
-try:
-    import llm
-    from llm.cli import cli
-
-    LLM_CLI_COMMANDS = list(cli.commands.keys())
-    MODELS = {x.model_id: None for x in llm.get_models()}
-except ImportError:
-    llm = None
-    cli = None
-    LLM_CLI_COMMANDS = []
-    MODELS = {}
+import llm
+from llm.cli import cli
 
 from . import export
-from .main import parse_special_command
+from .main import Verbosity, parse_special_command
 
 log = logging.getLogger(__name__)
 
+LLM_TEMPLATE_NAME = "litecli-llm-template"
+LLM_CLI_COMMANDS = list(cli.commands.keys())
+MODELS = {x.model_id: None for x in llm.get_models()}
 
-def run_external_cmd(cmd, *args, capture_output=False, restart_cli=False, raise_exception=True):
+
+def run_external_cmd(cmd, *args, capture_output=False, restart_cli=False, raise_exception=True) -> Tuple[int, str]:
     original_exe = sys.executable
     original_args = sys.argv
 
@@ -55,6 +50,13 @@ def run_external_cmd(cmd, *args, capture_output=False, restart_cli=False, raise_
                 raise RuntimeError(buffer.getvalue())
             else:
                 raise RuntimeError(f"Command {cmd} failed with exit code {code}.")
+    except Exception as e:
+        code = 1
+        if raise_exception:
+            if capture_output:
+                raise RuntimeError(buffer.getvalue())
+            else:
+                raise RuntimeError(f"Command {cmd} failed: {e}")
 
     if restart_cli and code == 0:
         os.execv(original_exe, [original_exe] + original_args)
@@ -171,14 +173,10 @@ def __init__(self, results=None):
 ```sql
 SELECT count(*) FROM table_name;
 ```
-"""
-
-
-def initialize_llm():
-    # Initialize the LLM library.
-    if click.confirm("This feature requires additional libraries. Install LLM library?", default=True):
-        click.echo("Installing LLM library. Please wait...")
-        run_external_cmd("pip", "install", "--quiet", "llm", restart_cli=True)
+
+If the question cannot be answered based on the database schema, respond with "I
+cannot answer that question" in a sql code fence.
+"""
 
 
 def ensure_litecli_template(replace=False):
@@ -187,11 +185,11 @@ def ensure_litecli_template(replace=False):
     """
     if not replace:
         # Check if it already exists.
-        code, _ = run_external_cmd("llm", "templates", "show", "litecli", capture_output=True, raise_exception=False)
+        code, _ = run_external_cmd("llm", "templates", "show", LLM_TEMPLATE_NAME, capture_output=True, raise_exception=False)
        if code == 0:  # Template already exists. No need to create it.
             return
 
-    run_external_cmd("llm", PROMPT, "--save", "litecli")
+    run_external_cmd("llm", PROMPT, "--save", LLM_TEMPLATE_NAME)
     return
 
 
@@ -205,12 +203,10 @@ def handle_llm(text, cur) -> Tuple[str, Optional[str], float]:
     FinishIteration() which will be caught by the main loop AND print any
     output that was supplied (or None).
     """
-    _, verbose, arg = parse_special_command(text)
-
-    # LLM is not installed.
-    if llm is None:
-        initialize_llm()
-        raise FinishIteration(None)
+    # Determine invocation mode: regular, verbose (+), or succinct (-)
+    _, mode, arg = parse_special_command(text)
+    is_verbose = mode is Verbosity.VERBOSE
+    is_succinct = mode is Verbosity.SUCCINCT
     if not arg.strip():  # No question provided. Print usage and bail.
         output = [(None, None, None, USAGE)]
         raise FinishIteration(output)
@@ -268,20 +264,23 @@ def handle_llm(text, cur) -> Tuple[str, Optional[str], float]:
             output = [(None, None, None, result)]
             raise FinishIteration(output)
 
-        return result if verbose else "", sql, end - start
+        context = "" if is_succinct else result
+        return context, sql, end - start
     else:
         run_external_cmd("llm", *args, restart_cli=restart)
         raise FinishIteration(None)
 
     try:
         ensure_litecli_template()
-        # Measure end to end llm command invocation.
-        # This measures the internal DB command to pull the schema and llm command
+        # Measure end-to-end LLM command invocation (schema gathering and LLM call)
        start = time()
-        context, sql = sql_using_llm(cur=cur, question=arg, verbose=verbose)
+        result, sql, prompt_text = sql_using_llm(cur=cur, question=arg, verbose=is_verbose)
         end = time()
-        if not verbose:
-            context = ""
+        context = "" if is_succinct else result
+        if is_verbose and prompt_text is not None:
+            click.echo("LLM Prompt:")
+            click.echo(prompt_text)
+            click.echo("---")
         return context, sql, end - start
     except Exception as e:
         # Something went wrong. Raise an exception and bail.
@@ -298,7 +297,7 @@ def is_llm_command(command) -> bool:
 
 
 @export
-def sql_using_llm(cur, question=None, verbose=False) -> Tuple[str, Optional[str]]:
+def sql_using_llm(cur, question=None, verbose=False) -> Tuple[str, Optional[str], Optional[str]]:
     if cur is None:
         raise RuntimeError("Connect to a datbase and try again.")
     schema_query = """
@@ -331,7 +330,7 @@ def sql_using_llm(cur, question=None, verbose=False) -> Tuple[str, Optional[str]
 
     args = [
         "--template",
-        "litecli",
+        LLM_TEMPLATE_NAME,
         "--param",
         "db_schema",
         db_schema,
@@ -347,9 +346,16 @@ def sql_using_llm(cur, question=None, verbose=False) -> Tuple[str, Optional[str]
     _, result = run_external_cmd("llm", *args, capture_output=True)
     click.echo("Received response from the llm command")
     match = re.search(_SQL_CODE_FENCE, result, re.DOTALL)
-    if match:
-        sql = match.group(1).strip()
-    else:
-        sql = ""
-
-    return result, sql
+    sql = match.group(1).strip() if match else ""
+
+    # When verbose, build and return the rendered prompt text
+    prompt_text = None
+    if verbose:
+        # Render the prompt by substituting schema, sample_data, and question
+        prompt_text = PROMPT
+        prompt_text = prompt_text.replace("$db_schema", db_schema)
+        prompt_text = prompt_text.replace("$sample_data", pprint.pformat(sample_data))
+        prompt_text = prompt_text.replace("$question", question or "")
+    if verbose:
+        return result, sql, prompt_text
+    return result, sql, None
diff --git a/litecli/packages/special/main.py b/litecli/packages/special/main.py
index 9544811..285dc2a 100644
--- a/litecli/packages/special/main.py
+++ b/litecli/packages/special/main.py
@@ -3,6 +3,7 @@
 from collections import namedtuple
 
 from . import export
+from enum import Enum
 
 log = logging.getLogger(__name__)
 
@@ -36,12 +37,32 @@ class CommandNotFound(Exception):
     pass
 
 
+class Verbosity(Enum):
+    """Mode for special command invocation: regular, verbose (+), or succinct (-)."""
+
+    REGULAR = "regular"
+    VERBOSE = "verbose"
+    SUCCINCT = "succinct"
+
+
 @export
 def parse_special_command(sql):
-    command, _, arg = sql.partition(" ")
-    verbose = "+" in command
-    command = command.strip().replace("+", "")
-    return (command, verbose, arg.strip())
+    """
+    Parse a special command prefix, extracting the base command name,
+    an invocation mode (regular, verbose, or succinct), and the argument.
+    """
+    raw, _, arg = sql.partition(" ")
+    is_verbose = raw.endswith("+")
+    is_succinct = raw.endswith("-")
+    # strip out any + or - modifiers to get the actual command name
+    command = raw.strip().rstrip("+-")
+    if is_verbose:
+        mode = Verbosity.VERBOSE
+    elif is_succinct:
+        mode = Verbosity.SUCCINCT
+    else:
+        mode = Verbosity.REGULAR
+    return (command, mode, arg.strip())
 
 
 @export
@@ -101,7 +122,7 @@ def execute(cur, sql):
     """Execute a special command and return the results. If the special command
     is not supported a KeyError will be raised.
     """
-    command, verbose, arg = parse_special_command(sql)
+    command, mode, arg = parse_special_command(sql)
 
     if (command not in COMMANDS) and (command.lower() not in COMMANDS):
         raise CommandNotFound
@@ -116,7 +137,7 @@ def execute(cur, sql):
     if special_cmd.arg_type == NO_QUERY:
         return special_cmd.handler()
     elif special_cmd.arg_type == PARSED_QUERY:
-        return special_cmd.handler(cur=cur, arg=arg, verbose=verbose)
+        return special_cmd.handler(cur=cur, arg=arg, verbose=(mode is Verbosity.VERBOSE))
     elif special_cmd.arg_type == RAW_QUERY:
         return special_cmd.handler(cur=cur, query=sql)
 
diff --git a/pyproject.toml b/pyproject.toml
index 8046986..e2f8eef 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -16,6 +16,7 @@ dependencies = [
     "sqlparse>=0.4.4",
     "setuptools", # Required by llm commands to install models
     "pip",
+    "llm>=0.25.0",
 ]
 
 [build-system]
diff --git a/tests/conftest.py b/tests/conftest.py
index c6524ca..35d79b5 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,8 +1,10 @@
 from __future__ import print_function
 
 import os
+
 import pytest
 from utils import create_db, db_connection, drop_tables
+
 import litecli.sqlexecute
 
 
diff --git a/tests/test_llm_special.py b/tests/test_llm_special.py
index 9de29df..2e48b95 100644
--- a/tests/test_llm_special.py
+++ b/tests/test_llm_special.py
@@ -3,22 +3,6 @@
 from litecli.packages.special.llm import handle_llm, FinishIteration, USAGE
 
 
-@patch("litecli.packages.special.llm.initialize_llm")
-@patch("litecli.packages.special.llm.llm", new=None)
-def test_llm_command_without_install(mock_initialize_llm, executor):
-    """
-    Test that handle_llm initializes llm when it is None and raises FinishIteration.
-    """
-    test_text = r"\llm"
-    cur_mock = executor
-
-    with pytest.raises(FinishIteration) as exc_info:
-        handle_llm(test_text, cur_mock)
-
-    mock_initialize_llm.assert_called_once()
-    assert exc_info.value.args[0] is None
-
-
 @patch("litecli.packages.special.llm.llm")
 def test_llm_command_without_args(mock_llm, executor):
     r"""
@@ -61,11 +45,8 @@ def test_llm_command_with_c_flag_and_fenced_sql(mock_run_cmd, mock_llm, executor
 
     result, sql, duration = handle_llm(test_text, executor)
 
-    # We expect the function to return (result, sql), but result might be "" if verbose is not set
-    # By default, `verbose` is false unless text has something like \llm --verbose?
-    # The function code: return result if verbose else "", sql
-    # Our test_text doesn't set verbose => we expect "" for the returned context.
-    assert result == ""
+    # In regular mode, the full LLM response is returned as context
+    assert result == return_text
     assert sql == "SELECT * FROM table;"
     assert isinstance(duration, float)
 
@@ -133,7 +114,7 @@ def test_llm_command_with_prompt(mock_sql_using_llm, mock_ensure_template, mock_
     Should use context, capture output, and call sql_using_llm.
     """
     # Mock out the return from sql_using_llm
-    mock_sql_using_llm.return_value = ("context from LLM", "SELECT 1;")
+    mock_sql_using_llm.return_value = ("context from LLM", "SELECT 1;", None)
     test_text = r"\llm prompt 'Magic happening here?'"
 
     context, sql, duration = handle_llm(test_text, executor)
@@ -144,7 +125,7 @@ def test_llm_command_with_prompt(mock_sql_using_llm, mock_ensure_template, mock_
     # Actually, the question is the entire "prompt 'Magic happening here?'" minus the \llm
     # But in the function we do parse shlex.split.
     mock_sql_using_llm.assert_called()
-    assert context == ""
+    assert context == "context from LLM"
     assert sql == "SELECT 1;"
     assert isinstance(duration, float)
 
@@ -156,14 +137,14 @@ def test_llm_command_question_with_context(mock_sql_using_llm, mock_ensure_templ
     """
     If arg doesn't contain any known command, it's treated as a question => capture output + context.
     """
-    mock_sql_using_llm.return_value = ("You have context!", "SELECT 2;")
+    mock_sql_using_llm.return_value = ("You have context!", "SELECT 2;", None)
     test_text = r"\llm 'Top 10 downloads by size.'"
 
     context, sql, duration = handle_llm(test_text, executor)
 
     mock_ensure_template.assert_called_once()
     mock_sql_using_llm.assert_called()
-    assert context == ""
+    assert context == "You have context!"
     assert sql == "SELECT 2;"
     assert isinstance(duration, float)
 
@@ -175,7 +156,7 @@ def test_llm_command_question_verbose(mock_sql_using_llm, mock_ensure_template,
     r"""
     Invoking \llm+ returns the context and the SQL query.
     """
-    mock_sql_using_llm.return_value = ("Verbose context, oh yeah!", "SELECT 42;")
+    mock_sql_using_llm.return_value = ("Verbose context, oh yeah!", "SELECT 42;", None)
    test_text = r"\llm+ 'Top 10 downloads by size.'"
 
     context, sql, duration = handle_llm(test_text, executor)
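A minimal usage sketch (not part of the patch itself) of how the three modes map onto the `\llm`, `\llm+`, and `\llm-` prefixes, assuming the `Verbosity` enum and `parse_special_command` implementation from `litecli/packages/special/main.py` above:

```python
from litecli.packages.special.main import Verbosity, parse_special_command

# A trailing "-" selects succinct mode: handle_llm drops the explanation
# and returns only the extracted SQL.
command, mode, arg = parse_special_command(r"\llm- 'Top 10 downloads by size.'")
assert command == r"\llm"
assert mode is Verbosity.SUCCINCT
assert arg == "'Top 10 downloads by size.'"

# A trailing "+" selects verbose mode: the rendered prompt is echoed
# before the SQL query and explanation.
_, mode, _ = parse_special_command(r"\llm+ 'Top 10 downloads by size.'")
assert mode is Verbosity.VERBOSE

# No modifier means regular mode: the SQL query plus the explanation.
_, mode, _ = parse_special_command(r"\llm 'Top 10 downloads by size.'")
assert mode is Verbosity.REGULAR
```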