Skip to content

Modify \llm to use either + or - modifiers. #227

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 8 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,12 @@
### Features

* Replace sqlite3 library with [sqlean](https://antonz.org/sqlean/). It's a drop-in replacement for sqlite3.
* The 'llm' library is now a default dependency, not installed on demand.
* The `\llm` command now has three modes. Succinct, Regular and Verbose.

Succinct = `\llm-` - This will return just the sql query. No explanation.
Regular = `\llm` - This will return the sql query and the explanation.
Verbose = `\llm+` - This will print the prompt sent to the LLM and the sql query and the explanation.

### Bug Fixes

Expand Down
94 changes: 50 additions & 44 deletions litecli/packages/special/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,34 +2,29 @@
import io
import logging
import os
import pprint
import re
import shlex
import sys
from runpy import run_module
from typing import Optional, Tuple
from time import time
from typing import Optional, Tuple

import click

try:
import llm
from llm.cli import cli

LLM_CLI_COMMANDS = list(cli.commands.keys())
MODELS = {x.model_id: None for x in llm.get_models()}
except ImportError:
llm = None
cli = None
LLM_CLI_COMMANDS = []
MODELS = {}
import llm
from llm.cli import cli

from . import export
from .main import parse_special_command
from .main import Verbosity, parse_special_command

log = logging.getLogger(__name__)

LLM_TEMPLATE_NAME = "litecli-llm-template"
LLM_CLI_COMMANDS = list(cli.commands.keys())
MODELS = {x.model_id: None for x in llm.get_models()}

def run_external_cmd(cmd, *args, capture_output=False, restart_cli=False, raise_exception=True):

def run_external_cmd(cmd, *args, capture_output=False, restart_cli=False, raise_exception=True) -> Tuple[int, str]:
original_exe = sys.executable
original_args = sys.argv

Expand All @@ -55,6 +50,13 @@ def run_external_cmd(cmd, *args, capture_output=False, restart_cli=False, raise_
raise RuntimeError(buffer.getvalue())
else:
raise RuntimeError(f"Command {cmd} failed with exit code {code}.")
except Exception as e:
code = 1
if raise_exception:
if capture_output:
raise RuntimeError(buffer.getvalue())
else:
raise RuntimeError(f"Command {cmd} failed: {e}")

if restart_cli and code == 0:
os.execv(original_exe, [original_exe] + original_args)
Expand Down Expand Up @@ -171,14 +173,10 @@ def __init__(self, results=None):
```sql
SELECT count(*) FROM table_name;
```
"""


def initialize_llm():
# Initialize the LLM library.
if click.confirm("This feature requires additional libraries. Install LLM library?", default=True):
click.echo("Installing LLM library. Please wait...")
run_external_cmd("pip", "install", "--quiet", "llm", restart_cli=True)
If the question cannot be answered based on the database schema respond with "I
cannot answer that question" in a sql code fence.
"""


def ensure_litecli_template(replace=False):
Expand All @@ -187,11 +185,11 @@ def ensure_litecli_template(replace=False):
"""
if not replace:
# Check if it already exists.
code, _ = run_external_cmd("llm", "templates", "show", "litecli", capture_output=True, raise_exception=False)
code, _ = run_external_cmd("llm", "templates", "show", LLM_TEMPLATE_NAME, capture_output=True, raise_exception=False)
if code == 0: # Template already exists. No need to create it.
return

run_external_cmd("llm", PROMPT, "--save", "litecli")
run_external_cmd("llm", PROMPT, "--save", LLM_TEMPLATE_NAME)
return


Expand All @@ -205,12 +203,10 @@ def handle_llm(text, cur) -> Tuple[str, Optional[str], float]:
FinishIteration() which will be caught by the main loop AND print any
output that was supplied (or None).
"""
_, verbose, arg = parse_special_command(text)

# LLM is not installed.
if llm is None:
initialize_llm()
raise FinishIteration(None)
# Determine invocation mode: regular, verbose (+), or succinct (-)
_, mode, arg = parse_special_command(text)
is_verbose = mode is Verbosity.VERBOSE
is_succinct = mode is Verbosity.SUCCINCT

if not arg.strip(): # No question provided. Print usage and bail.
output = [(None, None, None, USAGE)]
Expand Down Expand Up @@ -268,20 +264,23 @@ def handle_llm(text, cur) -> Tuple[str, Optional[str], float]:
output = [(None, None, None, result)]
raise FinishIteration(output)

return result if verbose else "", sql, end - start
context = "" if is_succinct else result
return context, sql, end - start
else:
run_external_cmd("llm", *args, restart_cli=restart)
raise FinishIteration(None)

try:
ensure_litecli_template()
# Measure end to end llm command invocation.
# This measures the internal DB command to pull the schema and llm command
# Measure end-to-end LLM command invocation (schema gathering and LLM call)
start = time()
context, sql = sql_using_llm(cur=cur, question=arg, verbose=verbose)
result, sql, prompt_text = sql_using_llm(cur=cur, question=arg, verbose=is_verbose)
end = time()
if not verbose:
context = ""
context = "" if is_succinct else result
if is_verbose and prompt_text is not None:
click.echo("LLM Prompt:")
click.echo(prompt_text)
click.echo("---")
return context, sql, end - start
except Exception as e:
# Something went wrong. Raise an exception and bail.
Expand All @@ -298,7 +297,7 @@ def is_llm_command(command) -> bool:


@export
def sql_using_llm(cur, question=None, verbose=False) -> Tuple[str, Optional[str]]:
def sql_using_llm(cur, question=None, verbose=False) -> Tuple[str, Optional[str], Optional[str]]:
if cur is None:
raise RuntimeError("Connect to a database and try again.")
schema_query = """
Expand Down Expand Up @@ -331,7 +330,7 @@ def sql_using_llm(cur, question=None, verbose=False) -> Tuple[str, Optional[str]

args = [
"--template",
"litecli",
LLM_TEMPLATE_NAME,
"--param",
"db_schema",
db_schema,
Expand All @@ -347,9 +346,16 @@ def sql_using_llm(cur, question=None, verbose=False) -> Tuple[str, Optional[str]
_, result = run_external_cmd("llm", *args, capture_output=True)
click.echo("Received response from the llm command")
match = re.search(_SQL_CODE_FENCE, result, re.DOTALL)
if match:
sql = match.group(1).strip()
else:
sql = ""

return result, sql
sql = match.group(1).strip() if match else ""

# When verbose, build and return the rendered prompt text
prompt_text = None
if verbose:
# Render the prompt by substituting schema, sample_data, and question
prompt_text = PROMPT
prompt_text = prompt_text.replace("$db_schema", db_schema)
prompt_text = prompt_text.replace("$sample_data", pprint.pformat(sample_data))
prompt_text = prompt_text.replace("$question", question or "")
if verbose:
return result, sql, prompt_text
return result, sql, None
33 changes: 27 additions & 6 deletions litecli/packages/special/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
from collections import namedtuple

from . import export
from enum import Enum

log = logging.getLogger(__name__)

Expand Down Expand Up @@ -36,12 +37,32 @@ class CommandNotFound(Exception):
pass


class Verbosity(Enum):
"""Mode for special command invocation: regular, verbose (+), or succinct (-)."""

REGULAR = "regular"
VERBOSE = "verbose"
SUCCINCT = "succinct"


@export
def parse_special_command(sql):
command, _, arg = sql.partition(" ")
verbose = "+" in command
command = command.strip().replace("+", "")
return (command, verbose, arg.strip())
"""
Parse a special command prefix, extracting the base command name,
an invocation mode (regular, verbose, or succinct), and the argument.
"""
raw, _, arg = sql.partition(" ")
is_verbose = raw.endswith("+")
is_succinct = raw.endswith("-")
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should we validate for llm+- and other invalid/unsupported commands?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What did you have in mind? I could see someone calling \llm* Question or \llm= Question. It'll just ignore unknown chars and treat them as regular verbosity.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I want to cover two cases: more than 1 special character and unknown special character. Ideally, incorrect usage should not pass silently. What do you think?

# strip out any + or - modifiers to get the actual command name
command = raw.strip().rstrip("+-")
Copy link
Contributor

@kracekumar kracekumar May 27, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We may need to be strict here to check command == llm.

Following are some cases

>>> raw ="llm+-"
>>> raw.strip().rstrip("+-")
'llm'
>>> raw ="llm*"
>>> raw.strip().rstrip("+-")
'llm*'

if is_verbose:
mode = Verbosity.VERBOSE
elif is_succinct:
mode = Verbosity.SUCCINCT
else:
mode = Verbosity.REGULAR
return (command, mode, arg.strip())


@export
Expand Down Expand Up @@ -101,7 +122,7 @@ def execute(cur, sql):
"""Execute a special command and return the results. If the special command
is not supported a KeyError will be raised.
"""
command, verbose, arg = parse_special_command(sql)
command, mode, arg = parse_special_command(sql)

if (command not in COMMANDS) and (command.lower() not in COMMANDS):
raise CommandNotFound
Expand All @@ -116,7 +137,7 @@ def execute(cur, sql):
if special_cmd.arg_type == NO_QUERY:
return special_cmd.handler()
elif special_cmd.arg_type == PARSED_QUERY:
return special_cmd.handler(cur=cur, arg=arg, verbose=verbose)
return special_cmd.handler(cur=cur, arg=arg, verbose=(mode is Verbosity.VERBOSE))
elif special_cmd.arg_type == RAW_QUERY:
return special_cmd.handler(cur=cur, query=sql)

Expand Down
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ dependencies = [
"sqlparse>=0.4.4",
"setuptools", # Required by llm commands to install models
"pip",
"llm>=0.25.0",
]

[build-system]
Expand Down
2 changes: 2 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
from __future__ import print_function

import os

import pytest
from utils import create_db, db_connection, drop_tables

import litecli.sqlexecute


Expand Down
33 changes: 7 additions & 26 deletions tests/test_llm_special.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,22 +3,6 @@
from litecli.packages.special.llm import handle_llm, FinishIteration, USAGE


@patch("litecli.packages.special.llm.initialize_llm")
@patch("litecli.packages.special.llm.llm", new=None)
def test_llm_command_without_install(mock_initialize_llm, executor):
"""
Test that handle_llm initializes llm when it is None and raises FinishIteration.
"""
test_text = r"\llm"
cur_mock = executor

with pytest.raises(FinishIteration) as exc_info:
handle_llm(test_text, cur_mock)

mock_initialize_llm.assert_called_once()
assert exc_info.value.args[0] is None


@patch("litecli.packages.special.llm.llm")
def test_llm_command_without_args(mock_llm, executor):
r"""
Expand Down Expand Up @@ -61,11 +45,8 @@ def test_llm_command_with_c_flag_and_fenced_sql(mock_run_cmd, mock_llm, executor

result, sql, duration = handle_llm(test_text, executor)

# We expect the function to return (result, sql), but result might be "" if verbose is not set
# By default, `verbose` is false unless text has something like \llm --verbose?
# The function code: return result if verbose else "", sql
# Our test_text doesn't set verbose => we expect "" for the returned context.
assert result == ""
# In regular mode, context is returned
assert result == return_text
assert sql == "SELECT * FROM table;"
assert isinstance(duration, float)

Expand Down Expand Up @@ -133,7 +114,7 @@ def test_llm_command_with_prompt(mock_sql_using_llm, mock_ensure_template, mock_
Should use context, capture output, and call sql_using_llm.
"""
# Mock out the return from sql_using_llm
mock_sql_using_llm.return_value = ("context from LLM", "SELECT 1;")
mock_sql_using_llm.return_value = ("context from LLM", "SELECT 1;", None)

test_text = r"\llm prompt 'Magic happening here?'"
context, sql, duration = handle_llm(test_text, executor)
Expand All @@ -144,7 +125,7 @@ def test_llm_command_with_prompt(mock_sql_using_llm, mock_ensure_template, mock_
# Actually, the question is the entire "prompt 'Magic happening here?'" minus the \llm
# But in the function we do parse shlex.split.
mock_sql_using_llm.assert_called()
assert context == ""
assert context == "context from LLM"
assert sql == "SELECT 1;"
assert isinstance(duration, float)

Expand All @@ -156,14 +137,14 @@ def test_llm_command_question_with_context(mock_sql_using_llm, mock_ensure_templ
"""
If arg doesn't contain any known command, it's treated as a question => capture output + context.
"""
mock_sql_using_llm.return_value = ("You have context!", "SELECT 2;")
mock_sql_using_llm.return_value = ("You have context!", "SELECT 2;", None)

test_text = r"\llm 'Top 10 downloads by size.'"
context, sql, duration = handle_llm(test_text, executor)

mock_ensure_template.assert_called_once()
mock_sql_using_llm.assert_called()
assert context == ""
assert context == "You have context!"
assert sql == "SELECT 2;"
assert isinstance(duration, float)

Expand All @@ -175,7 +156,7 @@ def test_llm_command_question_verbose(mock_sql_using_llm, mock_ensure_template,
r"""
Invoking \llm+ returns the context and the SQL query.
"""
mock_sql_using_llm.return_value = ("Verbose context, oh yeah!", "SELECT 42;")
mock_sql_using_llm.return_value = ("Verbose context, oh yeah!", "SELECT 42;", None)

test_text = r"\llm+ 'Top 10 downloads by size.'"
context, sql, duration = handle_llm(test_text, executor)
Expand Down
Loading