Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 2 additions & 23 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# ShellGPT
A command-line productivity tool powered by AI large language models (LLMs). This command-line tool offers streamlined generation of **shell commands, code snippets, and documentation**, eliminating the need for external resources (like Google search). It supports Linux, macOS, and Windows, and is compatible with all major shells, such as PowerShell, CMD, Bash, Zsh, etc.

https://github.com/TheR1D/shell_gpt/assets/16740832/9197283c-db6a-4b46-bfea-3eb776dd9093
https://github.com/TheR1D/shell_gpt/assets/16740832/721ddb19-97e7-428f-a0ee-107d027ddd59

## Installation
```shell
Expand Down Expand Up @@ -290,28 +290,7 @@ The snippet of code you've provided is written in Python. It prompts the user...
sgpt --install-functions
```

ShellGPT has a convenient way to define functions and use them. In order to create your custom function, navigate to `~/.config/shell_gpt/functions` and create a new .py file with the function name. Inside this file, you can define your function using the following syntax:
```python
# execute_shell_command.py
import subprocess
from pydantic import Field
from instructor import OpenAISchema


class Function(OpenAISchema):
"""
Executes a shell command and returns the output (result).
"""
shell_command: str = Field(..., example="ls -la", descriptions="Shell command to execute.")

class Config:
title = "execute_shell_command"

@classmethod
def execute(cls, shell_command: str) -> str:
result = subprocess.run(shell_command.split(), capture_output=True, text=True)
return f"Exit code: {result.returncode}, Output:\n{result.stdout}"
```
ShellGPT has a convenient way to define functions and use them. In order to create your custom function, navigate to `~/.config/shell_gpt/functions` and create a new .py file with the function name. Inside this file, you can define your function using this [example](https://github.com/TheR1D/shell_gpt/blob/main/sgpt/llm_functions/common/execute_shell.py).

The docstring comment inside the class will be passed to the OpenAI API as the description of the function, along with the parameter descriptions. The `execute` function will be called if the LLM decides to use your function. In this case, we are allowing the LLM to execute any shell commands on our system. Since we are returning the output of the command, the LLM will be able to analyze it and decide if it is a good fit for the prompt. Here is an example of how the function might be executed by the LLM:
```shell
Expand Down
4 changes: 1 addition & 3 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -24,12 +24,10 @@ classifiers = [
"Programming Language :: Python :: 3.13",
]
dependencies = [
"openai >= 1.34.0, < 2.0.0",
"openai >= 2.0.0, < 3.0.0",
"typer >= 0.7.0, < 1.0.0",
"click >= 7.1.1, < 9.0.0",
"rich >= 13.1.0, < 14.0.0",
"distro >= 1.8.0, < 2.0.0",
"instructor >= 1.0.0, < 2.0.0",
'pyreadline3 >= 3.4.1, < 4.0.0; sys_platform == "win32"',
"prompt_toolkit >= 3.0.51",
]
Expand Down
2 changes: 1 addition & 1 deletion sgpt/__version__.py
Original file line number Diff line number Diff line change
@@ -1 +1 @@
__version__ = "1.4.5"
__version__ = "1.5.0"
9 changes: 5 additions & 4 deletions sgpt/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
import sys

import typer
from click import BadArgumentUsage
from click import UsageError
from click.types import Choice
from prompt_toolkit import PromptSession

Expand Down Expand Up @@ -187,15 +187,15 @@ def main(
ChatHandler.show_messages(show_chat, md)

if sum((shell, describe_shell, code)) > 1:
raise BadArgumentUsage(
raise UsageError(
"Only one of --shell, --describe-shell, and --code options can be used at a time."
)

if chat and repl:
raise BadArgumentUsage("--chat and --repl options cannot be used together.")
raise UsageError("--chat and --repl options cannot be used together.")

if editor and stdin_passed:
raise BadArgumentUsage("--editor option cannot be used with stdin input.")
raise UsageError("--editor option cannot be used with stdin input.")

if editor:
prompt = get_edited_prompt()
Expand Down Expand Up @@ -248,6 +248,7 @@ def main(
show_choices=False,
show_default=False,
)

if option in ("e", "y"):
# "y" option is for keeping compatibility with old version.
run_command(full_completion)
Expand Down
28 changes: 11 additions & 17 deletions sgpt/function.py
Original file line number Diff line number Diff line change
@@ -1,18 +1,19 @@
import importlib.util
import sys
from abc import ABCMeta
from pathlib import Path
from typing import Any, Callable, Dict, List

from pydantic import BaseModel

from .config import cfg


class Function:
def __init__(self, path: str):
module = self._read(path)
self._function = module.Function.execute
self._openai_schema = module.Function.openai_schema
self._name = self._openai_schema["name"]
self._openai_schema = module.Function.openai_schema()
self._name = self._openai_schema["function"]["name"]

@property
def name(self) -> str:
Expand All @@ -34,13 +35,17 @@ def _read(cls, path: str) -> Any:
sys.modules[module_name] = module
spec.loader.exec_module(module) # type: ignore

if not isinstance(module.Function, ABCMeta):
if not issubclass(module.Function, BaseModel):
raise TypeError(
f"Function {module_name} must be a subclass of pydantic.BaseModel"
)
if not hasattr(module.Function, "execute"):
raise TypeError(
f"Function {module_name} must have a 'execute' static method"
f"Function {module_name} must have an 'execute' classmethod"
)
if not hasattr(module.Function, "openai_schema"):
raise TypeError(
f"Function {module_name} must have an 'openai_schema' classmethod"
)

return module
Expand All @@ -59,15 +64,4 @@ def get_function(name: str) -> Callable[..., Any]:


def get_openai_schemas() -> List[Dict[str, Any]]:
transformed_schemas = []
for function in functions:
schema = {
"type": "function",
"function": {
"name": function.openai_schema["name"],
"description": function.openai_schema.get("description", ""),
"parameters": function.openai_schema.get("parameters", {}),
},
}
transformed_schemas.append(schema)
return transformed_schemas
return [function.openai_schema for function in functions]
8 changes: 3 additions & 5 deletions sgpt/handlers/chat_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
from typing import Any, Callable, Dict, Generator, List, Optional

import typer
from click import BadArgumentUsage
from click import BadParameter, UsageError
from rich.console import Console
from rich.markdown import Markdown

Expand Down Expand Up @@ -151,15 +151,13 @@ def validate(self) -> None:
if self.initiated:
chat_role_name = self.role.get_role_name(self.initial_message(self.chat_id))
if not chat_role_name:
raise BadArgumentUsage(
f'Could not determine chat role of "{self.chat_id}"'
)
raise BadParameter(f'Could not determine chat role of "{self.chat_id}"')
if self.role.name == DefaultRoles.DEFAULT.value:
# If user didn't pass chat mode, we will use the one that was used to initiate the chat.
self.role = SystemRole.get(chat_role_name)
else:
if not self.is_same_role:
raise BadArgumentUsage(
raise UsageError(
f'Cant change chat role to "{self.role.name}" '
f'since it was initiated as "{chat_role_name}" chat.'
)
Expand Down
39 changes: 30 additions & 9 deletions sgpt/handlers/handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,14 +59,22 @@ def make_messages(self, prompt: str) -> List[Dict[str, str]]:
def handle_function_call(
self,
messages: List[dict[str, Any]],
tool_call_id: str,
name: str,
arguments: str,
) -> Generator[str, None, None]:
# Add assistant message with tool call
messages.append(
{
"role": "assistant",
"content": "",
"function_call": {"name": name, "arguments": arguments},
"content": None,
"tool_calls": [
{
"id": tool_call_id,
"type": "function",
"function": {"name": name, "arguments": arguments},
}
],
}
)

Expand All @@ -80,7 +88,11 @@ def handle_function_call(
result = get_function(name)(**dict_args)
if cfg.get("SHOW_FUNCTIONS_OUTPUT") == "true":
yield f"```text\n{result}\n```\n"
messages.append({"role": "function", "content": result, "name": name})

# Add tool response message
messages.append(
{"role": "tool", "content": result, "tool_call_id": tool_call_id}
)

@cache
def get_completion(
Expand All @@ -91,7 +103,7 @@ def get_completion(
messages: List[Dict[str, Any]],
functions: Optional[List[Dict[str, str]]],
) -> Generator[str, None, None]:
name = arguments = ""
tool_call_id = name = arguments = ""
is_shell_role = self.role.name == DefaultRoles.SHELL.value
is_code_role = self.role.name == DefaultRoles.CODE.value
is_dsc_shell_role = self.role.name == DefaultRoles.DESCRIBE_SHELL.value
Expand Down Expand Up @@ -124,12 +136,21 @@ def get_completion(
)
if tool_calls:
for tool_call in tool_calls:
if tool_call.function.name:
name = tool_call.function.name
if tool_call.function.arguments:
arguments += tool_call.function.arguments
if use_litellm:
# TODO: test.
tool_call_id = tool_call.get("id") or tool_call_id
name = tool_call.get("function", {}).get("name") or name
arguments += tool_call.get("function", {}).get(
"arguments", ""
)
else:
tool_call_id = tool_call.id or tool_call_id
name = tool_call.function.name or name
arguments += tool_call.function.arguments or ""
if chunk.choices[0].finish_reason == "tool_calls":
yield from self.handle_function_call(messages, name, arguments)
yield from self.handle_function_call(
messages, tool_call_id, name, arguments
)
yield from self.get_completion(
model=model,
temperature=temperature,
Expand Down
30 changes: 22 additions & 8 deletions sgpt/llm_functions/common/execute_shell.py
Original file line number Diff line number Diff line change
@@ -1,22 +1,19 @@
import subprocess
from typing import Any, Dict

from instructor import OpenAISchema
from pydantic import Field
from pydantic import BaseModel, Field


class Function(OpenAISchema):
class Function(BaseModel):
"""
Executes a shell command and returns the output (result).
"""

shell_command: str = Field(
...,
example="ls -la",
descriptions="Shell command to execute.",
)

class Config:
title = "execute_shell_command"
description="Shell command to execute.",
) # type: ignore

@classmethod
def execute(cls, shell_command: str) -> str:
Expand All @@ -26,3 +23,20 @@ def execute(cls, shell_command: str) -> str:
output, _ = process.communicate()
exit_code = process.returncode
return f"Exit code: {exit_code}, Output:\n{output.decode()}"

@classmethod
def openai_schema(cls) -> Dict[str, Any]:
    """Build the OpenAI tool (function-calling) schema for this function.

    Returns a dict in the Chat Completions "tools" format:
    ``{"type": "function", "function": {"name", "description", "parameters"}}``.
    The class docstring becomes the function description and the Pydantic
    JSON schema supplies the parameter definitions.
    """
    import inspect  # local import: only needed when the schema is built

    schema = cls.model_json_schema()
    # cleandoc (rather than plain .strip()) also removes the common leading
    # indentation of a triple-quoted docstring, so multi-line descriptions
    # reach the API without stray indentation on continuation lines.
    description = inspect.cleandoc(cls.__doc__) if cls.__doc__ else ""
    return {
        "type": "function",
        "function": {
            "name": "execute_shell_command",
            "description": description,
            "parameters": {
                "type": "object",
                "properties": schema.get("properties", {}),
                "required": schema.get("required", []),
            },
        },
    }
32 changes: 23 additions & 9 deletions sgpt/llm_functions/mac/apple_script.py
Original file line number Diff line number Diff line change
@@ -1,23 +1,20 @@
import subprocess
from typing import Any, Dict

from instructor import OpenAISchema
from pydantic import Field
from pydantic import BaseModel, Field


class Function(OpenAISchema):
class Function(BaseModel):
"""
Executes Apple Script on macOS and returns the output (result).
Can be used for actions like: draft (prepare) an email, show calendar events, create a note.
"""

apple_script: str = Field(
...,
default=...,
example='tell application "Finder" to get the name of every disk',
descriptions="Apple Script to execute.",
)

class Config:
title = "execute_apple_script"
description="Apple Script to execute.",
) # type: ignore

@classmethod
def execute(cls, apple_script):
Expand All @@ -31,3 +28,20 @@ def execute(cls, apple_script):
return f"Output: {output}"
except Exception as e:
return f"Error: {e}"

@classmethod
def openai_schema(cls) -> Dict[str, Any]:
    """Build the OpenAI tool (function-calling) schema for this function.

    Returns a dict in the Chat Completions "tools" format:
    ``{"type": "function", "function": {"name", "description", "parameters"}}``.
    The class docstring becomes the function description and the Pydantic
    JSON schema supplies the parameter definitions.
    """
    import inspect  # local import: only needed when the schema is built

    schema = cls.model_json_schema()
    # The class docstring spans multiple lines; cleandoc strips its common
    # leading indentation, whereas .strip() would leave the second line
    # indented inside the description string sent to the API.
    description = inspect.cleandoc(cls.__doc__) if cls.__doc__ else ""
    return {
        "type": "function",
        "function": {
            "name": "execute_apple_script",
            "description": description,
            "parameters": {
                "type": "object",
                "properties": schema.get("properties", {}),
                "required": schema.get("required", []),
            },
        },
    }
4 changes: 2 additions & 2 deletions sgpt/role.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
from typing import Dict, Optional

import typer
from click import BadArgumentUsage
from click import UsageError
from distro import name as distro_name

from .config import cfg
Expand Down Expand Up @@ -76,7 +76,7 @@ def create_defaults(cls) -> None:
def get(cls, name: str) -> "SystemRole":
file_path = cls.storage / f"{name}.json"
if not file_path.exists():
raise BadArgumentUsage(f'Role "{name}" not found.')
raise UsageError(f'Role "{name}" not found.')
return cls(**json.loads(file_path.read_text()))

@classmethod
Expand Down