Skip to content
This repository was archived by the owner on Mar 26, 2025. It is now read-only.

Commit 782ea77

Browse files
authored
Merge pull request #124 from openchatai/custom_planner_3
Custom API Planner - Enhancements and Adjustments
2 parents 774a8f2 + caa37a8 commit 782ea77

20 files changed

+426
-286
lines changed

llm-server/api_caller/base.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55

66

77
def try_to_match_and_call_api_endpoint(
8-
swagger_spec: OpenAPISpec, text: str, headers: Dict[str, str]
8+
swagger_spec: OpenAPISpec, text: str, headers: Dict[str, str]
99
) -> str:
1010
openapi_call_chain = get_openapi_chain(swagger_spec, verbose=True, headers=headers)
1111

llm-server/readme.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -63,6 +63,8 @@ To install Mypy, which is a static type checker for Python, follow these steps:
6363
MONGODB_URL=mongodb://localhost:27017/opencopilot
6464
QDRANT_URL=http://localhost:6333
6565
STORE=QDRANT
66+
QDRANT_API_KEY= # When using cloud hosted version
67+
SCORE_THRESHOLD=0.95 # When using pre-defined workflows, the minimum confidence score at which OpenCopilot should select your workflow. If the score falls below this threshold, the planner will design its own workflow
6668
```
6769

6870
Ensure you replace the placeholders with your actual API keys and configuration settings.

llm-server/routes/root_service.py

Lines changed: 29 additions & 56 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@
33
from typing import Dict, Any, cast
44

55
import logging
6-
import requests
76
import traceback
87
from dotenv import load_dotenv
98
from langchain.chains.openai_functions import create_structured_output_chain
@@ -13,16 +12,19 @@
1312
from models.models import AiResponseFormat
1413
from prompts.base import api_base_prompt, non_api_base_prompt
1514
from routes.workflow.typings.run_workflow_input import WorkflowData
16-
from routes.workflow.workflow_service import run_workflow
17-
from utils.detect_multiple_intents import hasSingleIntent
15+
from routes.workflow.utils import (
16+
run_workflow,
17+
check_workflow_in_store,
18+
fetch_swagger_text,
19+
hasSingleIntent,
20+
create_workflow_from_operation_ids,
21+
)
22+
from bson import ObjectId
1823
import os
1924
from dotenv import load_dotenv
2025
from typing import Dict, Any, cast
2126
from utils.db import Database
22-
from utils.detect_multiple_intents import hasSingleIntent
2327
import json
24-
import yaml
25-
from yaml.parser import ParserError
2628
from api_caller.base import try_to_match_and_call_api_endpoint
2729

2830
db_instance = Database()
@@ -40,48 +42,6 @@
4042
FAILED_TO_CALL_API_ENDPOINT = "Failed to call or map API endpoint"
4143

4244

43-
def fetch_swagger_text(swagger_url: str) -> str:
44-
if swagger_url.startswith("https://"):
45-
response = requests.get(swagger_url)
46-
if response.status_code == 200:
47-
try:
48-
# Try parsing the content as JSON
49-
json_content = json.loads(response.text)
50-
return json.dumps(json_content, indent=2)
51-
except json.JSONDecodeError:
52-
try:
53-
# Try parsing the content as YAML
54-
yaml_content = yaml.safe_load(response.text)
55-
if isinstance(yaml_content, dict):
56-
return json.dumps(yaml_content, indent=2)
57-
else:
58-
raise Exception("Invalid YAML content")
59-
except ParserError:
60-
raise Exception("Failed to parse content as JSON or YAML")
61-
62-
raise Exception("Failed to fetch Swagger content")
63-
64-
try:
65-
with open(shared_folder + swagger_url, "r") as file:
66-
content = file.read()
67-
try:
68-
# Try parsing the content as JSON
69-
json_content = json.loads(content)
70-
return json.dumps(json_content, indent=2)
71-
except json.JSONDecodeError:
72-
try:
73-
# Try parsing the content as YAML
74-
yaml_content = yaml.safe_load(content)
75-
if isinstance(yaml_content, dict):
76-
return json.dumps(yaml_content, indent=2)
77-
else:
78-
raise Exception("Invalid YAML content")
79-
except ParserError:
80-
raise Exception("Failed to parse content as JSON or YAML")
81-
except FileNotFoundError:
82-
raise Exception("File not found")
83-
84-
8545
def handle_request(data: Dict[str, Any]) -> Any:
8646
text: str = cast(str, data.get("text"))
8747
swagger_url = cast(str, data.get("swagger_url", ""))
@@ -99,7 +59,7 @@ def handle_request(data: Dict[str, Any]) -> Any:
9959
if not locals()[required_field]:
10060
raise Exception(error_msg)
10161

102-
swagger_doc = mongo.swagger_files.find_one(
62+
swagger_doc: Dict[str, Any] = mongo.swagger_files.find_one(
10363
{"meta.swagger_url": swagger_url}, {"meta": 0, "_id": 0}
10464
) or json.loads(fetch_swagger_text(swagger_url))
10565

@@ -114,9 +74,26 @@ def handle_request(data: Dict[str, Any]) -> Any:
11474
"[OpenCopilot] Apparently, the user request require calling more than single API endpoint "
11575
"to get the job done"
11676
)
77+
78+
# check workflow in mongodb, if present use that, else ask planner to create a workflow based on summaries
79+
# then call run_workflow on that
80+
(document, score) = check_workflow_in_store(text, swagger_url)
81+
82+
_workflow = None
83+
if document:
84+
_workflow = mongo.workflows.find_one(
85+
{"_id": ObjectId(document.metadata["workflow_id"])}
86+
)
87+
else:
88+
_workflow = create_workflow_from_operation_ids(
89+
bot_response.ids, SWAGGER_SPEC=swagger_doc
90+
)
11791
return run_workflow(
118-
WorkflowData(text, headers, server_base_url, swagger_url), swagger_doc
92+
_workflow,
93+
swagger_doc,
94+
WorkflowData(text, headers, server_base_url, swagger_url),
11995
)
96+
12097
elif len(bot_response.ids) == 0:
12198
logging.info("[OpenCopilot] The user request doesnot require an api call")
12299
return {"response": bot_response.bot_message}
@@ -125,9 +102,7 @@ def handle_request(data: Dict[str, Any]) -> Any:
125102
logging.info(
126103
"[OpenCopilot] The user request can be handled in single API call"
127104
)
128-
raise "Falling back to planner"
129-
# else:
130-
# return {"": k}
105+
131106
except Exception as e:
132107
logging.info(
133108
"[OpenCopilot] Something went wrong when try to get how many calls is required"
@@ -145,9 +120,7 @@ def handle_request(data: Dict[str, Any]) -> Any:
145120
)
146121
json_output = try_to_match_and_call_api_endpoint(swagger_spec, text, headers)
147122

148-
formatted_response = json.dumps(
149-
json_output, indent=4
150-
) # Indent the JSON with 4 spaces
123+
formatted_response = json.dumps(json_output, indent=4)
151124
logging.info(
152125
"[OpenCopilot] We were able to match and call the API endpoint, the response was: {}".format(
153126
formatted_response
Lines changed: 29 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,12 @@
11
import os
2-
from langchain.prompts import PromptTemplate
3-
from langchain.chains import LLMChain
2+
from langchain.schema import AIMessage, HumanMessage, SystemMessage
3+
from langchain.chat_models import ChatOpenAI
44
from utils.get_llm import get_llm
55

66
from typing import Any
77
from routes.workflow.extractors.extract_json import extract_json_payload
88
from custom_types.t_json import JsonData
9+
import logging
910

1011
openai_api_key = os.getenv("OPENAI_API_KEY")
1112
llm = get_llm()
@@ -14,47 +15,33 @@
1415
def gen_body_from_schema(
1516
body_schema: str, text: str, prev_api_response: str, example: str
1617
) -> Any:
17-
_DEFAULT_TEMPLATE = """To enable a substantially intelligent language model to execute a series of APIs sequentially, the following essential details are necessary to gather information needed for the next API call:
18-
1. Initial input when starting the flow: `{text}`
19-
2. Previous API responses: `{prev_api_response}`
20-
3. A JSON response schema that defines the expected format: `{body_schema}`
21-
22-
Try to adhere to this sample api payload as much as possible: ```{example}```
23-
The JSON payload, enclosed within triple backticks on both sides, strictly conforming to the specified "type/format" as outlined in the schema is as follows:
24-
"""
25-
26-
PROMPT = PromptTemplate(
27-
input_variables=[
28-
"text",
29-
"body_schema",
30-
"prev_api_response",
31-
"example",
32-
],
33-
template=_DEFAULT_TEMPLATE,
18+
chat = ChatOpenAI(
19+
openai_api_key=os.getenv("OPENAI_API_KEY"),
20+
model="gpt-3.5-turbo-16k",
21+
temperature=0,
3422
)
3523

36-
PROMPT.format(
37-
prev_api_response=prev_api_response,
38-
body_schema=body_schema,
39-
text=text,
40-
example=example,
24+
messages = [
25+
SystemMessage(
26+
content="You are an intelligent machine learning model that can produce REST API's body in json format, given the json schema, dummy json payload, user input, data from previous api calls."
27+
),
28+
HumanMessage(content="Json Schema: {}".format(body_schema)),
29+
HumanMessage(content="Dummy json payload: {}".format(example)),
30+
HumanMessage(content="User input: {}".format(text)),
31+
HumanMessage(content="prev api responses: {}".format(prev_api_response)),
32+
HumanMessage(
33+
content="Given the provided information, generate the appropriate JSON payload to use as body for the API request"
34+
),
35+
]
36+
result = chat(messages)
37+
38+
logging.info("[OpenCopilot] LLM Body Response: {}".format(result.content))
39+
40+
d: Any = extract_json_payload(result.content)
41+
logging.info(
42+
"[OpenCopilot] Parsed the json payload: {}, context: {}".format(
43+
d, "gen_body_from_schema"
44+
)
4145
)
4246

43-
chain = LLMChain(
44-
llm=llm,
45-
prompt=PROMPT,
46-
# memory=memory,
47-
verbose=True,
48-
)
49-
json_string = chain.run(
50-
{
51-
"text": text,
52-
"body_schema": body_schema,
53-
"prev_api_response": prev_api_response,
54-
"example": example,
55-
}
56-
)
57-
58-
response = extract_json_payload(json_string)
59-
60-
return response
47+
return d
Lines changed: 29 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,11 @@
11
import os
2-
from langchain.prompts import PromptTemplate
3-
from langchain.chains import LLMChain
2+
from langchain.chat_models import ChatOpenAI
43
from routes.workflow.extractors.extract_json import extract_json_payload
54
from utils.get_llm import get_llm
65
from custom_types.t_json import JsonData
7-
from typing import Optional
6+
from typing import Optional, Any
7+
import logging
8+
from langchain.schema import HumanMessage, SystemMessage
89

910
openai_api_key = os.getenv("OPENAI_API_KEY")
1011
llm = get_llm()
@@ -13,49 +14,32 @@
1314
def gen_params_from_schema(
1415
param_schema: str, text: str, prev_resp: str
1516
) -> Optional[JsonData]:
16-
"""Extracts API parameters from a schema based on user text and previous response.
17-
18-
Args:
19-
param_schema (JsonData): A snippet of the OpenAPI parameter schema relevant to this operation.
20-
text (str): The original user text query.
21-
prev_resp (str): The previous API response.
22-
23-
Returns:
24-
Optional[JsonData]: The extracted JSON parameters, if successful.
25-
26-
This function constructs a prompt with the given inputs and passes it to
27-
an LLM to generate a JSON string containing the parameters. It then parses
28-
this to extract a JSON payload matching the schema structure.
29-
"""
30-
31-
_DEFAULT_TEMPLATE = """In order to facilitate the sequential execution of a highly intelligent language model with a series of APIs, we furnish the vital information required for executing the next API call.
32-
33-
The initial input at the onset of the process: {text}
34-
The responses obtained from previous API calls: {prev_resp}
35-
A schema for request parameters that defines the expected format: {param_schema}
36-
37-
The JSON payload, which is used to represent the query parameters and is constructed using the initial input and previous API responses, must be enclosed within triple backticks on both sides. It must strictly adhere to the specified "type/format" guidelines laid out in the schema, and the structure is as follows:"""
38-
39-
PROMPT = PromptTemplate(
40-
input_variables=["prev_resp", "text", "param_schema"],
41-
template=_DEFAULT_TEMPLATE,
42-
)
43-
44-
PROMPT.format(
45-
prev_resp=prev_resp,
46-
text=text,
47-
param_schema=param_schema,
17+
chat = ChatOpenAI(
18+
openai_api_key=os.getenv("OPENAI_API_KEY"),
19+
model="gpt-3.5-turbo-16k",
20+
temperature=0,
4821
)
4922

50-
chain = LLMChain(llm=llm, prompt=PROMPT, verbose=True)
51-
json_string = chain.run(
52-
{
53-
"param_schema": param_schema,
54-
"text": text,
55-
"prev_resp": prev_resp,
56-
}
23+
messages = [
24+
SystemMessage(
25+
content="You are an intelligent machine learning model that can produce REST API's params / query params in json format, given the json schema, user input, data from previous api calls."
26+
),
27+
HumanMessage(content="Json Schema: {}".format(param_schema)),
28+
HumanMessage(content="User input: {}".format(text)),
29+
HumanMessage(content="prev api responses: {}".format(prev_resp)),
30+
HumanMessage(
31+
content="Based on the information provided, construct a valid parameter object to be used with python requests library. In cases where user input doesnot contain information for a query, DO NOT add that specific query parameter to the output. "
32+
),
33+
]
34+
result = chat(messages)
35+
36+
logging.info("[OpenCopilot] LLM Body Response: {}".format(result.content))
37+
38+
d: Optional[JsonData] = extract_json_payload(result.content)
39+
logging.info(
40+
"[OpenCopilot] Parsed the json payload: {}, context: {}".format(
41+
d, "gen_body_from_schema"
42+
)
5743
)
5844

59-
response = extract_json_payload(json_string)
60-
print(f"Query params: {response}")
61-
return response
45+
return d
Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,37 @@
1+
import os, logging
2+
from langchain.chat_models import ChatOpenAI
3+
from dotenv import load_dotenv
4+
from langchain.schema import HumanMessage, SystemMessage
5+
from typing import Any
6+
from routes.workflow.extractors.extract_json import extract_json_payload
7+
8+
load_dotenv()
9+
10+
openai_api_key = os.getenv("OPENAI_API_KEY")
11+
12+
13+
def transform_api_response_from_schema(server_url: str, api_response: str) -> str:
14+
chat = ChatOpenAI(
15+
openai_api_key=os.getenv("OPENAI_API_KEY"),
16+
model="gpt-3.5-turbo-16k",
17+
temperature=0,
18+
)
19+
20+
messages = [
21+
SystemMessage(
22+
content="You are a bot capable of comprehending API responses."
23+
),
24+
HumanMessage(
25+
content="Here is the response from current REST API: {} for endpoint: {}".format(
26+
api_response, server_url
27+
)
28+
),
29+
HumanMessage(
30+
content="Analyze the provided API responses and extract only the essential fields required for subsequent API interactions. Disregard any non-essential attributes such as CSS or color-related data. If there are generic fields like 'id,' provide them with more descriptive names in your response. Format your response as a JSON object with clear and meaningful keys that map to their respective values from the API response."
31+
),
32+
]
33+
34+
result = chat(messages)
35+
logging.info("[OpenCopilot] Transformed Response: {}".format(result.content))
36+
37+
return result.content

0 commit comments

Comments
 (0)