
Commit 3cb27e4

Merge pull request continuedev#5138 from continuedev/dallin/tools-makeover
Tools architecture and error handling updates
2 parents: 6ff2f1f + 16a3aa3


41 files changed: +900 / -698 lines

core/.eslintrc.json

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@
   "@typescript-eslint/semi": "off",
   "eqeqeq": "error",
   "complexity": ["error", { "max": 36 }],
-  "max-lines-per-function": ["error", { "max": 472 }],
+  "max-lines-per-function": ["error", { "max": 500 }],
   "max-statements": ["error", { "max": 108 }],
   "max-depth": ["error", { "max": 6 }],
   "max-nested-callbacks": ["error", { "max": 4 }],

core/config/types.ts

Lines changed: 1 addition & 0 deletions
@@ -403,6 +403,7 @@ declare global {
     | "generated"
     | "calling"
     | "done"
+    | "errored"
     | "canceled";

   // Will exist only on "assistant" messages with tool calls

core/core.ts

Lines changed: 32 additions & 48 deletions
@@ -682,59 +682,44 @@ export class Core {
       }
     });

-    on(
-      "tools/call",
-      async ({ data: { toolCall, selectedModelTitle }, messageId }) => {
-        const { config } = await this.configHandler.loadConfig();
-        if (!config) {
-          throw new Error("Config not loaded");
-        }
-
-        const tool = config.tools.find(
-          (t) => t.function.name === toolCall.function.name,
-        );
-
-        if (!tool) {
-          throw new Error(`Tool ${toolCall.function.name} not found`);
-        }
+    on("tools/call", async ({ data: { toolCall } }) => {
+      const { config } = await this.configHandler.loadConfig();
+      if (!config) {
+        throw new Error("Config not loaded");
+      }

-        if (!config.selectedModelByRole.chat) {
-          throw new Error("No chat model selected");
-        }
+      const tool = config.tools.find(
+        (t) => t.function.name === toolCall.function.name,
+      );

-        // Define a callback for streaming output updates
-        const onPartialOutput = (params: {
-          toolCallId: string;
-          contextItems: ContextItem[];
-        }) => {
-          this.messenger.send("toolCallPartialOutput", params);
-        };
+      if (!tool) {
+        throw new Error(`Tool ${toolCall.function.name} not found`);
+      }

-        const contextItems = await callTool(
-          tool,
-          JSON.parse(toolCall.function.arguments || "{}"),
-          {
-            ide: this.ide,
-            llm: config.selectedModelByRole.chat,
-            fetch: (url, init) =>
-              fetchwithRequestOptions(url, init, config.requestOptions),
-            tool,
-            toolCallId: toolCall.id,
-            onPartialOutput,
-          },
-        );
+      if (!config.selectedModelByRole.chat) {
+        throw new Error("No chat model selected");
+      }

-        if (tool.faviconUrl) {
-          contextItems.forEach((item) => {
-            item.icon = tool.faviconUrl;
-          });
-        }
+      // Define a callback for streaming output updates
+      const onPartialOutput = (params: {
+        toolCallId: string;
+        contextItems: ContextItem[];
+      }) => {
+        this.messenger.send("toolCallPartialOutput", params);
+      };

-        return { contextItems };
-      },
-    );
+      return await callTool(tool, toolCall.function.arguments, {
+        ide: this.ide,
+        llm: config.selectedModelByRole.chat,
+        fetch: (url, init) =>
+          fetchwithRequestOptions(url, init, config.requestOptions),
+        tool,
+        toolCallId: toolCall.id,
+        onPartialOutput,
+      });
+    });

-    on("isItemTooBig", async ({ data: { item, selectedModelTitle } }) => {
+    on("isItemTooBig", async ({ data: { item } }) => {
       return this.isItemTooBig(item);
     });

@@ -932,7 +917,6 @@ export class Core {
         query: string;
         fullInput: string;
         selectedCode: RangeInFile[];
-        selectedModelTitle: string;
       }>,
     ) => {
       const { config } = await this.configHandler.loadConfig();
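The "tools/call" handler no longer parses toolCall.function.arguments itself, no longer decorates contextItems with the tool's favicon, and returns whatever callTool produces instead of building its own { contextItems } result. A minimal sketch of what callTool plausibly looks like after this change; the ToolCallResult shape, the execute field, and the error capture are assumptions inferred from the removed handler code and the new "errored" status, not taken from this diff:

// Hypothetical, simplified types; the real ones live in core/index.d.ts.
interface ContextItem {
  name: string;
  description: string;
  content: string;
  icon?: string;
}

interface Tool {
  faviconUrl?: string;
  // Assumed executor field for this sketch; the real Tool interface is richer.
  execute: (args: Record<string, unknown>) => Promise<ContextItem[]>;
}

interface ToolCallResult {
  contextItems: ContextItem[];
  errorMessage?: string;
}

// Sketch: callTool now owns argument parsing and error capture, so a
// malformed argument string or a throwing tool can surface as an
// "errored" tool call rather than an unhandled exception in the handler.
async function callTool(
  tool: Tool,
  rawArgs: string | undefined,
  _extras: { toolCallId: string },
): Promise<ToolCallResult> {
  try {
    const args = JSON.parse(rawArgs || "{}");
    const contextItems = await tool.execute(args);
    // Favicon decoration moved here from the "tools/call" handler.
    if (tool.faviconUrl) {
      contextItems.forEach((item) => (item.icon = tool.faviconUrl));
    }
    return { contextItems };
  } catch (e) {
    return {
      contextItems: [],
      errorMessage: e instanceof Error ? e.message : String(e),
    };
  }
}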

core/edit/streamDiffLines.ts

Lines changed: 8 additions & 6 deletions
@@ -4,6 +4,7 @@ import {
   ILLM,
   Prediction,
   RuleWithSource,
+  ToolResultChatMessage,
   UserChatMessage,
 } from "../";
 import {
@@ -104,15 +105,16 @@ export async function* streamDiffLines({

   // Rules will be included with edit prompt
   // If any rules are present this will result in using chat instead of legacy completion
-  const lastUserMessage: UserChatMessage | undefined =
+  const lastUserMessage =
     typeof prompt === "string"
-      ? {
+      ? ({
           role: "user",
           content: prompt,
-        }
-      : (findLast(prompt, (msg) => msg.role === "user") as
-          | UserChatMessage
-          | undefined);
+        } as UserChatMessage)
+      : (findLast(
+          prompt,
+          (msg) => msg.role === "user" || msg.role === "tool",
+        ) as UserChatMessage | ToolResultChatMessage | undefined);

   const systemMessage = getSystemMessageWithRules({
     currentModel: llm.model,
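The widened predicate matters when a conversation ends with a tool result rather than a user turn. A small self-contained illustration, with message shapes simplified and findLast reimplemented locally for the example:

// Simplified stand-ins for UserChatMessage / ToolResultChatMessage.
type Msg =
  | { role: "user"; content: string }
  | { role: "assistant"; content: string }
  | { role: "tool"; content: string; toolCallId: string };

// Same behavior as core/util/findLast: last element matching the predicate.
function findLast<T>(arr: T[], pred: (t: T) => boolean): T | undefined {
  for (let i = arr.length - 1; i >= 0; i--) {
    if (pred(arr[i])) return arr[i];
  }
  return undefined;
}

const history: Msg[] = [
  { role: "user", content: "Edit this function" },
  { role: "assistant", content: "Reading the file first." },
  { role: "tool", content: "...file contents...", toolCallId: "1" },
];

const lastUserOnly = findLast(history, (m) => m.role === "user");
// -> the "user" message: the old predicate skips the trailing tool result
const lastUserOrTool = findLast(
  history,
  (m) => m.role === "user" || m.role === "tool",
);
// -> the "tool" message: rules now anchor to the most recent user OR tool turn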

core/index.d.ts

Lines changed: 1 addition & 0 deletions
@@ -428,6 +428,7 @@ export type ToolStatus =
   | "generating"
   | "generated"
   | "calling"
+  | "errored"
   | "done"
   | "canceled";

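With "errored" in the union, consumers can branch on failed tool calls explicitly instead of inferring failure from a "done" state with empty output. A hedged sketch of such a check; the ToolCallState field names here are assumptions for illustration:

type ToolStatus =
  | "generating"
  | "generated"
  | "calling"
  | "errored"
  | "done"
  | "canceled";

// Hypothetical state shape for illustration only.
interface ToolCallState {
  status: ToolStatus;
  errorMessage?: string;
}

function describeToolCall(state: ToolCallState): string {
  switch (state.status) {
    case "errored":
      return `Tool call failed: ${state.errorMessage ?? "unknown error"}`;
    case "canceled":
      return "Tool call canceled";
    case "done":
      return "Tool call completed";
    default:
      return "Tool call in progress";
  }
}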

core/llm/constants.ts

Lines changed: 0 additions & 22 deletions
@@ -7,26 +7,6 @@ const DEFAULT_ARGS = {
   temperature: DEFAULT_TEMPERATURE,
 };

-const GPT_4_CTX_LEN = 128_000;
-
-const CONTEXT_LENGTH_FOR_MODEL: { [name: string]: number } = {
-  "gpt-3.5-turbo": 4096,
-  "gpt-3.5-turbo-0613": 4096,
-  "gpt-3.5-turbo-16k": 16_384,
-  "gpt-35-turbo-16k": 16_384,
-  "gpt-35-turbo-0613": 4096,
-  "gpt-35-turbo": 4096,
-  "gpt-4": 4096,
-  "gpt-4-32k": 32_000,
-  "gpt-4-turbo-preview": 32_000,
-  "gpt-4o": GPT_4_CTX_LEN,
-  "gpt-4o-mini": GPT_4_CTX_LEN,
-  "gpt-4-vision": GPT_4_CTX_LEN,
-  "gpt-4-0125-preview": GPT_4_CTX_LEN,
-  "gpt-4-1106-preview": GPT_4_CTX_LEN,
-};
-
-const TOKEN_BUFFER_FOR_SAFETY = 350;
 const PROXY_URL = "http://localhost:65433";

 const DEFAULT_MAX_CHUNK_SIZE = 500; // 512 - buffer for safety (in case of differing tokenizers)
@@ -38,12 +18,10 @@ export enum LLMConfigurationStatuses {
 }

 export {
-  CONTEXT_LENGTH_FOR_MODEL,
   DEFAULT_ARGS,
   DEFAULT_CONTEXT_LENGTH,
   DEFAULT_MAX_BATCH_SIZE,
   DEFAULT_MAX_CHUNK_SIZE,
   DEFAULT_MAX_TOKENS,
   PROXY_URL,
-  TOKEN_BUFFER_FOR_SAFETY,
 };

core/llm/constructMessages.ts

Lines changed: 6 additions & 19 deletions
@@ -3,11 +3,12 @@ import {
   ChatMessage,
   RuleWithSource,
   TextMessagePart,
+  ToolResultChatMessage,
   UserChatMessage,
 } from "../";
 import { findLast } from "../util/findLast";
 import { normalizeToMessageParts } from "../util/messageContent";
-import { messageIsEmpty } from "./messages";
+import { isUserOrToolMsg } from "./messages";
 import { getSystemMessageWithRules } from "./rules/getSystemMessageWithRules";

 export const DEFAULT_CHAT_SYSTEM_MESSAGE_URL =
@@ -92,24 +93,20 @@ export function constructMessages(
         ...historyItem.message,
         content,
       });
-    } else if (historyItem.toolCallState?.status === "canceled") {
-      // Canceled tool call
-      msgs.push({
-        ...historyItem.message,
-        content: CANCELED_TOOL_CALL_MESSAGE,
-      });
     } else {
       msgs.push(historyItem.message);
     }
   }

-  const userMessage = findLast(msgs, (msg) => msg.role === "user") as
+  const lastUserMsg = findLast(msgs, isUserOrToolMsg) as
     | UserChatMessage
+    | ToolResultChatMessage
     | undefined;
+
   const systemMessage = getSystemMessageWithRules({
     baseSystemMessage: baseChatSystemMessage ?? DEFAULT_CHAT_SYSTEM_MESSAGE,
     rules,
-    userMessage,
+    userMessage: lastUserMsg,
     currentModel: modelName,
   });
   if (systemMessage.trim()) {
@@ -119,16 +116,6 @@ export function constructMessages(
     });
   }

-  // We dispatch an empty assistant chat message to the history on submission. Don't send it
-  const lastMessage = msgs.at(-1);
-  if (
-    lastMessage &&
-    lastMessage.role === "assistant" &&
-    messageIsEmpty(lastMessage)
-  ) {
-    msgs.pop();
-  }
-
   // Remove the "id" from all of the messages
   return msgs.map((msg) => {
     const { id, ...rest } = msg as any;
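isUserOrToolMsg is imported from ./messages but its body is not shown in this diff; a plausible one-liner, offered as an assumption consistent with how the helper is used above:

// Assumed implementation, matching the widened predicate in streamDiffLines.
function isUserOrToolMsg(msg: { role: string }): boolean {
  return msg.role === "user" || msg.role === "tool";
}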
