diff --git a/packages/responses-server/src/routes/responses.ts b/packages/responses-server/src/routes/responses.ts
index bccc4146e2..514e843b28 100644
--- a/packages/responses-server/src/routes/responses.ts
+++ b/packages/responses-server/src/routes/responses.ts
@@ -45,46 +45,53 @@ export const postCreateResponse = async (
 
 	if (Array.isArray(req.body.input)) {
 		messages.push(
-			...req.body.input.map((item) => ({
-				role: item.role,
-				content:
-					typeof item.content === "string"
-						? item.content
-						: item.content
-								.map((content) => {
-									switch (content.type) {
-										case "input_image":
-											return {
-												type: "image_url" as ChatCompletionInputMessageChunkType,
-												image_url: {
-													url: content.image_url,
-												},
-											};
-										case "output_text":
-											return {
-												type: "text" as ChatCompletionInputMessageChunkType,
-												text: content.text,
-											};
-										case "refusal":
-											return undefined;
-										case "input_text":
-											return {
-												type: "text" as ChatCompletionInputMessageChunkType,
-												text: content.text,
-											};
-									}
-								})
-								.filter((item) => item !== undefined),
-			}))
+			...req.body.input
+				.map((item) => ({
+					role: item.role,
+					content:
+						typeof item.content === "string"
+							? item.content
+							: item.content
+									.map((content) => {
+										switch (content.type) {
+											case "input_image":
+												return {
+													type: "image_url" as ChatCompletionInputMessageChunkType,
+													image_url: {
+														url: content.image_url,
+													},
+												};
+											case "output_text":
+												return content.text
+													? {
+															type: "text" as ChatCompletionInputMessageChunkType,
+															text: content.text,
+														}
+													: undefined;
+											case "refusal":
+												return undefined;
+											case "input_text":
+												return {
+													type: "text" as ChatCompletionInputMessageChunkType,
+													text: content.text,
+												};
+										}
+									})
+									.filter((item) => item !== undefined),
+				}))
+				.filter((message) => message.content?.length !== 0)
 		);
 	} else {
 		messages.push({ role: "user", content: req.body.input });
 	}
 
+	const model = req.body.model.includes("@") ? req.body.model.split("@")[1] : req.body.model;
+	const provider = req.body.model.includes("@") ? req.body.model.split("@")[0] : undefined;
+
 	const payload: ChatCompletionInput = {
 		// main params
-		model: req.body.model,
-		provider: req.body.provider,
+		model: model,
+		provider: provider,
 		messages: messages,
 		stream: req.body.stream,
 		// options
@@ -239,7 +246,7 @@ export const postCreateResponse = async (
 							delta: chunk.choices[0].delta.content,
 							sequence_number: sequenceNumber++,
 						});
-					} else if (chunk.choices[0].delta.tool_calls) {
+					} else if (chunk.choices[0].delta.tool_calls && chunk.choices[0].delta.tool_calls.length > 0) {
 						if (chunk.choices[0].delta.tool_calls.length > 1) {
 							throw new StreamingError("Not implemented: only single tool call is supported in streaming mode.");
 						}
diff --git a/packages/responses-server/src/schemas.ts b/packages/responses-server/src/schemas.ts
index 427716c10e..5fd12020a0 100644
--- a/packages/responses-server/src/schemas.ts
+++ b/packages/responses-server/src/schemas.ts
@@ -84,7 +84,6 @@ export const createResponseParamsSchema = z.object({
 		.default(null),
 	model: z.string(),
 	// parallel_tool_calls: z.boolean().default(true), // TODO: how to handle this if chat completion doesn't?
-	provider: z.string().optional(),
 	// previous_response_id: z.string().nullable().default(null),
 	// reasoning: z.object({
 	//   effort: z.enum(["low", "medium", "high"]).default("medium"),
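
For context, a minimal sketch (not part of the diff) of how the new model-string parsing in routes/responses.ts behaves. The value below is hypothetical, chosen only to illustrate that, per the ternaries above, the segment before "@" is read as the provider and the segment after it as the model:

	// Hypothetical input string, for illustration only.
	const raw = "some-provider@some-org/some-model";
	const model = raw.includes("@") ? raw.split("@")[1] : raw;          // "some-org/some-model"
	const provider = raw.includes("@") ? raw.split("@")[0] : undefined; // "some-provider"
	// Without an "@", the whole string is kept as the model and provider stays undefined.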