Skip to content

Commit dcc5eb9

Browse files
Fix: Split tool_calls status into multiple chunks for proper Open WebUI display
OpenAI streaming format requires tool calls to be sent across multiple chunks:

1. First chunk: id, type, and function name with empty arguments
2. Second chunk: function arguments only

This matches OpenAI's actual streaming behavior, where tool_calls are accumulated incrementally, fixing the display issue in Open WebUI where the status was showing as empty JSON.

Changes:
- Rename createStatusToolCallChunk -> createStatusToolCallChunks (now returns an array)
- First chunk contains metadata (id, type, name) with arguments: ''
- Second chunk contains only the arguments field
- Both chunks share the same chat ID and timestamp
- Update streamingHandler to send both chunks sequentially
- Update all tests to match the new multi-chunk format

All 427 tests passing.
1 parent 7d47eb4 commit dcc5eb9

File tree

3 files changed

+113
-35
lines changed

3 files changed

+113
-35
lines changed

src/handlers/streamingHandler.js

Lines changed: 12 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919
const {
2020
createStreamingChunk,
2121
createStatus,
22-
createStatusToolCallChunk,
22+
createStatusToolCallChunks,
2323
createTypeStatusChunk,
2424
} = require('../utils/openaiResponse');
2525
const { createErrorResponse } = require('../utils/errorResponse');
@@ -40,17 +40,22 @@ function emitStatus(res, config, model, message, done = false) {
4040

4141
const status = createStatus(message, done);
4242

43-
let statusChunk;
4443
switch (config.statusEmitFormat) {
45-
case 'type_status':
46-
statusChunk = createTypeStatusChunk(model, status);
44+
case 'type_status': {
45+
const statusChunk = createTypeStatusChunk(model, status);
46+
res.write(`data: ${JSON.stringify(statusChunk)}\n\n`);
4747
break;
48+
}
4849
case 'tool_calls':
49-
default:
50-
statusChunk = createStatusToolCallChunk(model, status);
50+
default: {
51+
// tool_calls format requires multiple chunks
52+
const chunks = createStatusToolCallChunks(model, status);
53+
chunks.forEach((chunk) => {
54+
res.write(`data: ${JSON.stringify(chunk)}\n\n`);
55+
});
5156
break;
57+
}
5258
}
53-
res.write(`data: ${JSON.stringify(statusChunk)}\n\n`);
5459
}
5560

5661
/**

src/utils/openaiResponse.js

Lines changed: 45 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -88,19 +88,28 @@ function createStatus(message, done = false) {
8888
}
8989

9090
/**
91-
* Creates an OpenAI-compatible tool call chunk for status updates
91+
* Creates OpenAI-compatible tool call chunks for status updates
92+
* Returns an array of chunks that must be sent sequentially:
93+
* 1. First chunk with id, type, and function name
94+
* 2. Following chunks with arguments (streamed incrementally)
9295
*
9396
* @param {string} model - Model identifier
9497
* @param {Object} status - Status object from createStatus()
95-
* @returns {Object} OpenAI-compatible chunk with tool_calls
98+
* @returns {Array<Object>} Array of OpenAI-compatible chunks with tool_calls
9699
*/
97-
function createStatusToolCallChunk(model, status) {
100+
function createStatusToolCallChunks(model, status) {
98101
const callId = `call_status_${Date.now()}`;
102+
const chatId = `chatcmpl-${uuidv4()}`;
103+
const created = Math.floor(Date.now() / 1000);
104+
const argsJson = JSON.stringify({ message: status.message });
99105

100-
return {
101-
id: `chatcmpl-${uuidv4()}`,
106+
const chunks = [];
107+
108+
// First chunk: id, type, and function name with empty arguments
109+
chunks.push({
110+
id: chatId,
102111
object: 'chat.completion.chunk',
103-
created: Math.floor(Date.now() / 1000),
112+
created,
104113
model,
105114
choices: [
106115
{
@@ -113,15 +122,42 @@ function createStatusToolCallChunk(model, status) {
113122
type: 'function',
114123
function: {
115124
name: 'emit_status',
116-
arguments: JSON.stringify({ message: status.message }),
125+
arguments: '',
117126
},
118127
},
119128
],
120129
},
121130
finish_reason: null,
122131
},
123132
],
124-
};
133+
});
134+
135+
// Stream arguments incrementally (can be sent as single chunk or character-by-character)
136+
// For simplicity, we send the complete arguments in one chunk
137+
chunks.push({
138+
id: chatId,
139+
object: 'chat.completion.chunk',
140+
created,
141+
model,
142+
choices: [
143+
{
144+
index: 0,
145+
delta: {
146+
tool_calls: [
147+
{
148+
index: 0,
149+
function: {
150+
arguments: argsJson,
151+
},
152+
},
153+
],
154+
},
155+
finish_reason: null,
156+
},
157+
],
158+
});
159+
160+
return chunks;
125161
}
126162

127163
/**
@@ -148,6 +184,6 @@ module.exports = {
148184
createStreamingChunk,
149185
createCompletionResponse,
150186
createStatus,
151-
createStatusToolCallChunk,
187+
createStatusToolCallChunks,
152188
createTypeStatusChunk,
153189
};

tests/utils/openaiResponse.test.js

Lines changed: 56 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ const {
2020
createStreamingChunk,
2121
createCompletionResponse,
2222
createStatus,
23-
createStatusToolCallChunk,
23+
createStatusToolCallChunks,
2424
createTypeStatusChunk,
2525
} = require('../../src/utils/openaiResponse');
2626

@@ -122,12 +122,21 @@ describe('openaiResponse utility', () => {
122122
});
123123
});
124124

125-
describe('createStatusToolCallChunk', () => {
126-
it('should create status tool call chunk with all fields', () => {
125+
describe('createStatusToolCallChunks', () => {
126+
it('should create array of status tool call chunks', () => {
127127
const status = createStatus('Processing', false);
128-
const chunk = createStatusToolCallChunk('gpt-4', status);
128+
const chunks = createStatusToolCallChunks('gpt-4', status);
129129

130-
expect(chunk).toMatchObject({
130+
expect(Array.isArray(chunks)).toBe(true);
131+
expect(chunks).toHaveLength(2);
132+
});
133+
134+
it('should have first chunk with id, type, name, and empty arguments', () => {
135+
const status = createStatus('Processing', false);
136+
const chunks = createStatusToolCallChunks('gpt-4', status);
137+
const firstChunk = chunks[0];
138+
139+
expect(firstChunk).toMatchObject({
131140
object: 'chat.completion.chunk',
132141
model: 'gpt-4',
133142
choices: [
@@ -140,6 +149,36 @@ describe('openaiResponse utility', () => {
140149
type: 'function',
141150
function: {
142151
name: 'emit_status',
152+
arguments: '',
153+
},
154+
},
155+
],
156+
},
157+
finish_reason: null,
158+
},
159+
],
160+
});
161+
expect(firstChunk.id).toMatch(/^chatcmpl-/);
162+
expect(firstChunk.created).toBeGreaterThan(0);
163+
expect(firstChunk.choices[0].delta.tool_calls[0].id).toMatch(/^call_status_/);
164+
});
165+
166+
it('should have second chunk with arguments only', () => {
167+
const status = createStatus('Processing', false);
168+
const chunks = createStatusToolCallChunks('gpt-4', status);
169+
const secondChunk = chunks[1];
170+
171+
expect(secondChunk).toMatchObject({
172+
object: 'chat.completion.chunk',
173+
model: 'gpt-4',
174+
choices: [
175+
{
176+
index: 0,
177+
delta: {
178+
tool_calls: [
179+
{
180+
index: 0,
181+
function: {
143182
arguments: JSON.stringify({ message: 'Processing' }),
144183
},
145184
},
@@ -149,34 +188,32 @@ describe('openaiResponse utility', () => {
149188
},
150189
],
151190
});
152-
expect(chunk.id).toMatch(/^chatcmpl-/);
153-
expect(chunk.created).toBeGreaterThan(0);
154-
expect(chunk.choices[0].delta.tool_calls[0].id).toMatch(/^call_status_/);
191+
expect(secondChunk.id).toMatch(/^chatcmpl-/);
192+
expect(secondChunk.created).toBeGreaterThan(0);
155193
});
156194

157-
it('should create status tool call chunk for initiating step', () => {
158-
const status = createStatus('Initiating', false);
159-
const chunk = createStatusToolCallChunk('test-model', status);
195+
it('should use same chat ID and timestamp for both chunks', () => {
196+
const status = createStatus('Test', false);
197+
const chunks = createStatusToolCallChunks('gpt-4', status);
160198

161-
expect(chunk.choices[0].delta.tool_calls[0].function.arguments).toBe(
162-
JSON.stringify({ message: 'Initiating' }),
163-
);
199+
expect(chunks[0].id).toBe(chunks[1].id);
200+
expect(chunks[0].created).toBe(chunks[1].created);
164201
});
165202

166-
it('should create status tool call chunk for completed step', () => {
203+
it('should create chunks for completed status', () => {
167204
const status = createStatus('Completed', true);
168-
const chunk = createStatusToolCallChunk('test-model', status);
205+
const chunks = createStatusToolCallChunks('test-model', status);
169206

170-
expect(chunk.choices[0].delta.tool_calls[0].function.arguments).toBe(
207+
expect(chunks[1].choices[0].delta.tool_calls[0].function.arguments).toBe(
171208
JSON.stringify({ message: 'Completed' }),
172209
);
173210
});
174211

175212
it('should have call ID with correct format', () => {
176213
const status = createStatus('Step 1', false);
177-
const chunk = createStatusToolCallChunk('gpt-4', status);
214+
const chunks = createStatusToolCallChunks('gpt-4', status);
178215

179-
const id = chunk.choices[0].delta.tool_calls[0].id;
216+
const id = chunks[0].choices[0].delta.tool_calls[0].id;
180217

181218
expect(id).toMatch(/^call_status_\d+$/);
182219
});

0 commit comments

Comments (0)