Skip to content

Commit 995bef7

Browse files
authored
Merge pull request #6599 from DreamRivulet/add-support-GPT5
add: model gpt-5
2 parents 557a2cc + 38ac502 commit 995bef7

File tree

2 files changed

+17
-3
lines changed

2 files changed

+17
-3
lines changed

app/client/platforms/openai.ts

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -200,6 +200,7 @@ export class ChatGPTApi implements LLMApi {
200200
options.config.model.startsWith("o1") ||
201201
options.config.model.startsWith("o3") ||
202202
options.config.model.startsWith("o4-mini");
203+
const isGpt5 = options.config.model.startsWith("gpt-5");
203204
if (isDalle3) {
204205
const prompt = getMessageTextContent(
205206
options.messages.slice(-1)?.pop() as any,
@@ -230,15 +231,21 @@ export class ChatGPTApi implements LLMApi {
230231
messages,
231232
stream: options.config.stream,
232233
model: modelConfig.model,
233-
temperature: !isO1OrO3 ? modelConfig.temperature : 1,
234+
temperature: (!isO1OrO3 && !isGpt5) ? modelConfig.temperature : 1,
234235
presence_penalty: !isO1OrO3 ? modelConfig.presence_penalty : 0,
235236
frequency_penalty: !isO1OrO3 ? modelConfig.frequency_penalty : 0,
236237
top_p: !isO1OrO3 ? modelConfig.top_p : 1,
237238
// max_tokens: Math.max(modelConfig.max_tokens, 1024),
238239
// Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
239240
};
240241

241-
if (isO1OrO3) {
242+
if (isGpt5) {
243+
// Remove max_tokens if present
244+
delete requestPayload.max_tokens;
245+
// GPT-5 expects max_completion_tokens instead of max_tokens
246+
requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
247+
248+
} else if (isO1OrO3) {
242249
// by default the o1/o3 models will not attempt to produce output that includes markdown formatting
243250
// manually add "Formatting re-enabled" developer message to encourage markdown inclusion in model responses
244251
// (https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#markdown-output)
@@ -251,8 +258,9 @@ export class ChatGPTApi implements LLMApi {
251258
requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
252259
}
253260

261+
254262
// add max_tokens to vision model
255-
if (visionModel && !isO1OrO3) {
263+
if (visionModel && !isO1OrO3 && ! isGpt5) {
256264
requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
257265
}
258266
}

app/constant.ts

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -493,6 +493,7 @@ export const VISION_MODEL_REGEXES = [
493493
/o3/,
494494
/o4-mini/,
495495
/grok-4/i,
496+
/gpt-5/
496497
];
497498

498499
export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
@@ -517,6 +518,11 @@ const openaiModels = [
517518
"gpt-4.1-nano-2025-04-14",
518519
"gpt-4.5-preview",
519520
"gpt-4.5-preview-2025-02-27",
521+
"gpt-5-chat",
522+
"gpt-5-mini",
523+
"gpt-5-nano",
524+
"gpt-5",
525+
"gpt-5-chat-2025-01-01-preview",
520526
"gpt-4o",
521527
"gpt-4o-2024-05-13",
522528
"gpt-4o-2024-08-06",

0 commit comments

Comments (0)