Skip to content

Commit bff31ff

Browse files
committed
chore: update default model to gpt-4o
1 parent 0fca4d4 commit bff31ff

File tree

6 files changed

+35
-9
lines changed

6 files changed

+35
-9
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -396,7 +396,7 @@ environment variables, a config.yaml file, and default values, in that respectiv
396396
| `image_edits_path` | The API endpoint for image editing. | '/v1/images/edits' |
397397
| `image_generations_path` | The API endpoint for image generation. | '/v1/images/generations' |
398398
| `max_tokens` | The maximum number of tokens that can be used in a single API call. | 4096 |
399-
| `model` | The GPT model used by the application. | 'gpt-3.5-turbo' |
399+
| `model` | The GPT model used by the application. | 'gpt-4o' |
400400
| `models_path` | The API endpoint for accessing model information. | '/v1/models' |
401401
| `presence_penalty` | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far. | 0.0 |
402402
| `responses_path` | The API endpoint for responses. Used by o1-pro models. | '/v1/responses' |

api/client/client_test.go

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1379,11 +1379,12 @@ func testClient(t *testing.T, when spec.G, it spec.S) {
13791379
result, err := subject.ListModels()
13801380
Expect(err).NotTo(HaveOccurred())
13811381
Expect(result).NotTo(BeEmpty())
1382-
Expect(result).To(HaveLen(4))
1382+
Expect(result).To(HaveLen(5))
13831383
Expect(result[0]).To(Equal("- gpt-3.5-env-model"))
13841384
Expect(result[1]).To(Equal("* gpt-3.5-turbo (current)"))
13851385
Expect(result[2]).To(Equal("- gpt-3.5-turbo-0301"))
1386-
Expect(result[3]).To(Equal("- o1-mini"))
1386+
Expect(result[3]).To(Equal("- gpt-4o"))
1387+
Expect(result[4]).To(Equal("- o1-mini"))
13871388
})
13881389
})
13891390
when("ProvideContext()", func() {

cmd/chatgpt/main.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,7 @@ type ConfigMetadata struct {
6262
}
6363

6464
var configMetadata = []ConfigMetadata{
65-
{"model", "set-model", "gpt-3.5-turbo", "Set a new default model by specifying the model name"},
65+
{"model", "set-model", "gpt-4o", "Set a new default model by specifying the model name"},
6666
{"max_tokens", "set-max-tokens", 4096, "Set a new default max token size"},
6767
{"context_window", "set-context-window", 8192, "Set a new default context window size"},
6868
{"thread", "set-thread", "default", "Set a new active thread by specifying the thread name"},

config/store.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ import (
1313

1414
const (
1515
openAIName = "openai"
16-
openAIModel = "gpt-3.5-turbo"
16+
openAIModel = "gpt-4o"
1717
openAIMaxTokens = 4096
1818
openAIContextWindow = 8192
1919
openAIURL = "https://api.openai.com"

test/data/models.json

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -73,6 +73,30 @@
7373
"root": "gpt-3.5-turbo",
7474
"parent": null
7575
},
76+
{
77+
"id": "gpt-4o",
78+
"object": "model",
79+
"created": 1677610602,
80+
"owned_by": "openai",
81+
"permission": [
82+
{
83+
"id": "modelperm-Gsp3SyIu7GamHB3McQv3rMf5",
84+
"object": "model_permission",
85+
"created": 1684434433,
86+
"allow_create_engine": false,
87+
"allow_sampling": true,
88+
"allow_logprobs": true,
89+
"allow_search_indices": false,
90+
"allow_view": true,
91+
"allow_fine_tuning": false,
92+
"organization": "*",
93+
"group": null,
94+
"is_blocking": false
95+
}
96+
],
97+
"root": "gpt-4o",
98+
"parent": null
99+
},
76100
{
77101
"id": "davinci",
78102
"object": "model",

test/integration/integration_test.go

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -512,7 +512,8 @@ func testIntegration(t *testing.T, when spec.G, it spec.S) {
512512
it("should return the expected result for the --list-models flag", func() {
513513
output := runCommand("--list-models")
514514

515-
Expect(output).To(ContainSubstring("* gpt-3.5-turbo (current)"))
515+
Expect(output).To(ContainSubstring("* gpt-4o (current)"))
516+
Expect(output).To(ContainSubstring("- gpt-3.5-turbo"))
516517
Expect(output).To(ContainSubstring("- gpt-3.5-turbo-0301"))
517518
})
518519

@@ -540,7 +541,7 @@ func testIntegration(t *testing.T, when spec.G, it spec.S) {
540541
Expect(output).To(ContainSubstring("/v1/chat/completions"))
541542
Expect(output).To(ContainSubstring("--header \"Authorization: Bearer ${OPENAI_API_KEY}\""))
542543
Expect(output).To(ContainSubstring("--header 'Content-Type: application/json'"))
543-
Expect(output).To(ContainSubstring("\"model\":\"gpt-3.5-turbo\""))
544+
Expect(output).To(ContainSubstring("\"model\":\"gpt-4o\""))
544545
Expect(output).To(ContainSubstring("\"messages\":"))
545546
Expect(output).To(ContainSubstring("Response"))
546547

@@ -832,7 +833,7 @@ func testIntegration(t *testing.T, when spec.G, it spec.S) {
832833
})
833834

834835
it("has a configurable default model", func() {
835-
oldModel := "gpt-3.5-turbo"
836+
oldModel := "gpt-4o"
836837
newModel := "gpt-3.5-turbo-0301"
837838

838839
// Verify initial model
@@ -939,7 +940,7 @@ func testIntegration(t *testing.T, when spec.G, it spec.S) {
939940

940941
when("configuration precedence", func() {
941942
var (
942-
defaultModel = "gpt-3.5-turbo"
943+
defaultModel = "gpt-4o"
943944
newModel = "gpt-3.5-turbo-0301"
944945
envModel = "gpt-3.5-env-model"
945946
envVar string

0 commit comments

Comments
 (0)