Commit 9a51253

Read default values from a config file
1 parent a5c9df3 commit 9a51253

13 files changed: 332 additions & 87 deletions


README.md

Lines changed: 49 additions & 4 deletions
@@ -1,6 +1,6 @@
 # ChatGPT CLI
 
-This project showcases an implementation of ChatGPT clients with streaming support in a Command-Line Interface (CLI)
+This project showcases an implementation of a ChatGPT client with streaming support in a Command-Line Interface (CLI)
 environment, demonstrating its practicality and effectiveness.
 
 ![a screenshot](resources/vhs.gif)
@@ -17,6 +17,7 @@ environment, demonstrating its practicality and effectiveness.
 - [Linux (arm64)](#linux-arm64)
 - [Windows (amd64)](#windows-amd64)
 - [Getting Started](#getting-started)
+- [Configuration](#configuration)
 - [Development](#development)
 - [Reporting Issues and Contributing](#reporting-issues-and-contributing)
 - [Uninstallation](#uninstallation)
@@ -31,9 +32,10 @@ environment, demonstrating its practicality and effectiveness.
 * **Context management**: Seamless conversations with the GPT model by maintaining message history across CLI calls.
 * **Sliding window history**: Automatically trims conversation history while maintaining context to stay within token
   limits.
-* **Custom context from local files**: Provide custom context through piping for GPT model reference during
-  conversation.
-* **Custom chat models**: Use a custom chat model by specifying the model name with the `--set-model` flag. Ensure that the model exists in the OpenAI model list.
+* **Custom context from local files**: Provide a custom context for the GPT model to reference during the conversation
+  by piping it in.
+* **Custom chat models**: Use a custom chat model by specifying the model name with the `--set-model` flag. Ensure that
+  the model exists in the OpenAI model list.
 * **Model listing**: Get a list of available models by using the `-l` or `--list-models` flag.
 * **Viper integration**: Robust configuration management.
 
@@ -145,6 +147,48 @@ Remember to check that the model exists in the OpenAI model list before setting
 chatgpt --list-models
 ```
 
+## Configuration
+
+The ChatGPT CLI uses a two-level configuration system. The default configuration is read from the
+file `resources/config.yaml` located within the package. These default values are:
+
+```yaml
+model: gpt-3.5-turbo
+max_tokens: 4096
+url: https://api.openai.com
+completions_path: /v1/chat/completions
+models_path: /v1/models
+```
+
+These default settings can be overwritten by user-defined configuration options. The user configuration file
+is `.chatgpt-cli/config.yaml` and is expected to be in the user's home directory.
+
+The user configuration file follows the same structure as the default configuration file. Here is an example of how to
+override the `model` and `max_tokens` values:
+
+```yaml
+model: gpt-3.5-turbo-16k
+max_tokens: 8192
+```
+
+In this example, the `model` is changed to `gpt-3.5-turbo-16k`, and `max_tokens` is set to `8192`. Other options, such
+as `url`, `completions_path`, and `models_path`, can be adjusted in the same manner if needed.
+
+Note: If the user configuration file is not found or cannot be read for any reason, the application will fall back to
+the default configuration.
+
+As a more immediate and flexible alternative to changing the configuration file manually, the CLI offers command-line
+flags for overwriting specific configuration values. For instance, the `model` can be changed using the `--model`
+flag. This is particularly useful for temporary adjustments or testing different configurations.
+
+```shell
+chatgpt --model gpt-3.5-turbo-16k What are some fun things to do in Red Hook?
+```
+
+This command will temporarily overwrite the `model` value for the duration of the current command. We're currently
+working on adding similar flags for other configuration values, which will allow you to adjust most aspects of the
+configuration directly from the command line.
+
 ## Development
 
 To start developing, set the `OPENAI_API_KEY` environment variable to
@@ -226,6 +270,7 @@ brew uninstall chatgpt-cli
 ```
 
 And to remove the tap:
+
 ```shell
 brew untap kardolus/chatgpt-cli
 ```
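
The diff shows where the defaults live and which keys exist, but not how the two configuration levels are merged. Since the README lists Viper integration as a feature, a read-then-merge flow is a plausible reading. The sketch below is a hypothetical reconstruction under that assumption: the `Config` field names are taken from the client/client.go diff, while `Load`, the mapstructure tags, and the specific Viper calls are guesses rather than the commit's actual code.

```go
package config

import (
	"os"
	"path/filepath"

	"github.com/spf13/viper"
)

// Config mirrors the keys of resources/config.yaml. The field names match
// the accesses in the diff (Config.Model, Config.MaxTokens, and so on);
// the tags and everything else here are assumptions.
type Config struct {
	Model           string `mapstructure:"model"`
	MaxTokens       int    `mapstructure:"max_tokens"`
	URL             string `mapstructure:"url"`
	CompletionsPath string `mapstructure:"completions_path"`
	ModelsPath      string `mapstructure:"models_path"`
}

// Load reads the package defaults, then layers user overrides on top.
func Load() (Config, error) {
	v := viper.New()

	// Level 1: defaults shipped with the package.
	v.SetConfigFile("resources/config.yaml")
	if err := v.ReadInConfig(); err != nil {
		return Config{}, err
	}

	// Level 2: optional user overrides from the home directory.
	if home, err := os.UserHomeDir(); err == nil {
		v.SetConfigFile(filepath.Join(home, ".chatgpt-cli", "config.yaml"))
		// MergeInConfig layers user values over the defaults; if the file
		// is missing or unreadable, the defaults are kept as-is.
		_ = v.MergeInConfig()
	}

	var c Config
	err := v.Unmarshal(&c) // viper.Unmarshal honors mapstructure tags
	return c, err
}
```

Reading the defaults first and merging the user file second means a missing `~/.chatgpt-cli/config.yaml` simply leaves the defaults in place, which lines up with the fallback behavior the new README section describes.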

client/client.go

Lines changed: 19 additions & 29 deletions
@@ -17,56 +17,46 @@ const (
 	AssistantContent         = "You are a helpful assistant."
 	AssistantRole            = "assistant"
 	ErrEmptyResponse         = "empty response"
-	DefaultGPTModel          = "gpt-3.5-turbo"
-	DefaultServiceURL        = "https://api.openai.com"
-	CompletionPath           = "/v1/chat/completions"
-	ModelPath                = "/v1/models"
 	MaxTokenBufferPercentage = 20
-	MaxTokenSize             = 4096
 	SystemRole               = "system"
 	UserRole                 = "user"
 	gptPrefix                = "gpt"
 )
 
 type Client struct {
+	Config       types.Config
 	History      []types.Message
-	Model        string
 	caller       http.Caller
-	capacity     int
 	historyStore history.HistoryStore
-	serviceURL   string
 }
 
-func New(caller http.Caller, cs config.ConfigStore, hs history.HistoryStore) *Client {
+func New(caller http.Caller, cs config.ConfigStore, hs history.HistoryStore) (*Client, error) {
+	cm, err := configmanager.New(cs)
+	if err != nil {
+		return nil, err
+	}
+
 	result := &Client{
+		Config:       cm.Config,
 		caller:       caller,
 		historyStore: hs,
-		capacity:     MaxTokenSize,
-		serviceURL:   DefaultServiceURL,
 	}
 
-	// do not error out when the config cannot be read
-	result.Model, _ = configmanager.New(cs).ReadModel()
-
-	if result.Model == "" {
-		result.Model = DefaultGPTModel
-	}
-
-	return result
+	return result, nil
 }
 
 func (c *Client) WithCapacity(capacity int) *Client {
-	c.capacity = capacity
+	c.Config.MaxTokens = capacity
 	return c
 }
 
 func (c *Client) WithModel(model string) *Client {
-	c.Model = model
+	c.Config.Model = model
 	return c
 }
 
 func (c *Client) WithServiceURL(url string) *Client {
-	c.serviceURL = url
+	c.Config.URL = url
 	return c
 }
 
@@ -79,7 +69,7 @@ func (c *Client) WithServiceURL(url string) *Client {
 func (c *Client) ListModels() ([]string, error) {
 	var result []string
 
-	raw, err := c.caller.Get(c.getEndpoint(ModelPath))
+	raw, err := c.caller.Get(c.getEndpoint(c.Config.ModelsPath))
 	if err != nil {
 		return nil, err
 	}
@@ -91,7 +81,7 @@ func (c *Client) ListModels() ([]string, error) {
 
 	for _, model := range response.Data {
 		if strings.HasPrefix(model.Id, gptPrefix) {
-			if model.Id != c.Model {
+			if model.Id != c.Config.Model {
 				result = append(result, fmt.Sprintf("- %s", model.Id))
 				continue
 			}
@@ -129,7 +119,7 @@ func (c *Client) Query(input string) (string, error) {
 		return "", err
 	}
 
-	raw, err := c.caller.Post(c.getEndpoint(CompletionPath), body, false)
+	raw, err := c.caller.Post(c.getEndpoint(c.Config.CompletionsPath), body, false)
 	if err != nil {
 		return "", err
 	}
@@ -161,7 +151,7 @@ func (c *Client) Stream(input string) error {
 		return err
 	}
 
-	result, err := c.caller.Post(c.getEndpoint(CompletionPath), body, true)
+	result, err := c.caller.Post(c.getEndpoint(c.Config.CompletionsPath), body, true)
 	if err != nil {
 		return err
 	}
@@ -174,7 +164,7 @@ func (c *Client) Stream(input string) error {
 func (c *Client) createBody(stream bool) ([]byte, error) {
 	body := types.CompletionsRequest{
 		Messages: c.History,
-		Model:    c.Model,
+		Model:    c.Config.Model,
 		Stream:   stream,
 	}
 
@@ -206,7 +196,7 @@ func (c *Client) addQuery(query string) {
 }
 
 func (c *Client) getEndpoint(path string) string {
-	return c.serviceURL + path
+	return c.Config.URL + path
 }
 
 func (c *Client) prepareQuery(input string) {
@@ -228,7 +218,7 @@ func (c *Client) processResponse(raw []byte, v interface{}) error {
 
 func (c *Client) truncateHistory() {
 	tokens, rolling := countTokens(c.History)
-	effectiveTokenSize := calculateEffectiveTokenSize(c.capacity, MaxTokenBufferPercentage)
+	effectiveTokenSize := calculateEffectiveTokenSize(c.Config.MaxTokens, MaxTokenBufferPercentage)
 
 	if tokens <= effectiveTokenSize {
 		return

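A practical consequence for callers: `New` now returns `(*Client, error)` instead of `*Client`, so construction sites have to handle the error rather than silently falling back to defaults. A hypothetical wiring sketch follows; only `New`, `WithModel`, and `WithCapacity` appear in the diff, and the import paths are inferred from the `kardolus/chatgpt-cli` tap name rather than shown in the commit.

```go
package cmd

import (
	"log"

	// Import paths inferred from the tap name; the diff does not show them.
	"github.com/kardolus/chatgpt-cli/client"
	"github.com/kardolus/chatgpt-cli/config"
	"github.com/kardolus/chatgpt-cli/history"
	"github.com/kardolus/chatgpt-cli/http"
)

// buildClient is a hypothetical helper, not part of the commit.
func buildClient(caller http.Caller, cs config.ConfigStore, hs history.HistoryStore) *client.Client {
	// New reads the config store once, up front, and surfaces failures
	// instead of swallowing them as the old constructor did.
	c, err := client.New(caller, cs, hs)
	if err != nil {
		log.Fatal(err)
	}

	// Builder-style overrides write through to the merged Config and take
	// precedence over both the default and the user config file.
	return c.WithModel("gpt-3.5-turbo-16k").WithCapacity(8192)
}
```

Note that `WithCapacity` now writes to `Config.MaxTokens`, the same value `truncateHistory` combines with the 20% `MaxTokenBufferPercentage` buffer when computing the effective token budget for the sliding window.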