Commit 93e50b8

fix(gemini): refactor maxTokens type to int32 across the codebase (#196)

- Change the type of `maxTokens` from `int` to `int32` in multiple files
- Update the function `WithMaxTokens` to accept `int32` instead of `int`
- Adjust the usage of `maxTokens` in the `New` function to match the new type

Signed-off-by: Bo-Yi Wu <[email protected]>

1 parent 8106c7e commit 93e50b8

File tree

3 files changed: +5 -5 lines changed

- cmd/openai.go
- gemini/gemini.go
- gemini/options.go

cmd/openai.go

Lines changed: 1 addition & 1 deletion

```diff
@@ -36,7 +36,7 @@ func NewGemini() (*gemini.Client, error) {
 	return gemini.New(
 		gemini.WithToken(viper.GetString("openai.api_key")),
 		gemini.WithModel(viper.GetString("openai.model")),
-		gemini.WithMaxTokens(viper.GetInt("openai.max_tokens")),
+		gemini.WithMaxTokens(viper.GetInt32("openai.max_tokens")),
 		gemini.WithTemperature(float32(viper.GetFloat64("openai.temperature"))),
 		gemini.WithTopP(float32(viper.GetFloat64("openai.top_p"))),
 	)
```
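Viper provides typed getters, so the call site can read the value as `int32` directly instead of converting from `int`. A minimal, self-contained sketch of that call path, using an in-memory key rather than the project's real config file:

```go
package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	// Stand-in for a value normally loaded from the config file.
	viper.Set("openai.max_tokens", 2048)

	// viper.GetInt32 returns int32, matching the new WithMaxTokens
	// signature, so no int -> int32 conversion is needed here.
	maxTokens := viper.GetInt32("openai.max_tokens")
	fmt.Printf("%T %v\n", maxTokens, maxTokens) // int32 2048
}
```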

gemini/gemini.go

Lines changed: 2 additions & 2 deletions

```diff
@@ -15,7 +15,7 @@ import (
 type Client struct {
 	client      *genai.GenerativeModel
 	model       string
-	maxTokens   int
+	maxTokens   int32
 	temperature float32
 	topP        float32
 	debug       bool
@@ -106,7 +106,7 @@ func New(opts ...Option) (c *Client, err error) {
 	}
 
 	engine.client = client.GenerativeModel(engine.model)
-	engine.client.MaxOutputTokens = util.Int32Ptr(int32(engine.maxTokens))
+	engine.client.MaxOutputTokens = util.Int32Ptr(engine.maxTokens)
 	engine.client.Temperature = util.Float32Ptr(engine.temperature)
 	engine.client.TopP = util.Float32Ptr(engine.topP)
 
```
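The diff relies on `util.Int32Ptr` and `util.Float32Ptr`, which aren't shown here. Judging by their use against the SDK's pointer-typed generation settings, they are presumably simple pointer constructors; a minimal sketch under that assumption:

```go
package util

// Int32Ptr returns a pointer to v, for optional SDK fields typed *int32.
// (Sketch only: the repo's actual helper isn't part of this diff.)
func Int32Ptr(v int32) *int32 { return &v }

// Float32Ptr returns a pointer to v, for fields typed *float32.
func Float32Ptr(v float32) *float32 { return &v }
```

With `maxTokens` already stored as `int32`, the `int32(engine.maxTokens)` conversion becomes redundant, which is exactly what the second hunk removes.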

gemini/options.go

Lines changed: 2 additions & 2 deletions

```diff
@@ -50,7 +50,7 @@ func WithModel(val string) Option {
 // WithMaxTokens returns a new Option that sets the max tokens for the client configuration.
 // The maximum number of tokens to generate in the chat completion.
 // The total length of input tokens and generated tokens is limited by the model's context length.
-func WithMaxTokens(val int) Option {
+func WithMaxTokens(val int32) Option {
 	if val <= 0 {
 		val = defaultMaxTokens
 	}
@@ -83,7 +83,7 @@ func WithTopP(val float32) Option {
 type config struct {
 	token       string
 	model       string
-	maxTokens   int
+	maxTokens   int32
 	temperature float32
 	topP        float32
 }
```
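The hunk cuts off the bottom of `WithMaxTokens`; it presumably returns a closure that writes `val` into the `config` struct shown above, in the usual functional-options style. A sketch assuming `Option` is a `func(*config)` and `defaultMaxTokens` is an untyped constant (both assumptions, neither appears in this diff):

```go
// Assumed shape of the option type; the real definition isn't in this diff.
type Option func(*config)

// Hypothetical default; the actual value lives elsewhere in gemini/options.go.
const defaultMaxTokens = 2048

// WithMaxTokens returns a new Option that sets the max tokens for the client configuration.
func WithMaxTokens(val int32) Option {
	if val <= 0 {
		// Untyped constants are assignable to int32 without conversion.
		val = defaultMaxTokens
	}
	return func(cfg *config) {
		cfg.maxTokens = val
	}
}
```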
