Commit d6be309

Make GPT model options global for the OpenAI and Ollama engines
1 parent 47bb30a commit d6be309

File tree: 2 files changed, +87, -79 lines

  .env.example
  pkg/gpt/gpt.go


.env.example

Lines changed: 12 additions & 10 deletions
@@ -10,7 +10,7 @@ WHATSAPP_CLIENT_PROXY_URL=""
 # WHATSAPP_VERSION_MINOR=3000
 # WHATSAPP_VERSION_PATCH=1019175440

-WHATSAPP_GPT_ENGINE="ollama"
+WHATSAPP_GPT_ENGINE="openai"
 WHATSAPP_GPT_TAG="askme"
 WHASTAPP_GPT_BLOCKED_WORD=

@@ -20,18 +20,20 @@ WHASTAPP_GPT_BLOCKED_WORD=
 OPENAI_HOST=https://api.openai.com
 OPENAI_HOST_PATH=/v1
 OPENAI_API_KEY=
-OPENAI_GPT_MODEL_NAME=gpt-3.5-turbo
-OPENAI_GPT_MODEL_SYSTEM_PROMPT=
-OPENAI_GPT_MODEL_TOKEN=4096
-OPENAI_GPT_MODEL_TEMPERATURE=0.0
-OPENAI_GPT_MODEL_TOP_P=1.0
-OPENAI_GPT_MODEL_PENALTY_PRESENCE=0.0
-OPENAI_GPT_MODEL_PENALTY_FREQUENCY=0.0

 # -----------------------------------
 # Ollama Configuration
 # -----------------------------------
 OLLAMA_HOST=
 OLLAMA_HOST_PATH=/
-OLLAMA_GPT_MODEL_NAME=gemma2:latest
-OLLAMA_GPT_MODEL_SYSTEM_PROMPT=
+
+# -----------------------------------
+# GPT Configuration
+# -----------------------------------
+GPT_MODEL_NAME=gpt-3.5-turbo
+GPT_MODEL_SYSTEM_PROMPT=
+GPT_MODEL_TOKEN=4096
+GPT_MODEL_TEMPERATURE=0.0
+GPT_MODEL_TOP_P=1.0
+GPT_MODEL_PENALTY_PRESENCE=0.0
+GPT_MODEL_PENALTY_FREQUENCY=0.0
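
With the per-engine OPENAI_GPT_MODEL_* and OLLAMA_GPT_MODEL_* keys collapsed into a single GPT_MODEL_* block, the same tuning values apply regardless of which engine WHATSAPP_GPT_ENGINE selects. A minimal sketch of a resulting .env for the Ollama engine (the host URL is an assumed local default, not from this commit; gemma2:latest is carried over from the removed OLLAMA_GPT_MODEL_NAME line):

  WHATSAPP_GPT_ENGINE="ollama"

  # Assumed local Ollama endpoint; illustrative only
  OLLAMA_HOST=http://localhost:11434
  OLLAMA_HOST_PATH=/

  # Shared model options, read by both engines after this change
  GPT_MODEL_NAME=gemma2:latest
  GPT_MODEL_TEMPERATURE=0.0
  GPT_MODEL_TOP_P=1.0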

pkg/gpt/gpt.go

Lines changed: 75 additions & 69 deletions
@@ -2,6 +2,7 @@ package gpt

 import (
     "context"
+    "encoding/json"
     "net"
     "net/http"
     "net/url"
@@ -28,21 +29,22 @@ var (
 var (
     OAIHost,
     OAIHostPath,
-    OAIAPIKey,
-    OAIGPTModelName,
-    OAIGPTModelPrompt string
-    OAIGPTModelToken int
-    OAIGPTModelTemperature,
-    OAIGPTModelTopP,
-    OAIGPTModelPenaltyPresence,
-    OAIGPTModelPenaltyFreq float32
+    OAIAPIKey string
 )

 var (
     OHost,
-    OHostPath,
-    OGPTModelName,
-    OGPTModelPrompt string
+    OHostPath string
+)
+
+var (
+    GPTModelName,
+    GPTModelPrompt string
+    GPTModelToken int
+    GPTModelTemperature,
+    GPTModelTopP,
+    GPTModelPenaltyPresence,
+    GPTModelPenaltyFreq float32
 )

 const listBlockedWord string = "" +
@@ -88,41 +90,6 @@ func init() {
             log.Println(log.LogLevelFatal, "Error Parse Environment Variable for OpenAI API Key")
         }

-        OAIGPTModelName, err = env.GetEnvString("OPENAI_GPT_MODEL_NAME")
-        if err != nil {
-            OAIGPTModelName = "gpt-3.5-turbo"
-        }
-
-        OAIGPTModelPrompt, err = env.GetEnvString("OPENAI_GPT_MODEL_SYSTEM_PROMPT")
-        if err != nil {
-            OAIGPTModelPrompt = ""
-        }
-
-        OAIGPTModelToken, err = env.GetEnvInt("OPENAI_GPT_MODEL_TOKEN")
-        if err != nil {
-            OAIGPTModelToken = 4096
-        }
-
-        OAIGPTModelTemperature, err = env.GetEnvFloat32("OPENAI_GPT_MODEL_TEMPERATURE")
-        if err != nil {
-            OAIGPTModelTemperature = 0
-        }
-
-        OAIGPTModelTopP, err = env.GetEnvFloat32("OPENAI_GPT_MODEL_TOP_P")
-        if err != nil {
-            OAIGPTModelTopP = 1
-        }
-
-        OAIGPTModelPenaltyPresence, err = env.GetEnvFloat32("OPENAI_GPT_MODEL_PENALTY_PRESENCE")
-        if err != nil {
-            OAIGPTModelPenaltyPresence = 0
-        }
-
-        OAIGPTModelPenaltyFreq, err = env.GetEnvFloat32("OPENAI_GPT_MODEL_PENALTY_FREQUENCY")
-        if err != nil {
-            OAIGPTModelPenaltyFreq = 0
-        }
-
     default:
         // -----------------------------------------------------------------------
         // Ollama Configuration Environment
@@ -136,16 +103,44 @@ func init() {
         if err != nil {
             OHostPath = "/"
         }
+    }

-        OGPTModelName, err = env.GetEnvString("OLLAMA_GPT_MODEL_NAME")
-        if err != nil {
-            log.Println(log.LogLevelFatal, "Error Parse Environment Variable for Ollama GPT Model Name")
-        }
+    // -----------------------------------------------------------------------
+    // GPT Configuration Environment
+    // -----------------------------------------------------------------------
+    GPTModelName, err = env.GetEnvString("GPT_MODEL_NAME")
+    if err != nil {
+        GPTModelName = "gpt-3.5-turbo"
+    }

-        OGPTModelPrompt, err = env.GetEnvString("OLLAMA_GPT_MODEL_SYSTEM_PROMPT")
-        if err != nil {
-            OGPTModelPrompt = ""
-        }
+    GPTModelPrompt, err = env.GetEnvString("GPT_MODEL_SYSTEM_PROMPT")
+    if err != nil {
+        GPTModelPrompt = ""
+    }
+
+    GPTModelToken, err = env.GetEnvInt("GPT_MODEL_TOKEN")
+    if err != nil {
+        GPTModelToken = 4096
+    }
+
+    GPTModelTemperature, err = env.GetEnvFloat32("GPT_MODEL_TEMPERATURE")
+    if err != nil {
+        GPTModelTemperature = 0
+    }
+
+    GPTModelTopP, err = env.GetEnvFloat32("GPT_MODEL_TOP_P")
+    if err != nil {
+        GPTModelTopP = 1
+    }
+
+    GPTModelPenaltyPresence, err = env.GetEnvFloat32("GPT_MODEL_PENALTY_PRESENCE")
+    if err != nil {
+        GPTModelPenaltyPresence = 0
+    }
+
+    GPTModelPenaltyFreq, err = env.GetEnvFloat32("GPT_MODEL_PENALTY_FREQUENCY")
+    if err != nil {
+        GPTModelPenaltyFreq = 0
     }

     // -----------------------------------------------------------------------
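
The init() changes above repeat one pattern: read a variable through the project's env helpers and fall back to a default when it is missing. A self-contained sketch of that pattern, using os.LookupEnv inside a hypothetical stand-in for env.GetEnvFloat32 (the helper's name and error behavior are assumptions for illustration):

package main

import (
    "fmt"
    "os"
    "strconv"
)

// getEnvFloat32 is a hypothetical stand-in for the project's
// env.GetEnvFloat32 helper: it returns an error when the variable
// is unset so the caller can substitute a default.
func getEnvFloat32(key string) (float32, error) {
    raw, ok := os.LookupEnv(key)
    if !ok {
        return 0, fmt.Errorf("%s is not set", key)
    }
    f, err := strconv.ParseFloat(raw, 32)
    return float32(f), err
}

func main() {
    // Same fallback shape as init(): use the env value when present,
    // otherwise keep the documented default (0.0 for temperature).
    temperature, err := getEnvFloat32("GPT_MODEL_TEMPERATURE")
    if err != nil {
        temperature = 0
    }
    fmt.Println(temperature)
}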
@@ -190,16 +185,19 @@ func GPTResponse(question string) (response string, err error) {
         return "Sorry, the AI can not response due to it is containing some blocked word 🥺", nil
     }

+    isStream := new(bool)
+    *isStream = false
+
     switch strings.ToLower(WAGPTEngine) {
     case "openai":
         var OAIGPTResponseText string
         var OAIGPTChatCompletion []OpenAI.ChatCompletionMessage

-        if len(strings.TrimSpace(OAIGPTModelPrompt)) != 0 {
+        if len(strings.TrimSpace(GPTModelPrompt)) != 0 {
             OAIGPTChatCompletion = []OpenAI.ChatCompletionMessage{
                 {
                     Role:    OpenAI.ChatMessageRoleSystem,
-                    Content: OAIGPTModelPrompt,
+                    Content: GPTModelPrompt,
                 },
                 {
                     Role:    OpenAI.ChatMessageRoleUser,
@@ -216,14 +214,14 @@ func GPTResponse(question string) (response string, err error) {
         }

         OAIGPTPrompt := OpenAI.ChatCompletionRequest{
-            Model:            OAIGPTModelName,
-            MaxTokens:        OAIGPTModelToken,
-            Temperature:      OAIGPTModelTemperature,
-            TopP:             OAIGPTModelTopP,
-            PresencePenalty:  OAIGPTModelPenaltyPresence,
-            FrequencyPenalty: OAIGPTModelPenaltyFreq,
+            Model:            GPTModelName,
+            MaxTokens:        GPTModelToken,
+            Temperature:      GPTModelTemperature,
+            TopP:             GPTModelTopP,
+            PresencePenalty:  GPTModelPenaltyPresence,
+            FrequencyPenalty: GPTModelPenaltyFreq,
             Messages:         OAIGPTChatCompletion,
-            Stream:           false,
+            Stream:           *isStream,
         }

         OAIGPTResponse, err := OAIClient.CreateChatCompletion(
@@ -253,14 +251,11 @@ func GPTResponse(question string) (response string, err error) {
         var OGPTResponseText string
         var OGPTChatCompletion []Ollama.Message

-        isStream := new(bool)
-        *isStream = false
-
-        if len(strings.TrimSpace(OGPTModelPrompt)) != 0 {
+        if len(strings.TrimSpace(GPTModelPrompt)) != 0 {
             OGPTChatCompletion = []Ollama.Message{
                 {
                     Role:    "system",
-                    Content: OGPTModelPrompt,
+                    Content: GPTModelPrompt,
                 },
                 {
                     Role:    "user",
@@ -276,8 +271,19 @@ func GPTResponse(question string) (response string, err error) {
             }
         }

+        OGPTOptions := map[string]interface{}{}
+        OGPTOptionsMarshal, _ := json.Marshal(Ollama.Options{
+            Temperature:      GPTModelTemperature,
+            TopP:             GPTModelTopP,
+            PresencePenalty:  GPTModelPenaltyPresence,
+            FrequencyPenalty: GPTModelPenaltyFreq,
+        })
+
+        json.Unmarshal(OGPTOptionsMarshal, &OGPTOptions)
+
         OGPTPrompt := &Ollama.ChatRequest{
-            Model:    OGPTModelName,
+            Model:    GPTModelName,
+            Options:  OGPTOptions,
             Messages: OGPTChatCompletion,
             Stream:   isStream,
         }
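
The Ollama branch now forwards the shared tuning values by round-tripping a typed Ollama.Options struct through encoding/json to obtain the map[string]interface{} that ChatRequest.Options accepts. A minimal standalone sketch of that struct-to-map technique, with a stand-in struct since the Ollama client types are not imported here (the field set and JSON tag names are assumptions):

package main

import (
    "encoding/json"
    "fmt"
)

// options is a stand-in for Ollama.Options; the real struct and its
// JSON tags live in the Ollama client library.
type options struct {
    Temperature      float32 `json:"temperature"`
    TopP             float32 `json:"top_p"`
    PresencePenalty  float32 `json:"presence_penalty"`
    FrequencyPenalty float32 `json:"frequency_penalty"`
}

func main() {
    // Marshal the typed struct to JSON bytes...
    raw, err := json.Marshal(options{Temperature: 0.2, TopP: 1.0})
    if err != nil {
        panic(err)
    }

    // ...then unmarshal into the generic map shape that a field
    // typed map[string]interface{} expects.
    opts := map[string]interface{}{}
    if err := json.Unmarshal(raw, &opts); err != nil {
        panic(err)
    }

    fmt.Println(opts)
    // e.g. map[frequency_penalty:0 presence_penalty:0 temperature:0.2 top_p:1]
}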

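A smaller related change: isStream is hoisted out of the Ollama case because OpenAI's ChatCompletionRequest.Stream takes a plain bool (dereferenced as *isStream) while Ollama's ChatRequest.Stream takes a *bool (passed as isStream), where nil can mean "unset". A hypothetical generic helper, not in the commit, that captures the same idiom:

package main

import "fmt"

// ptr returns a pointer to its argument; for bools this is
// equivalent to isStream := new(bool); *isStream = v.
func ptr[T any](v T) *T { return &v }

func main() {
    stream := ptr(false)
    fmt.Println(*stream) // false
}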