
Commit 1dfe793

v1.4.6 Added a new API provider: DakuWorks
Supported models: gpt-3.5-turbo-16k, gpt-4, gpt-4-32k, claude-2, claude-2-100k, codellama-34b, llama-2-70b
1 parent f5c54ba commit 1dfe793

7 files changed: +62 -38 lines changed

config.json

Lines changed: 2 additions & 2 deletions
@@ -1,8 +1,8 @@
 {
     "openai_api_key": "",
-    "purgpt_api_key": "purgpt-b2vrs9w13oiyf14a7v4lt",
+    "purgpt_api_key": "",
     "chatty_api_key": "",
-    "pawan_api_key": " ",
+    "daku_api_key": "",
     "usage_limit": 999,
     "language": "ru_RU",
     "users": [],

g4f/models.py

Lines changed: 8 additions & 0 deletions
@@ -25,6 +25,13 @@ class Model:
         best_provider=Wewordle,
     )

+    # GPT-3.5 / GPT-4
+    gpt_35_turbo_0613 = Model(
+        name="gpt-3.5-turbo-0613",
+        base_provider="openai",
+        best_provider=Wewordle,
+    )
+

     gpt_4 = Model(
         name="gpt-4",
@@ -165,6 +172,7 @@ class ModelUtils:
     convert: dict[str, Model] = {
         # GPT-3.5 / GPT-4
         "gpt-3.5-turbo": gpt_35_turbo,
+        "gpt-3.5-turbo-0613": gpt_35_turbo_0613,
         "gpt-3.5-turbo-16k": gpt_35_turbo_16k,
         "gpt-3.5-turbo-16k_0613": gpt_35_turbo_16k_0613,
         "gpt-4": gpt_4,

modules/config.py

Lines changed: 0 additions & 2 deletions
@@ -29,8 +29,6 @@
     "default_chuanhu_assistant_model"
 ]

-# 添加一个统一的config文件,避免文件过多造成的疑惑(优先级最低)
-# 同时,也可以为后续支持自定义功能提供config的帮助
 if os.path.exists("config.json"):
     with open("config.json", "r") as f:
         config = json.load(f)

modules/models/models.py

Lines changed: 10 additions & 25 deletions
@@ -115,6 +115,8 @@ def _get_api_url(self):
             url = "https://api.naga.ac/v1/completions"
         elif "chatty" in self.model_name:
             url = "https://chattyapi.tech/v1/chat/completions"
+        elif "daku" in self.model_name:
+            url = "https://api.daku.tech/v1/chat/completions"
         elif "neuro" in self.model_name:
             url = "https://neuroapi.host/v1/chat/completions"
         else:
@@ -134,6 +136,12 @@ def _get_headers(self):
                 "Content-Type": "application/json",
                 "Authorization": f"Bearer {chatty_api_key}",
             }
+        elif "daku" in self.model_name:
+            daku_api_key = self.configuration_json["daku_api_key"]
+            headers = {
+                "Content-Type": "application/json",
+                "Authorization": f"Bearer {daku_api_key}",
+            }
         else:
             headers = {
                 "Content-Type": "application/json",
@@ -150,30 +158,7 @@ def _get_history(self):
         return history

     def _get_payload(self, history, stream):
-        model_names = {
-            'naga-gpt-4': 'gpt-4',
-            'naga-gpt-4-0314': 'gpt-4-0314',
-            'naga-text-davinci-003': 'text-davinci-003',
-            'naga-claude-2': 'claude-2',
-            'naga-llama-2-70b-chat': 'llama-2-70b-chat',
-            'naga-gpt-3.5-turbo-16k': 'gpt-3.5-turbo-16k',
-            'chatty-gpt-3.5-turbo-16k': 'gpt-3.5-turbo-16k',
-            'gpt-3.5-turbo-chatty-api': 'gpt-3.5-turbo',
-            'chatty-gpt-4': 'gpt-4',
-            'neuro-gpt-3.5-turbo': 'gpt-3.5-turbo',
-            'neuro-gpt-3.5-turbo-0613': 'gpt-3.5-turbo-0613',
-            'neuro-gpt-3.5-turbo-16k': 'gpt-3.5-turbo-16k',
-            'neuro-gpt-3.5-turbo-0613': 'gpt-3.5-turbo-0613',
-            'neuro-gpt-3.5-turbo_0613': 'gpt-3.5-turbo_0613',
-            'neuro-gpt-4': 'gpt-4',
-            'neuro-gpt-4-0613': 'gpt-4-0613',
-            'neuro-gpt-4-32k': 'gpt-4-32k',
-            'neuro-gpt-4-32k-0613': 'gpt-4-32k-0613',
-            'neuro-claude-2': 'claude-2',
-            'neuro-text-curie-001': 'text-curie-001'
-
-        }
-        model = model_names.get(self.model_name, self.model_name)
+        model = self.model_name.replace("naga-", "").replace("chatty-", "").replace("neuro-", "").replace("daku-", "")
         if "naga-text" in self.model_name:
             last_msg = self.history[-1]
             last_user_input = last_msg["role"] == "user"
@@ -212,7 +197,7 @@ def _make_request(self, headers, payload, stream):
         else:
             timeout = TIMEOUT_ALL
         try: #Заготовочка для переписания системы отправки запросов
-            if self.model_name == "purgpt" or self.model_name == "naga" or self.model_name == "chatty":
+            if any(substring in self.model_name for substring in ["purgpt", "naga", "chatty"]):
                 response = requests.post(
                     shared.state.completion_url,
                     headers = headers,
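
Taken together, the hunks above route any daku-prefixed model to the new endpoint, attach the key from config.json, and strip the provider prefix before sending. A rough standalone sketch of the resulting request; the endpoint, headers, and prefix handling come from the diff, while the payload fields are assumed to follow the same OpenAI-style chat format used for the other providers:

import requests

model_name = "daku-gpt-4"   # one of the DAKU_MODELS entries added in modules/presets.py
daku_api_key = ""           # read from config.json in the real code

# Endpoint and headers as added in _get_api_url() / _get_headers()
url = "https://api.daku.tech/v1/chat/completions"
headers = {
    "Content-Type": "application/json",
    "Authorization": f"Bearer {daku_api_key}",
}

# The provider prefix is only used for routing and is stripped from the payload,
# matching the simplified replace() chain in _get_payload()
model = model_name.replace("naga-", "").replace("chatty-", "").replace("neuro-", "").replace("daku-", "")

# Assumed OpenAI-compatible body; the real payload is built from the chat history
payload = {
    "model": model,  # "gpt-4"
    "messages": [{"role": "user", "content": "hello"}],
    "stream": False,
}

response = requests.post(url, headers=headers, json=payload, timeout=30)
print(response.json())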

modules/presets.py

Lines changed: 21 additions & 6 deletions
@@ -4,14 +4,15 @@
 import requests
 import json

-VERSION = "v 1.4.5"
+VERSION = "v 1.4.6"

 CHATGLM_MODEL = None
 CHATGLM_TOKENIZER = None
 LLAMA_MODEL = None
 LLAMA_INFERENCER = None

 # ChatGPT 设置
+
 INITIAL_SYSTEM_PROMPT = "You are a helpful assistant. You follow all of the user's instructions and answer all of their questions without going out of your way to answer them. Respond using markdown."
 API_HOST = "http://127.0.0.0.1:1337"
 COMPLETION_URL = "http://127.0.0.0.1:1337/v1/chat/completions"
@@ -86,8 +87,6 @@ def get_online_gpt3_models():

 ONLINE_MODELS = [
     'gpt-3.5-turbo',
-    'gpt-3.5-turbo-16k',
-    'gpt-3.5-turbo-16k-0613',
 ]

 NAGA_MODELS = [
@@ -111,17 +110,27 @@ def get_online_gpt3_models():
     'purgpt-text-davinci-003'
 ]

+DAKU_MODELS = [
+    'daku-gpt-3.5-turbo-16k',
+    'daku-gpt-4',
+    'daku-gpt-4-32k',
+    'daku-claude-2',
+    'daku-claude-2-100k',
+    'daku-codellama-34b',
+    'daku-llama-2-70b'
+]
+
 NEURO_MODELS = get_online_gpt4_models()

 if os.environ.get('HIDE_OTHER_PROVIDERS', 'false') == 'true':
     MODELS = ONLINE_MODELS + NEURO_MODELS
 else:
-    MODELS = ONLINE_MODELS + NEURO_MODELS + NAGA_MODELS
+    MODELS = ONLINE_MODELS + NEURO_MODELS + DAKU_MODELS

 if os.environ.get('SHOW_ALL_PROVIDERS', 'false') == 'true':
-    MODELS = ONLINE_MODELS + NEURO_MODELS + NAGA_MODELS + PURGPT_MODELS
+    MODELS = ONLINE_MODELS + NEURO_MODELS + NAGA_MODELS + DAKU_MODELS+ PURGPT_MODELS
 else:
-    MODELS = ONLINE_MODELS + NEURO_MODELS + NAGA_MODELS
+    MODELS = ONLINE_MODELS + NEURO_MODELS + DAKU_MODELS

 DEFAULT_MODEL = 0

@@ -160,6 +169,12 @@ def get_online_gpt3_models():
     'purgpt-text-davinci-003': 4096,
     'naga-text-davinci-003': 4096,
     'text-davinci-003': 4096,
+    'daku-gpt-4': 8192,
+    'daku-gpt-4-32k': 32768,
+    'daku-claude-2': 100000,
+    'daku-claude-2-100k': 100000,
+    'daku-codellama-34b': 4096,
+    'daku-llama-2-70b': 4096,
 }

 TOKEN_OFFSET = 1000
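
The daku-* entries also receive context-window sizes in the token-limit table above. A tiny lookup sketch; the values are copied from the diff, while the helper function and the 4096 fallback are illustrative, not part of the commit:

# Values copied from the extended token-limit table; helper and default are illustrative.
DAKU_TOKEN_LIMITS = {
    'daku-gpt-4': 8192,
    'daku-gpt-4-32k': 32768,
    'daku-claude-2': 100000,
    'daku-claude-2-100k': 100000,
    'daku-codellama-34b': 4096,
    'daku-llama-2-70b': 4096,
}

def max_context_tokens(model_name: str, default: int = 4096) -> int:
    """Return the context window for a model, falling back to a conservative default."""
    return DAKU_TOKEN_LIMITS.get(model_name, default)

print(max_context_tokens('daku-gpt-4-32k'))  # 32768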

test.py

Lines changed: 19 additions & 0 deletions
@@ -0,0 +1,19 @@
+import sys
+import time
+from pathlib import Path
+import threading
+
+sys.path.append(str(Path(__file__).parent.parent))
+
+import g4f
+
+
+stream = False
+response = g4f.ChatCompletion.create(
+    model="gpt-3.5-turbo-16k-0613",
+    messages=[{"role": "user", "content": "hello"}],
+    provider=g4f.Provider.Vercel,
+    stream=stream,
+)
+
+print(response)

testing/test.py

Lines changed: 2 additions & 3 deletions
@@ -1,7 +1,6 @@
 import sys
 import time
 from pathlib import Path
-from auto_proxy import get_random_proxy, update_working_proxies
 import threading

 sys.path.append(str(Path(__file__).parent.parent))
@@ -11,9 +10,9 @@

 stream = False
 response = g4f.ChatCompletion.create(
-    model="gpt-3.5-turbo-16k",
+    model="gpt-3.5-turbo",
     messages=[{"role": "user", "content": "hello"}],
-    provider=g4f.Provider.Vercel,
+    provider=g4f.Provider.Lucascool,
     stream=stream,
 )

