
Commit b4f3183

v 1.4.2 - added the neuro-gpt-3.5-turbo-16k model

1 parent 08d8e54 commit b4f3183

File tree

auto_proxy.py
check.py
g4f/models.py
modules/models/models.py
modules/presets.py

5 files changed: +81 additions, -7 deletions


auto_proxy.py

Lines changed: 65 additions & 0 deletions
@@ -0,0 +1,65 @@
+import random
+import requests
+import time
+import threading
+import socket
+import redis
+
+
+# Create a connection to Redis
+r = redis.StrictRedis(host='localhost', port=6379, db=0)
+
+
+def fetch_proxies():
+    url = "https://api.proxyscrape.com/v2/?request=displayproxies&protocol=http"
+    response = requests.get(url)
+    if response.status_code == 200:
+        return response.text.split("\r\n")[:-1]
+    print(f"Error fetching proxies: {response.status_code}")
+    return []
+
+
+def test_proxy(proxy, prompt, timeout):
+    try:
+        ip, port = proxy.split(':')
+        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        start_time = time.time()
+        sock.connect((ip, int(port)))
+        end_time = time.time()
+        elapsed_time = end_time - start_time
+        sock.close()
+
+        if elapsed_time < timeout:
+            print(f"proxy: {proxy} ✅ | Elapsed time: {elapsed_time} seconds")
+            r.rpush('working_proxies', proxy)  # Save the working proxy to Redis
+    except Exception:
+        pass
+
+
+def get_working_proxies(prompt, timeout=1):
+    proxy_list = fetch_proxies()
+    threads = []
+    r.delete('working_proxies')  # Clear the Redis list of working proxies before refreshing it
+
+    for proxy in proxy_list:
+        thread = threading.Thread(target=test_proxy, args=(proxy, prompt, timeout))
+        threads.append(thread)
+        thread.start()
+
+    for t in threads:
+        t.join(timeout)
+
+
+def update_working_proxies():
+    test_prompt = "What is the capital of France?"
+
+    while True:
+        get_working_proxies(test_prompt)
+        print('proxies updated')
+        time.sleep(1800)  # Refresh the proxy list every 30 minutes
+
+
+def get_random_proxy():
+    # Pick a random proxy from the working set
+    working_proxies = r.lrange('working_proxies', 0, -1)
+    return random.choice(working_proxies)
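
Taken together, auto_proxy.py implements a small self-refreshing proxy pool: fetch_proxies pulls candidate HTTP proxies from proxyscrape.com, test_proxy keeps those that accept a TCP connection within the timeout, and Redis serves as the shared store. A minimal consumption sketch (the daemon-thread startup and the .decode() call are assumptions, not part of this commit; redis-py returns bytes unless the client is created with decode_responses=True):

    import threading
    from auto_proxy import update_working_proxies, get_random_proxy

    # Refresh the pool in the background; daemon=True lets the process exit freely.
    threading.Thread(target=update_working_proxies, daemon=True).start()

    # Later, once the first refresh has populated Redis:
    proxy = get_random_proxy().decode('utf-8')  # lrange() returns bytes by default
    proxies = {"http": f"http://{proxy}", "https": f"http://{proxy}"}
    # e.g. requests.get(url, proxies=proxies, timeout=5)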

check.py

Lines changed: 6 additions & 1 deletion
@@ -7,6 +7,9 @@
 import concurrent.futures
 import asyncio
 from g4f import ChatCompletion
+from fp.fp import FreeProxy
+import threading
+import socket
 
 def process_provider(provider_name, model_name):
     try:
@@ -25,8 +28,10 @@ def process_provider(provider_name, model_name):
         return provider_status
 
     try:
+
         response = ChatCompletion.create(model=model_name, provider=p,
                                          messages=[{"role": "user", "content": "Say 'Hello World!'"}], stream=False)
+        print(f"Using proxy: {proxy}")
         if any(word in response for word in ['Hello World', 'Hello', 'hello', 'world']):
             provider_status['status'] = 'Active'
             print(f"{provider_name} with {model_name} say: {response}")
@@ -72,7 +77,7 @@ def main():
         json.dump(status, f)
 
     # Pause for 10 minutes before starting the next cycle
-    time.sleep(600)
+    #time.sleep(600)
 
 if __name__ == "__main__":
     main()
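
Note that the added print(f"Using proxy: {proxy}") relies on a proxy variable that none of the hunks shown here define, so the full file must bind it earlier in process_provider. One plausible shape using the newly imported FreeProxy (the call below follows the fp package's public interface, but its placement here is an assumption):

    from fp.fp import FreeProxy

    # Hypothetical: bind a proxy before calling ChatCompletion so the
    # added print statement has something to report.
    proxy = FreeProxy(timeout=1, rand=True).get()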

g4f/models.py

Lines changed: 2 additions & 2 deletions
@@ -22,13 +22,13 @@ class Model:
 gpt_35_turbo = Model(
     name="gpt-3.5-turbo",
     base_provider="openai",
-    best_provider=Yqcloud
+    best_provider=Wewordle
 )
 
 gpt_35_turbo_16k = Model(
     name="gpt-3.5-turbo-16k",
     base_provider="openai",
-    best_provider=Vercel,
+    best_provider=Freet,
 )
 
 gpt_35_turbo_16k_0613 = Model(
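
For reference, best_provider is the provider g4f falls back to when ChatCompletion.create is called without an explicit provider argument, so after this change a plain 16k request is served by Freet. A minimal call, mirroring the one in check.py above:

    from g4f import ChatCompletion

    # No provider argument: g4f resolves "gpt-3.5-turbo-16k" to its Model
    # entry and uses best_provider (Freet after this commit).
    response = ChatCompletion.create(
        model="gpt-3.5-turbo-16k",
        messages=[{"role": "user", "content": "Say 'Hello World!'"}],
        stream=False,
    )
    print(response)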

modules/models/models.py

Lines changed: 3 additions & 1 deletion
@@ -160,11 +160,13 @@ def _get_payload(self, history, stream):
     'chatty-gpt-3.5-turbo-16k': 'gpt-3.5-turbo-16k',
     'gpt-3.5-turbo-chatty-api': 'gpt-3.5-turbo',
     'chatty-gpt-4': 'gpt-4',
+    'neuro-gpt-3.5-turbo': 'gpt-3.5-turbo',
+    'neuro-gpt-3.5-turbo-0613': 'gpt-3.5-turbo-0613',
+    'neuro-gpt-3.5-turbo-16k': 'gpt-3.5-turbo-16k',
     'neuro-gpt-4': 'gpt-4',
     'neuro-gpt-4-0613': 'gpt-4-0613',
     'neuro-gpt-4-32k': 'gpt-4-32k',
     'neuro-gpt-4-32k-0613': 'gpt-4-32k-0613',
-
 
 }
 model = model_names.get(self.model_name, self.model_name)
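
The line just below the dictionary, model = model_names.get(self.model_name, self.model_name), does the actual aliasing: dict.get with the original name as the default translates known neuro-/chatty- aliases to upstream model names and passes anything unrecognized through unchanged. In isolation:

    model_names = {'neuro-gpt-3.5-turbo-16k': 'gpt-3.5-turbo-16k'}

    # Known alias: translated to the upstream name.
    print(model_names.get('neuro-gpt-3.5-turbo-16k', 'neuro-gpt-3.5-turbo-16k'))  # gpt-3.5-turbo-16k
    # Unknown name: falls back to itself.
    print(model_names.get('my-custom-model', 'my-custom-model'))  # my-custom-model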

modules/presets.py

Lines changed: 5 additions & 3 deletions
@@ -4,7 +4,7 @@
 import requests
 import json
 
-VERSION = "v 1.4.1"
+VERSION = "v 1.4.2"
 
 CHATGLM_MODEL = None
 CHATGLM_TOKENIZER = None
@@ -55,7 +55,7 @@ def get_online_gpt4_models():
         model_info = provider["model"]
         model_status = provider["status"]
 
-        if model_status == "Active" and model_info.startswith('gpt-4'):
+        if model_status == "Active" and (model_info.startswith('gpt-4') or model_info.startswith('gpt-3.5-turbo-16k')):
             online_models.add("neuro-" + model_info)
 
     return list(online_models)
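
The parentheses in that condition are load-bearing: Python's and binds tighter than or, so without them the status check would apply only to the gpt-4 branch and inactive gpt-3.5-turbo-16k providers would slip into the online list. A two-line demonstration:

    status, model = "Inactive", "gpt-3.5-turbo-16k"
    print(status == "Active" and model.startswith('gpt-4') or model.startswith('gpt-3.5-turbo-16k'))    # True  (unintended)
    print(status == "Active" and (model.startswith('gpt-4') or model.startswith('gpt-3.5-turbo-16k')))  # False (intended)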
@@ -76,7 +76,9 @@ def get_online_gpt3_models():
 
 
 
-ONLINE_MODELS = get_online_gpt3_models()
+ONLINE_MODELS = [
+    'gpt-3.5-turbo',
+]
 
 CHIMERA_MODELS = [
     'chimera-gpt-3.5-turbo-16k',
