Skip to content

Commit 5d4b898

Browse files
committed
Merge branch 'main' into portable
2 parents fbce5e3 + 989206b commit 5d4b898

40 files changed

+879
-563
lines changed

.github/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
<img src="https://img.shields.io/badge/license-GPL_3.0-indigo.svg" alt="license"/>
1515
</a>
1616
<a href="https://github.com/Em1tSan/NeuroGPT/commits/main">
17-
<img src="https://img.shields.io/badge/latest-v1.4.0-indigo.svg" alt="latest"/>
17+
<img src="https://img.shields.io/badge/latest-v1.4.2-indigo.svg" alt="latest"/>
1818
</a>
1919

2020
<br> Советуем сразу посетить <a href="https://github.com/Em1tSan/NeuroGPT/wiki#%D1%80%D1%83%D1%81%D1%81%D0%BA%D0%B8%D0%B9-%D1%8F%D0%B7%D1%8B%D0%BA">wiki проекта</a><br/>

.github/README_EN.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ We extend our gratitude to the authors.
1414
<img src="https://img.shields.io/badge/license-GPL_3.0-indigo.svg" alt="license"/>
1515
</a>
1616
<a href="https://github.com/Em1tSan/NeuroGPT/commits/main">
17-
<img src="https://img.shields.io/badge/latest-v1.4.0-indigo.svg" alt="latest"/>
17+
<img src="https://img.shields.io/badge/latest-v1.4.2-indigo.svg" alt="latest"/>
1818
</a>
1919

2020
<br> We recommend visiting <a href="https://github.com/Em1tSan/NeuroGPT/wiki#english-language">our project's wiki</a> right away.<br/>

auto_proxy.py

Lines changed: 84 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -1,65 +1,121 @@
1-
import random
2-
import requests
3-
import time
4-
import threading
1+
import random
2+
import requests
3+
import time
4+
import threading
55
import socket
6-
import redis
7-
8-
9-
# Создаем подключение к redis
10-
r = redis.StrictRedis(host='localhost', port=6379, db=0)
116

127

138
def fetch_proxies():
    """Fetch a list of candidate proxy servers from proxy-list.download.

    Returns:
        list: Proxy servers in the format "IP:Port"; empty on any failure.
    """
    url = "https://www.proxy-list.download/api/v1/get?type=https"
    try:
        # Bound the request so the background updater thread cannot hang
        # forever if the proxy-list endpoint stops responding; the original
        # call had no timeout.
        response = requests.get(url, timeout=30)
    except requests.RequestException as e:
        # A network failure here previously propagated and killed the
        # caller's refresh loop; degrade to "no proxies" instead.
        print(f"Error fetching proxies: {e}")
        return []
    if response.status_code == 200:
        # The API returns one "IP:Port" per CRLF-terminated line; drop the
        # empty trailing element produced by the final newline.
        return response.text.split("\r\n")[:-1]
    print(f"Error fetching proxies: {response.status_code}")
    return []
2020

2121

2222
def test_proxy(proxy, prompt, timeout):
23+
"""Test the given proxy server with a specified prompt and timeout.
24+
25+
Args:
26+
proxy (str): The proxy server in the format "IP:Port".
27+
prompt (str): The test prompt to be used for testing.
28+
timeout (int): The maximum time in seconds allowed for the test.
29+
"""
2330
try:
24-
ip, port = proxy.split(':')
31+
# Split IP and Port
32+
ip, port = proxy.split(':')
33+
34+
# Create a socket object
2535
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
36+
37+
# Start the timer
2638
start_time = time.time()
39+
40+
# Connect to the proxy server
2741
sock.connect((ip, int(port)))
42+
43+
# Stop the timer and calculate the elapsed time
2844
end_time = time.time()
2945
elapsed_time = end_time - start_time
46+
47+
# Print the elapsed time
48+
#print(f"Elapsed time: {elapsed_time} seconds")
49+
50+
# Close the socket
3051
sock.close()
31-
52+
53+
# Check if the elapsed time is below the timeout
3254
if elapsed_time < timeout:
3355
print(f"proxy: {proxy} ✅ | Elapsed time: {elapsed_time} seconds")
34-
r.rpush('working_proxies', proxy) # Сохраняем рабочего прокси в redis
56+
add_working_proxy(proxy)
3557
except Exception as e:
3658
pass
3759

3860

39-
def get_working_proxies(prompt, timeout=1):
40-
proxy_list = fetch_proxies()
41-
threads = []
42-
r.delete('working_proxies') # Очищаем список рабочих прокси в redis перед обновлением
61+
def add_working_proxy(proxy):
    """Append a working proxy server to the global working_proxies list.

    Args:
        proxy (str): The proxy server in the format "IP:Port".
    """
    global working_proxies
    # Probe threads may call this before update_working_proxies() has ever
    # initialised the global; without this guard the append raises
    # NameError (silently swallowed by test_proxy) and the proxy is lost.
    if "working_proxies" not in globals():
        working_proxies = []
    working_proxies.append(proxy)
69+
70+
71+
def remove_proxy(proxy):
    """Drop a proxy server from the global working_proxies list.

    Entries that are not present are silently ignored.

    Args:
        proxy (str): The proxy server in the format "IP:Port".
    """
    global working_proxies
    try:
        working_proxies.remove(proxy)
    except ValueError:
        # Proxy was never recorded (or already removed) -- nothing to do.
        pass
80+
81+
82+
def get_working_proxies(prompt, timeout=5):
    """Fetch candidate proxies and probe them all in parallel.

    Working proxies are appended to the global working_proxies list by the
    probe threads themselves (via add_working_proxy).

    Args:
        prompt (str): Test prompt forwarded to each probe.
        timeout (int, optional): Per-proxy time limit in seconds for both
            the probe and the join. Defaults to 5.
    """
    probes = []

    # Launch one probe thread per candidate as soon as it is created.
    for candidate in fetch_proxies():
        probe = threading.Thread(
            target=test_proxy, args=(candidate, prompt, timeout))
        probes.append(probe)
        probe.start()

    # Wait (bounded) for each probe; stragglers are abandoned.
    for probe in probes:
        probe.join(timeout)
51100

52101

53102
def update_working_proxies():
    """Daemon loop: rebuild the global working_proxies list forever."""
    global working_proxies
    prompt = "What is the capital of France?"
    while True:
        # Start from scratch each cycle so dead proxies age out.
        working_proxies = []
        get_working_proxies(prompt)
        print('proxies updated')
        # Refresh every 30 minutes.
        time.sleep(30 * 60)
112+
113+
114+
def get_random_proxy():
    """Pick a random entry from the global working_proxies list.

    Returns:
        str: A working proxy server in the format "IP:Port".

    Raises:
        IndexError: If no working proxies are currently available.
    """
    global working_proxies
    if not working_proxies:
        # random.choice on an empty list raises a bare, cryptic IndexError;
        # keep the exception type (backward compatible for callers) but make
        # the failure self-explanatory.
        raise IndexError("no working proxies available yet")
    return random.choice(working_proxies)

backend.py

Lines changed: 3 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -50,20 +50,7 @@ async def chat_completions(request: Request):
5050
top_p = req_data.get('top_p', 1.0)
5151
max_tokens = req_data.get('max_tokens', 4096)
5252

53-
# Получение данных о провайдерах из API
54-
# Загрузка данных о провайдерах из локального файла
55-
with open('status.json', 'r') as f:
56-
provider_data = json.load(f)
57-
58-
active_providers = [data['provider'] for data in provider_data['data'] if data['model'] == model and data['status'] == 'Active']
59-
60-
if not active_providers:
61-
return JSONResponse({"error": "No active provider found for the model"})
62-
63-
provider_name = random.choice(active_providers)
64-
provider = getattr(Provider, provider_name)
65-
66-
response = ChatCompletion.create(model=model, stream=stream, messages=messages, provider=provider, temperature=temperature, top_p=top_p, max_tokens=max_tokens, system_prompt="")
53+
response = ChatCompletion.create(model=model, stream=stream, messages=messages, temperature=temperature, top_p=top_p, max_tokens=max_tokens, system_prompt="")
6754

6855
completion_id = "".join(random.choices(string.ascii_letters + string.digits, k=28))
6956
completion_timestamp = int(time.time())
@@ -79,7 +66,7 @@ async def chat_completions(request: Request):
7966
"index": 0,
8067
"message": {
8168
"role": "assistant",
82-
"content": response.encode().decode(),
69+
"content": response,
8370
},
8471
"finish_reason": "stop",
8572
}
@@ -102,7 +89,7 @@ def streaming():
10289
{
10390
"index": 0,
10491
"delta": {
105-
"content": chunk.encode().decode(),
92+
"content": chunk,
10693
},
10794
"finish_reason": None,
10895
}

check.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
from fp.fp import FreeProxy
1111
import threading
1212
import socket
13+
from auto_proxy import get_random_proxy, update_working_proxies
1314

1415
def process_provider(provider_name, model_name):
1516
try:
@@ -22,16 +23,17 @@ def process_provider(provider_name, model_name):
2223
}
2324

2425
# Проверяем только модель 'gpt-3.5-turbo' для провайдеров Wewordle и Qidinam
25-
if provider_name in ['Wewordle', 'Qidinam', 'DeepAi', 'GetGpt', 'Yqcloud'] and model_name != 'gpt-3.5-turbo':
26+
if provider_name in ['Wewordle', 'Qidinam', 'DeepAi', 'GetGpt', 'Yqcloud', 'WewordleApple'] and model_name != 'gpt-3.5-turbo':
2627
provider_status['status'] = 'Inactive'
2728
print(f"{provider_name} with {model_name} skipped")
2829
return provider_status
2930

3031
try:
32+
proxy = get_random_proxy().decode("utf-8")
33+
formatted_proxy = f'https://{proxy}'
3134

3235
response = ChatCompletion.create(model=model_name, provider=p,
3336
messages=[{"role": "user", "content": "Say 'Hello World!'"}], stream=False)
34-
print(f"Using proxy: {proxy}")
3537
if any(word in response for word in ['Hello World', 'Hello', 'hello', 'world']):
3638
provider_status['status'] = 'Active'
3739
print(f"{provider_name} with {model_name} say: {response}")
@@ -77,7 +79,7 @@ def main():
7779
json.dump(status, f)
7880

7981
# Pause for 10 minutes before starting the next cycle
80-
#time.sleep(600)
82+
time.sleep(600)
8183

8284
if __name__ == "__main__":
8385
main()

g4f/Provider/AItianhu.py

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -7,17 +7,16 @@
77

88

99
class AItianhu(BaseProvider):
10-
url = "https://ixlc0.aitianhu.site/api/chat-process"
11-
working = True
12-
supports_gpt_4 = True
10+
url = "https://www.aitianhu.com/"
11+
working = False
12+
supports_gpt_35_turbo = True
1313

1414
@staticmethod
1515
def create_completion(
1616
model: str,
1717
messages: list[dict[str, str]],
18-
stream: bool,
19-
**kwargs: Any,
20-
) -> CreateResult:
18+
stream: bool, **kwargs: Any) -> CreateResult:
19+
2120
base = ""
2221
for message in messages:
2322
base += "%s: %s\n" % (message["role"], message["content"])
@@ -33,7 +32,7 @@ def create_completion(
3332
"temperature": kwargs.get("temperature", 0.8),
3433
"top_p": kwargs.get("top_p", 1),
3534
}
36-
url = "https://ixlc0.aitianhu.site/api/chat-process"
35+
url = "https://www.aitianhu.com/api/chat-process"
3736
response = requests.post(url, headers=headers, json=data)
3837
response.raise_for_status()
3938
lines = response.text.strip().split("\n")

g4f/Provider/Acytoo.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,6 @@ class Acytoo(BaseProvider):
1010
url = 'https://chat.acytoo.com/'
1111
working = True
1212
supports_gpt_35_turbo = True
13-
supports_stream = True
1413

1514
@classmethod
1615
def create_completion(
@@ -35,14 +34,14 @@ def _create_header():
3534
}
3635

3736

38-
def _create_payload(messages: list[dict[str, str]], temperature, model):
37+
def _create_payload(messages: list[dict[str, str]], temperature):
3938
payload_messages = [
4039
message | {'createdAt': int(time.time()) * 1000} for message in messages
4140
]
4241

4342
return {
4443
'key' : '',
45-
'model' : model,
44+
'model' : 'gpt-3.5-turbo',
4645
'messages' : payload_messages,
4746
'temperature' : temperature,
4847
'password' : ''

g4f/Provider/AiService.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,8 @@
55

66

77
class AiService(BaseProvider):
8-
url = "https://aiservice.vercel.app/api/chat/answer"
9-
working = True
8+
url = "https://aiservice.vercel.app/"
9+
working = False
1010
supports_gpt_35_turbo = True
1111

1212
@staticmethod

g4f/Provider/Aichat.py

Lines changed: 4 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -5,20 +5,17 @@
55

66

77
class Aichat(BaseProvider):
8-
url = "https://chat-gpt.org/chat"
9-
working = True
10-
supports_stream = True
8+
url = "https://chat-gpt.org/chat"
9+
working = True
1110
supports_gpt_35_turbo = True
1211

1312
@staticmethod
1413
def create_completion(
1514
model: str,
1615
messages: list[dict[str, str]],
17-
stream: bool,
18-
**kwargs: Any,
19-
) -> CreateResult:
16+
stream: bool, **kwargs: Any) -> CreateResult:
17+
2018
base = ""
21-
2219
for message in messages:
2320
base += "%s: %s\n" % (message["role"], message["content"])
2421
base += "assistant:"

0 commit comments

Comments
 (0)