
Commit 8ae2e5e

Merge branch 'main' into portable

2 parents fb4132a + d6fac03


43 files changed: +1750 -817 lines

.github/README.md

Lines changed: 1 addition & 1 deletion
@@ -61,7 +61,7 @@
 - Built-in jailbreaks for removing censorship
 - Conversation context
 - Endpoint mode for working with the API
-- Fine-tuning the model
+- Changing generation parameters for GPT models
 - Saving and loading dialogue history

 <div align="center">
.github/README_EN.md

Lines changed: 1 addition & 1 deletion
@@ -61,7 +61,7 @@ Given that this project doesn't use an official API but relies on reverse-engine
 - Built-in jailbreaks for removing censorship
 - Conversation context
 - API endpoint
-- Fine-tuning a model
+- Setting generation parameters for GPT models
 - Saving and loading dialogue history

 <div align="center">
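
The renamed feature refers to passing sampling options such as temperature through the project's OpenAI-style endpoint. A minimal sketch of what that could look like against the local backend; the exact parameter set is an assumption, with only temperature visible in the provider code further down:

import requests

# Hypothetical request to the local endpoint served by endpoint.py;
# the payload follows the OpenAI chat-completions shape used by backend.py.
resp = requests.post(
    "http://localhost:1337/chat/completions",
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Hello!"}],
        "temperature": 0.5,  # generation parameter forwarded to the provider
        "stream": False,
    },
)
print(resp.json())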

backend.py

Lines changed: 97 additions & 37 deletions
@@ -4,6 +4,8 @@
 import random
 import string
 import asyncio
+import async_timeout
+import aiohttp, aiofiles
 import requests
 import pytz
 import logging
@@ -13,10 +15,15 @@
 from fastapi.responses import JSONResponse, StreamingResponse
 from starlette.middleware.cors import CORSMiddleware
 from typing import Any
+import g4f
 from g4f import ChatCompletion, Provider, BaseProvider, models
 from cachetools import LRUCache
-import httpx
-import check
+
+import aiofiles
+import async_timeout
+
+from fp.fp import FreeProxy
+import concurrent.futures

 app = FastAPI()
 app.add_middleware(GZipMiddleware)
@@ -27,18 +34,18 @@
     allow_headers=["*"],
 )

-# Cache for the data fetched from the API
-api_cache = LRUCache(maxsize=1000)
+cache_ttl_secs = 600
+api_cache = LRUCache(maxsize=1000)
+
+def get_proxy():
+    proxy = FreeProxy(rand=True, timeout=1).get()
+    return proxy

-# Async function for fetching data from the API
-async def get_data_from_api(url: str) -> Any:
-    if url in api_cache:
-        return api_cache[url]
-    else:
-        async with httpx.AsyncClient() as client:
-            response = await client.get(url)
-            data = response.json()
-            api_cache[url] = data
+async def get_data_from_api(url, session):
+    async with async_timeout.timeout(cache_ttl_secs):
+        async with session.get(url) as response:
+            data = await response.json()
+            api_cache[url] = (data, datetime.now())
     return data

 @app.post("/chat/completions")
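
The rewritten helper now stores a (data, timestamp) tuple per URL, while async_timeout bounds the request itself (note it reuses cache_ttl_secs as the request timeout). The expiry check is not part of this hunk; a minimal sketch, not in the commit, of a read path matching the new entry format (get_fresh_data is a hypothetical name):

from datetime import datetime

# Sketch of a lookup that honours the (data, fetched_at) tuples written
# by get_data_from_api, re-fetching once an entry is older than the TTL.
async def get_fresh_data(url, session):
    if url in api_cache:
        data, fetched_at = api_cache[url]
        if (datetime.now() - fetched_at).total_seconds() < cache_ttl_secs:
            return data
    return await get_data_from_api(url, session)
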
@@ -93,7 +100,7 @@ async def chat_completions(request: Request):
             },
         }

-    async def streaming():
+    def streaming():
         for chunk in response:
             completion_data = {
                 "id": f"chatcmpl-{completion_id}",
@@ -110,11 +117,11 @@ async def streaming():
                     }
                 ],
             }
-
+
             content = json.dumps(completion_data, separators=(",", ":"))
             yield f"data: {content}\n\n"
-            await asyncio.sleep(0.1)
-
+            time.sleep(0.1)
+
         end_completion_data: dict[str, Any] = {
             "id": f"chatcmpl-{completion_id}",
             "object": "chat.completion.chunk",
@@ -130,7 +137,7 @@ async def streaming():
         }
         content = json.dumps(end_completion_data, separators=(",", ":"))
         yield f"data: {content}\n\n"
-
+
     return StreamingResponse(streaming(), media_type='text/event-stream')

 @app.get("/v1/dashboard/billing/subscription")
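
The generator now yields ordinary server-sent-event frames (data: {...}\n\n) from a synchronous function; Starlette iterates sync generators in a threadpool, so the blocking time.sleep(0.1) between chunks does not stall the event loop. A minimal client sketch for consuming the stream, with the URL and port assumed from endpoint.py:

import json
import requests

# Hypothetical streaming client: read SSE frames line by line
# and print each content delta as it arrives.
with requests.post(
    "http://localhost:1337/chat/completions",
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Hi"}],
        "stream": True,
    },
    stream=True,
) as resp:
    for line in resp.iter_lines():
        if line.startswith(b"data: "):
            chunk = json.loads(line[len(b"data: "):])
            delta = chunk["choices"][0].get("delta", {})
            print(delta.get("content", ""), end="", flush=True)
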
@@ -248,41 +255,94 @@ async def get_providers():
         pass
     return JSONResponse(providers_data)

-def setup_logging():
-    root_logger = logging.getLogger()
-    root_logger.setLevel(logging.DEBUG)
+def process_provider(provider_name, model_name):
+    try:
+        p = getattr(g4f.Provider, provider_name)
+        provider_status = {
+            "provider": provider_name,
+            "model": model_name,
+            "url": p.url,
+            "status": ""
+        }
+
+        # Only check the 'gpt-3.5-turbo' model for the Wewordle and Qidinam providers
+        if provider_name in ['Wewordle', 'Qidinam', 'DeepAi', 'GetGpt', 'Yqcloud'] and model_name != 'gpt-3.5-turbo':
+            provider_status['status'] = 'Inactive'
+            #print(f"{provider_name} with {model_name} skipped")
+            return provider_status

-    handler = logging.StreamHandler()
-    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-    handler.setFormatter(formatter)
+        try:
+            response = ChatCompletion.create(model=model_name, provider=p,
+                messages=[{"role": "user", "content": "Say 'Hello World!'"}], stream=False)
+            if any(word in response for word in ['Hello World', 'Hello', 'hello', 'world']):
+                provider_status['status'] = 'Active'
+                #print(f"{provider_name} with {model_name} say: {response}")
+            else:
+                provider_status['status'] = 'Inactive'
+                #print(f"{provider_name} with {model_name} say: Inactive")
+        except Exception as e:
+            provider_status['status'] = 'Inactive'
+            # print(f"{provider_name} with {model_name} say: Error")
+
+        return provider_status
+    except:
+        return None
+
+async def run_check_script():
+    session = aiohttp.ClientSession()
+    while True:
+        models = [model for model in g4f.models.ModelUtils.convert if model.startswith('gpt-') or model.startswith('claude') or model.startswith('text-')]
+        providers = [provider for provider in dir(g4f.Provider) if not provider.startswith('__')]
+
+        status = {'data': []}
+        with concurrent.futures.ThreadPoolExecutor() as executor:
+            futures = []
+            for provider_name in providers:
+                for model_name in models:
+                    future = executor.submit(process_provider, provider_name, model_name)
+                    futures.append(future)
+
+            for future in concurrent.futures.as_completed(futures):
+                result = future.result()
+                if result is not None and result['status'] == 'Active':
+                    status['data'].append(result)

-    root_logger.addHandler(handler)
+        print(status)
+        status['key'] = "test"
+        tz = pytz.timezone('Asia/Shanghai')
+        now = datetime.now(tz)
+        print(now)
+        status['time'] = now.strftime("%Y-%m-%d %H:%M:%S")
+
+        if status['data']:
+            # Use aiofiles here to write the file asynchronously
+            async with aiofiles.open('status.json', 'w') as f:
+                await f.write(json.dumps(status))
+
+        # Pause for 6 minutes before starting the next cycle
+        time.sleep(360)

 # Async function for refreshing the cached API data
 async def update_api_cache():
     while True:
         try:
             # Refresh the data every 10 minutes
-            await asyncio.sleep(600)
+            await asyncio.sleep(360)
             api_cache.clear()
         except:
             pass

 # Launch the asynchronous tasks
 async def run_tasks():
-    tasks = [
-        asyncio.create_task(update_api_cache())
-    ]
-    await asyncio.gather(*tasks)
-
+    while True:
+        await asyncio.gather(run_check_script())
+        await asyncio.sleep(300)
+
 # Launch the application
 def main():
-    setup_logging()
-    tz = pytz.timezone('Asia/Shanghai')
-    now = datetime.now(tz)
-    print(now)
-    asyncio.run(run_tasks())
-    check.main()
+    loop = asyncio.get_event_loop()
+    loop.run_until_complete(run_tasks())
+    loop.close()

 if __name__ == "__main__":
     main()
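As merged, run_tasks only awaits run_check_script (update_api_cache is no longer scheduled), and the blocking time.sleep(360) inside an async function stalls the event loop for the whole pause. A sketch, not part of this commit, of one way to keep both tasks running with non-blocking waits; collect_provider_status is a hypothetical synchronous helper standing in for the ThreadPoolExecutor section above:

import asyncio

# Sketch: run the provider check off the event loop and pause with an
# awaitable sleep so other coroutines keep running between cycles.
async def run_check_loop():
    while True:
        status = await asyncio.to_thread(collect_provider_status)  # hypothetical helper
        if status['data']:
            async with aiofiles.open('status.json', 'w') as f:
                await f.write(json.dumps(status))
        await asyncio.sleep(360)

async def run_tasks():
    await asyncio.gather(run_check_loop(), update_api_cache())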

check.py

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@ def process_provider(provider_name, model_name):
     }

     # Only check the 'gpt-3.5-turbo' model for the Wewordle and Qidinam providers
-    if provider_name in ['Wewordle', 'Qidinam', 'DeepAi', 'ChatgptLogin'] and model_name != 'gpt-3.5-turbo':
+    if provider_name in ['Wewordle', 'Qidinam', 'DeepAi', 'GetGpt', 'Yqcloud'] and model_name != 'gpt-3.5-turbo':
         provider_status['status'] = 'Inactive'
         print(f"{provider_name} with {model_name} skipped")
         return provider_status

endpoint.py

Lines changed: 0 additions & 31 deletions
@@ -1,38 +1,7 @@
-import os
-import time
-import json
-import random
-import time
-
-from fastapi import FastAPI, Request
-from fastapi.middleware.gzip import GZipMiddleware
-from fastapi.responses import JSONResponse, StreamingResponse
-import json
-from typing import List
-import os
-import random
-import time
-import asyncio
-from starlette.middleware.cors import CORSMiddleware
-
-import logging
 import uvicorn

-import g4f
-
 from multiprocessing import Process

-
-def setup_logging():
-    root_logger = logging.getLogger()
-    root_logger.setLevel(logging.DEBUG)
-
-    handler = logging.StreamHandler()
-    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-    handler.setFormatter(formatter)
-
-    root_logger.addHandler(handler)
-
 def run_api_server():
     uvicorn.run("backend:app", host="0.0.0.0", port=1337)
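What survives the cleanup is just the launcher: uvicorn serving backend:app, with multiprocessing imported to host it. The Process usage itself is not shown in this diff; a minimal sketch of how the remaining pieces are presumably wired together (the __main__ block is an assumption):

from multiprocessing import Process

import uvicorn

def run_api_server():
    uvicorn.run("backend:app", host="0.0.0.0", port=1337)

# Hypothetical launcher: host the API in a child process and
# block until it exits.
if __name__ == "__main__":
    api = Process(target=run_api_server)
    api.start()
    api.join()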

g4f/Provider/Acytoo.py

Lines changed: 22 additions & 27 deletions
@@ -7,48 +7,43 @@


 class Acytoo(BaseProvider):
-    url = "https://chat.acytoo.com/api/completions"
-    working = True
-    supports_stream = True
+    url = 'https://chat.acytoo.com/'
+    working = True
     supports_gpt_35_turbo = True
-    supports_gpt_35_turbo_16k = True
-    supports_gpt_4 = True
-    supports_gpt_4_0613 = True
-    supports_gpt_4_32k = True
-
+    supports_stream = True

-    @staticmethod
+    @classmethod
     def create_completion(
+        cls,
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
-        headers = _create_header()
-        payload = _create_payload(messages, kwargs.get('temperature', 0.5))
-
-        url = "https://chat.acytoo.com/api/completions"
-        response = requests.post(url=url, headers=headers, json=payload)
+        stream: bool, **kwargs: Any) -> CreateResult:
+
+        response = requests.post(f'{cls.url}api/completions',
+            headers=_create_header(), json=_create_payload(messages, kwargs.get('temperature', 0.5), model))
+
         response.raise_for_status()
-        response.encoding = "utf-8"
+        response.encoding = 'utf-8'
+
         yield response.text


 def _create_header():
     return {
-        "accept": "*/*",
-        "content-type": "application/json",
+        'accept': '*/*',
+        'content-type': 'application/json',
     }


 def _create_payload(messages: list[dict[str, str]], temperature, model):
     payload_messages = [
-        message | {"createdAt": int(time.time()) * 1000} for message in messages
+        message | {'createdAt': int(time.time()) * 1000} for message in messages
     ]
+
     return {
-        "key": "",
-        "model": model,
-        "messages": payload_messages,
-        "temperature": temperature,
-        "password": "",
-    }
+        'key' : '',
+        'model' : model,
+        'messages' : payload_messages,
+        'temperature' : temperature,
+        'password' : ''
+    }
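Because create_completion yields the response text rather than returning it, callers iterate the generator even when stream=False; g4f's ChatCompletion.create normally does this on the caller's behalf. A minimal direct-call sketch under that assumption:

# Hypothetical direct use of the provider; the model, messages and
# temperature default mirror the code above.
for text in Acytoo.create_completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=False,
    temperature=0.5,
):
    print(text)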

g4f/Provider/AiService.py

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@

 class AiService(BaseProvider):
     url = "https://aiservice.vercel.app/api/chat/answer"
-    working = False
+    working = True
     supports_gpt_35_turbo = True

     @staticmethod
