Skip to content

Commit 3df94c8

Browse files
committed
v 1.4.0 Update and refactoring of g4f
1 parent b63a4dc commit 3df94c8

File tree

137 files changed

+3616
-6105
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

137 files changed

+3616
-6105
lines changed

backend.py

Lines changed: 219 additions & 180 deletions
Large diffs are not rendered by default.

check.py

Lines changed: 78 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,78 @@
1+
import os
2+
import g4f
3+
import json
4+
import time
5+
import pytz
6+
from datetime import datetime
7+
import concurrent.futures
8+
import asyncio
9+
from g4f import ChatCompletion
10+
11+
def process_provider(provider_name, model_name):
    """Probe a single g4f provider/model pair with a short test prompt.

    Returns a status dict {"provider", "model", "url", "status"} where
    "status" is 'Active' or 'Inactive', or None when the provider cannot
    even be introspected (e.g. it lacks a `url` attribute).
    """
    try:
        p = getattr(g4f.Provider, provider_name)
        provider_status = {
            "provider": provider_name,
            "model": model_name,
            "url": p.url,
            "status": ""
        }

        # Only check the 'gpt-3.5-turbo' model for these providers; any other
        # model is marked inactive without a network round-trip.
        if provider_name in ['Wewordle', 'Qidinam', 'DeepAi', 'ChatgptLogin'] and model_name != 'gpt-3.5-turbo':
            provider_status['status'] = 'Inactive'
            print(f"{provider_name} with {model_name} skipped")
            return provider_status

        try:
            response = ChatCompletion.create(model=model_name, provider=p,
                messages=[{"role": "user", "content": "Say 'Hello World!'"}], stream=False)
            # A loose greeting match is enough evidence the provider answered.
            if any(word in response for word in ['Hello World', 'Hello', 'hello', 'world']):
                provider_status['status'] = 'Active'
                print(f"{provider_name} with {model_name} say: {response}")
            else:
                provider_status['status'] = 'Inactive'
                print(f"{provider_name} with {model_name} say: Inactive")
        except Exception:
            # Any request/parse failure just marks the pair inactive.
            provider_status['status'] = 'Inactive'
            print(f"{provider_name} with {model_name} say: Error")

        return provider_status
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; a malformed provider is simply skipped.
        return None
43+
44+
def main():
    """Endlessly sweep every provider/model pair and write the active ones
    to status.json, pausing 10 minutes between sweeps."""
    while True:
        # Only chat-style model families are worth probing.
        models = [
            name for name in g4f.models.ModelUtils.convert
            if name.startswith(('gpt-', 'claude', 'text-'))
        ]
        providers = [attr for attr in dir(g4f.Provider) if not attr.startswith('__')]

        status = {'data': []}
        with concurrent.futures.ThreadPoolExecutor() as pool:
            # Fan out one probe task per (provider, model) combination.
            pending = [
                pool.submit(process_provider, provider_name, model_name)
                for provider_name in providers
                for model_name in models
            ]
            for done in concurrent.futures.as_completed(pending):
                outcome = done.result()
                if outcome is not None and outcome['status'] == 'Active':
                    status['data'].append(outcome)

        print(status)
        status['key'] = "test"
        tz = pytz.timezone('Asia/Shanghai')
        now = datetime.now(tz)
        print(now)
        status['time'] = now.strftime("%Y-%m-%d %H:%M:%S")

        # Save the status data to a JSON file only if there are active providers
        if status['data']:
            with open('status.json', 'w') as f:
                json.dump(status, f)

        # Pause for 10 minutes before starting the next cycle
        time.sleep(600)


if __name__ == "__main__":
    main()

config.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
# Server settings consumed by the web app at startup.
app = dict(
    host='0.0.0.0',
    port=1337,
    debug=True,
)

# Outbound HTTP request settings.
request = dict(
    timeout=60,
)

endpoint.py

Lines changed: 0 additions & 216 deletions
Original file line numberDiff line numberDiff line change
@@ -22,222 +22,6 @@
2222

2323
from multiprocessing import Process
2424

25-
# FastAPI application with gzip compression and fully permissive CORS,
# so any origin can call the OpenAI-compatible endpoints below.
app = FastAPI()

app.add_middleware(GZipMiddleware)
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
34-
35-
@app.post("/chat/completions")
@app.post("/v1/chat/completions")
async def chat_completions(request: Request):
    """OpenAI-compatible chat completions endpoint backed by g4f.

    Returns a full completion object for non-streaming requests, or a
    Server-Sent-Events stream of chunks when the client sets "stream".
    """
    req_data = await request.json()
    streaming = req_data.get('stream', False)
    model = req_data.get('model')
    messages = req_data.get('messages')
    if model == 'bing':
        # The BingHuan provider only answers with streaming enabled.
        response = g4f.ChatCompletion.create(model=model, provider=g4f.Provider.BingHuan, stream=True,
                                             messages=messages)
    else:
        response = g4f.ChatCompletion.create(model=model, stream=streaming,
                                             messages=messages)

    if not streaming:
        # Some providers return the transport error as body text; retry until
        # a real answer arrives.
        while 'curl_cffi.requests.errors.RequestsError' in response:
            response = g4f.ChatCompletion.create(model=model, stream=streaming,
                                                 messages=messages)

        completion_timestamp = int(time.time())
        completion_id = ''.join(random.choices(
            'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))

        return {
            'id': 'chatcmpl-%s' % completion_id,
            'object': 'chat.completion',
            'created': completion_timestamp,
            'model': model,
            'usage': {
                'prompt_tokens': None,
                'completion_tokens': None,
                'total_tokens': None
            },
            'choices': [{
                'message': {
                    'role': 'assistant',
                    'content': response
                },
                'finish_reason': 'stop',
                'index': 0
            }]
        }

    async def stream():
        # Chunk skeleton mutated and re-serialized for every SSE event.
        completion_data = {
            'id': '',
            'object': 'chat.completion.chunk',
            'created': 0,
            'model': 'gpt-3.5-turbo-0301',
            'choices': [
                {
                    'delta': {
                        'content': ""
                    },
                    'index': 0,
                    'finish_reason': None
                }
            ]
        }

        for token in response:
            completion_id = ''.join(
                random.choices('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
            completion_timestamp = int(time.time())
            completion_data['id'] = f'chatcmpl-{completion_id}'
            completion_data['created'] = completion_timestamp
            completion_data['choices'][0]['delta']['content'] = token
            if token.startswith("an error occured"):
                completion_data['choices'][0]['delta']['content'] = "Server Response Error, please try again.\n"
                completion_data['choices'][0]['delta']['stop'] = "error"
                # FIX: the original passed separators=(',' ':') — implicit string
                # concatenation makes that the single string ',:' rather than a
                # 2-tuple; json.dumps only accepted it by accidental unpacking.
                yield 'data: %s\n\ndata: [DONE]\n\n' % json.dumps(completion_data, separators=(',', ':'))
                return
            yield 'data: %s\n\n' % json.dumps(completion_data, separators=(',', ':'))
            time.sleep(0.05)

        completion_data['choices'][0]['finish_reason'] = "stop"
        completion_data['choices'][0]['delta']['content'] = ""
        yield 'data: %s\n\n' % json.dumps(completion_data, separators=(',', ':'))
        yield 'data: [DONE]\n\n'

    return StreamingResponse(stream(), media_type='text/event-stream')
118-
119-
@app.get("/v1/dashboard/billing/subscription")
@app.get("/dashboard/billing/subscription")
async def billing_subscription():
    """Return a static, always-healthy mock of OpenAI's billing subscription."""
    subscription = {
        "object": "billing_subscription",
        "has_payment_method": True,
        "canceled": False,
        "canceled_at": None,
        "delinquent": None,
        "access_until": 2556028800,
        "soft_limit": 6944500,
        "hard_limit": 166666666,
        "system_hard_limit": 166666666,
        "soft_limit_usd": 416.67,
        "hard_limit_usd": 9999.99996,
        "system_hard_limit_usd": 9999.99996,
        "plan": {
            "title": "Pay-as-you-go",
            "id": "payg"
        },
        "primary": True,
        "account_name": "OpenAI",
        "po_number": None,
        "billing_email": None,
        "tax_ids": None,
        "billing_address": {
            "city": "New York",
            "line1": "OpenAI",
            "country": "US",
            "postal_code": "NY10031"
        },
        "business_address": None
    }
    return JSONResponse(subscription)
153-
154-
155-
@app.get("/v1/dashboard/billing/usage")
@app.get("/dashboard/billing/usage")
async def billing_usage():
    """Return a static mock of OpenAI's daily billing usage breakdown."""
    usage = {
        "object": "list",
        "daily_costs": [
            {
                "timestamp": time.time(),
                "line_items": [
                    {"name": "GPT-4", "cost": 0.0},
                    {"name": "Chat models", "cost": 1.01},
                    {"name": "InstructGPT", "cost": 0.0},
                    {"name": "Fine-tuning models", "cost": 0.0},
                    {"name": "Embedding models", "cost": 0.0},
                    {"name": "Image models", "cost": 16.0},
                    {"name": "Audio models", "cost": 0.0}
                ]
            }
        ],
        "total_usage": 1.01
    }
    return JSONResponse(usage)
198-
199-
@app.get("/v1/models")
@app.get("/models")
async def models():
    """List every model known to g4f in OpenAI /models response format."""
    import g4f.models
    payload = {
        "data": [
            {
                "id": model_id,
                "object": "model",
                "owned_by": g4f.models.ModelUtils.convert[model_id].base_provider,
                "tokens": 99999,
                "fallbacks": None,
                "endpoints": [
                    "/v1/chat/completions"
                ],
                "limits": None,
                "permission": []
            }
            for model_id in g4f.models.ModelUtils.convert
        ]
    }
    return JSONResponse(payload)
218-
219-
@app.get("/v1/providers")
@app.get("/providers")
async def providers():
    """List provider modules found on disk with their advertised capabilities.

    Scans g4f/Provider/Providers for .py files and reports, per provider:
    its models, URL, working flag and streaming support. Providers missing
    any of those attributes are silently skipped (best-effort listing).
    """
    files = os.listdir("g4f/Provider/Providers")
    files = [f for f in files if os.path.isfile(os.path.join("g4f/Provider/Providers", f))]
    files.sort(key=str.lower)
    providers_data = {"data": []}
    for file in files:
        if file.endswith(".py"):
            name = file[:-3]
            try:
                p = getattr(g4f.Provider, name)
                providers_data["data"].append({
                    "provider": str(name),
                    "model": list(p.model),
                    "url": str(p.url),
                    "working": bool(p.working),
                    "supports_stream": bool(p.supports_stream)
                })
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # propagate; a broken provider module is still skipped, not fatal.
                pass
    return JSONResponse(providers_data)
24125

24226
def setup_logging():
24327
root_logger = logging.getLogger()

g4f/Provider/AItianhu.py

Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,54 @@
1+
import json
2+
3+
import requests
4+
5+
from ..typing import Any, CreateResult
6+
from .base_provider import BaseProvider
7+
8+
9+
class AItianhu(BaseProvider):
    """g4f provider for the aitianhu.site chat-process endpoint."""

    url = "https://ixlc0.aitianhu.site/api/chat-process"
    working = True
    supports_gpt_4 = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs: Any,
    ) -> CreateResult:
        """Flatten the chat history into one prompt and yield the answer text.

        The endpoint streams one JSON object per line; only the last line
        (the complete accumulated answer) is parsed and yielded.
        """
        base = ""
        for message in messages:
            base += "%s: %s\n" % (message["role"], message["content"])
        base += "assistant:"

        headers = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
        }
        data: dict[str, Any] = {
            "prompt": base,
            "options": {},
            "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
            "temperature": kwargs.get("temperature", 0.8),
            "top_p": kwargs.get("top_p", 1),
        }
        # FIX: the endpoint URL was hard-coded a second time here; reuse the
        # class attribute so the two can never drift apart.
        response = requests.post(AItianhu.url, headers=headers, json=data)
        response.raise_for_status()
        lines = response.text.strip().split("\n")
        res = json.loads(lines[-1])
        yield res["text"]

    @classmethod
    @property
    def params(cls):
        """Human-readable summary of the keyword arguments this provider supports."""
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
            ("top_p", "int"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"

0 commit comments

Comments
 (0)