from __future__ import annotations

import requests

from .base_provider import BaseProvider
from ..typing import CreateResult
# Mapping of supported model identifiers to the metadata payload that the
# chat.aivvm.com "/api/chat" endpoint expects in its "model" field.
# Keys are the names callers pass to create_completion; values are sent verbatim.
models = {
    'gpt-3.5-turbo':          {'id': 'gpt-3.5-turbo',          'name': 'GPT-3.5'},
    'gpt-3.5-turbo-0613':     {'id': 'gpt-3.5-turbo-0613',     'name': 'GPT-3.5-0613'},
    'gpt-3.5-turbo-16k':      {'id': 'gpt-3.5-turbo-16k',      'name': 'GPT-3.5-16K'},
    'gpt-3.5-turbo-16k-0613': {'id': 'gpt-3.5-turbo-16k-0613', 'name': 'GPT-3.5-16K-0613'},
    'gpt-4':                  {'id': 'gpt-4',                  'name': 'GPT-4'},
    'gpt-4-0613':             {'id': 'gpt-4-0613',             'name': 'GPT-4-0613'},
    'gpt-4-32k':              {'id': 'gpt-4-32k',              'name': 'GPT-4-32K'},
    'gpt-4-32k-0613':         {'id': 'gpt-4-32k-0613',         'name': 'GPT-4-32K-0613'},
}
2217
class Aivvm(BaseProvider):
    """Provider for chat.aivvm.com — streams chat completions over plain HTTP.

    Capability flags are read by the surrounding g4f framework to route
    requests to this provider.
    """
    url = 'https://chat.aivvm.com'
    supports_stream = True
    working = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True

    @classmethod
    def create_completion(cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs
    ) -> CreateResult:
        """Yield the response text of a chat completion, chunk by chunk.

        Args:
            model: key into the module-level ``models`` mapping; an empty /
                falsy value defaults to "gpt-3.5-turbo".
            messages: OpenAI-style message dicts, forwarded verbatim.
            stream: accepted for interface compatibility; the upstream
                request always streams.
            **kwargs: only ``temperature`` is honoured (default 0.7).

        Raises:
            ValueError: if ``model`` is given but not in ``models``.
            requests.HTTPError: if the API responds with an error status.
        """
        if not model:
            model = "gpt-3.5-turbo"
        elif model not in models:
            # Fixed grammar of the original message ("Model are not supported").
            raise ValueError(f"Model is not supported: {model}")

        # Browser-mimicking headers; the endpoint rejects requests that do not
        # look like they come from its own web front-end.
        headers = {
            "authority"          : "chat.aivvm.com",
            "accept"             : "*/*",
            "accept-language"    : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "content-type"       : "application/json",
            "origin"             : "https://chat.aivvm.com",
            "referer"            : "https://chat.aivvm.com/",
            "sec-ch-ua"          : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
            "sec-ch-ua-mobile"   : "?0",
            "sec-ch-ua-platform" : '"macOS"',
            "sec-fetch-dest"     : "empty",
            "sec-fetch-mode"     : "cors",
            "sec-fetch-site"     : "same-origin",
            "user-agent"         : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
        }

        json_data = {
            "model"       : models[model],
            "messages"    : messages,
            "key"         : "",
            "prompt"      : "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
            "temperature" : kwargs.get("temperature", 0.7)
        }

        response = requests.post(
            "https://chat.aivvm.com/api/chat", headers=headers, json=json_data, stream=True)
        # Fail loudly on HTTP errors instead of yielding the error page as
        # if it were model output.
        response.raise_for_status()

        for chunk in response.iter_content(chunk_size=1048):
            # NOTE(review): a chunk boundary can split a multi-byte UTF-8
            # sequence; kept as-is to preserve upstream behaviour — confirm
            # whether the API ever emits non-ASCII mid-stream.
            yield chunk.decode('utf-8')

    @classmethod
    @property
    def params(cls):
        """Human-readable summary of the parameters this provider supports."""
        params = [
            ('model', 'str'),
            ('messages', 'list[dict[str, str]]'),
            ('stream', 'bool'),
            ('temperature', 'float'),
        ]
        param = ', '.join([': '.join(p) for p in params])
        return f'g4f.provider.{cls.__name__} supports: ({param})'