1515from chatgpt .fp import get_fp
1616from chatgpt .proofofWork import get_answer_token , get_config , get_requirements_token
1717from gateway .chatgpt import chatgpt_html
18- from gateway .reverseProxy import chatgpt_reverse_proxy , content_generator , get_real_req_token , headers_reject_list
18+ from gateway .reverseProxy import chatgpt_reverse_proxy , content_generator , get_real_req_token , headers_reject_list , \
19+ headers_accept_list
1920from utils .Client import Client
2021from utils .Logger import logger
2122from utils .configs import x_sign , turnstile_solver_url , chatgpt_base_url_list , no_sentinel , sentinel_proxy_url_list , \
@@ -93,6 +94,24 @@ async def get_gizmos_discovery_recent(request: Request):
9394 }
9495
9596
@app.get("/backend-api/gizmos/snorlax/sidebar")
async def get_gizmos_snorlax_sidebar(request: Request):
    """Serve the gizmos sidebar listing, proxying upstream only for real credentials.

    Extracts the bearer token from the Authorization header and forwards the
    request through the reverse proxy when the token looks like an actual
    ChatGPT credential: either a 45-character token or a JWT access token
    (JWTs always start with "eyJhbGciOi", the base64 of '{"alg":'). Any other
    caller receives an empty sidebar payload without touching upstream.
    """
    token = request.headers.get("Authorization", "").replace("Bearer ", "")
    # Bug fix: the original wrote `token.startswith` without calling it; a bound
    # method object is always truthy, so the guard passed for every request and
    # anonymous/garbage tokens were proxied upstream. Call it with the JWT
    # prefix, matching the token checks used by the file's other routes.
    if len(token) == 45 or token.startswith("eyJhbGciOi"):
        return await chatgpt_reverse_proxy(request, "backend-api/gizmos/snorlax/sidebar")
    else:
        # Unauthenticated/unknown callers get a valid-but-empty sidebar response.
        return {"items": [], "cursor": None}
104+
105+
@app.post("/backend-api/gizmos/snorlax/upsert")
async def get_gizmos_snorlax_upsert(request: Request):
    """Forward a gizmos upsert upstream, rejecting callers without real credentials.

    Extracts the bearer token from the Authorization header and forwards the
    request through the reverse proxy when the token looks like an actual
    ChatGPT credential: either a 45-character token or a JWT access token
    (JWTs always start with "eyJhbGciOi", the base64 of '{"alg":').
    Other callers are rejected with HTTP 403.

    Raises:
        HTTPException: 403 when the token is neither a 45-char token nor a JWT.
    """
    token = request.headers.get("Authorization", "").replace("Bearer ", "")
    # Bug fix: the original wrote `token.startswith` without calling it; a bound
    # method object is always truthy, so the guard passed for every request and
    # unauthenticated writes were proxied upstream. Call it with the JWT
    # prefix, matching the token checks used by the file's other routes.
    if len(token) == 45 or token.startswith("eyJhbGciOi"):
        return await chatgpt_reverse_proxy(request, "backend-api/gizmos/snorlax/upsert")
    else:
        raise HTTPException(status_code=403, detail="Forbidden")
113+
114+
96115@app .api_route ("/backend-api/conversations" , methods = ["GET" , "PATCH" ])
97116async def get_conversations (request : Request ):
98117 token = request .headers .get ("Authorization" , "" ).replace ("Bearer " , "" )
@@ -230,8 +249,81 @@ async def edge():
230249
231250
232251if no_sentinel :
252+ openai_sentinel_tokens_cache = {}
253+
233254 @app .post ("/backend-api/sentinel/chat-requirements" )
234- async def sentinel_chat_conversations ():
255+ async def sentinel_chat_conversations (request : Request ):
256+ token = request .headers .get ("Authorization" , "" ).replace ("Bearer " , "" )
257+ req_token = await get_real_req_token (token )
258+ access_token = await verify_token (req_token )
259+ fp = get_fp (req_token ).copy ()
260+ proxy_url = fp .pop ("proxy_url" , None )
261+ impersonate = fp .pop ("impersonate" , "safari15_3" )
262+ user_agent = fp .get ("user-agent" ,
263+ "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0" )
264+
265+ host_url = random .choice (chatgpt_base_url_list ) if chatgpt_base_url_list else "https://chatgpt.com"
266+ proof_token = None
267+ turnstile_token = None
268+
269+ # headers = {
270+ # key: value for key, value in request.headers.items()
271+ # if (key.lower() not in ["host", "origin", "referer", "priority", "sec-ch-ua-platform", "sec-ch-ua",
272+ # "sec-ch-ua-mobile", "oai-device-id"] and key.lower() not in headers_reject_list)
273+ # }
274+ headers = {
275+ key : value for key , value in request .headers .items ()
276+ if (key .lower () in headers_accept_list )
277+ }
278+ headers .update (fp )
279+ headers .update ({"authorization" : f"Bearer { access_token } " })
280+ client = Client (proxy = proxy_url , impersonate = impersonate )
281+ if sentinel_proxy_url_list :
282+ clients = Client (proxy = random .choice (sentinel_proxy_url_list ), impersonate = impersonate )
283+ else :
284+ clients = client
285+
286+ try :
287+ config = get_config (user_agent )
288+ p = get_requirements_token (config )
289+ data = {'p' : p }
290+ r = await clients .post (f'{ host_url } /backend-api/sentinel/chat-requirements' , headers = headers , json = data ,
291+ timeout = 10 )
292+ if r .status_code != 200 :
293+ raise HTTPException (status_code = r .status_code , detail = "Failed to get chat requirements" )
294+ resp = r .json ()
295+ turnstile = resp .get ('turnstile' , {})
296+ turnstile_required = turnstile .get ('required' )
297+ if turnstile_required :
298+ turnstile_dx = turnstile .get ("dx" )
299+ try :
300+ if turnstile_solver_url :
301+ res = await client .post (turnstile_solver_url ,
302+ json = {"url" : "https://chatgpt.com" , "p" : p , "dx" : turnstile_dx })
303+ turnstile_token = res .json ().get ("t" )
304+ except Exception as e :
305+ logger .info (f"Turnstile ignored: { e } " )
306+
307+ proofofwork = resp .get ('proofofwork' , {})
308+ proofofwork_required = proofofwork .get ('required' )
309+ if proofofwork_required :
310+ proofofwork_diff = proofofwork .get ("difficulty" )
311+ proofofwork_seed = proofofwork .get ("seed" )
312+ proof_token , solved = await run_in_threadpool (
313+ get_answer_token , proofofwork_seed , proofofwork_diff , config
314+ )
315+ if not solved :
316+ raise HTTPException (status_code = 403 , detail = "Failed to solve proof of work" )
317+ chat_token = resp .get ('token' )
318+
319+ openai_sentinel_tokens_cache [req_token ] = {
320+ "chat_token" : chat_token ,
321+ "proof_token" : proof_token ,
322+ "turnstile_token" : turnstile_token
323+ }
324+ except Exception as e :
325+ logger .error (f"Sentinel failed: { e } " )
326+
235327 return {
236328 "arkose" : {
237329 "dx" : None ,
@@ -251,6 +343,7 @@ async def sentinel_chat_conversations():
251343 }
252344
253345
346+ @app .post ("/backend-alt/conversation" )
254347 @app .post ("/backend-api/conversation" )
255348 async def chat_conversations (request : Request ):
256349 token = request .headers .get ("Authorization" , "" ).replace ("Bearer " , "" )
@@ -266,54 +359,71 @@ async def chat_conversations(request: Request):
266359 proof_token = None
267360 turnstile_token = None
268361
362+ # headers = {
363+ # key: value for key, value in request.headers.items()
364+ # if (key.lower() not in ["host", "origin", "referer", "priority", "sec-ch-ua-platform", "sec-ch-ua",
365+ # "sec-ch-ua-mobile", "oai-device-id"] and key.lower() not in headers_reject_list)
366+ # }
269367 headers = {
270368 key : value for key , value in request .headers .items ()
271- if (key .lower () not in ["host" , "origin" , "referer" , "priority" , "sec-ch-ua-platform" , "sec-ch-ua" ,
272- "sec-ch-ua-mobile" , "oai-device-id" ] and key .lower () not in headers_reject_list )
369+ if (key .lower () in headers_accept_list )
273370 }
274371 headers .update (fp )
275372 headers .update ({"authorization" : f"Bearer { access_token } " })
276373
277- client = Client (proxy = proxy_url , impersonate = impersonate )
278- if sentinel_proxy_url_list :
279- clients = Client (proxy = random .choice (sentinel_proxy_url_list ), impersonate = impersonate )
280- else :
281- clients = client
282-
283- config = get_config (user_agent )
284- p = get_requirements_token (config )
285- data = {'p' : p }
286- r = await clients .post (f'{ host_url } /backend-api/sentinel/chat-requirements' , headers = headers , json = data ,
287- timeout = 10 )
288- resp = r .json ()
289- turnstile = resp .get ('turnstile' , {})
290- turnstile_required = turnstile .get ('required' )
291- if turnstile_required :
292- turnstile_dx = turnstile .get ("dx" )
293- try :
294- if turnstile_solver_url :
295- res = await client .post (turnstile_solver_url ,
296- json = {"url" : "https://chatgpt.com" , "p" : p , "dx" : turnstile_dx })
297- turnstile_token = res .json ().get ("t" )
298- except Exception as e :
299- logger .info (f"Turnstile ignored: { e } " )
300-
301- proofofwork = resp .get ('proofofwork' , {})
302- proofofwork_required = proofofwork .get ('required' )
303- if proofofwork_required :
304- proofofwork_diff = proofofwork .get ("difficulty" )
305- proofofwork_seed = proofofwork .get ("seed" )
306- proof_token , solved = await run_in_threadpool (
307- get_answer_token , proofofwork_seed , proofofwork_diff , config
308- )
309- if not solved :
310- raise HTTPException (status_code = 403 , detail = "Failed to solve proof of work" )
311- chat_token = resp .get ('token' )
312- headers .update ({
313- "openai-sentinel-chat-requirements-token" : chat_token ,
314- "openai-sentinel-proof-token" : proof_token ,
315- "openai-sentinel-turnstile-token" : turnstile_token ,
316- })
374+ try :
375+ client = Client (proxy = proxy_url , impersonate = impersonate )
376+ if sentinel_proxy_url_list :
377+ clients = Client (proxy = random .choice (sentinel_proxy_url_list ), impersonate = impersonate )
378+ else :
379+ clients = client
380+
381+ sentinel_tokens = openai_sentinel_tokens_cache .get (req_token , {})
382+ openai_sentinel_tokens_cache .pop (req_token , None )
383+ if not sentinel_tokens :
384+ config = get_config (user_agent )
385+ p = get_requirements_token (config )
386+ data = {'p' : p }
387+ r = await clients .post (f'{ host_url } /backend-api/sentinel/chat-requirements' , headers = headers , json = data ,
388+ timeout = 10 )
389+ resp = r .json ()
390+ turnstile = resp .get ('turnstile' , {})
391+ turnstile_required = turnstile .get ('required' )
392+ if turnstile_required :
393+ turnstile_dx = turnstile .get ("dx" )
394+ try :
395+ if turnstile_solver_url :
396+ res = await client .post (turnstile_solver_url ,
397+ json = {"url" : "https://chatgpt.com" , "p" : p , "dx" : turnstile_dx })
398+ turnstile_token = res .json ().get ("t" )
399+ except Exception as e :
400+ logger .info (f"Turnstile ignored: { e } " )
401+
402+ proofofwork = resp .get ('proofofwork' , {})
403+ proofofwork_required = proofofwork .get ('required' )
404+ if proofofwork_required :
405+ proofofwork_diff = proofofwork .get ("difficulty" )
406+ proofofwork_seed = proofofwork .get ("seed" )
407+ proof_token , solved = await run_in_threadpool (
408+ get_answer_token , proofofwork_seed , proofofwork_diff , config
409+ )
410+ if not solved :
411+ raise HTTPException (status_code = 403 , detail = "Failed to solve proof of work" )
412+ chat_token = resp .get ('token' )
413+ headers .update ({
414+ "openai-sentinel-chat-requirements-token" : chat_token ,
415+ "openai-sentinel-proof-token" : proof_token ,
416+ "openai-sentinel-turnstile-token" : turnstile_token ,
417+ })
418+ else :
419+ headers .update ({
420+ "openai-sentinel-chat-requirements-token" : sentinel_tokens .get ("chat_token" , "" ),
421+ "openai-sentinel-proof-token" : sentinel_tokens .get ("proof_token" , "" ),
422+ "openai-sentinel-turnstile-token" : sentinel_tokens .get ("turnstile_token" , "" )
423+ })
424+ except Exception as e :
425+ logger .error (f"Sentinel failed: { e } " )
426+ return Response (status_code = 403 , content = "Sentinel failed" )
317427
318428 params = dict (request .query_params )
319429 data = await request .body ()
@@ -340,7 +450,7 @@ async def c_close(client, clients):
340450 data = json .dumps (req_json ).encode ("utf-8" )
341451
342452 background = BackgroundTask (c_close , client , clients )
343- r = await client .post_stream (f"{ host_url } /backend-api/conversation " , params = params , headers = headers ,
453+ r = await client .post_stream (f"{ host_url } { request . url . path } " , params = params , headers = headers ,
344454 cookies = request_cookies , data = data , stream = True , allow_redirects = False )
345455 rheaders = r .headers
346456 logger .info (f"Request token: { req_token } " )
0 commit comments