11from fastapi import APIRouter , HTTPException
22from fastapi .responses import StreamingResponse
33from sqlmodel import select
4+
5+ from apps .chat .curd .chat import list_chats , get_chat_with_records , create_chat , save_question , save_answer
6+ from apps .chat .models .chat_model import CreateChat , ChatRecord
47from apps .chat .schemas .chat_base_schema import LLMConfig
58from apps .chat .schemas .chat_schema import ChatQuestion
69from apps .chat .schemas .llm import AgentService
710from apps .datasource .models .datasource import CoreDatasource
811from apps .system .models .system_model import AiModelDetail
9- from common .core .deps import SessionDep
12+ from common .core .deps import SessionDep , CurrentUser
1013import json
1114import asyncio
1215
# Router for the Data Q&A chat endpoints; every route below is mounted under /chat.
router = APIRouter(tags=["Data Q&A"], prefix="/chat")
1417
1518
@router.get("/list")
async def chats(session: SessionDep, current_user: CurrentUser):
    """List the chats visible to the current user.

    Args:
        session: Database session.
        current_user: The authenticated user (scoping happens in the CRUD layer).

    Returns:
        Whatever ``list_chats`` yields for this user.
    """
    user_chats = list_chats(session, current_user)
    return user_chats
22+
23+
@router.get("/get/{chart_id}")
async def list_chat(session: SessionDep, current_user: CurrentUser, chart_id: int):
    """Fetch one chat together with its question/answer records.

    Args:
        session: Database session.
        current_user: The authenticated user, forwarded to the CRUD layer.
        chart_id: Chat primary key.
            # NOTE(review): "chart_id" looks like a typo for "chat_id";
            # the path and parameter name are kept for URL compatibility.

    Returns:
        The result of ``get_chat_with_records`` for this chat.

    Raises:
        HTTPException: 500 when the lookup fails unexpectedly; deliberate
            HTTP errors raised downstream are passed through unchanged.
    """
    try:
        return get_chat_with_records(chart_id=chart_id, session=session, current_user=current_user)
    except HTTPException:
        # Bug fix: don't re-wrap intentional HTTP errors (e.g. a 404 from
        # the CRUD layer) as a generic 500 — let them propagate as-is.
        raise
    except Exception as e:
        # Chain the original cause so the real failure shows in tracebacks.
        raise HTTPException(
            status_code=500,
            detail=str(e)
        ) from e
33+
34+
@router.post("/start")
async def start_chat(session: SessionDep, current_user: CurrentUser, create_chat_obj: CreateChat):
    """Create a new chat session for the current user.

    Args:
        session: Database session.
        current_user: The authenticated user, forwarded to the CRUD layer.
        create_chat_obj: Payload describing the chat to create.

    Returns:
        The result of ``create_chat``.

    Raises:
        HTTPException: 400 when creation fails unexpectedly; deliberate
            HTTP errors raised downstream are passed through unchanged.
    """
    try:
        return create_chat(session, current_user, create_chat_obj)
    except HTTPException:
        # Bug fix: don't re-wrap intentional HTTP errors raised by the
        # CRUD layer as a generic 400 — let them propagate as-is.
        raise
    except Exception as e:
        # Chain the original cause so the real failure shows in tracebacks.
        raise HTTPException(
            status_code=400,
            detail=str(e)
        ) from e
44+
45+
1646@router .post ("/question" )
17- async def stream_sql (session : SessionDep , requestQuestion : ChatQuestion ):
47+ async def stream_sql (session : SessionDep , current_user : CurrentUser , request_question : ChatQuestion ):
1848 """Stream SQL analysis results
1949
2050 Args:
2151 session: Database session
22- requestQuestion: User question model
52+ current_user: CurrentUser
53+ request_question: User question model
2354
2455 Returns:
2556 Streaming response with analysis results
2657 """
27- question = requestQuestion .question
28-
58+ question = request_question .question
59+
2960 # Get available AI model
3061 aimodel = session .exec (select (AiModelDetail ).where (
31- AiModelDetail .status == True ,
62+ AiModelDetail .status == True ,
3263 AiModelDetail .api_key .is_not (None )
3364 )).first ()
34-
65+
3566 # Get available datasource
3667 ds = session .exec (select (CoreDatasource ).where (
3768 CoreDatasource .status == 'Success'
@@ -42,13 +73,22 @@ async def stream_sql(session: SessionDep, requestQuestion: ChatQuestion):
4273 status_code = 400 ,
4374 detail = "No available AI model configuration found"
4475 )
45-
76+
4677 if not ds :
4778 raise HTTPException (
4879 status_code = 400 ,
4980 detail = "No available datasource configuration found"
5081 )
51-
82+
83+ record : ChatRecord
84+ try :
85+ record = save_question (session = session , current_user = current_user , question = request_question )
86+ except Exception as e1 :
87+ raise HTTPException (
88+ status_code = 400 ,
89+ detail = str (e1 )
90+ )
91+
5292 # Use Tongyi Qianwen
5393 tongyi_config = LLMConfig (
5494 model_type = "openai" ,
@@ -72,25 +112,37 @@ async def stream_sql(session: SessionDep, requestQuestion: ChatQuestion):
72112 vllm_service = LLMService(vllm_config) """
73113 """ result = llm_service.generate_sql(question)
74114 return result """
75-
115+
76116 async def event_generator ():
117+ all_text = ''
77118 try :
78119 async for chunk in llm_service .async_generate (question ):
79120 data = json .loads (chunk .replace ('data: ' , '' ))
80-
121+
81122 if data ['type' ] in ['final' , 'tool_result' ]:
82123 content = data ['content' ]
124+ print ('-- ' + content )
125+ all_text += content
83126 for char in content :
84127 yield f"data: { json .dumps ({'type' : 'char' , 'content' : char })} \n \n "
85- await asyncio .sleep (0.05 )
86-
128+ await asyncio .sleep (0.05 )
129+
87130 if 'html' in data :
88131 yield f"data: { json .dumps ({'type' : 'html' , 'content' : data ['html' ]})} \n \n "
89132 else :
90133 yield chunk
91-
134+
92135 except Exception as e :
136+ all_text = 'Exception:' + str (e )
93137 yield f"data: { json .dumps ({'type' : 'error' , 'content' : str (e )})} \n \n "
94-
95- #return EventSourceResponse(event_generator(), headers={"Content-Type": "text/event-stream"})
96- return StreamingResponse (event_generator (), media_type = "text/event-stream" )
138+
139+ try :
140+ save_answer (session = session , id = record .id , answer = all_text )
141+ except Exception as e :
142+ raise HTTPException (
143+ status_code = 500 ,
144+ detail = str (e )
145+ )
146+
147+ # return EventSourceResponse(event_generator(), headers={"Content-Type": "text/event-stream"})
148+ return StreamingResponse (event_generator (), media_type = "text/event-stream" )
0 commit comments