@@ -113,19 +113,20 @@ async def stream_sql(session: SessionDep, current_user: CurrentUser, request_que
         chart_id=request_question.chat_id)
     # get schema
     request_question.db_schema = get_table_schema(session=session, ds=ds)
-    llm_service = LLMService(request_question, history_records, CoreDatasource(**ds.model_dump()), aimodel)
+    db_user = get_user_info(session=session, user_id=current_user.id)
+    request_question.lang = db_user.language
 
-    llm_service.init_record(session=session, current_user=current_user)
+    llm_service = LLMService(request_question, aimodel, history_records, CoreDatasource(**ds.model_dump()))
 
-    db_user = get_user_info(session=session, user_id=current_user.id)
+    llm_service.init_record(session=session, current_user=current_user)
 
     def run_task():
         try:
             # return id
             yield orjson.dumps({'type': 'id', 'id': llm_service.get_record().id}).decode() + '\n\n'
 
             # generate sql
-            sql_res = llm_service.generate_sql(session=session, lang=db_user.language)
+            sql_res = llm_service.generate_sql(session=session)
             full_sql_text = ''
             for chunk in sql_res:
                 full_sql_text += chunk
@@ -144,7 +145,7 @@ def run_task():
             yield orjson.dumps({'content': orjson.dumps(result).decode(), 'type': 'sql-data'}).decode() + '\n\n'
 
             # generate chart
-            chart_res = llm_service.generate_chart(session=session, lang=db_user.language)
+            chart_res = llm_service.generate_chart(session=session)
             full_chart_text = ''
             for chunk in chart_res:
                 full_chart_text += chunk
@@ -166,3 +167,67 @@ def run_task():
             yield orjson.dumps({'content': str(e), 'type': 'error'}).decode() + '\n\n'
 
     return StreamingResponse(run_task(), media_type="text/event-stream")
+
+
+@router.post("/record/{chart_record_id}/analysis")
+async def analysis(session: SessionDep, current_user: CurrentUser, chart_record_id: int):
+    record = session.query(ChatRecord).get(chart_record_id)
+    if not record:
+        raise HTTPException(
+            status_code=400,
+            detail=f"Chat record with id {chart_record_id} not found"
+        )
+
+    if not record.chart:
+        raise HTTPException(
+            status_code=500,
+            detail=f"Chat record with id {chart_record_id} has no generated chart and cannot be analyzed"
+        )
+
+    chat = session.query(Chat).filter(Chat.id == record.chat_id).first()
+    if not chat:
+        raise HTTPException(
+            status_code=400,
+            detail=f"Chat with id {record.chat_id} not found"
+        )
+
+    if chat.create_by != current_user.id:
+        raise HTTPException(
+            status_code=401,
+            detail=f"You cannot use the chat with id {record.chat_id}"
+        )
+
+    # Get available AI model
+    aimodel = session.exec(select(AiModelDetail).where(
+        AiModelDetail.status == True,
+        AiModelDetail.api_key.is_not(None)
+    )).first()
+    if not aimodel:
+        raise HTTPException(
+            status_code=500,
+            detail="No available AI model configuration found"
+        )
+
+    request_question = ChatQuestion(chat_id=chat.id, question='')
+    db_user = get_user_info(session=session, user_id=current_user.id)
+    request_question.lang = db_user.language
+
+    llm_service = LLMService(request_question, aimodel)
+    llm_service.set_record(record)
+
+    def run_task():
+        try:
+            # generate analysis
+            analysis_res = llm_service.generate_analysis(session=session)
+            for chunk in analysis_res:
+                yield orjson.dumps({'content': chunk, 'type': 'analysis-result'}).decode() + '\n\n'
+            yield orjson.dumps({'type': 'info', 'msg': 'analysis generated'}).decode() + '\n\n'
+
+            yield orjson.dumps({'type': 'analysis_finish'}).decode() + '\n\n'
+
+        except Exception as e:
+            traceback.print_exc()
+            # llm_service.save_error(session=session, message=str(e))
+            yield orjson.dumps({'content': str(e), 'type': 'error'}).decode() + '\n\n'
+
+    return StreamingResponse(run_task(), media_type="text/event-stream")
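
Both routes stream newline-delimited JSON events rather than returning a single JSON response, so a client has to consume the stream line by line. The sketch below shows one way to read the new analysis stream and reassemble the text. It is a minimal illustration, not code from this PR: `BASE_URL`, the assumption that the router is mounted without a prefix, and the omission of authentication are all placeholders.

```python
# Minimal client sketch for the new analysis stream (illustrative only).
# Assumptions: the API is reachable at BASE_URL, the router has no extra
# prefix, and auth is handled elsewhere (e.g. a session cookie).
import orjson
import requests

BASE_URL = "http://localhost:8000"  # hypothetical host/port

def stream_analysis(chart_record_id: int) -> str:
    """Consume the streamed events and return the full analysis text."""
    url = f"{BASE_URL}/record/{chart_record_id}/analysis"
    full_text = ""
    with requests.post(url, stream=True) as resp:
        resp.raise_for_status()
        for line in resp.iter_lines():
            if not line:
                continue  # skip the blank lines separating events
            event = orjson.loads(line)
            if event["type"] == "analysis-result":
                full_text += event["content"]
            elif event["type"] == "analysis_finish":
                break  # server signals the end of the analysis
            elif event["type"] == "error":
                raise RuntimeError(event["content"])
    return full_text
```

Note that although the server advertises `text/event-stream`, it emits bare JSON lines without the SSE `data:` prefix, which is why this sketch parses raw lines instead of using an SSE client library.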