@@ -15,72 +15,84 @@ def handle_chat_request(self):
1515 user_input = request.json.get("message", "").strip()
1616 if not user_input:
1717     return jsonify({"error": "User input is required"}), 400
18-
19- user_name = session.get("user_name")
20- if not user_name:
21-     return jsonify({"error": "User name is not set"}), 400
2218
2319 # Add current input to chat history
2420 self.llm_service.add_to_chat_history("user", user_input)
2521
2622 # Get updated chat history after adding the new message
2723 updated_chat_history = self.llm_service.chat_history.get_messages()
2824
29- result = self.llm_service.determine_input_type(updated_chat_history)
25+ # Format chat history as a single string
26+ chat_history_text = "\n".join([
27+     f"{'User' if msg['role'] == 'user' else 'Assistant'}: {msg['content']}"
28+     for msg in updated_chat_history
29+ ])
30+
31+ # Determine input type with enhanced analysis
32+ result = self.llm_service.determine_input_type(chat_history_text)
33+
34+ # Extract all classification fields
35+ decision = result.get("decision", False)
36+ is_update = result.get("is_update", False)
37+ is_casual_comment = result.get("is_casual_comment", False)
3038 suggestions = result.get("suggestions", [])
3139
32- # Check if there's enough info for a domain model based on decision
33- if result.get("decision", False):
34-     # Generate domain model description if there's enough information
35-     scenario = self.llm_service.generate_scenario(user_input)
36-     formatted_suggestions = "**Suggestions to improve your domain model:**\n" + "\n".join([f"- {suggestion}" for suggestion in suggestions])
40+ # Format the response from the suggestions array
41+ formatted_suggestions = "\n".join(suggestions) if isinstance(suggestions, list) else suggestions
42+
43+ # Handle each case appropriately
44+ if is_casual_comment:
45+     # Casual comment - don't regenerate the domain model
46+     self.llm_service.add_to_chat_history("assistant", formatted_suggestions)
47+     return jsonify({
48+         "response": formatted_suggestions,
49+         "history": self.llm_service.get_chat_history()
50+     })
51+
52+ elif decision:
53+     # Enough information for domain modeling (either new model or update)
54+     domain_model_description = self.llm_service.generate_domain_model_description(chat_history_text)
3755
38-     # Add suggestions to chat history
56+     # Add response to chat history
3957     self.llm_service.add_to_chat_history("assistant", formatted_suggestions)
4058
41-     return jsonify({"scenario": scenario, "suggestion": formatted_suggestions})
42- else:
43-     # Not enough info for domain modeling - show suggestions for what's needed
44-     formatted_suggestions = "**To create a domain model, I need more information:**\n" + "\n".join([f"- {suggestion}" for suggestion in suggestions])
59+     return jsonify({
60+         "domain_model_description": domain_model_description,
61+         "suggestion": formatted_suggestions
62+     })
4563
46-     # Add suggestions to chat history
64+ else:
65+     # Not enough info for domain modeling
4766     self.llm_service.add_to_chat_history("assistant", formatted_suggestions)
4867
49-     # Return response in a format that won't trigger domain model description display
50-     return jsonify({"response": formatted_suggestions, "history": self.llm_service.get_chat_history()})
68+     return jsonify({
69+         "response": formatted_suggestions,
70+         "history": self.llm_service.get_chat_history()
71+     })
5172
5273 except Exception as e:
5374     print(f"Error in chat request: {e}")
75+     import traceback
76+     traceback.print_exc()  # Add traceback for better error debugging
5477     return jsonify({"error": "An unexpected error occurred"}), 500
5578
56- def submit_name(self):
57-     """Store user's name in session"""
58-     try:
59-         user_name = request.json.get("name", "").strip()
60-         if not user_name:
61-             return jsonify({"error": "Name is required"}), 400
62-
63-         session["user_name"] = user_name
64-         return jsonify({"message": "Name saved successfully!", "name": user_name})
65-     except Exception as e:
66-         print(f"Error storing name: {e}")
67-         return jsonify({"error": "An error occurred while storing the name"}), 500
79+ # Remove submit_name method
6880
6981 def generate_uml(self):
70-     """Generate UML diagram from scenario"""
82+     """Generate UML diagram from domain model description"""
7183     try:
72-         scenario_text = request.json.get("scenarioText", "").strip()
73-         if not scenario_text:
84+         domain_model_description_text = request.json.get("domainModelDescriptionText", "").strip()
85+         if not domain_model_description_text:
7486             return jsonify({"error": "Domain Model Description is required"}), 400
7587
7688         client = self.llm_service.client
77-         plant_uml = gpt_v2_interface(scenario_text, client)
89+         plant_uml = gpt_v2_interface(domain_model_description_text, client)
7890         return jsonify({"plantuml": plant_uml})
7991     except Exception as e:
8092         print(f"Error generating UML: {e}")
8193         return jsonify({"error": "An error occurred while generating the UML"}), 500
8294
83- def get_current_scenario(self):
84-     """Retrieve the current scenario"""
85-     scenario = self.llm_service.get_current_scenario()
86-     return jsonify({"scenario": scenario})
95+ def get_current_domain_model_description(self):
96+     """Retrieve the current domain model description"""
97+     domain_model_description = self.llm_service.get_current_domain_model_description()
98+     return jsonify({"domain_model_description": domain_model_description})
0 commit comments