from flask import request, jsonify, session
from src.model.llm_service import LLMService
from src.model.gpt2 import gpt_v2_interface
+from src.model.project_service import ProjectService

class ChatController:
-    """Controller for chat-related operations"""
+    """Controller for chat-related operations and version saving"""

    def __init__(self):
        self.llm_service = LLMService()
+        self.project_service = ProjectService()

    def handle_chat_request(self):
-        """Process user input and return domain model with suggestions"""
+        """Process user input and generate response with version saving"""
        try:
-            # Get and validate user input
-            user_input = request.json.get("message", "").strip()
+            data = request.json
+            user_input = data.get("message", "").strip()
+            project_name = data.get("project_name", "").strip()
+
            if not user_input:
                return jsonify({"error": "User input is required"}), 400
+            if not project_name:
+                return jsonify({"error": "Project name is required to save version"}), 400
+
+            # Get the current state before processing new input
+            # This ensures we always have the latest domain model and PlantUML
+            project_result, _ = self.project_service.get_project_data(project_name)
+            current_project_data = project_result.get("project_data", {})
+
+            # Get existing domain model description and PlantUML from project data
+            # These will be used as fallbacks if nothing new is generated
+            existing_dmd = current_project_data.get("domain_model_description", "Welcome to your new project! Start by describing your domain.")
+            existing_plant_uml = current_project_data.get("plant_uml", "@startuml\nskinparam monochrome true\ntitle Your New Project\n\nclass ExampleEntity {\n +id: string\n +name: string\n}\n\nnote \"Start building your domain model!\" as N1\n@enduml")

-            # Add current input to chat history
            self.llm_service.add_to_chat_history("user", user_input)
-
-            # Get updated chat history after adding the new message
            updated_chat_history = self.llm_service.chat_history.get_messages()
-
-            # Format chat history as a single string
            chat_history_text = "\n".join([
                f"{'User' if msg['role'] == 'user' else 'Assistant'}: {msg['content']}"
                for msg in updated_chat_history
            ])

-            # Determine input type with enhanced analysis
-            result = self.llm_service.determine_input_type(chat_history_text)
-
-            # Extract all classification fields
-            decision = result.get("decision", False)
-            is_update = result.get("is_update", False)
-            is_casual_comment = result.get("is_casual_comment", False)
-            suggestions = result.get("suggestions", [])
+            classification_result = self.llm_service.determine_input_type(chat_history_text)

-            # Format the response from the suggestions array
-            formatted_suggestions = "\n".join(suggestions) if isinstance(suggestions, list) else suggestions
-
-            # Handle each case appropriately
+            decision = classification_result.get("decision", False)
+            is_casual_comment = classification_result.get("is_casual_comment", False)
+            suggestions = classification_result.get("suggestions", [])
+            assistant_response = "\n".join(suggestions) if isinstance(suggestions, list) else suggestions
+
+            # Set initial state using existing values to ensure we never store nulls
+            current_dmd = self.llm_service.get_current_domain_model_description() or existing_dmd
+            current_plant_uml = existing_plant_uml
+
            if is_casual_comment:
-                # Casual comment - don't regenerate the domain model
-                self.llm_service.add_to_chat_history("assistant", formatted_suggestions)
+                self.llm_service.add_to_chat_history("assistant", assistant_response)
+
+                # Use existing domain model and PlantUML (already set above)
+                # If DMD exists but PlantUML doesn't, generate PlantUML
+                if current_dmd and not current_plant_uml:
+                    client = self.llm_service.client
+                    current_plant_uml = gpt_v2_interface(current_dmd, client)
+
+                self.project_service.save_version(
+                    project_name,
+                    user_input,
+                    assistant_response,
+                    current_dmd,
+                    current_plant_uml
+                )
                return jsonify({
-                    "response": formatted_suggestions,
-                    "history": self.llm_service.get_chat_history()
+                    "response": assistant_response,
+                    "history": self.llm_service.get_chat_history(),
+                    "domain_model_description": current_dmd,
+                    "plant_uml": current_plant_uml
                })

-            elif decision:
-                # Enough information for domain modeling (either new model or update)
-                domain_model_description = self.llm_service.generate_domain_model_description(chat_history_text)
+            elif decision:  # Enough information for domain modeling (new or update)
+                new_dmd = self.llm_service.generate_domain_model_description(chat_history_text)
+                self.llm_service.add_to_chat_history("assistant", assistant_response)

-                # Add response to chat history
-                self.llm_service.add_to_chat_history("assistant", formatted_suggestions)
+                # Generate PlantUML for the new/updated DMD
+                client = self.llm_service.client
+                new_plant_uml = ""
+                if new_dmd:
+                    new_plant_uml = gpt_v2_interface(new_dmd, client)
+                else:
+                    new_dmd = current_dmd  # Fallback to existing DMD
+                    new_plant_uml = current_plant_uml  # Fallback to existing PlantUML
+
+                self.project_service.save_version(
+                    project_name,
+                    user_input,
+                    assistant_response,
+                    new_dmd,
+                    new_plant_uml
+                )

                return jsonify({
-                    "domain_model_description": domain_model_description,
-                    "suggestion": formatted_suggestions
+                    "domain_model_description": new_dmd,
+                    "suggestion": assistant_response,
+                    "plant_uml": new_plant_uml
                })

-            else:
-                # Not enough info for domain modeling
-                self.llm_service.add_to_chat_history("assistant", formatted_suggestions)
+            else:  # Not enough info for domain modeling
+                self.llm_service.add_to_chat_history("assistant", assistant_response)
+
+                # Use existing domain model and PlantUML
+                # If we have a domain model but no PlantUML, generate it
+                if current_dmd and not current_plant_uml:
+                    client = self.llm_service.client
+                    current_plant_uml = gpt_v2_interface(current_dmd, client)
+
+                self.project_service.save_version(
+                    project_name,
+                    user_input,
+                    assistant_response,
+                    current_dmd,
+                    current_plant_uml
+                )

                return jsonify({
-                    "response": formatted_suggestions,
-                    "history": self.llm_service.get_chat_history()
+                    "response": assistant_response,
+                    "history": self.llm_service.get_chat_history(),
+                    "domain_model_description": current_dmd,
+                    "plant_uml": current_plant_uml
                })

        except Exception as e:
            print(f"Error in chat request: {e}")
            import traceback
-            traceback.print_exc()  # Add traceback for better error debugging
+            traceback.print_exc()
            return jsonify({"error": "An unexpected error occurred"}), 500

-    # Remove submit_name method
-
    def generate_uml(self):
        """Generate UML diagram from domain model description"""
        try:
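
The controller relies on a ProjectService contract that is only implied by the calls above: get_project_data(project_name) is unpacked as a (result, status) pair whose result carries a "project_data" dict, and save_version(...) receives the project name, user input, assistant response, domain model description, and PlantUML text. A minimal sketch of that assumed interface follows; only the method names, argument order, and return shapes come from the diff, while the in-memory storage and method bodies are hypothetical.

# Hypothetical sketch of the ProjectService contract assumed by ChatController.
# Only names, argument order, and return shapes are taken from the calls in the
# diff above; the dict-based storage here is purely illustrative.
class ProjectService:
    def __init__(self):
        self._projects = {}  # project_name -> {"project_data": {...}, "versions": [...]}

    def get_project_data(self, project_name):
        # Returns a (result, status) pair; the controller ignores the status
        # and reads result["project_data"].
        project = self._projects.get(project_name, {"project_data": {}})
        return {"project_data": project.get("project_data", {})}, 200

    def save_version(self, project_name, user_input, assistant_response,
                     domain_model_description, plant_uml):
        # Records the exchange together with the current model state.
        project = self._projects.setdefault(
            project_name, {"project_data": {}, "versions": []}
        )
        project["project_data"] = {
            "domain_model_description": domain_model_description,
            "plant_uml": plant_uml,
        }
        project["versions"].append({
            "user_input": user_input,
            "assistant_response": assistant_response,
            "domain_model_description": domain_model_description,
            "plant_uml": plant_uml,
        })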
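
For context on how these handlers are likely exposed, here is a minimal wiring sketch. The route paths, module path, and app setup are assumptions; only ChatController and its two methods come from the code above.

# Hypothetical wiring sketch; route paths and the import path are assumptions.
from flask import Flask
from src.controller.chat_controller import ChatController  # assumed module path

app = Flask(__name__)
chat_controller = ChatController()

# Both handlers read request.json and return jsonify(...) payloads,
# so they are registered as POST endpoints.
app.add_url_rule("/chat", view_func=chat_controller.handle_chat_request, methods=["POST"])
app.add_url_rule("/generate-uml", view_func=chat_controller.generate_uml, methods=["POST"])

if __name__ == "__main__":
    app.run(debug=True)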