diff --git a/frontend/Screenshot 2025-05-19 112503.png b/frontend/Screenshot 2025-05-19 112503.png
new file mode 100644
index 0000000000..9fb242ab08
Binary files /dev/null and b/frontend/Screenshot 2025-05-19 112503.png differ
diff --git a/frontend/Screenshot 2025-05-19 112535.png b/frontend/Screenshot 2025-05-19 112535.png
new file mode 100644
index 0000000000..c0ec06a293
Binary files /dev/null and b/frontend/Screenshot 2025-05-19 112535.png differ
diff --git a/frontend/ai/.gitignore b/frontend/ai/.gitignore
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/frontend/fastapi_backend.py b/frontend/fastapi_backend.py
new file mode 100644
index 0000000000..6002dbc9fd
--- /dev/null
+++ b/frontend/fastapi_backend.py
@@ -0,0 +1,981 @@
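+# Suggested local setup (an assumption drawn from the accompanying flow-diagram
+# document, not a verified install script for this repo):
+#   pip install fastapi uvicorn playwright motor pymongo pydantic python-multipart
+#   playwright install chromium
+#   uvicorn fastapi_backend:app --host 0.0.0.0 --port 8000
+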
+from fastapi import FastAPI, HTTPException, File, UploadFile
+from fastapi.middleware.cors import CORSMiddleware
+from pydantic import BaseModel, HttpUrl
+from typing import Optional, List, Dict, Any
+import json
+import asyncio
+from datetime import datetime
+import hashlib
+import subprocess
+import tempfile
+import os
+from motor.motor_asyncio import AsyncIOMotorClient
+from pymongo import MongoClient
+import logging
+from playwright.async_api import async_playwright
+import base64
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+app = FastAPI(title="LLM UI Testing Framework", version="2.0.0")
+
+# Enable CORS
+app.add_middleware(
+ CORSMiddleware,
+ allow_origins=["*"],
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+)
+
+# MongoDB Configuration
+MONGODB_URL = os.getenv("MONGODB_URL", "mongodb://localhost:27017")
+DATABASE_NAME = "llm_testing_framework"
+COLLECTION_NAME = "sessions"
+
+# MongoDB client
+client = AsyncIOMotorClient(MONGODB_URL)
+db = client[DATABASE_NAME]
+sessions_collection = db[COLLECTION_NAME]
+
+# Pydantic models
+class ElementExtractionRequest(BaseModel):
+ web_url: HttpUrl
+ config_content: str
+ system_prompt: str
+ user_prompt: str
+ force_new_session: bool = False
+
+class UrlHashCheckRequest(BaseModel):
+ web_url: HttpUrl
+
+class UrlHashCheckResponse(BaseModel):
+ url: str
+ current_hash: str
+ existing_sessions: List[Dict[str, Any]]
+ has_existing_session: bool
+
+class ElementExtractionResponse(BaseModel):
+ session_id: str
+ url_hash: str
+ extracted_elements: List[Dict[str, Any]]
+ timestamp: datetime
+ is_new_session: bool
+
+class TestcaseGenerationRequest(BaseModel):
+ session_id: str
+ extracted_elements: List[Dict[str, Any]]
+ system_prompt: str
+ user_prompt: str
+ testcase_sample: Optional[str] = None
+
+class TestcaseGenerationResponse(BaseModel):
+ session_id: str
+ test_cases: List[Dict[str, Any]]
+ timestamp: datetime
+
+class CodeGenerationRequest(BaseModel):
+ session_id: str
+ test_cases: List[Dict[str, Any]]
+ system_prompt: str
+ user_prompt: str
+
+class CodeGenerationResponse(BaseModel):
+ session_id: str
+ generated_code: str
+ timestamp: datetime
+
+class CodeExecutionRequest(BaseModel):
+ session_id: str
+ code: str
+ execution_type: str = "selenium"
+
+class CodeExecutionResponse(BaseModel):
+ session_id: str
+ execution_result: Dict[str, Any]
+ timestamp: datetime
+
+class SessionStatus(BaseModel):
+ session_id: str
+ url_hash: str
+ web_url: str
+ current_stage: str
+ completed_stages: List[str]
+ results: Dict[str, Any]
+ created_at: datetime
+ updated_at: datetime
+
+# Enhanced element extraction using Playwright
+async def extract_elements_with_playwright(url: str, config: Optional[dict] = None) -> List[Dict[str, Any]]:
+    """
+    Comprehensively extract all testable elements from a web page using Playwright
+ """
+ elements = []
+
+ try:
+ async with async_playwright() as p:
+ # Launch browser with robust settings
+ browser = await p.chromium.launch(
+ headless=True,
+ args=['--no-sandbox', '--disable-dev-shm-usage']
+ )
+
+ context = await browser.new_context(
+ viewport={'width': 1920, 'height': 1080},
+ user_agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
+ )
+
+ page = await context.new_page()
+
+ # Set longer timeout for complex pages
+ page.set_default_timeout(30000)
+
+ # Navigate to URL with error handling
+ try:
+ await page.goto(url, wait_until='networkidle', timeout=30000)
+ await page.wait_for_load_state('domcontentloaded')
+ await asyncio.sleep(2) # Allow dynamic content to load
+ except Exception as e:
+ logger.warning(f"Page load issue for {url}: {e}")
+ # Try alternative loading strategy
+ await page.goto(url, wait_until='domcontentloaded', timeout=15000)
+ await asyncio.sleep(3)
+
+ # Comprehensive element selectors
+ selectors = {
+ 'buttons': [
+ 'button', 'input[type="button"]', 'input[type="submit"]',
+ 'input[type="reset"]', '[role="button"]', '.btn', '.button',
+ 'a[href^="javascript"]', '[onclick]'
+ ],
+ 'inputs': [
+ 'input[type="text"]', 'input[type="email"]', 'input[type="password"]',
+ 'input[type="number"]', 'input[type="tel"]', 'input[type="url"]',
+ 'input[type="search"]', 'input[type="date"]', 'input[type="time"]',
+ 'input[type="datetime-local"]', 'input[type="month"]', 'input[type="week"]',
+ 'textarea', 'input:not([type])'
+ ],
+ 'selects': ['select', '[role="combobox"]', '[role="listbox"]'],
+ 'checkboxes': ['input[type="checkbox"]', '[role="checkbox"]'],
+ 'radios': ['input[type="radio"]', '[role="radio"]'],
+ 'links': ['a[href]', '[role="link"]'],
+ 'interactive': [
+ '[tabindex]', '[role="tab"]', '[role="menuitem"]',
+ '[role="option"]', '[role="treeitem"]', '[contenteditable="true"]'
+ ],
+ 'forms': ['form'],
+ 'images': ['img[alt]', 'img[title]', '[role="img"]'],
+ 'headings': ['h1', 'h2', 'h3', 'h4', 'h5', 'h6'],
+ 'landmarks': [
+ '[role="navigation"]', '[role="main"]', '[role="banner"]',
+ '[role="contentinfo"]', '[role="complementary"]', '[role="search"]'
+ ]
+ }
+
+ # Extract elements by category
+ for category, selector_list in selectors.items():
+ for selector in selector_list:
+ try:
+ category_elements = await page.locator(selector).all()
+
+ for element in category_elements:
+ try:
+ # Get comprehensive element data
+ element_data = await extract_element_data(element, category, page)
+ if element_data and element_data.get('is_visible', False):
+ elements.append(element_data)
+
+ except Exception as e:
+ logger.debug(f"Error extracting element data: {e}")
+ continue
+
+ except Exception as e:
+ logger.debug(f"Error with selector {selector}: {e}")
+ continue
+
+ # Take screenshot for visual reference
+ screenshot = await page.screenshot(full_page=True)
+ screenshot_b64 = base64.b64encode(screenshot).decode()
+
+ await browser.close()
+
+ # Remove duplicates and sort elements
+ unique_elements = remove_duplicate_elements(elements)
+
+ # Add metadata
+ metadata = {
+ 'total_elements': len(unique_elements),
+ 'extraction_timestamp': datetime.now().isoformat(),
+ 'url': url,
+            'screenshot': screenshot_b64[:1000] + '...' if len(screenshot_b64) > 1000 else screenshot_b64,  # Truncated to keep the stored document small (not used for hashing)
+ 'categories': {category: len([e for e in unique_elements if e.get('category') == category])
+ for category in selectors.keys()}
+ }
+
+ return unique_elements + [{'type': 'metadata', 'data': metadata}]
+
+ except Exception as e:
+ logger.error(f"Playwright extraction failed for {url}: {e}")
+ raise HTTPException(status_code=500, detail=f"Element extraction failed: {str(e)}")
+
+async def extract_element_data(element, category: str, page) -> Dict[str, Any]:
+ """Extract comprehensive data from a single element"""
+ try:
+ # Check if element is visible and interactable
+ is_visible = await element.is_visible()
+ is_enabled = await element.is_enabled() if category in ['buttons', 'inputs', 'selects'] else True
+
+ if not is_visible:
+ return None
+
+ # Get bounding box
+ bbox = await element.bounding_box()
+
+ # Get element attributes
+ tag_name = await element.evaluate('el => el.tagName.toLowerCase()')
+
+ # Common attributes
+ attrs = {}
+ for attr in ['id', 'class', 'name', 'type', 'value', 'placeholder', 'title', 'alt', 'href', 'src', 'role']:
+ try:
+ attr_value = await element.get_attribute(attr)
+ if attr_value:
+ attrs[attr] = attr_value
+        except Exception:
+            pass
+
+ # Get text content
+ text_content = ''
+ try:
+ text_content = (await element.text_content() or '').strip()
+ if not text_content and category == 'inputs':
+ text_content = attrs.get('placeholder', '')
+    except Exception:
+        pass
+
+ # Generate robust selectors
+ selectors = await generate_robust_selectors(element, page)
+
+ element_data = {
+ 'category': category,
+ 'tag_name': tag_name,
+ 'text': text_content,
+ 'attributes': attrs,
+ 'selectors': selectors,
+ 'bounding_box': bbox,
+ 'is_visible': is_visible,
+ 'is_enabled': is_enabled,
+ 'element_type': determine_element_type(tag_name, attrs, category)
+ }
+
+ return element_data
+
+ except Exception as e:
+ logger.debug(f"Error extracting element data: {e}")
+ return None
+
+async def generate_robust_selectors(element, page) -> Dict[str, str]:
+ """Generate multiple robust selectors for an element"""
+ selectors = {}
+
+ try:
+ # CSS selectors
+ selectors['css'] = await element.evaluate('''
+ el => {
+ let selector = el.tagName.toLowerCase();
+ if (el.id) selector += '#' + el.id;
+                // className may contain extra spaces, or be an object on SVG elements
+                const classes = (typeof el.className === 'string' ? el.className : '').split(' ').filter(Boolean);
+                if (classes.length) selector += '.' + classes.join('.');
+ return selector;
+ }
+ ''')
+
+ # XPath
+ selectors['xpath'] = await element.evaluate('''
+ el => {
+ let path = '';
+ while (el && el.nodeType === Node.ELEMENT_NODE) {
+ let siblingIndex = 1;
+ let sibling = el.previousSibling;
+ while (sibling) {
+ if (sibling.nodeType === Node.ELEMENT_NODE && sibling.tagName === el.tagName) {
+ siblingIndex++;
+ }
+ sibling = sibling.previousSibling;
+ }
+ path = el.tagName.toLowerCase() + '[' + siblingIndex + ']' + (path ? '/' + path : '');
+ el = el.parentNode;
+ }
+ return '//' + path;
+ }
+ ''')
+
+ # Text-based selector if element has text
+ text = await element.text_content()
+ if text and text.strip():
+ selectors['text'] = f"text='{text.strip()[:50]}'"
+
+ # Attribute-based selectors
+ id_attr = await element.get_attribute('id')
+ if id_attr:
+ selectors['id'] = f"[id='{id_attr}']"
+
+ name_attr = await element.get_attribute('name')
+ if name_attr:
+ selectors['name'] = f"[name='{name_attr}']"
+
+ except Exception as e:
+ logger.debug(f"Error generating selectors: {e}")
+
+ return selectors
+
+def determine_element_type(tag_name: str, attrs: Dict, category: str) -> str:
+ """Determine specific element type for testing purposes"""
+ if tag_name == 'input':
+ input_type = attrs.get('type', 'text')
+ return f"input_{input_type}"
+ elif tag_name == 'button':
+ return 'button'
+ elif tag_name == 'a':
+ return 'link'
+ elif tag_name == 'select':
+ return 'dropdown'
+ elif tag_name in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
+ return 'heading'
+ elif tag_name == 'form':
+ return 'form'
+ elif attrs.get('role'):
+ return f"role_{attrs['role']}"
+ else:
+ return tag_name
+
+def remove_duplicate_elements(elements: List[Dict]) -> List[Dict]:
+ """Remove duplicate elements based on selectors and position"""
+ seen = set()
+ unique_elements = []
+
+ for element in elements:
+ # Create a unique key based on selectors and position
+ selectors = element.get('selectors', {})
+ bbox = element.get('bounding_box', {})
+
+ key = f"{selectors.get('css', '')}-{selectors.get('xpath', '')}-{bbox.get('x', 0)}-{bbox.get('y', 0)}"
+
+ if key not in seen:
+ seen.add(key)
+ unique_elements.append(element)
+
+ return unique_elements
+
+def calculate_url_hash(elements: List[Dict[str, Any]], url: str) -> str:
+ """Calculate hash based on extracted elements and URL structure"""
+ # Create a stable representation for hashing
+ hash_data = {
+ 'url': url,
+ 'elements_count': len([e for e in elements if e.get('type') != 'metadata']),
+ 'structure': []
+ }
+
+ # Sort elements by position for consistent hashing
+ sorted_elements = sorted(
+ [e for e in elements if e.get('type') != 'metadata'],
+ key=lambda x: (
+ x.get('bounding_box', {}).get('y', 0),
+ x.get('bounding_box', {}).get('x', 0),
+ x.get('tag_name', ''),
+ x.get('text', '')
+ )
+ )
+
+ # Create structural fingerprint
+ for element in sorted_elements:
+ structure_item = {
+ 'tag': element.get('tag_name', ''),
+ 'type': element.get('element_type', ''),
+ 'text_hash': hashlib.md5((element.get('text', '') or '').encode()).hexdigest()[:8],
+ 'attrs_hash': hashlib.md5(str(sorted(element.get('attributes', {}).items())).encode()).hexdigest()[:8]
+ }
+ hash_data['structure'].append(structure_item)
+
+ # Generate hash
+ hash_string = json.dumps(hash_data, sort_keys=True)
+ return hashlib.sha256(hash_string.encode()).hexdigest()[:16]
+
+# Database operations with hash-based sessions
+async def check_existing_sessions(url: str, current_hash: str) -> Dict[str, Any]:
+ """Check for existing sessions for the given URL"""
+ try:
+ # Find all sessions for this URL
+ cursor = sessions_collection.find(
+ {"web_url": url}
+ ).sort("updated_at", -1)
+
+ existing_sessions = await cursor.to_list(length=50)
+
+ # Check if current hash matches any existing session
+ matching_session = None
+ for session in existing_sessions:
+ if session.get('url_hash') == current_hash:
+ matching_session = session
+ break
+
+ return {
+ 'url': url,
+ 'current_hash': current_hash,
+ 'existing_sessions': [
+ {
+ 'session_id': session['session_id'],
+ 'url_hash': session['url_hash'],
+ 'current_stage': session['current_stage'],
+ 'completed_stages': session['completed_stages'],
+ 'created_at': session['created_at'],
+ 'updated_at': session['updated_at']
+ }
+ for session in existing_sessions[:10] # Limit to recent 10
+ ],
+ 'has_matching_session': matching_session is not None,
+ 'matching_session': matching_session
+ }
+
+ except Exception as e:
+ logger.error(f"Error checking existing sessions: {e}")
+ return {
+ 'url': url,
+ 'current_hash': current_hash,
+ 'existing_sessions': [],
+ 'has_matching_session': False,
+ 'matching_session': None
+ }
+
+async def create_session_document(session_data: dict) -> str:
+ """Create a new session document in MongoDB using hash as ID"""
+ session_data["_id"] = session_data["session_id"]
+ try:
+ await sessions_collection.insert_one(session_data)
+ logger.info(f"Created session: {session_data['session_id']}")
+ return session_data["session_id"]
+ except Exception as e:
+ logger.error(f"Error creating session: {e}")
+ raise HTTPException(status_code=500, detail="Failed to create session")
+
+async def update_session_document(session_id: str, update_data: dict) -> bool:
+ """Update session document in MongoDB"""
+ update_data["updated_at"] = datetime.now()
+ try:
+ result = await sessions_collection.update_one(
+ {"_id": session_id},
+ {"$set": update_data}
+ )
+ logger.info(f"Updated session: {session_id}")
+ return result.modified_count > 0
+ except Exception as e:
+ logger.error(f"Error updating session {session_id}: {e}")
+ raise HTTPException(status_code=500, detail="Failed to update session")
+
+async def get_session_document(session_id: str) -> dict:
+ """Get session document from MongoDB"""
+ try:
+ session = await sessions_collection.find_one({"_id": session_id})
+ if not session:
+ raise HTTPException(status_code=404, detail="Session not found")
+ return session
+ except HTTPException:
+ raise
+ except Exception as e:
+ logger.error(f"Error getting session {session_id}: {e}")
+ raise HTTPException(status_code=500, detail="Failed to get session")
+
+async def get_last_session() -> Optional[dict]:
+ """Get the most recently updated session"""
+ try:
+ cursor = sessions_collection.find().sort("updated_at", -1).limit(1)
+ sessions = await cursor.to_list(length=1)
+ return sessions[0] if sessions else None
+ except Exception as e:
+ logger.error(f"Error getting last session: {e}")
+ return None
+
+# Mock LLM function (replace with actual LLM integration)
+async def call_llm(prompt: str, context: Optional[Dict[str, Any]] = None) -> str:
+ """Mock LLM call - replace with actual LLM integration"""
+ await asyncio.sleep(1) # Simulate API call delay
+
+ if "extract elements" in prompt.lower() or "test cases" in prompt.lower():
+ return json.dumps([
+ {"test_name": "Login with valid credentials", "steps": ["Enter username", "Enter password", "Click submit"], "expected": "User should be logged in"},
+ {"test_name": "Login with invalid credentials", "steps": ["Enter invalid username", "Enter invalid password", "Click submit"], "expected": "Error message should appear"},
+ {"test_name": "Form validation", "steps": ["Leave required fields empty", "Try to submit"], "expected": "Validation errors should appear"},
+ {"test_name": "Navigation test", "steps": ["Click navigation links"], "expected": "Should navigate to correct pages"}
+ ])
+ else:
+ return '''import pytest
+from playwright.sync_api import sync_playwright, Page, Browser
+import time
+
+class TestWebPage:
+ def setup_method(self):
+ self.playwright = sync_playwright().start()
+ self.browser = self.playwright.chromium.launch(headless=False)
+ self.page = self.browser.new_page()
+        self.page.goto("''' + (context or {}).get('web_url', 'https://example.com') + '''")
+
+ def test_page_elements_visible(self):
+ """Test that key elements are visible and interactable"""
+ try:
+ # Test buttons
+ buttons = self.page.locator('button, input[type="button"], input[type="submit"]').all()
+ assert len(buttons) > 0, "No buttons found on page"
+
+ for button in buttons[:3]: # Test first 3 buttons
+ assert button.is_visible(), f"Button not visible: {button}"
+
+ # Test inputs
+ inputs = self.page.locator('input[type="text"], input[type="email"], input[type="password"]').all()
+ for input_elem in inputs[:3]: # Test first 3 inputs
+ assert input_elem.is_visible(), f"Input not visible: {input_elem}"
+
+ print("✓ Page elements visibility test passed")
+ except Exception as e:
+ print(f"✗ Page elements test failed: {e}")
+ raise
+
+ def test_form_interactions(self):
+ """Test form interactions"""
+ try:
+ # Find forms
+ forms = self.page.locator('form').all()
+ if forms:
+ form = forms[0]
+ inputs = form.locator('input[type="text"], input[type="email"]').all()
+
+ for i, input_elem in enumerate(inputs[:2]):
+ input_elem.fill(f"test_value_{i}")
+
+ print("✓ Form interaction test passed")
+ else:
+ print("ℹ No forms found for testing")
+ except Exception as e:
+ print(f"✗ Form interaction test failed: {e}")
+
+ def test_navigation_links(self):
+ """Test navigation links"""
+ try:
+ links = self.page.locator('a[href]').all()
+ working_links = 0
+
+ for link in links[:5]: # Test first 5 links
+ href = link.get_attribute('href')
+ if href and not href.startswith('javascript:') and not href.startswith('#'):
+ working_links += 1
+
+ assert working_links > 0, "No working navigation links found"
+ print(f"✓ Navigation test passed - {working_links} working links found")
+ except Exception as e:
+ print(f"✗ Navigation test failed: {e}")
+
+ def teardown_method(self):
+ if hasattr(self, 'browser'):
+ self.browser.close()
+ if hasattr(self, 'playwright'):
+ self.playwright.stop()
+
+if __name__ == "__main__":
+ test = TestWebPage()
+ test.setup_method()
+ test.test_page_elements_visible()
+ test.test_form_interactions()
+ test.test_navigation_links()
+ test.teardown_method()'''
+
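+# A minimal sketch of what a real integration could look like. Assumptions (not
+# part of this repo): an OpenAI-compatible provider, the separate `openai>=1.0`
+# async client, auth via the OPENAI_API_KEY environment variable, and a
+# placeholder model name. Note that /api/generate-testcases expects the model to
+# return a JSON list, so the prompts must ask for JSON output.
+async def call_llm_openai(prompt: str, context: Optional[Dict[str, Any]] = None) -> str:
+    """Hypothetical drop-in replacement for the mock call_llm above."""
+    from openai import AsyncOpenAI  # lazy import: requires `pip install openai`
+
+    client = AsyncOpenAI()  # picks up OPENAI_API_KEY from the environment
+    user_content = prompt if not context else f"{prompt}\n\nContext: {json.dumps(context, default=str)}"
+    response = await client.chat.completions.create(
+        model="gpt-4o-mini",  # assumed model name; use whatever your provider exposes
+        messages=[
+            {"role": "system", "content": "You generate UI test cases and Playwright test code."},
+            {"role": "user", "content": user_content},
+        ],
+    )
+    return response.choices[0].message.content or ""
+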
+async def execute_test_code(code: str, execution_type: str) -> Dict[str, Any]:
+ """Execute the generated test code"""
+ try:
+ # Create temporary file
+ with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as temp_file:
+ temp_file.write(code)
+ temp_file_path = temp_file.name
+
+ # Execute the code
+ if execution_type == "python":
+ result = subprocess.run(
+ ["python", temp_file_path],
+ capture_output=True,
+ text=True,
+ timeout=120 # Increased timeout for playwright
+ )
+ elif execution_type == "pytest":
+ result = subprocess.run(
+ ["python", "-m", "pytest", temp_file_path, "-v"],
+ capture_output=True,
+ text=True,
+ timeout=120
+ )
+ else: # Default to python execution
+ result = subprocess.run(
+ ["python", temp_file_path],
+ capture_output=True,
+ text=True,
+ timeout=120
+ )
+
+ # Clean up
+ os.unlink(temp_file_path)
+
+ return {
+ "success": result.returncode == 0,
+ "returncode": result.returncode,
+ "stdout": result.stdout,
+ "stderr": result.stderr,
+ "execution_type": execution_type
+ }
+
+ except subprocess.TimeoutExpired:
+ return {
+ "success": False,
+ "returncode": -1,
+ "stdout": "",
+ "stderr": "Execution timed out after 120 seconds",
+ "execution_type": execution_type
+ }
+ except Exception as e:
+ return {
+ "success": False,
+ "returncode": -1,
+ "stdout": "",
+ "stderr": f"Execution error: {str(e)}",
+ "execution_type": execution_type
+ }
+
+# API Endpoints
+@app.post("/api/check-url-hash", response_model=UrlHashCheckResponse)
+async def check_url_hash(request: UrlHashCheckRequest):
+ """Check if URL has existing sessions and get current hash"""
+ try:
+ # Extract elements to calculate current hash
+ elements = await extract_elements_with_playwright(str(request.web_url))
+ current_hash = calculate_url_hash(elements, str(request.web_url))
+
+ # Check existing sessions
+ session_check = await check_existing_sessions(str(request.web_url), current_hash)
+
+ return UrlHashCheckResponse(
+ url=str(request.web_url),
+ current_hash=current_hash,
+ existing_sessions=session_check['existing_sessions'],
+ has_existing_session=session_check['has_matching_session']
+ )
+
+ except Exception as e:
+ logger.error(f"URL hash check failed: {e}")
+ raise HTTPException(status_code=500, detail=f"URL hash check failed: {str(e)}")
+
+@app.post("/api/extract-elements", response_model=ElementExtractionResponse)
+async def extract_elements(request: ElementExtractionRequest):
+ """Extract UI elements from the provided web URL"""
+ try:
+ # Extract elements using Playwright
+ elements = await extract_elements_with_playwright(str(request.web_url))
+ url_hash = calculate_url_hash(elements, str(request.web_url))
+
+ # Check if we should use existing session or create new one
+ if not request.force_new_session:
+ session_check = await check_existing_sessions(str(request.web_url), url_hash)
+ if session_check['has_matching_session']:
+ existing_session = session_check['matching_session']
+ return ElementExtractionResponse(
+ session_id=existing_session['session_id'],
+ url_hash=url_hash,
+ extracted_elements=existing_session['extracted_elements'],
+ timestamp=existing_session['updated_at'],
+ is_new_session=False
+ )
+
+ # Create new session with hash as ID
+ session_id = url_hash
+ session_data = {
+ "session_id": session_id,
+ "url_hash": url_hash,
+ "current_stage": "element_extraction_complete",
+ "completed_stages": ["element_extraction"],
+ "web_url": str(request.web_url),
+ "config_content": request.config_content,
+ "extracted_elements": elements,
+ "created_at": datetime.now(),
+ "updated_at": datetime.now()
+ }
+
+ await create_session_document(session_data)
+
+ return ElementExtractionResponse(
+ session_id=session_id,
+ url_hash=url_hash,
+ extracted_elements=elements,
+ timestamp=datetime.now(),
+ is_new_session=True
+ )
+
+ except Exception as e:
+ logger.error(f"Element extraction failed: {e}")
+ raise HTTPException(status_code=500, detail=f"Element extraction failed: {str(e)}")
+
+@app.post("/api/generate-testcases", response_model=TestcaseGenerationResponse)
+async def generate_testcases(request: TestcaseGenerationRequest):
+ """Generate test cases based on extracted elements"""
+ session = await get_session_document(request.session_id)
+
+ # Construct prompt for testcase generation
+ full_prompt = f"""
+ System: {request.system_prompt}
+ User: {request.user_prompt}
+
+    Extracted Elements (first 20 only, to keep the prompt small): {json.dumps(request.extracted_elements[:20])}
+ {f"Sample Testcase: {request.testcase_sample}" if request.testcase_sample else ""}
+
+ Please generate comprehensive test cases for these UI elements focusing on user interactions.
+ """
+
+ try:
+ # Call LLM to generate test cases
+ llm_response = await call_llm(full_prompt)
+ test_cases = json.loads(llm_response)
+
+ # Update session document
+ update_data = {
+ "current_stage": "testcase_generation_complete",
+ "completed_stages": session["completed_stages"] + ["testcase_generation"],
+ "test_cases": test_cases
+ }
+
+ await update_session_document(request.session_id, update_data)
+
+ return TestcaseGenerationResponse(
+ session_id=request.session_id,
+ test_cases=test_cases,
+ timestamp=datetime.now()
+ )
+ except Exception as e:
+ logger.error(f"Testcase generation failed: {e}")
+ raise HTTPException(status_code=500, detail=f"Testcase generation failed: {str(e)}")
+
+@app.post("/api/generate-code", response_model=CodeGenerationResponse)
+async def generate_code(request: CodeGenerationRequest):
+ """Generate test code based on test cases"""
+ session = await get_session_document(request.session_id)
+
+ # Construct prompt for code generation
+ full_prompt = f"""
+ System: {request.system_prompt}
+ User: {request.user_prompt}
+
+ Test Cases: {json.dumps(request.test_cases)}
+ Web URL: {session.get('web_url', '')}
+
+ Please generate complete, executable Playwright test code for these test cases.
+ """
+
+ try:
+ # Call LLM to generate code
+ generated_code = await call_llm(full_prompt, {'web_url': session.get('web_url', '')})
+
+ # Update session document
+ update_data = {
+ "current_stage": "code_generation_complete",
+ "completed_stages": session["completed_stages"] + ["code_generation"],
+ "generated_code": generated_code
+ }
+
+ await update_session_document(request.session_id, update_data)
+
+ return CodeGenerationResponse(
+ session_id=request.session_id,
+ generated_code=generated_code,
+ timestamp=datetime.now()
+ )
+ except Exception as e:
+ logger.error(f"Code generation failed: {e}")
+ raise HTTPException(status_code=500, detail=f"Code generation failed: {str(e)}")
+
+@app.post("/api/execute-code", response_model=CodeExecutionResponse)
+async def execute_code(request: CodeExecutionRequest):
+ """Execute the generated test code"""
+ session = await get_session_document(request.session_id)
+
+ try:
+ # Execute the code
+ execution_result = await execute_test_code(request.code, request.execution_type)
+
+ # Update session document
+ update_data = {
+ "current_stage": "code_execution_complete",
+ "completed_stages": session["completed_stages"] + ["code_execution"],
+ "execution_result": execution_result
+ }
+
+ await update_session_document(request.session_id, update_data)
+
+ return CodeExecutionResponse(
+ session_id=request.session_id,
+ execution_result=execution_result,
+ timestamp=datetime.now()
+ )
+ except Exception as e:
+ logger.error(f"Code execution failed: {e}")
+ raise HTTPException(status_code=500, detail=f"Code execution failed: {str(e)}")
+
+@app.get("/api/session/{session_id}", response_model=SessionStatus)
+async def get_session_status(session_id: str):
+ """Get current session status and results"""
+ session = await get_session_document(session_id)
+
+ return SessionStatus(
+ session_id=session_id,
+ url_hash=session["url_hash"],
+ web_url=session["web_url"],
+ current_stage=session["current_stage"],
+ completed_stages=session["completed_stages"],
+ results={
+ "extracted_elements": session.get("extracted_elements"),
+ "test_cases": session.get("test_cases"),
+ "generated_code": session.get("generated_code"),
+ "execution_result": session.get("execution_result")
+ },
+ created_at=session["created_at"],
+ updated_at=session["updated_at"]
+ )
+
+@app.get("/api/url-history/{url_encoded}")
+async def get_url_history(url_encoded: str):
+ """Get session history for a specific URL"""
+ try:
+ # Decode URL
+ import urllib.parse
+ url = urllib.parse.unquote(url_encoded)
+
+ cursor = sessions_collection.find(
+ {"web_url": url}
+ ).sort("updated_at", -1)
+
+ sessions = await cursor.to_list(length=20)
+
+ return {
+ "url": url,
+ "sessions": [
+ {
+ "session_id": session["session_id"],
+ "url_hash": session["url_hash"],
+ "current_stage": session["current_stage"],
+ "completed_stages": session["completed_stages"],
+ "created_at": session["created_at"],
+ "updated_at": session["updated_at"],
+ "elements_count": len(session.get("extracted_elements", []))
+ }
+ for session in sessions
+ ]
+ }
+ except Exception as e:
+ logger.error(f"Error getting URL history: {e}")
+ raise HTTPException(status_code=500, detail="Failed to get URL history")
+
+@app.get("/api/sessions")
+async def list_sessions():
+ """List all sessions"""
+ try:
+ cursor = sessions_collection.find().sort("updated_at", -1)
+ sessions = await cursor.to_list(length=100)
+
+ return {
+ "sessions": [
+ {
+ "session_id": session["session_id"],
+ "url_hash": session["url_hash"],
+ "current_stage": session["current_stage"],
+ "created_at": session["created_at"],
+ "updated_at": session["updated_at"],
+ "web_url": session.get("web_url")
+ }
+ for session in sessions
+ ]
+ }
+ except Exception as e:
+ logger.error(f"Error listing sessions: {e}")
+ raise HTTPException(status_code=500, detail="Failed to list sessions")
+
+@app.get("/api/last-session")
+async def get_last_session_endpoint():
+ """Get the most recently updated session"""
+ session = await get_last_session()
+ if not session:
+ return {"session": None}
+
+ return {
+ "session": {
+ "session_id": session["session_id"],
+ "url_hash": session["url_hash"],
+ "current_stage": session["current_stage"],
+ "completed_stages": session["completed_stages"],
+ "results": {
+ "extracted_elements": session.get("extracted_elements"),
+ "test_cases": session.get("test_cases"),
+ "generated_code": session.get("generated_code"),
+ "execution_result": session.get("execution_result")
+ },
+ "web_url": session.get("web_url"),
+ "config_content": session.get("config_content"),
+ "created_at": session["created_at"],
+ "updated_at": session["updated_at"]
+ }
+ }
+
+@app.delete("/api/session/{session_id}")
+async def delete_session(session_id: str):
+ """Delete a session"""
+ try:
+ result = await sessions_collection.delete_one({"_id": session_id})
+ if result.deleted_count == 0:
+ raise HTTPException(status_code=404, detail="Session not found")
+
+ logger.info(f"Deleted session: {session_id}")
+ return {"message": "Session deleted successfully"}
+ except HTTPException:
+ raise
+ except Exception as e:
+ logger.error(f"Error deleting session {session_id}: {e}")
+ raise HTTPException(status_code=500, detail="Failed to delete session")
+
+@app.get("/api/health")
+async def health_check():
+ """Health check endpoint"""
+ try:
+ # Test MongoDB connection
+ await sessions_collection.find_one({})
+ return {
+ "status": "healthy",
+ "timestamp": datetime.now(),
+ "database": "connected",
+ "playwright": "ready"
+ }
+ except Exception as e:
+ return {
+ "status": "unhealthy",
+ "timestamp": datetime.now(),
+ "database": "disconnected",
+ "error": str(e)
+ }
+
+@app.on_event("startup")
+async def startup_event():
+ """Initialize database indexes on startup"""
+ try:
+ # Create indexes for better performance
+ await sessions_collection.create_index("session_id")
+ await sessions_collection.create_index("url_hash")
+ await sessions_collection.create_index("web_url")
+ await sessions_collection.create_index("updated_at")
+ logger.info("Database indexes created successfully")
+ except Exception as e:
+ logger.error(f"Error creating database indexes: {e}")
+
+if __name__ == "__main__":
+ import uvicorn
+ uvicorn.run(app, host="0.0.0.0", port=8000)
\ No newline at end of file
diff --git a/frontend/fastapi_react_flow_diagram (1).md b/frontend/fastapi_react_flow_diagram (1).md
new file mode 100644
index 0000000000..7c325b82fe
--- /dev/null
+++ b/frontend/fastapi_react_flow_diagram (1).md
@@ -0,0 +1,388 @@
+# LLM UI Testing Framework - Enhanced Flow Diagram (Hash-based Sessions)
+
+```
+┌─────────────────────────────────────────────────────────────────────────────────────────┐
+│ REACT FRONTEND │
+│ (http://localhost:3000) │
+└─────────────────────────────────────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────────────┐
+│ ENHANCED UI COMPONENTS & STATE │
+├─────────────────────────────────────────────────────────────────────────────────────────┤
+│ ┌──────────────────┐ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ │
+│ │ Step Indicator │ │ Element Form │ │ Testcase Form │ │ Code Form │ │
+│ │ ┌─┬─┬─┬─┐ │ │ • Web URL + │ │ • Enhanced │ │ • Playwright │ │
+│ │ │1│2│3│4│ │ │ History btn │ │ Prompts │ │ Code Gen │ │
+│ │ └─┴─┴─┴─┘ │ │ • Config │ │ • Test Cases │ │ • Robust │ │
+│ └──────────────────┘ └─────────────────┘ └─────────────────┘ └─────────────────┘ │
+│ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ │
+│ │ Execute Form │ │ Session Dialog │ │ History Modal │ │ Hash Management │ │
+│ │ • Playwright │ │ • Load Previous │ │ • URL Sessions │ │ • URL Hash │ │
+│ │ • Python/Pytest │ │ • Start Fresh │ │ • Timeline │ │ • Session ID │ │
+│ └─────────────────┘ └─────────────────┘ └─────────────────┘ └─────────────────┘ │
+│ │ │
+│ ┌─────────────────────────────────────────┼─────────────────────────────────────────┐ │
+│ │ ENHANCED SESSION MGMT │ RESULTS DISPLAY │ │
+│ │ • currentStep: number (0-3) │ • elements: Comprehensive JSON │ │
+│ │ • sessionId: string (URL Hash) │ • testcases: Enhanced scenarios │ │
+│ │ • urlHash: string (Content Hash) │ • code: Playwright test code │ │
+│ │ • sessionDialogData: Object │ • execution: Detailed results │ │
+│ │ • urlHistory: Array │ • element categories & metadata │ │
+│ └─────────────────────────────────────────┴─────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────────────┘
+ │
+ ┌───────────────┬───────────────────┼───────────────────┬───────────────┐
+ │ │ │ │ │
+ ▼ ▼ ▼ ▼ ▼
+┌─────────────┐ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐
+│ POST │ │ POST │ │ POST │ │ POST │ │ GET │ │ GET │
+│/check-url- │ │/extract- │ │/generate- │ │/generate- │ │/execute- │ │/url-history/│
+│hash │ │elements │ │testcases │ │code │ │code │ │{url} │
+│ │ │ │ │ │ │ │ │ │ │ │
+│ • Hash calc │ │ • Playwright│ │ • Enhanced │ │ • Playwright│ │ • Enhanced │ │ • Session │
+│ • Session │ │ • Elements │ │ test cases│ │ code gen │ │ execution │ │ history │
+│ check │ │ • Hash-based│ │ • Context │ │ • Context │ │ • Timeout │ │ • Timeline │
+└─────────────┘ └─────────────┘ └─────────────┘ └─────────────┘ └─────────────┘ └─────────────┘
+ │ │ │ │ │
+ └───────────────┼───────────────────┼───────────────────┼───────────────┘
+ │ │ │
+ └───────────────────┼───────────────────┘
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────────────┐
+│ ENHANCED FASTAPI BACKEND │
+│ (http://localhost:8000) │
+├─────────────────────────────────────────────────────────────────────────────────────────┤
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────────┐ │
+│ │ ENHANCED API ENDPOINTS │ │
+│ │ │ │
+│ │ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ ┌────────┐ │ │
+│ │ │ /check-url- │ │ /extract- │ │ /generate- │ │ /generate- │ │/execute│ │ │
+│ │ │ hash │ │ elements │ │ testcases │ │ code │ │-code │ │ │
+│ │ │ │ │ │ │ │ │ │ │ │ │ │
+│ │ │ • Calculate │ │ • Playwright │ │ • Enhanced │ │ • Playwright │ │• 120s │ │ │
+│ │ │ hash │ │ extraction │ │ prompts │ │ code gen │ │timeout │ │ │
+│ │ │ • Find │ │ • Hash calc │ │ • Context │ │ • Context │ │• Result│ │ │
+│ │ │ existing │ │ • Session │ │ aware │ │ aware │ │detail │ │ │
+│ │ │ sessions │ │ mgmt │ │ │ │ │ │ │ │ │
+│ │ └──────────────┘ └──────────────┘ └──────────────┘ └──────────────┘ └────────┘ │ │
+│ │ │ │
+│ │ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │ │
+│ │ │ /url-history/│ │ /session/ │ │ /sessions │ │ /last- │ │ │
+│ │ │ {url} │ │ {id} │ │ │ │ session │ │ │
+│ │ │ │ │ │ │ │ │ │ │ │
+│ │ │ • URL-based │ │ • Hash-based │ │ • All │ │ • Recent │ │ │
+│ │ │ history │ │ lookup │ │ sessions │ │ session │ │ │
+│ │ │ • Timeline │ │ • Complete │ │ • Pagination │ │ • Auto-load │ │ │
+│ │ │ view │ │ data │ │ │ │ │ │ │
+│ │ └──────────────┘ └──────────────┘ └──────────────┘ └──────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────────┘ │
+│ │ │
+│ ▼ │
+│ ┌─────────────────────────────────────────────────────────────────────────────────┐ │
+│ │ ENHANCED REQUEST PROCESSING │ │
+│ │ │ │
+│ │ 1. ┌─────────────────┐ 2. ┌─────────────────┐ 3. ┌─────────────────┐ │ │
+│ │ │ URL Hash Check │ ──> │ Playwright │ ──> │ LLM/Execution │ │ │
+│ │ │ │ │ Extraction │ │ Services │ │ │
+│ │ │ • Extract with │ │ │ │ │ │ │
+│ │ │ Playwright │ │ • Comprehensive │ │ • Context-aware │ │ │
+│ │ │ • Calculate │ │ element scan │ │ prompts │ │ │
+│ │ │ content hash │ │ • Robust │ │ • Enhanced code │ │ │
+│ │ │ • Check │ │ selectors │ │ generation │ │ │
+│ │ │ existing │ │ • Categories │ │ • Secure │ │ │
+│ │ │ sessions │ │ • Metadata │ │ execution │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────┘ │ │
+│ │ │ │ │ │
+│ │ 4. ┌─────────────────┐ 5. ┌─────────────────┐ │ │ │
+│ │ │ Hash-based │ <── │ Process Results │ <──────────────┘ │ │
+│ │ │ Session Update │ │ │ │ │
+│ │ │ │ │ • Validate data │ │ │
+│ │ │ • Use hash as │ │ • Format output │ │ │
+│ │ │ session ID │ │ • Error │ │ │
+│ │ │ • Track UI │ │ handling │ │ │
+│ │ │ changes │ │ • Logging │ │ │
+│ │ │ • Maintain │ │ │ │ │
+│ │ │ history │ │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────────┘ │
+│ │ │
+│ ▼ │
+│ ┌─────────────────────────────────────────────────────────────────────────────────┐ │
+│ │ ENHANCED MONGODB DATABASE STORAGE │ │
+│ │ (mongodb://localhost:27017) │ │
+│ │ │ │
+│ │ Database: llm_testing_framework │ │
+│ │ Collection: sessions │ │
+│ │ │ │
+│ │ Enhanced Document Schema: │ │
+│ │ { │ │
+│ │ "_id": "url_content_hash_16_chars", # Hash as primary key │ │
+│ │ "session_id": "url_content_hash_16_chars", │ │
+│ │ "url_hash": "url_content_hash_16_chars", │ │
+│ │ "web_url": "https://example.com", │ │
+│ │ "current_stage": "code_execution_complete", │ │
+│ │ "completed_stages": ["element_extraction", "testcase_generation", │ │
+│ │ "code_generation", "code_execution"], │ │
+│ │ "config_content": "playwright_config...", │ │
+│ │ "extracted_elements": [ │ │
+│ │ { │ │
+│ │ "category": "buttons", │ │
+│ │ "tag_name": "button", │ │
+│ │ "text": "Submit", │ │
+│ │ "attributes": {"id": "submit-btn", "class": "btn primary"}, │ │
+│ │ "selectors": { │ │
+│ │ "css": "button#submit-btn.btn.primary", │ │
+│ │ "xpath": "//button[@id='submit-btn'][1]", │ │
+│ │ "text": "text='Submit'", │ │
+│ │ "id": "[id='submit-btn']" │ │
+│ │ }, │ │
+│ │ "bounding_box": {"x": 100, "y": 200, "width": 80, "height": 30}, │ │
+│ │ "is_visible": true, │ │
+│ │ "is_enabled": true, │ │
+│ │ "element_type": "button" │ │
+│ │ }, │ │
+│ │ ... more elements ..., │ │
+│ │ { │ │
+│ │ "type": "metadata", │ │
+│ │ "data": { │ │
+│ │ "total_elements": 45, │ │
+│ │ "extraction_timestamp": "2025-06-08T...", │ │
+│ │ "url": "https://example.com", │ │
+│ │ "screenshot": "base64_truncated...", │ │
+│ │ "categories": { │ │
+│ │ "buttons": 8, "inputs": 12, "links": 15, │ │
+│ │ "forms": 2, "selects": 3, "checkboxes": 5 │ │
+│ │ } │ │
+│ │ } │ │
+│ │ } │ │
+│ │ ], │ │
+│ │ "test_cases": [ │ │
+│ │ { │ │
+│ │ "test_name": "Enhanced form submission test", │ │
+│ │ "steps": ["Fill form fields", "Validate inputs", "Submit form"], │ │
+│ │ "expected": "Form should submit successfully with validation" │ │
+│ │ } │ │
+│ │ ], │ │
+│ │ "generated_code": "import pytest\nfrom playwright.sync_api import...", │ │
+│ │ "execution_result": { │ │
+│ │ "success": true, │ │
+│ │ "returncode": 0, │ │
+│ │ "stdout": "Playwright test execution output...", │ │
+│ │ "stderr": "", │ │
+│ │ "execution_type": "python" │ │
+│ │ }, │ │
+│ │ "created_at": "2025-06-08T...", │ │
+│ │ "updated_at": "2025-06-08T..." │ │
+│ │ } │ │
+│ │ │ │
+│ │ Enhanced Indexes: session_id, url_hash, web_url, updated_at │ │
+│ │ Motor AsyncIO Client for high-performance async operations │ │
+│ │ URL-based session grouping for historical tracking │ │
+│ └─────────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────────────┘
+ │
+ ▼
+┌─────────────────────────────────────────────────────────────────────────────────────────┐
+│ ENHANCED PLAYWRIGHT & LLM SERVICES │
+│ (Comprehensive Extraction + Code Generation) │
+├─────────────────────────────────────────────────────────────────────────────────────────┤
+│ │
+│ ┌─────────────────────────────────────────────────────────────────────────────────┐ │
+│ │ PLAYWRIGHT ELEMENT EXTRACTION ENGINE │ │
+│ │ │ │
+│ │ async def extract_elements_with_playwright(url, config): │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ │ │
+│ │ │ Browser Launch │ │ Comprehensive │ │ Element Data │ │ │
+│ │ │ │ │ Element Scan │ │ Extraction │ │ │
+│ │ │ • Chromium │ │ │ │ │ │ │
+│ │ │ • No sandbox │ │ • Buttons │ │ • Tag name │ │ │
+│ │ │ • 1920x1080 │ │ • Inputs (all) │ │ • Text content │ │ │
+│ │ │ • User agent │ │ • Selects │ │ • Attributes │ │ │
+│ │ │ • 30s timeout │ │ • Checkboxes │ │ • Bounding box │ │ │
+│ │ │ │ │ • Radios │ │ • Visibility │ │ │
+│ │ │ │ │ • Links │ │ • Enabled state │ │ │
+│ │ │ │ │ • Interactive │ │ • Element type │ │ │
+│ │ │ │ │ • Forms │ │ │ │ │
+│ │ │ │ │ • Images │ │ │ │ │
+│ │ │ │ │ • Headings │ │ │ │ │
+│ │ │ │ │ • Landmarks │ │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────┘ │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ │ │
+│ │ │ Robust Selector │ │ Duplicate │ │ Hash Calculation│ │ │
+│ │ │ Generation │ │ Removal │ │ │ │ │
+│ │ │ │ │ │ │ • URL structure │ │ │
+│ │ │ • CSS selector │ │ • Position- │ │ • Element count │ │ │
+│ │ │ • XPath │ │ based │ │ • Structural │ │ │
+│ │ │ • Text-based │ │ • Selector- │ │ fingerprint │ │ │
+│ │ │ • ID-based │ │ based │ │ • Content hash │ │ │
+│ │ │ • Name-based │ │ • Unique key │ │ • SHA256 (16) │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────────┘ │
+│ │ │
+│ ▼ │
+│ ┌─────────────────────────────────────────────────────────────────────────────────┐ │
+│ │ ENHANCED LLM SERVICE INTEGRATION │ │
+│ │ │ │
+│ │ async def call_llm(prompt, context): │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ │ │
+│ │ │ Test Case │ │ Playwright Code │ │ Context-Aware │ │ │
+│ │ │ Generation │ │ Generation │ │ Processing │ │ │
+│ │ │ │ │ │ │ │ │ │
+│ │ │ Returns: │ │ Returns: │ │ • Element │ │ │
+│ │ │ Enhanced JSON │ │ Complete │ │ categories │ │ │
+│ │ │ test scenarios │ │ Playwright code │ │ • URL context │ │ │
+│ │ │ │ │ │ │ • Test focus │ │ │
+│ │ │ [ │ │ import pytest │ │ • Robust │ │ │
+│ │ │ { │ │ from playwright │ │ selectors │ │ │
+│ │ │ "test_name":│ │ ... │ │ • Error │ │ │
+│ │ │ "steps": │ │ class TestPage: │ │ handling │ │ │
+│ │ │ "expected": │ │ def setup(): │ │ │ │ │
+│ │ │ "priority": │ │ def test_(): │ │ │ │ │
+│ │ │ } │ │ def teardown()│ │ │ │ │
+│ │ │ ] │ │ │ │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────┘ │ │
+│ └─────────────────────────────────────────────────────────────────────────────────┘ │
+│ │ │
+│ ▼ │
+│ ┌─────────────────────────────────────────────────────────────────────────────────┐ │
+│ │ ENHANCED CODE EXECUTION ENGINE │ │
+│ │ │ │
+│ │ async def execute_test_code(code, execution_type): │ │
+│ │ │ │
+│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ │ │
+│ │ │ Secure File │ │ Enhanced │ │ Detailed │ │ │
+│ │ │ Handling │ │ Execution │ │ Results │ │ │
+│ │ │ │ │ │ │ │ │ │
+│ │ │ • Temp file │ │ subprocess.run( │ │ { │ │ │
+│ │ │ creation │ │ ["python", │ │ "success": │ │ │
+│ │ │ • Write code │ │ temp_file], │ │ "returncode": │ │ │
+│ │ │ • Permission │ │ capture=True, │ │ "stdout": │ │ │
+│ │ │ handling │ │ timeout=120 │ │ "stderr": │ │ │
+│ │ │ • Auto cleanup │ │ ) │ │ "exec_type": │ │ │
+│ │ │ │ │ │ │ "timestamp": │ │ │
+│ │ │ │ │ Types: │ │ } │ │ │
+│ │ │ │ │ • python │ │ │ │ │
+│ │ │ │ │ • pytest │ │ │ │ │
+│ │ │ │ │ • playwright │ │ │ │ │
+│ │ └─────────────────┘ └─────────────────┘ └─────────────────┘ │ │
+│ │ │ │
+│ │ Security: 120s timeout, temp file cleanup, sandboxed subprocess execution │ │
+│ │ Error handling: Timeout, permission, execution errors with detailed logging │ │
+│ └─────────────────────────────────────────────────────────────────────────────────┘ │
+└─────────────────────────────────────────────────────────────────────────────────────────┘
+
+┌─────────────────────────────────────────────────────────────────────────────────────────┐
+│ ENHANCED DATA FLOW SUMMARY │
+├─────────────────────────────────────────────────────────────────────────────────────────┤
+│ │
+│ 1. INTELLIGENT SESSION MANAGEMENT │
+│ │ │
+│ ├─► URL Input & Hash Check │
+│ │ • User enters URL │
+│ │ • POST /api/check-url-hash │
+│ │ • Playwright extracts elements comprehensively │
+│ │ • Calculate content-based hash (SHA256) │
+│ │ • Check existing sessions for URL │
+│ │ • Return hash + existing session list │
+│ │ │
+│ ├─► Smart Session Dialog │
+│ │ • If existing sessions found, show dialog │
+│ │ • User can choose: Load Previous or Start Fresh │
+│ │ • Display session metadata (stages, dates, elements count) │
+│ │ • Hash-based session identification │
+│ │ │
+│ 2. ENHANCED 4-STEP WORKFLOW │
+│ │ │
+│ ├─► Step 1: Enhanced Element Extraction │
+│ │ • Playwright comprehensive scan (10+ element types) │
+│ │ • Robust selector generation (CSS, XPath, Text, ID) │
+│ │ • Element categorization and metadata │
+│ │ • Content hash calculation for session ID │
+│ │ • Store in MongoDB with hash as primary key │
+│ │ │
+│ ├─► Step 2: Context-Aware Test Case Generation │
+│ │ • Enhanced prompts with element context │
+│ │ • Comprehensive test scenarios (positive/negative/edge) │
+│ │ • User workflow and interaction testing │
+│ │ • Priority-based test case organization │
+│ │ │
+│ ├─► Step 3: Playwright Code Generation │
+│ │ • Generate production-ready Playwright test code │
+│ │ • Robust error handling and assertions │
+│ │ • Context-aware selectors from extraction │
+│ │ • Setup, teardown, and configuration management │
+│ │ │
+│ └─► Step 4: Enhanced Code Execution │
+│ • Support for Python, Pytest, Playwright execution │
+│ • 120-second timeout for complex tests │
+│ • Detailed execution results with stdout/stderr │
+│ • Secure temporary file handling │
+│ │
+│ 3. INTELLIGENT HISTORY & VERSION TRACKING │
+│ • Hash-based session identification enables UI change tracking │
+│ • URL history shows evolution of page structure over time │
+│ • Historical sessions preserved for comparison and analysis │
+│ • Timeline view of testing sessions for each URL │
+│ │
+│ 4. PRODUCTION-READY FEATURES │
+│ • MongoDB with optimized indexes for hash and URL-based queries │
+│ • Playwright browser automation with comprehensive element detection │
+│ • Secure code execution with timeout and sandboxing │
+│ • Enhanced error handling and detailed logging │
+│ • Session restoration and state management │
+│ • Responsive UI with loading states and progress indicators │
+│ │
+│ 5. ADDITIONAL ENHANCED ENDPOINTS │
+│ • POST /api/check-url-hash - Intelligent session detection │
+│ • GET /api/url-history/{url} - URL-specific session history │
+│ • GET /api/session/{hash} - Hash-based session retrieval │
+│ • Enhanced session management with automated cleanup │
+│ • Health checks including Playwright and database connectivity │
+└─────────────────────────────────────────────────────────────────────────────────────────┘
+
+┌─────────────────────────────────────────────────────────────────────────────────────────┐
+│ ENHANCED TECHNOLOGY STACK & FEATURES │
+├─────────────────────────────────────────────────────────────────────────────────────────┤
+│ │
+│ FRONTEND (Enhanced React) │ BACKEND (Enhanced FastAPI) │
+│ ├─ React 18 with advanced hooks │ ├─ FastAPI with async/await │
+│ ├─ Smart session management │ ├─ Playwright for element extraction │
+│ ├─ Hash-based session detection │ ├─ Hash-based session management │
+│ ├─ URL history and timeline │ ├─ Motor AsyncIO MongoDB driver │
+│ ├─ Session loading dialogs │ ├─ Enhanced subprocess execution │
+│ ├─ Enhanced error handling │ ├─ Comprehensive element categorization │
+│ ├─ Real-time progress indicators │ ├─ Robust selector generation │
+│ ├─ Element category visualization │ ├─ Context-aware LLM integration │
+│ ├─ Copy-to-clipboard functionality │ ├─ Content-based hash calculation │
+│ ├─ Responsive design with modals │ ├─ URL-based session grouping │
+│ └─ Auto-session restoration │ └─ Enhanced logging and monitoring │
+│ │ │
+│ DATABASE (Enhanced MongoDB) │ SECURITY & EXECUTION (Enhanced) │
+│ ├─ Hash-based primary keys │ ├─ Playwright browser sandboxing │
+│ ├─ URL-based session grouping │ ├─ 120-second execution timeout │
+│ ├─ Element metadata storage │ ├─ Secure temporary file handling │
+│ ├─ Comprehensive indexing │ ├─ Input validation with enhanced Pydantic │
+│ ├─ Historical session tracking │ ├─ Detailed error logging and monitoring │
+│ ├─ Optimized query performance │ ├─ Database connection health monitoring │
+│ └─ Automatic cleanup policies │ └─ Hash-based session security │
+│ │ │
+│ DEPENDENCIES (Enhanced) │ INSTALLATION (Enhanced) │
+│ Frontend: │ Backend: │
+│ ├─ react, lucide-react │ ├─ pip install fastapi uvicorn │
+│ ├─ tailwindcss │ ├─ pip install playwright motor pymongo │
+│ └─ Enhanced state management │ ├─ pip install pydantic python-multipart │
+│ │ ├─ playwright install chromium │
+│ Backend (Enhanced): │ └─ MongoDB server with optimized config │
+│ ├─ fastapi, uvicorn │ │
+│ ├─ playwright (async) │ MongoDB (Enhanced): │
+│ ├─ motor, pymongo │ ├─ Default port: 27017 │
+│ ├─ pydantic, python-multipart │ ├─ Database: llm_testing_framework │
+│ └─ hashlib, base64, logging │ ├─ Collection: sessions (hash-indexed) │
+│ │ └─ Optimized indexes for performance │
+└─────────────────────────────────────────────────────────────────────────────────────────┘
+```
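+
+## Example: driving the workflow from a script
+
+A minimal sketch of how a client could walk the hash check and the 4-step workflow described above, assuming the backend is running locally on port 8000. It uses the `requests` package (an extra dependency), and the prompts are placeholders:
+
+```python
+import requests
+
+BASE = "http://localhost:8000/api"
+URL = "https://example.com"
+
+# 0. Check whether this page already has a hash-matched session
+check = requests.post(f"{BASE}/check-url-hash", json={"web_url": URL}).json()
+print("existing session:", check["has_existing_session"])
+
+# 1. Extract elements (reuses a matching session unless force_new_session=True)
+extraction = requests.post(f"{BASE}/extract-elements", json={
+    "web_url": URL,
+    "config_content": "",
+    "system_prompt": "You are a UI element extraction assistant.",
+    "user_prompt": "Extract all interactive elements.",
+    "force_new_session": False,
+}).json()
+session_id = extraction["session_id"]
+
+# 2. Generate test cases from the extracted elements
+testcases = requests.post(f"{BASE}/generate-testcases", json={
+    "session_id": session_id,
+    "extracted_elements": extraction["extracted_elements"],
+    "system_prompt": "You are a test case generation assistant.",
+    "user_prompt": "Cover positive, negative and edge case scenarios.",
+}).json()
+
+# 3. Generate Playwright code, then 4. execute it
+code = requests.post(f"{BASE}/generate-code", json={
+    "session_id": session_id,
+    "test_cases": testcases["test_cases"],
+    "system_prompt": "You are a test automation engineer.",
+    "user_prompt": "Generate complete, executable Playwright test code.",
+}).json()
+result = requests.post(f"{BASE}/execute-code", json={
+    "session_id": session_id,
+    "code": code["generated_code"],
+    "execution_type": "python",
+}).json()
+print("execution succeeded:", result["execution_result"]["success"])
+```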
\ No newline at end of file
diff --git a/frontend/index.html b/frontend/index.html
index 57621a268b..de26edfa2a 100644
--- a/frontend/index.html
+++ b/frontend/index.html
@@ -1,14 +1,52 @@
-
-
-
-
-
-
- Full Stack FastAPI Project
-
-
-
-
-
-
-
+
+
+
+
+
+ Employee Timesheet Dashboard
+
+
+
+
+
+
+
Employee Timesheet Dashboard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/frontend/react_frontend.tsx b/frontend/react_frontend.tsx
new file mode 100644
index 0000000000..123b517ccf
--- /dev/null
+++ b/frontend/react_frontend.tsx
@@ -0,0 +1,786 @@
+import React, { useState, useEffect } from 'react';
+import { Play, Code, FileText, Globe, CheckCircle, Circle, ArrowRight, Download, Copy, RefreshCw, Terminal, AlertCircle, History, ExternalLink, X } from 'lucide-react';
+
+const API_BASE_URL = 'http://localhost:8000/api';
+
+const TestingFrameworkApp = () => {
+ const [currentStep, setCurrentStep] = useState(0);
+ const [sessionId, setSessionId] = useState(null);
+ const [urlHash, setUrlHash] = useState(null);
+ const [loading, setLoading] = useState(false);
+ const [initialLoading, setInitialLoading] = useState(true);
+ const [showSessionDialog, setShowSessionDialog] = useState(false);
+ const [sessionDialogData, setSessionDialogData] = useState(null);
+ const [showHistoryModal, setShowHistoryModal] = useState(false);
+ const [urlHistory, setUrlHistory] = useState([]);
+ const [results, setResults] = useState({
+ elements: null,
+ testcases: null,
+ code: null,
+ execution: null
+ });
+
+ // Form states
+ const [elementForm, setElementForm] = useState({
+ webUrl: '',
+ configContent: '',
+ systemPrompt: 'You are an expert UI element extraction assistant. Analyze the webpage and extract all interactive elements comprehensively.',
+ userPrompt: 'Please extract all UI elements from this webpage including buttons, inputs, links, forms, and other interactive components. Use robust selectors and comprehensive extraction strategies.'
+ });
+
+ const [testcaseForm, setTestcaseForm] = useState({
+ systemPrompt: 'You are an expert test case generation assistant. Create comprehensive test scenarios based on extracted UI elements.',
+ userPrompt: 'Generate detailed test cases for the extracted UI elements. Include positive, negative, edge case scenarios, and user workflow tests. Focus on real-world user interactions.'
+ });
+
+ const [codeForm, setCodeForm] = useState({
+ systemPrompt: 'You are an expert test automation engineer. Generate clean, executable Playwright test code.',
+ userPrompt: 'Generate complete Playwright test code for the provided test cases. Include proper setup, teardown, error handling, and comprehensive assertions. Make the tests robust and maintainable.'
+ });
+
+ const [executeForm, setExecuteForm] = useState({
+ executionType: 'python'
+ });
+
+ const steps = [
+ { title: 'Element Extraction', icon: Globe, description: 'Extract UI elements using Playwright' },
+ { title: 'Test Cases Generation', icon: FileText, description: 'Generate comprehensive test scenarios' },
+ { title: 'Code Generation', icon: Code, description: 'Generate executable Playwright test code' },
+ { title: 'Code Execution', icon: Terminal, description: 'Execute and validate tests' }
+ ];
+
+ // Load last session on component mount
+ useEffect(() => {
+ const loadLastSession = async () => {
+ try {
+ const response = await fetch(`${API_BASE_URL}/last-session`);
+ if (response.ok) {
+ const data = await response.json();
+ if (data.session) {
+ await loadSession(data.session);
+ }
+ }
+ } catch (error) {
+ console.error('Failed to load last session:', error);
+ } finally {
+ setInitialLoading(false);
+ }
+ };
+
+ loadLastSession();
+ }, []);
+
+ const loadSession = async (session) => {
+ setSessionId(session.session_id);
+ setUrlHash(session.url_hash);
+
+ // Restore form data
+ if (session.web_url) {
+ setElementForm(prev => ({
+ ...prev,
+ webUrl: session.web_url,
+ configContent: session.config_content || ''
+ }));
+ }
+
+ // Restore results
+ setResults({
+ elements: session.results.extracted_elements,
+ testcases: session.results.test_cases,
+ code: session.results.generated_code,
+ execution: session.results.execution_result
+ });
+
+ // Set current step based on completed stages
+ const completedStages = session.completed_stages;
+ if (completedStages.includes('code_execution')) {
+ setCurrentStep(3);
+ } else if (completedStages.includes('code_generation')) {
+ setCurrentStep(3);
+ } else if (completedStages.includes('testcase_generation')) {
+ setCurrentStep(2);
+ } else if (completedStages.includes('element_extraction')) {
+ setCurrentStep(1);
+ }
+ };
+
+ const callAPI = async (endpoint, data) => {
+ const response = await fetch(`${API_BASE_URL}${endpoint}`, {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify(data)
+ });
+
+ if (!response.ok) {
+ const errorData = await response.json();
+ throw new Error(errorData.detail || `API call failed: ${response.statusText}`);
+ }
+
+ return response.json();
+ };
+
+ const checkUrlHash = async (url) => {
+ try {
+ const response = await callAPI('/check-url-hash', { web_url: url });
+ return response;
+ } catch (error) {
+ console.error('Failed to check URL hash:', error);
+ throw error;
+ }
+ };
+
+ const handleUrlCheck = async () => {
+ if (!elementForm.webUrl) return;
+
+ setLoading(true);
+ try {
+ const hashData = await checkUrlHash(elementForm.webUrl);
+
+ if (hashData.has_existing_session || hashData.existing_sessions.length > 0) {
+ setSessionDialogData(hashData);
+ setShowSessionDialog(true);
+ } else {
+ // No existing sessions, proceed directly
+ await handleElementExtraction(true);
+ }
+ } catch (error) {
+ alert(`Error checking URL: ${error.message}`);
+ }
+ setLoading(false);
+ };
+
+ const handleElementExtraction = async (forceNew = false) => {
+ setLoading(true);
+ setShowSessionDialog(false);
+
+ try {
+ const response = await callAPI('/extract-elements', {
+ web_url: elementForm.webUrl,
+ config_content: elementForm.configContent,
+ system_prompt: elementForm.systemPrompt,
+ user_prompt: elementForm.userPrompt,
+ force_new_session: forceNew
+ });
+
+ setSessionId(response.session_id);
+ setUrlHash(response.url_hash);
+ setResults(prev => ({ ...prev, elements: response.extracted_elements }));
+
+ if (response.is_new_session) {
+ setCurrentStep(1);
+ } else {
+ // Loaded existing session, determine current step
+ const session = await fetch(`${API_BASE_URL}/session/${response.session_id}`)
+ .then(res => res.json());
+ await loadSession(session);
+ }
+ } catch (error) {
+ alert(`Error: ${error.message}`);
+ }
+ setLoading(false);
+ };
+
+ const handleLoadExistingSession = async (session) => {
+ setLoading(true);
+ setShowSessionDialog(false);
+
+ try {
+ const sessionData = await fetch(`${API_BASE_URL}/session/${session.session_id}`)
+ .then(res => res.json());
+ await loadSession(sessionData);
+ } catch (error) {
+ alert(`Error loading session: ${error.message}`);
+ }
+ setLoading(false);
+ };
+
+ const handleTestcaseGeneration = async () => {
+ setLoading(true);
+ try {
+ const response = await callAPI('/generate-testcases', {
+ session_id: sessionId,
+ extracted_elements: results.elements,
+ system_prompt: testcaseForm.systemPrompt,
+ user_prompt: testcaseForm.userPrompt
+ });
+
+ setResults(prev => ({ ...prev, testcases: response.test_cases }));
+ setCurrentStep(2);
+ } catch (error) {
+ alert(`Error: ${error.message}`);
+ }
+ setLoading(false);
+ };
+
+ const handleCodeGeneration = async () => {
+ setLoading(true);
+ try {
+ const response = await callAPI('/generate-code', {
+ session_id: sessionId,
+ test_cases: results.testcases,
+ system_prompt: codeForm.systemPrompt,
+ user_prompt: codeForm.userPrompt
+ });
+
+ setResults(prev => ({ ...prev, code: response.generated_code }));
+ setCurrentStep(3);
+ } catch (error) {
+ alert(`Error: ${error.message}`);
+ }
+ setLoading(false);
+ };
+
+ const handleCodeExecution = async () => {
+ setLoading(true);
+ try {
+ const response = await callAPI('/execute-code', {
+ session_id: sessionId,
+ code: results.code,
+ execution_type: executeForm.executionType
+ });
+
+ setResults(prev => ({ ...prev, execution: response.execution_result }));
+ } catch (error) {
+ alert(`Error: ${error.message}`);
+ }
+ setLoading(false);
+ };
+
+ const loadUrlHistory = async () => {
+ if (!elementForm.webUrl) return;
+
+ try {
+ const encodedUrl = encodeURIComponent(elementForm.webUrl);
+ const response = await fetch(`${API_BASE_URL}/url-history/${encodedUrl}`);
+ const data = await response.json();
+ setUrlHistory(data.sessions || []);
+ setShowHistoryModal(true);
+ } catch (error) {
+ console.error('Failed to load URL history:', error);
+ }
+ };
+
+ const copyToClipboard = async (text) => {
+ try {
+ await navigator.clipboard.writeText(text);
+ alert('Copied to clipboard!');
+ } catch (error) {
+ alert('Failed to copy to clipboard.');
+ }
+ };
+
+ const resetWorkflow = () => {
+ setCurrentStep(0);
+ setSessionId(null);
+ setUrlHash(null);
+ setResults({ elements: null, testcases: null, code: null, execution: null });
+ setElementForm(prev => ({ ...prev, webUrl: '', configContent: '' }));
+ };
+
+ const SessionDialog = () => {
+ if (!showSessionDialog || !sessionDialogData) return null;
+
+ return (
+
+
+
+
Existing Sessions Found
+ setShowSessionDialog(false)}
+ className="text-gray-500 hover:text-gray-700"
+ >
+
+
+
+
+
+ Found {sessionDialogData.existing_sessions.length} existing session(s) for this URL.
+ {sessionDialogData.has_existing_session && " The current page structure matches an existing session."}
+
+
+
+ {sessionDialogData.existing_sessions.slice(0, 5).map((session) => (
+
+
+
+
Hash: {session.url_hash}
+
+ Stage: {session.current_stage} |
+ Steps: {session.completed_stages.join(', ')}
+
+
+ Created: {new Date(session.created_at).toLocaleDateString()} |
+ Updated: {new Date(session.updated_at).toLocaleDateString()}
+
+
+
handleLoadExistingSession(session)}
+ className="bg-blue-500 text-white px-3 py-1 rounded text-sm hover:bg-blue-600"
+ >
+ Load
+
+
+
+ ))}
+
+
+
+ handleElementExtraction(true)}
+ className="bg-green-500 text-white px-4 py-2 rounded hover:bg-green-600"
+ >
+ Start Fresh Session
+
+ setShowSessionDialog(false)}
+ className="bg-gray-500 text-white px-4 py-2 rounded hover:bg-gray-600"
+ >
+ Cancel
+
+
+
+
+ );
+ };
+
+ const HistoryModal = () => {
+ if (!showHistoryModal) return null;
+
+ return (
+
+
+
+
URL Testing History
+ setShowHistoryModal(false)}
+ className="text-gray-500 hover:text-gray-700"
+ >
+
+
+
+
+
+ {urlHistory.map((session) => (
+
+
+
+
Hash: {session.url_hash}
+
+ Stage: {session.current_stage} |
+ Elements: {session.elements_count} |
+ Steps: {session.completed_stages.join(', ')}
+
+
+ Created: {new Date(session.created_at).toLocaleString()} |
+ Updated: {new Date(session.updated_at).toLocaleString()}
+
+
+
{
+ handleLoadExistingSession(session);
+ setShowHistoryModal(false);
+ }}
+ className="bg-blue-500 text-white px-3 py-1 rounded text-sm hover:bg-blue-600"
+ >
+ Load
+
+
+
+ ))}
+
+
+
+ );
+ };
+
+ const StepIndicator = () => (
+
+ {steps.map((step, index) => {
+ const Icon = step.icon;
+ const isActive = index === currentStep;
+ const isCompleted = index < currentStep || (index === currentStep && results[Object.keys(results)[index]]);
+
+ return (
+
+
+
+ {step.title}
+ {isCompleted && index !== currentStep && }
+
+ {index < steps.length - 1 && (
+
+ )}
+
+ );
+ })}
+
+ );
+
+ const ElementExtractionStep = () => (
+
+
+
+ Element Extraction with Playwright
+
+
+
+
+
Web URL
+
+ setElementForm(prev => ({ ...prev, webUrl: e.target.value }))}
+ className="flex-1 p-3 border border-gray-300 rounded-lg focus:ring-2 focus:ring-blue-500 focus:border-transparent"
+ placeholder="https://example.com"
+ required
+ />
+
+
+
+
+
+
+
+ Configuration
+
+
+
+ System Prompt
+
+
+
+ User Prompt
+
+
+
+ {sessionId && urlHash && (
+
+
+ Current Session: {sessionId}
+ URL Hash: {urlHash}
+
+
+ )}
+
+
+ {loading ? : }
+ Extract Elements
+
+
+ {results.elements && (
+
+
+ Extracted Elements ({results.elements.filter(e => e.type !== 'metadata').length})
+
+
+
+ {JSON.stringify(results.elements.slice(0, 5), null, 2)}
+ {results.elements.length > 5 && "\n... and more"}
+
+
+
+ {/* Show element categories */}
+ {results.elements.find(e => e.type === 'metadata') && (
+
+
Element Categories:
+
+ {Object.entries(results.elements.find(e => e.type === 'metadata').data.categories).map(([category, count]) => (
+
+ {category}:
+ {count}
+
+ ))}
+
+
+ )}
+
+ )}
+
+ );
+
+ const TestcaseGenerationStep = () => (
+
+
+
+ Test Cases Generation
+
+
+
+
+ System Prompt
+
+
+
+ User Prompt
+
+
+
+
+ {loading ? : }
+ Generate Test Cases
+
+
+ {results.testcases && (
+
+
Generated Test Cases ({results.testcases.length})
+
+
{JSON.stringify(results.testcases, null, 2)}
+
+
+ )}
+
+ );
+
+ const CodeGenerationStep = () => (
+
+
+
+ Playwright Code Generation
+
+
+
+
+ System Prompt
+
+
+
+ User Prompt
+
+
+
+
+ {loading ? : }
+ Generate Code
+
+
+ {results.code && (
+
+
+
Generated Playwright Test Code
+ copyToClipboard(results.code)}
+ className="text-blue-500 hover:text-blue-600 flex items-center text-sm"
+ >
+
+ Copy Code
+
+
+
+
+ )}
+
+ );
+
+ const CodeExecutionStep = () => (
+
+
+
+ Code Execution
+
+
+
+
+ Execution Type
+ setExecuteForm(prev => ({ ...prev, executionType: e.target.value }))}
+ className="w-full p-3 border border-gray-300 rounded-lg focus:ring-2 focus:ring-orange-500 focus:border-transparent"
+ >
+ Python
+ Pytest
+ Playwright
+
+
+
+ {results.code && (
+
+
Code to Execute
+
+
{results.code.substring(0, 500)}...
+
+
+ )}
+
+
+
+ {loading ? : }
+ Execute Code
+
+
+ {results.execution && (
+
+
+
+ {results.execution.success ? (
+
+ ) : (
+
+ )}
+
+ Execution {results.execution.success ? 'Successful' : 'Failed'}
+
+
+ Return Code: {results.execution.returncode}
+
+
+
+ {results.execution.stdout && (
+
+
Output:
+
+
{results.execution.stdout}
+
+
+ )}
+
+ {results.execution.stderr && (
+
+
Errors:
+
+
{results.execution.stderr}
+
+
+ )}
+
+
+ )}
+
+ );
+
+ if (initialLoading) {
+ return (
+
+
+
+
Loading last session...
+
+
+ );
+ }
+
+ return (
+
+
+
+
LLM UI Testing Framework
+
Hash-based session management with Playwright element extraction
+ {sessionId && (
+
+ Session: {sessionId}
+ {urlHash && (
+
+ Hash: {urlHash}
+
+ )}
+
+ Start New Session
+
+
+ )}
+
+
+
+
+
+ {currentStep === 0 && }
+ {currentStep === 1 && }
+ {currentStep === 2 && }
+ {currentStep === 3 && }
+
+
+ {currentStep === 3 && results.execution && (
+
+
+ {results.execution.success ? (
+ <>
+
+ Workflow completed successfully! Your Playwright tests have been executed.
+ >
+ ) : (
+ <>
+
+ Workflow completed with execution issues. Check the output above for details.
+ >
+ )}
+
+
+ )}
+
+
+
+
+
+ );
+};
+
+export default TestingFrameworkApp;
\ No newline at end of file
diff --git a/frontend/script.js b/frontend/script.js
new file mode 100644
index 0000000000..9ba0f968ca
--- /dev/null
+++ b/frontend/script.js
@@ -0,0 +1,488 @@
+// 1. Total Employees Chart
+function createTotalEmployeesChart(data) {
+ const weekCounts = {};
+ data.forEach(item => {
+ if (!weekCounts[item.Week]) {
+ weekCounts[item.Week] = new Set();
+ }
+ weekCounts[item.Week].add(item.SOEID);
+ });
+
+ const weeks = Object.keys(weekCounts);
+ const employeeCounts = weeks.map(week => weekCounts[week].size);
+
+ const ctx = document.getElementById('totalEmployeesChart').getContext('2d');
+ new Chart(ctx, {
+ type: 'bar',
+ data: {
+ labels: weeks,
+ datasets: [{
+ label: 'Total Employees',
+ data: employeeCounts,
+ backgroundColor: 'rgba(54, 162, 235, 0.8)',
+ borderColor: 'rgba(54, 162, 235, 1)',
+ borderWidth: 1
+ }]
+ },
+ options: {
+ scales: {
+ y: {
+ beginAtZero: true,
+ title: {
+ display: true,
+ text: 'Number of Employees'
+ }
+ },
+ x: {
+ title: {
+ display: true,
+ text: 'Week'
+ }
+ }
+ },
+ plugins: {
+ title: {
+ display: true,
+ text: 'Total Employees per Week'
+ }
+ }
+ }
+ });
+}
+
+createTotalEmployeesChart(jsonData);
+
+
+// 2. PTS Status by Resource Manager Chart and Table
+function createPTSStatusByManagerChart(data) {
+ const weekManagerStatusCounts = {};
+ data.forEach(item => {
+ if (!weekManagerStatusCounts[item.Week]) {
+ weekManagerStatusCounts[item.Week] = {};
+ }
+ if (!weekManagerStatusCounts[item.Week][item.Resource_Manager]) {
+ weekManagerStatusCounts[item.Week][item.Resource_Manager] = {};
+ }
+ if (!weekManagerStatusCounts[item.Week][item.Resource_Manager][item.PTS_Status]) {
+ weekManagerStatusCounts[item.Week][item.Resource_Manager][item.PTS_Status] = 0;
+ }
+ weekManagerStatusCounts[item.Week][item.Resource_Manager][item.PTS_Status]++;
+ });
+
+ const weeks = Object.keys(weekManagerStatusCounts).sort((a, b) => { // Sort weeks chronologically if needed
+ const weekAStartDate = new Date(jsonData.find(item => item.Week === a).Week_Start_Date);
+ const weekBStartDate = new Date(jsonData.find(item => item.Week === b).Week_Start_Date);
+ return weekAStartDate - weekBStartDate;
+ });
+
+ const resourceManagers = [...new Set(data.map(item => item.Resource_Manager))];
+ const statusTypes = ['Submitted', 'Saved', 'Rejected']; // Ensure order
+
+ const datasets = resourceManagers.flatMap(manager => {
+ return statusTypes.map(status => {
+ const dataPoints = weeks.map(week => {
+ return weekManagerStatusCounts[week]?.[manager]?.[status] || 0;
+ });
+ return {
+ label: `${manager} - ${status}`,
+ data: dataPoints,
+ backgroundColor: getStatusColor(status),
+ borderColor: getStatusColor(status).replace('0.8', '1'),
+ borderWidth: 1,
+ stack: manager // Stack bars for each manager across different PTS statuses
+ };
+ });
+ });
+
+
+ const ctx = document.getElementById('ptsStatusByManagerChart').getContext('2d');
+ const ptsStatusByManagerChart = new Chart(ctx, {
+ type: 'bar',
+ data: {
+ labels: weeks,
+ datasets: datasets
+ },
+ options: {
+ scales: {
+ y: {
+ beginAtZero: true,
+ title: {
+ display: true,
+ text: 'Number of Employees'
+ },
+ stacked: true // Enable Y-axis stacking
+ },
+ x: {
+ title: {
+ display: true,
+ text: 'Week'
+ }
+ },
+ },
+ plugins: {
+ title: {
+ display: true,
+ text: 'PTS Status by Resource Manager per Week'
+ },
+ tooltip: {
+ mode: 'index',
+ intersect: false
+ }
+ },
+ onClick: (event, elements) => {
+ if (elements.length > 0) {
+ const elementIndex = elements[0].index;
+ const datasetIndex = elements[0].datasetIndex;
+ const clickedWeek = weeks[elementIndex];
+ const clickedManager = resourceManagers[Math.floor(datasetIndex / statusTypes.length)]; // Correct Manager
+ const clickedStatus = statusTypes[datasetIndex % statusTypes.length]; // Correct Status
+ displayPTSStatusByManagerTable(jsonData, clickedWeek, clickedManager, clickedStatus);
+ }
+ }
+ }
+ });
+}
+
+
+function getStatusColor(status) {
+ switch (status) {
+ case 'Submitted': return 'rgba(75, 192, 192, 0.8)'; // Green
+ case 'Saved': return 'rgba(255, 205, 86, 0.8)'; // Yellow
+ case 'Rejected': return 'rgba(255, 99, 132, 0.8)'; // Red
+ default: return 'rgba(153, 102, 255, 0.8)'; // Purple (default)
+ }
+}
+
+
+function displayPTSStatusByManagerTable(data, week, manager, status) {
+ const tableData = data.filter(item =>
+ item.Week === week && item.Resource_Manager === manager && item.PTS_Status === status
+ );
+
+ const tableContainer = document.getElementById('ptsStatusByManagerTable');
+ tableContainer.innerHTML = ''; // Clear previous table
+ if (tableData.length === 0) {
+ tableContainer.style.display = 'none';
+ return; // No data, hide table and exit
+ }
+
+ const table = document.createElement('table');
+ table.className = 'table table-bordered table-striped';
+
+ // Table Header
+ const thead = document.createElement('thead');
+ const headerRow = document.createElement('tr');
+ const headers = ['SOEID', 'Week', 'Resource Manager', 'PTS Status', 'Vendor', 'Resource Type', 'Hours Charged'];
+ headers.forEach(headerText => {
+ const th = document.createElement('th');
+ th.textContent = headerText;
+ headerRow.appendChild(th);
+ });
+ thead.appendChild(headerRow);
+ table.appendChild(thead);
+
+ // Table Body
+ const tbody = document.createElement('tbody');
+ tableData.forEach(item => {
+ const row = document.createElement('tr');
+ row.insertCell().textContent = item.SOEID;
+ row.insertCell().textContent = item.Week;
+ row.insertCell().textContent = item.Resource_Manager;
+ row.insertCell().textContent = item.PTS_Status;
+ row.insertCell().textContent = item.Vendor;
+ row.insertCell().textContent = item.Resource_Type;
+ row.insertCell().textContent = item.Hours_Charged;
+ tbody.appendChild(row);
+ });
+ table.appendChild(tbody);
+
+ tableContainer.appendChild(table);
+ tableContainer.style.display = 'block'; // Show table
+}
+
+
+createPTSStatusByManagerChart(jsonData);
+
+
+// 3. PTS Status Overview Chart (Pie Chart)
+function createPTSStatusOverviewChart(data) {
+ const statusCounts = {};
+ data.forEach(item => {
+ if (!statusCounts[item.PTS_Status]) {
+ statusCounts[item.PTS_Status] = 0;
+ }
+ statusCounts[item.PTS_Status]++;
+ });
+
+ const statuses = Object.keys(statusCounts);
+ const counts = statuses.map(status => statusCounts[status]);
+ const colors = statuses.map(status => getStatusColor(status).replace('0.8', '0.9')); // Slightly darker colors for pie
+
+ const ctx = document.getElementById('ptsStatusOverviewChart').getContext('2d');
+ new Chart(ctx, {
+ type: 'pie',
+ data: {
+ labels: statuses,
+ datasets: [{
+ label: 'PTS Status Overview',
+ data: counts,
+ backgroundColor: colors,
+ hoverOffset: 4
+ }]
+ },
+ options: {
+ responsive: true,
+ plugins: {
+ legend: {
+ position: 'top',
+ },
+ title: {
+ display: true,
+ text: 'Overall PTS Status Distribution'
+ },
+ tooltip: {
+ callbacks: {
+ label: function(context) {
+ let label = context.label || '';
+ if (label) {
+ label += ': ';
+ }
+ if (context.parsed) {
+ label += context.parsed + ' (' + ((context.parsed / data.length) * 100).toFixed(1) + '%)';
+ }
+ return label;
+ }
+ }
+ }
+ },
+ onClick: (event, elements) => {
+ if (elements.length > 0) {
+ const elementIndex = elements[0].index;
+ const clickedStatus = statuses[elementIndex];
+ displayPTSStatusOverviewTable(jsonData, clickedStatus);
+ }
+ }
+ }
+ });
+}
+
+function displayPTSStatusOverviewTable(data, status) {
+ const tableData = data.filter(item => item.PTS_Status === status);
+ const tableContainer = document.getElementById('ptsStatusOverviewTable');
+ tableContainer.innerHTML = '';
+
+ if (tableData.length === 0) {
+ tableContainer.style.display = 'none';
+ return;
+ }
+
+ const table = createDataTable(tableData); // Reuse function if possible
+ tableContainer.appendChild(table);
+ tableContainer.style.display = 'block';
+}
+
+createPTSStatusOverviewChart(jsonData);
+
+
+// 4. PTS Status Trend Over Weeks (Line Chart)
+function createPTSStatusTrendChart(data) {
+ const weekStatusCounts = {};
+ data.forEach(item => {
+ if (!weekStatusCounts[item.Week]) {
+ weekStatusCounts[item.Week] = {};
+ }
+ if (!weekStatusCounts[item.Week][item.PTS_Status]) {
+ weekStatusCounts[item.Week][item.PTS_Status] = 0;
+ }
+ weekStatusCounts[item.Week][item.PTS_Status]++;
+ });
+
+ const weeks = Object.keys(weekStatusCounts).sort((a, b) => {
+ const weekAStartDate = new Date(jsonData.find(item => item.Week === a).Week_Start_Date);
+ const weekBStartDate = new Date(jsonData.find(item => item.Week === b).Week_Start_Date);
+ return weekAStartDate - weekBStartDate;
+ });
+ const statusTypes = ['Submitted', 'Saved', 'Rejected'];
+ const datasets = statusTypes.map(status => {
+ return {
+ label: `${status} Status`,
+ data: weeks.map(week => weekStatusCounts[week]?.[status] || 0),
+ borderColor: getStatusColor(status).replace('0.8', '1'),
+ backgroundColor: getStatusColor(status).replace('0.8', '1'),
+ borderWidth: 2,
+ tension: 0.4 // For smoother lines
+ };
+ });
+
+ const ctx = document.getElementById('ptsStatusTrendChart').getContext('2d');
+ new Chart(ctx, {
+ type: 'line',
+ data: {
+ labels: weeks,
+ datasets: datasets
+ },
+ options: {
+ responsive: true,
+ scales: {
+ y: {
+ beginAtZero: true,
+ title: {
+ display: true,
+ text: 'Number of Employees'
+ }
+ },
+ x: {
+ title: {
+ display: true,
+ text: 'Week'
+ }
+ }
+ },
+ plugins: {
+ title: {
+ display: true,
+ text: 'PTS Status Trend Over Weeks'
+ }
+ },
+ onClick: (event, elements) => {
+ if (elements.length > 0) {
+ const elementIndex = elements[0].index;
+ const datasetIndex = elements[0].datasetIndex;
+ const clickedWeek = weeks[elementIndex];
+ const clickedStatus = statusTypes[datasetIndex];
+ displayPTSStatusTrendTable(jsonData, clickedWeek, clickedStatus);
+ }
+ }
+ }
+ });
+}
+
+
+function displayPTSStatusTrendTable(data, week, status) {
+ const tableData = data.filter(item => item.Week === week && item.PTS_Status === status);
+ const tableContainer = document.getElementById('ptsStatusTrendTable');
+ tableContainer.innerHTML = '';
+
+ if (tableData.length === 0) {
+ tableContainer.style.display = 'none';
+ return;
+ }
+
+ const table = createDataTable(tableData); // Reuse function
+ tableContainer.appendChild(table);
+ tableContainer.style.display = 'block';
+}
+
+createPTSStatusTrendChart(jsonData);
+
+
+// 5. Resource Type Breakdown Chart (Doughnut Chart)
+function createResourceTypeBreakdownChart(data) {
+ const resourceTypeCounts = {};
+ data.forEach(item => {
+ if (!resourceTypeCounts[item.Resource_Type]) {
+ resourceTypeCounts[item.Resource_Type] = 0;
+ }
+ resourceTypeCounts[item.Resource_Type]++;
+ });
+
+ const resourceTypes = Object.keys(resourceTypeCounts);
+ const counts = resourceTypes.map(type => resourceTypeCounts[type]);
+ const colors = ['rgba(240, 128, 128, 0.8)', 'rgba(173, 216, 230, 0.8)']; // LightCoral, LightBlue
+
+ const ctx = document.getElementById('resourceTypeBreakdownChart').getContext('2d');
+ new Chart(ctx, {
+ type: 'doughnut',
+ data: {
+ labels: resourceTypes,
+ datasets: [{
+ label: 'Resource Type Breakdown',
+ data: counts,
+ backgroundColor: colors,
+ hoverOffset: 4
+ }]
+ },
+ options: {
+ responsive: true,
+ plugins: {
+ legend: {
+ position: 'top'
+ },
+ title: {
+ display: true,
+ text: 'Resource Type Distribution'
+ },
+ tooltip: {
+ callbacks: {
+ label: function(context) {
+ let label = context.label || '';
+ if (label) {
+ label += ': ';
+ }
+ if (context.parsed) {
+ label += context.parsed + ' (' + ((context.parsed / data.length) * 100).toFixed(1) + '%)';
+ }
+ return label;
+ }
+ }
+ }
+ },
+ onClick: (event, elements) => {
+ if (elements.length > 0) {
+ const elementIndex = elements[0].index;
+ const clickedResourceType = resourceTypes[elementIndex];
+ displayResourceTypeBreakdownTable(jsonData, clickedResourceType);
+ }
+ }
+ }
+ });
+}
+
+function displayResourceTypeBreakdownTable(data, resourceType) {
+ const tableData = data.filter(item => item.Resource_Type === resourceType);
+ const tableContainer = document.getElementById('resourceTypeBreakdownTable');
+ tableContainer.innerHTML = '';
+
+ if (tableData.length === 0) {
+ tableContainer.style.display = 'none';
+ return;
+ }
+
+ const table = createDataTable(tableData); // Reuse function
+ tableContainer.appendChild(table);
+ tableContainer.style.display = 'block';
+}
+
+
+createResourceTypeBreakdownChart(jsonData);
+
+
+// --- Helper Function to Create Table ---
+function createDataTable(tableData) {
+ const table = document.createElement('table');
+ table.className = 'table table-bordered table-striped';
+
+ // Table Header (assuming consistent keys across data objects)
+ const thead = document.createElement('thead');
+ const headerRow = document.createElement('tr');
+ const headers = Object.keys(tableData[0] || {}); // Get headers from first object
+ headers.forEach(headerText => {
+ const th = document.createElement('th');
+ th.textContent = headerText;
+ headerRow.appendChild(th);
+ });
+ thead.appendChild(headerRow);
+ table.appendChild(thead);
+
+ // Table Body
+ const tbody = document.createElement('tbody');
+ tableData.forEach(item => {
+ const row = document.createElement('tr');
+ headers.forEach(header => {
+ row.insertCell().textContent = item[header] || ''; // Handle missing props
+ });
+ tbody.appendChild(row);
+ });
+ table.appendChild(tbody);
+ return table;
+}
\ No newline at end of file
diff --git a/frontend/test.ts b/frontend/test.ts
new file mode 100644
index 0000000000..5441e6bce5
--- /dev/null
+++ b/frontend/test.ts
@@ -0,0 +1,3299 @@
+# Frontend Dashboard Architecture (Bottom-Up Design)
+## LLM-Powered UI Testing System - Web Dashboard
+
+### Executive Summary
+
+This document outlines a **bottom-up architecture** for a modern web dashboard that provides a user-friendly interface to the LLM-Powered UI Testing System. The architecture follows a progressive enhancement approach, starting with foundational components and building each layer upon the previous one. The system pairs a React.js frontend with a FastAPI backend for modern, high-performance asynchronous API development.
+
+---
+
+## 🏗️ Bottom-Up Architecture Philosophy
+
+The architecture follows a **progressive building approach**:
+
+1. **Foundation Layer**: Core data models and FastAPI services
+2. **Service Layer**: Business logic and API endpoints
+3. **Integration Layer**: Real-time communication and external systems
+4. **Presentation Layer**: React components and user interfaces
+5. **Orchestration Layer**: Complete dashboard features
+
+```
+┌─────────────────────── LAYER 5: ORCHESTRATION ───────────────────────┐
+│ Complete Dashboard Features │
+│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ ┌─────────────┐ │
+│ │ Project Mgmt │ │ Test Results │ │ Real-time UI │ │ Admin Panel │ │
+│ └──────────────┘ └──────────────┘ └──────────────┘ └─────────────┘ │
+└───────────────────────────────┬───────────────────────────────────────┘
+ │
+┌─────────────────────── LAYER 4: PRESENTATION ────────────────────────┐
+│ React.js Frontend Components │
+│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ ┌─────────────┐ │
+│ │ UI Components│ │ State Mgmt │ │ Routing │ │ Bootstrap │ │
+│ │ (TypeScript) │ │ (Redux TK) │ │ (React Router│ │ (v5.13) │ │
+│ └──────────────┘ └──────────────┘ └──────────────┘ └─────────────┘ │
+└───────────────────────────────┬───────────────────────────────────────┘
+ │
+┌─────────────────────── LAYER 3: INTEGRATION ─────────────────────────┐
+│ Real-time & External Integration │
+│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ ┌─────────────┐ │
+│ │ WebSocket │ │ File Uploads │ │ Notifications│ │ Task Queue │ │
+│ │ (Socket.IO) │ │ (Multipart) │ │ (Real-time) │ │ (Celery) │ │
+│ └──────────────┘ └──────────────┘ └──────────────┘ └─────────────┘ │
+└───────────────────────────────┬───────────────────────────────────────┘
+ │
+┌─────────────────────── LAYER 2: SERVICE ─────────────────────────────┐
+│ FastAPI Business Logic │
+│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ ┌─────────────┐ │
+│ │ CRUD APIs │ │ Auth Service │ │ Validation │ │ Error │ │
+│ │ (Pydantic) │ │ (JWT/OAuth) │ │ (Schemas) │ │ Handling │ │
+│ └──────────────┘ └──────────────┘ └──────────────┘ └─────────────┘ │
+└───────────────────────────────┬───────────────────────────────────────┘
+ │
+┌─────────────────────── LAYER 1: FOUNDATION ──────────────────────────┐
+│ Core Data & Infrastructure │
+│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ ┌─────────────┐ │
+│ │ PostgreSQL │ │ Redis Cache │ │ File Storage │ │ Config │ │
+│ │ (SQLAlchemy) │ │ (Sessions) │ │ (Local/Cloud)│ │ (Environment)│ │
+│ └──────────────┘ └──────────────┘ └──────────────┘ └─────────────┘ │
+└───────────────────────────────────────────────────────────────────────┘
+┌─────────────────────── SUPPORTING BACKEND SERVICES ──────────────────┐
+│ │
+│ ┌─────────────────┐ ┌──────────────────┐ ┌──────────────────┐ │
+│ │ Task Queue │ │ File Management │ │ Monitoring │ │
+│ │ • Celery │ │ • Test Outputs │ │ • System Health │ │
+│ │ • Redis │ │ • Screenshots │ │ • Performance │ │
+│ │ • Job Status │ │ • Reports │ │ • Error Logs │ │
+│ └─────────────────┘ └──────────────────┘ └──────────────────┘ │
+│ │
+└──────────────────────────────┬───────────────────────────────────────┘
+ │ Data Persistence
+ ↓
+┌─────────────────────── DATABASE LAYER ───────────────────────────────┐
+│ │
+│ ┌──────────────────┐ ┌──────────────────┐ ┌──────────────────┐ │
+│ │ PostgreSQL │ │ Redis │ │ File Storage │ │
+│ │ • User Data │ │ • Cache Layer │ │ • Test Artifacts│ │
+│ │ • Projects │ │ • Sessions │ │ • Screenshots │ │
+│ │ • Test History │ │ • Task Queue │ │ • Reports │ │
+│ └──────────────────┘ └──────────────────┘ └──────────────────┘ │
+│ │
+└──────────────────────────────────────────────────────────────────────┘
+```
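+
+To make the layering concrete, here is a minimal sketch of how the pieces defined in the sections below could be wired into one FastAPI application. It is illustrative only: the imported modules come from the Layer 1 listings later in this document, while the entry-point file name (`main.py`) and the startup table creation are assumptions.
+
+```python
+# main.py (illustrative sketch; file name and startup strategy are assumptions)
+from fastapi import FastAPI
+from fastapi.middleware.cors import CORSMiddleware
+
+from config.settings import settings
+from config.database import engine
+from models.base import Base
+
+app = FastAPI(title=settings.app_name, version=settings.app_version)
+
+# CORS for the React dev server (allowed_origins defaults to http://localhost:3000)
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=settings.allowed_origins,
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+@app.on_event("startup")
+def create_tables() -> None:
+    # Create Layer 1 tables; a real deployment would use Alembic migrations instead
+    Base.metadata.create_all(bind=engine)
+
+@app.get("/health")
+def health_check() -> dict:
+    return {"status": "ok", "version": settings.app_version}
+```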
+
+---
+
+## 🔧 Layer 1: Foundation - Core Data Models & Infrastructure
+
+### 1.1 Database Models (SQLAlchemy with FastAPI)
+
+#### Core Entity Models
+```python
+# models/base.py
+from sqlalchemy import Column, Integer, DateTime, String
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.sql import func
+from datetime import datetime
+from typing import Optional
+
+Base = declarative_base()
+
+class BaseModel(Base):
+ __abstract__ = True
+
+ id = Column(Integer, primary_key=True, index=True)
+ created_at = Column(DateTime(timezone=True), server_default=func.now())
+ updated_at = Column(DateTime(timezone=True), onupdate=func.now())
+
+# models/user.py
+from sqlalchemy import Column, String, Boolean, JSON
+from sqlalchemy.orm import relationship
+from .base import BaseModel
+
+class User(BaseModel):
+ __tablename__ = "users"
+
+ email = Column(String, unique=True, index=True, nullable=False)
+ hashed_password = Column(String, nullable=False)
+ full_name = Column(String, nullable=False)
+ is_active = Column(Boolean, default=True)
+ is_superuser = Column(Boolean, default=False)
+ preferences = Column(JSON, default=dict)
+
+ # Relationships
+ projects = relationship("Project", back_populates="owner")
+ test_runs = relationship("TestRun", back_populates="created_by")
+
+# models/project.py
+from sqlalchemy import Column, Integer, String, Text, JSON, ForeignKey, Boolean
+from sqlalchemy.orm import relationship
+from .base import BaseModel
+
+class Project(BaseModel):
+ __tablename__ = "projects"
+
+ name = Column(String, nullable=False)
+ description = Column(Text)
+ target_url = Column(String, nullable=False)
+ max_pages = Column(Integer, default=10)
+ max_depth = Column(Integer, default=3)
+ frameworks = Column(JSON, default=list) # ['selenium', 'playwright']
+ llm_provider = Column(String, default='openai') # 'openai', 'anthropic', etc
+ llm_model = Column(String, default='gpt-4')
+ auth_config = Column(JSON, default=dict)
+ is_active = Column(Boolean, default=True)
+
+ # Foreign Keys
+ owner_id = Column(Integer, ForeignKey("users.id"))
+
+ # Relationships
+ owner = relationship("User", back_populates="projects")
+ test_runs = relationship("TestRun", back_populates="project")
+
+# models/test_run.py
+from sqlalchemy import Column, Integer, DateTime, String, Text, JSON, ForeignKey, Float, Enum
+from sqlalchemy.orm import relationship
+from enum import Enum as PyEnum
+from .base import BaseModel
+
+class TestStatus(PyEnum):
+ PENDING = "pending"
+ RUNNING = "running"
+ COMPLETED = "completed"
+ FAILED = "failed"
+ CANCELLED = "cancelled"
+
+class TestRun(BaseModel):
+ __tablename__ = "test_runs"
+
+ name = Column(String, nullable=False)
+ status = Column(Enum(TestStatus), default=TestStatus.PENDING)
+ progress = Column(Float, default=0.0) # 0.0 to 1.0
+ total_pages = Column(Integer, default=0)
+ completed_pages = Column(Integer, default=0)
+ total_tests = Column(Integer, default=0)
+ passed_tests = Column(Integer, default=0)
+ failed_tests = Column(Integer, default=0)
+ config = Column(JSON, default=dict)
+ results = Column(JSON, default=dict)
+ error_message = Column(Text)
+ start_time = Column(DateTime(timezone=True))
+ end_time = Column(DateTime(timezone=True))
+
+ # Foreign Keys
+ project_id = Column(Integer, ForeignKey("projects.id"))
+ created_by_id = Column(Integer, ForeignKey("users.id"))
+
+ # Relationships
+ project = relationship("Project", back_populates="test_runs")
+ created_by = relationship("User", back_populates="test_runs")
+ test_cases = relationship("TestCase", back_populates="test_run")
+
+# models/test_case.py
+from sqlalchemy import Column, Integer, String, Text, JSON, Float, ForeignKey, Boolean
+from sqlalchemy.orm import relationship
+from .base import BaseModel
+
+class TestCase(BaseModel):
+ __tablename__ = "test_cases"
+
+ page_url = Column(String, nullable=False)
+ test_name = Column(String, nullable=False)
+ test_code = Column(Text, nullable=False)
+ framework = Column(String, nullable=False) # 'selenium', 'playwright'
+ passed = Column(Boolean)
+ error_message = Column(Text)
+ execution_time = Column(Float) # seconds
+ screenshot_path = Column(String)
+ test_metadata = Column("metadata", JSON, default=dict) # "metadata" is reserved on SQLAlchemy declarative models
+
+ # Foreign Keys
+ test_run_id = Column(Integer, ForeignKey("test_runs.id"))
+
+ # Relationships
+ test_run = relationship("TestRun", back_populates="test_cases")
+```
+
+### 1.2 Configuration & Environment Setup
+
+#### Database Configuration
+```python
+# config/database.py
+from sqlalchemy import create_engine
+from sqlalchemy.orm import sessionmaker
+from sqlalchemy.pool import StaticPool
+from typing import Generator
+import os
+
+# Database URLs
+DATABASE_URL = os.getenv(
+ "DATABASE_URL",
+ "postgresql://user:password@localhost/ui_testing_db"
+)
+REDIS_URL = os.getenv("REDIS_URL", "redis://localhost:6379")
+
+# Create engine
+engine = create_engine(
+ DATABASE_URL,
+ poolclass=StaticPool if "sqlite" in DATABASE_URL else None,
+ echo=os.getenv("SQL_DEBUG", "false").lower() == "true"
+)
+
+# Session factory
+SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
+
+# Dependency for FastAPI
+def get_db() -> Generator:
+ db = SessionLocal()
+ try:
+ yield db
+ finally:
+ db.close()
+
+# Redis connection
+import redis
+redis_client = redis.from_url(REDIS_URL, decode_responses=True)
+```
+
+#### Application Settings
+```python
+# config/settings.py
+from pydantic import BaseSettings, Field
+from typing import List
+import os
+
+class Settings(BaseSettings):
+ # Application
+ app_name: str = "UI Testing Dashboard API"
+ app_version: str = "1.0.0"
+ debug: bool = Field(default=False, env="DEBUG")
+
+ # Database
+ database_url: str = Field(..., env="DATABASE_URL")
+ redis_url: str = Field(..., env="REDIS_URL")
+
+ # Security
+ secret_key: str = Field(..., env="SECRET_KEY")
+ algorithm: str = "HS256"
+ access_token_expire_minutes: int = 30
+
+ # CORS
+ allowed_origins: List[str] = Field(
+ default=["http://localhost:3000"],
+ env="ALLOWED_ORIGINS"
+ )
+
+ # File Storage
+ upload_dir: str = Field(default="./uploads", env="UPLOAD_DIR")
+ max_file_size: int = Field(default=10485760, env="MAX_FILE_SIZE") # 10MB
+
+ # External Services
+ openai_api_key: str = Field(default="", env="OPENAI_API_KEY")
+ anthropic_api_key: str = Field(default="", env="ANTHROPIC_API_KEY")
+
+ class Config:
+ env_file = ".env"
+
+settings = Settings()
+```
+
+### 1.3 File Storage Infrastructure
+
+#### File Management Service
+```python
+# services/file_storage.py
+from pathlib import Path
+from typing import Optional, BinaryIO
+import os
+import uuid
+import aiofiles
+from fastapi import UploadFile
+
+class FileStorageService:
+ def __init__(self, base_path: str = "./uploads"):
+ self.base_path = Path(base_path)
+ self.base_path.mkdir(exist_ok=True)
+
+ # Create subdirectories
+ (self.base_path / "screenshots").mkdir(exist_ok=True)
+ (self.base_path / "reports").mkdir(exist_ok=True)
+ (self.base_path / "test_artifacts").mkdir(exist_ok=True)
+
+ async def save_file(
+ self,
+ file: UploadFile,
+ subfolder: str = "general"
+ ) -> str:
+ """Save uploaded file and return relative path"""
+ # Generate unique filename
+ file_extension = Path(file.filename).suffix
+ unique_filename = f"{uuid.uuid4()}{file_extension}"
+
+ # Create subfolder path
+ subfolder_path = self.base_path / subfolder
+ subfolder_path.mkdir(exist_ok=True)
+
+ # Full file path
+ file_path = subfolder_path / unique_filename
+
+ # Save file asynchronously
+ async with aiofiles.open(file_path, 'wb') as buffer:
+ content = await file.read()
+ await buffer.write(content)
+
+ # Return relative path
+ return str(file_path.relative_to(self.base_path))
+
+ def get_file_path(self, relative_path: str) -> Path:
+ """Get absolute path from relative path"""
+ return self.base_path / relative_path
+
+ def delete_file(self, relative_path: str) -> bool:
+ """Delete file and return success status"""
+ try:
+ file_path = self.get_file_path(relative_path)
+ if file_path.exists():
+ file_path.unlink()
+ return True
+ return False
+ except Exception:
+ return False
+
+# Global instance
+file_storage = FileStorageService()
+```
+
+---
+
+## ⚡ Layer 2: Service - FastAPI Business Logic
+
+### 2.1 Pydantic Schemas for Data Validation
+
+#### Base Schemas
+```python
+# schemas/base.py
+from pydantic import BaseModel, Field
+from datetime import datetime
+from typing import Optional
+
+class BaseSchema(BaseModel):
+ class Config:
+ orm_mode = True # enable loading from ORM objects (Pydantic v1, matching the Field/regex usage elsewhere)
+ json_encoders = {
+ datetime: lambda dt: dt.isoformat()
+ }
+
+class PaginationParams(BaseModel):
+ skip: int = Field(default=0, ge=0)
+ limit: int = Field(default=100, ge=1, le=1000)
+
+class PaginatedResponse(BaseModel):
+ items: list
+ total: int
+ skip: int
+ limit: int
+ has_more: bool
+```
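+
+As a usage sketch, an endpoint can validate paging input through `PaginationParams` and return a `PaginatedResponse`; the route path, the `Project` query, and the dict serialization are assumptions for illustration.
+
+```python
+# api/endpoints/projects_list.py (illustrative sketch; path and serialization are assumptions)
+from fastapi import APIRouter, Depends
+from sqlalchemy.orm import Session
+
+from config.database import get_db
+from models.project import Project
+from schemas.base import PaginationParams, PaginatedResponse
+
+router = APIRouter()
+
+@router.get("/projects/", response_model=PaginatedResponse)
+def list_projects(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
+    params = PaginationParams(skip=skip, limit=limit)  # enforces the bounds declared on the schema
+    total = db.query(Project).count()
+    rows = db.query(Project).offset(params.skip).limit(params.limit).all()
+    return PaginatedResponse(
+        items=[{"id": p.id, "name": p.name, "target_url": p.target_url} for p in rows],
+        total=total,
+        skip=params.skip,
+        limit=params.limit,
+        has_more=params.skip + len(rows) < total,  # rows remain beyond this page
+    )
+```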
+
+#### Core Entity Schemas
+```python
+# schemas/user.py
+from pydantic import BaseModel, EmailStr, Field
+from typing import Optional, Dict, Any
+from datetime import datetime
+
+class UserBase(BaseModel):
+ email: EmailStr
+ full_name: str
+ is_active: bool = True
+ preferences: Dict[str, Any] = {}
+
+class UserCreate(UserBase):
+ password: str = Field(..., min_length=8)
+
+class UserUpdate(BaseModel):
+ email: Optional[EmailStr] = None
+ full_name: Optional[str] = None
+ is_active: Optional[bool] = None
+ preferences: Optional[Dict[str, Any]] = None
+
+class UserResponse(UserBase):
+ id: int
+ created_at: datetime
+ updated_at: Optional[datetime]
+
+# schemas/project.py
+from pydantic import BaseModel, Field
+from typing import List, Dict, Any, Optional
+from datetime import datetime
+
+class ProjectBase(BaseModel):
+ name: str = Field(..., min_length=1, max_length=255)
+ description: Optional[str] = None
+ target_url: str = Field(..., regex=r"^https?://")
+ max_pages: int = Field(default=10, ge=1, le=1000)
+ max_depth: int = Field(default=3, ge=1, le=10)
+ frameworks: List[str] = Field(default=["selenium"])
+ llm_provider: str = Field(default="openai")
+ llm_model: str = Field(default="gpt-4")
+ auth_config: Dict[str, Any] = Field(default={})
+ is_active: bool = True
+
+class ProjectCreate(ProjectBase):
+ pass
+
+class ProjectUpdate(BaseModel):
+ name: Optional[str] = None
+ description: Optional[str] = None
+ target_url: Optional[str] = None
+ max_pages: Optional[int] = None
+ max_depth: Optional[int] = None
+ frameworks: Optional[List[str]] = None
+ llm_provider: Optional[str] = None
+ llm_model: Optional[str] = None
+ auth_config: Optional[Dict[str, Any]] = None
+ is_active: Optional[bool] = None
+
+class ProjectResponse(ProjectBase):
+ id: int
+ owner_id: int
+ created_at: datetime
+ updated_at: Optional[datetime]
+```
+
+### 2.2 Authentication Service
+
+#### JWT Authentication
+```python
+# services/auth.py
+from datetime import datetime, timedelta
+from typing import Optional
+from jose import JWTError, jwt
+from passlib.context import CryptContext
+from fastapi import HTTPException, status, Depends
+from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
+from sqlalchemy.orm import Session
+from config.settings import settings
+from config.database import get_db
+from models.user import User
+
+# Password hashing
+pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
+
+# HTTP Bearer for JWT tokens
+security = HTTPBearer()
+
+class AuthService:
+ @staticmethod
+ def verify_password(plain_password: str, hashed_password: str) -> bool:
+ """Verify a password against its hash"""
+ return pwd_context.verify(plain_password, hashed_password)
+
+ @staticmethod
+ def get_password_hash(password: str) -> str:
+ """Hash a password"""
+ return pwd_context.hash(password)
+
+ @staticmethod
+ def create_access_token(data: dict, expires_delta: Optional[timedelta] = None):
+ """Create JWT access token"""
+ to_encode = data.copy()
+ if expires_delta:
+ expire = datetime.utcnow() + expires_delta
+ else:
+ expire = datetime.utcnow() + timedelta(
+ minutes=settings.access_token_expire_minutes
+ )
+ to_encode.update({"exp": expire})
+ encoded_jwt = jwt.encode(
+ to_encode, settings.secret_key, algorithm=settings.algorithm
+ )
+ return encoded_jwt
+
+ @staticmethod
+ def verify_token(token: str) -> dict:
+ """Verify and decode JWT token"""
+ try:
+ payload = jwt.decode(
+ token, settings.secret_key, algorithms=[settings.algorithm]
+ )
+ return payload
+ except JWTError:
+ raise HTTPException(
+ status_code=status.HTTP_401_UNAUTHORIZED,
+ detail="Could not validate credentials",
+ headers={"WWW-Authenticate": "Bearer"},
+ )
+
+ @staticmethod
+ def authenticate_user(db: Session, email: str, password: str) -> Optional[User]:
+ """Authenticate user with email and password"""
+ user = db.query(User).filter(User.email == email).first()
+ if not user:
+ return None
+ if not AuthService.verify_password(password, user.hashed_password):
+ return None
+ return user
+
+# Dependency to get current user
+async def get_current_user(
+ credentials: HTTPAuthorizationCredentials = Depends(security),
+ db: Session = Depends(get_db)
+) -> User:
+ """Dependency to get current authenticated user"""
+ token = credentials.credentials
+ payload = AuthService.verify_token(token)
+ email: str = payload.get("sub")
+ if email is None:
+ raise HTTPException(
+ status_code=status.HTTP_401_UNAUTHORIZED,
+ detail="Invalid authentication credentials",
+ )
+
+ user = db.query(User).filter(User.email == email).first()
+ if user is None:
+ raise HTTPException(
+ status_code=status.HTTP_401_UNAUTHORIZED,
+ detail="User not found",
+ )
+ return user
+
+# Dependency to get current active user
+async def get_current_active_user(
+ current_user: User = Depends(get_current_user)
+) -> User:
+ """Dependency to get current active user"""
+ if not current_user.is_active:
+ raise HTTPException(
+ status_code=status.HTTP_400_BAD_REQUEST,
+ detail="Inactive user"
+ )
+ return current_user
+```
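+
+The service above is typically exposed through a small auth router. Below is a minimal sketch, assuming `/auth/login` accepts the standard OAuth2 form fields (which is why the Layer 4 client submits `username`/`password` as form data) and `/auth/me` returns the token's user; the exact paths and response shapes are assumptions.
+
+```python
+# api/endpoints/auth.py (illustrative sketch; paths and response shapes are assumptions)
+from fastapi import APIRouter, Depends, HTTPException, status
+from fastapi.security import OAuth2PasswordRequestForm
+from sqlalchemy.orm import Session
+
+from config.database import get_db
+from services.auth import AuthService, get_current_active_user
+from models.user import User
+
+router = APIRouter(prefix="/auth", tags=["auth"])
+
+@router.post("/login")
+def login(form_data: OAuth2PasswordRequestForm = Depends(), db: Session = Depends(get_db)):
+    # The OAuth2 form calls the identifier "username"; here it carries the email
+    user = AuthService.authenticate_user(db, form_data.username, form_data.password)
+    if not user:
+        raise HTTPException(
+            status_code=status.HTTP_401_UNAUTHORIZED,
+            detail="Incorrect email or password",
+        )
+    token = AuthService.create_access_token({"sub": user.email})
+    return {
+        "access_token": token,
+        "token_type": "bearer",
+        "user": {"id": user.id, "email": user.email, "full_name": user.full_name},
+    }
+
+@router.get("/me")
+def read_current_user(current_user: User = Depends(get_current_active_user)):
+    return {
+        "id": current_user.id,
+        "email": current_user.email,
+        "full_name": current_user.full_name,
+        "is_active": current_user.is_active,
+        "preferences": current_user.preferences,
+    }
+```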
+
+### 2.3 CRUD Services
+
+#### Generic CRUD Base Class
+```python
+# services/crud_base.py
+from typing import Any, Dict, Generic, List, Optional, Type, TypeVar, Union
+from fastapi.encoders import jsonable_encoder
+from pydantic import BaseModel
+from sqlalchemy.orm import Session
+from models.base import BaseModel as DBBaseModel
+
+ModelType = TypeVar("ModelType", bound=DBBaseModel)
+CreateSchemaType = TypeVar("CreateSchemaType", bound=BaseModel)
+UpdateSchemaType = TypeVar("UpdateSchemaType", bound=BaseModel)
+
+class CRUDBase(Generic[ModelType, CreateSchemaType, UpdateSchemaType]):
+ def __init__(self, model: Type[ModelType]):
+ self.model = model
+
+ def get(self, db: Session, id: Any) -> Optional[ModelType]:
+ return db.query(self.model).filter(self.model.id == id).first()
+
+ def get_multi(
+ self, db: Session, *, skip: int = 0, limit: int = 100
+ ) -> List[ModelType]:
+ return db.query(self.model).offset(skip).limit(limit).all()
+
+ def create(self, db: Session, *, obj_in: CreateSchemaType) -> ModelType:
+ obj_in_data = jsonable_encoder(obj_in)
+ db_obj = self.model(**obj_in_data)
+ db.add(db_obj)
+ db.commit()
+ db.refresh(db_obj)
+ return db_obj
+
+ def update(
+ self,
+ db: Session,
+ *,
+ db_obj: ModelType,
+ obj_in: Union[UpdateSchemaType, Dict[str, Any]]
+ ) -> ModelType:
+ obj_data = jsonable_encoder(db_obj)
+ if isinstance(obj_in, dict):
+ update_data = obj_in
+ else:
+ update_data = obj_in.dict(exclude_unset=True)
+ for field in obj_data:
+ if field in update_data:
+ setattr(db_obj, field, update_data[field])
+ db.add(db_obj)
+ db.commit()
+ db.refresh(db_obj)
+ return db_obj
+
+ def delete(self, db: Session, *, id: int) -> ModelType:
+ obj = db.query(self.model).get(id)
+ db.delete(obj)
+ db.commit()
+ return obj
+```
+
+#### User CRUD Service
+```python
+# services/crud_user.py
+from typing import List, Optional
+from sqlalchemy.orm import Session
+from models.user import User
+from schemas.user import UserCreate, UserUpdate
+from .crud_base import CRUDBase
+
+class CRUDUser(CRUDBase[User, UserCreate, UserUpdate]):
+ def get_by_email(self, db: Session, email: str) -> Optional[User]:
+ return db.query(self.model).filter(User.email == email).first()
+
+ def get_multi_superusers(self, db: Session) -> List[User]:
+ return db.query(self.model).filter(User.is_superuser == True).all()
+
+user = CRUDUser(User)
+```
+
+#### Project CRUD Service
+```python
+# services/crud_project.py
+from typing import List, Optional
+from sqlalchemy.orm import Session
+from models.project import Project
+from models.user import User
+from schemas.project import ProjectCreate, ProjectUpdate
+from .crud_base import CRUDBase
+
+class CRUDProject(CRUDBase[Project, ProjectCreate, ProjectUpdate]):
+ def get_by_owner(
+ self, db: Session, *, owner_id: int, skip: int = 0, limit: int = 100
+ ) -> List[Project]:
+ return (
+ db.query(self.model)
+ .filter(Project.owner_id == owner_id)
+ .offset(skip)
+ .limit(limit)
+ .all()
+ )
+
+ def create_with_owner(
+ self, db: Session, *, obj_in: ProjectCreate, owner_id: int
+ ) -> Project:
+ obj_in_data = obj_in.dict()
+ db_obj = self.model(**obj_in_data, owner_id=owner_id)
+ db.add(db_obj)
+ db.commit()
+ db.refresh(db_obj)
+ return db_obj
+
+ def get_active_projects(
+ self, db: Session, *, owner_id: int
+ ) -> List[Project]:
+ return (
+ db.query(self.model)
+ .filter(
+ Project.owner_id == owner_id,
+ Project.is_active == True
+ )
+ .all()
+ )
+
+project = CRUDProject(Project)
+```
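+
+To show how these CRUD singletons plug into routes, here is a hedged sketch of a projects endpoint pair; the router prefix, the dict serialization helper, and the ownership check are assumptions.
+
+```python
+# api/endpoints/projects.py (illustrative sketch; prefix and serialization are assumptions)
+from fastapi import APIRouter, Depends
+from sqlalchemy.orm import Session
+
+from config.database import get_db
+from services.auth import get_current_active_user
+from services.crud_project import project as project_crud
+from schemas.project import ProjectCreate
+from models.user import User
+from exceptions import ProjectNotFoundError
+
+router = APIRouter(prefix="/projects", tags=["projects"])
+
+def _to_dict(p) -> dict:
+    # Minimal serialization for the sketch; a response schema would replace this
+    return {"id": p.id, "name": p.name, "target_url": p.target_url, "is_active": p.is_active}
+
+@router.post("/")
+def create_project(
+    project_in: ProjectCreate,
+    db: Session = Depends(get_db),
+    current_user: User = Depends(get_current_active_user),
+):
+    # Ownership comes from the authenticated user, not from the request body
+    created = project_crud.create_with_owner(db, obj_in=project_in, owner_id=current_user.id)
+    return _to_dict(created)
+
+@router.get("/{project_id}")
+def get_project(
+    project_id: int,
+    db: Session = Depends(get_db),
+    current_user: User = Depends(get_current_active_user),
+):
+    db_project = project_crud.get(db, id=project_id)
+    if not db_project or db_project.owner_id != current_user.id:
+        raise ProjectNotFoundError(project_id)
+    return _to_dict(db_project)
+```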
+
+### 2.4 Error Handling & Validation
+
+#### Custom Exception Classes
+```python
+# exceptions.py
+from fastapi import HTTPException, status
+
+class ProjectNotFoundError(HTTPException):
+ def __init__(self, project_id: int):
+ super().__init__(
+ status_code=status.HTTP_404_NOT_FOUND,
+ detail=f"Project with id {project_id} not found"
+ )
+
+class TestRunNotFoundError(HTTPException):
+ def __init__(self, test_run_id: int):
+ super().__init__(
+ status_code=status.HTTP_404_NOT_FOUND,
+ detail=f"Test run with id {test_run_id} not found"
+ )
+
+class InsufficientPermissionsError(HTTPException):
+ def __init__(self):
+ super().__init__(
+ status_code=status.HTTP_403_FORBIDDEN,
+ detail="Insufficient permissions to perform this action"
+ )
+
+class ValidationError(HTTPException):
+ def __init__(self, detail: str):
+ super().__init__(
+ status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
+ detail=detail
+ )
+```
+
+---
+
+## 🔗 Layer 3: Integration - Real-time & External Systems
+
+### 3.1 WebSocket Implementation for Real-time Updates
+
+#### WebSocket Manager
+```python
+# services/websocket.py
+from typing import Dict, List
+from fastapi import WebSocket, WebSocketDisconnect
+import json
+import asyncio
+from datetime import datetime
+
+class ConnectionManager:
+ def __init__(self):
+ self.active_connections: Dict[int, List[WebSocket]] = {}
+ self.user_connections: Dict[WebSocket, int] = {} # reverse lookup: connection -> user_id
+
+ async def connect(self, websocket: WebSocket, user_id: int):
+ await websocket.accept()
+ if user_id not in self.active_connections:
+ self.active_connections[user_id] = []
+ self.active_connections[user_id].append(websocket)
+ self.user_connections[websocket] = user_id
+
+ def disconnect(self, websocket: WebSocket):
+ user_id = self.user_connections.get(websocket)
+ if user_id and user_id in self.active_connections:
+ self.active_connections[user_id].remove(websocket)
+ if not self.active_connections[user_id]:
+ del self.active_connections[user_id]
+ if websocket in self.user_connections:
+ del self.user_connections[websocket]
+
+ async def send_personal_message(self, message: dict, user_id: int):
+ connections = self.active_connections.get(user_id, [])
+ for connection in list(connections): # iterate a copy; disconnect() mutates the underlying list
+ try:
+ await connection.send_text(json.dumps(message))
+ except Exception:
+ self.disconnect(connection) # disconnect() is synchronous
+
+ async def send_test_progress_update(
+ self, test_run_id: int, progress: float, user_id: int
+ ):
+ message = {
+ "type": "test_progress",
+ "test_run_id": test_run_id,
+ "progress": progress,
+ "timestamp": datetime.utcnow().isoformat()
+ }
+ await self.send_personal_message(message, user_id)
+
+ async def send_test_completion(
+ self, test_run_id: int, results: dict, user_id: int
+ ):
+ message = {
+ "type": "test_completed",
+ "test_run_id": test_run_id,
+ "results": results,
+ "timestamp": datetime.utcnow().isoformat()
+ }
+ await self.send_personal_message(message, user_id)
+
+manager = ConnectionManager()
+```
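+
+The manager needs a WebSocket route to accept connections. A minimal sketch follows, assuming a `/ws/{user_id}` path and no extra authentication handshake on the socket itself.
+
+```python
+# api/endpoints/ws.py (illustrative sketch; the path and auth handling are assumptions)
+from fastapi import APIRouter, WebSocket, WebSocketDisconnect
+
+from services.websocket import manager
+
+router = APIRouter()
+
+@router.websocket("/ws/{user_id}")
+async def websocket_endpoint(websocket: WebSocket, user_id: int):
+    await manager.connect(websocket, user_id)
+    try:
+        while True:
+            # Keep the connection open; inbound client messages (e.g. pings) are ignored here
+            await websocket.receive_text()
+    except WebSocketDisconnect:
+        manager.disconnect(websocket)
+```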
+
+### 3.2 Celery Task Queue for Background Processing
+
+#### Celery Configuration
+```python
+# celery_app.py
+from celery import Celery
+from config.settings import settings
+import os
+
+celery_app = Celery(
+ "ui_testing_dashboard",
+ broker=settings.redis_url,
+ backend=settings.redis_url,
+ include=["tasks.test_runner"]
+)
+
+celery_app.conf.update(
+ task_serializer="json",
+ accept_content=["json"],
+ result_serializer="json",
+ timezone="UTC",
+ enable_utc=True,
+ result_expires=3600,
+)
+
+# Task routing
+celery_app.conf.task_routes = {
+ "tasks.test_runner.run_ui_tests": {"queue": "test_queue"},
+ "tasks.test_runner.generate_test_code": {"queue": "llm_queue"},
+}
+```
+
+#### Background Test Runner Tasks
+```python
+# tasks/test_runner.py
+from celery import current_task
+from celery_app import celery_app
+from sqlalchemy.orm import Session
+from config.database import SessionLocal
+from models.test_run import TestRun, TestStatus
+from models.test_case import TestCase
+from services.websocket import manager
+import asyncio
+import subprocess
+import json
+from datetime import datetime
+
+@celery_app.task(bind=True)
+def run_ui_tests(self, test_run_id: int, user_id: int):
+ """Background task to run UI tests"""
+ db = SessionLocal()
+ try:
+ # Get test run
+ test_run = db.query(TestRun).filter(TestRun.id == test_run_id).first()
+ if not test_run:
+ return {"error": "Test run not found"}
+
+ # Update status to running
+ test_run.status = TestStatus.RUNNING
+ test_run.start_time = datetime.utcnow()
+ db.commit()
+
+ # Notify user via WebSocket
+ # No event loop runs inside the Celery worker, so execute the coroutine directly
+ asyncio.run(
+ manager.send_test_progress_update(test_run_id, 0.0, user_id)
+ )
+
+ # Execute the actual testing logic here
+ # This would integrate with your existing UI testing system
+ project = test_run.project
+ config = {
+ "target_url": project.target_url,
+ "max_pages": project.max_pages,
+ "max_depth": project.max_depth,
+ "frameworks": project.frameworks,
+ "llm_provider": project.llm_provider,
+ "llm_model": project.llm_model,
+ "auth_config": project.auth_config
+ }
+
+ # Call your existing UI testing orchestrator
+ # This is where you'd integrate with your plan.md system
+ results = execute_ui_testing_pipeline(config, test_run_id, user_id)
+
+ # Update test run with results
+ test_run.status = TestStatus.COMPLETED
+ test_run.end_time = datetime.utcnow()
+ test_run.results = results
+ test_run.completed_pages = results.get("completed_pages", 0)
+ test_run.total_tests = results.get("total_tests", 0)
+ test_run.passed_tests = results.get("passed_tests", 0)
+ test_run.failed_tests = results.get("failed_tests", 0)
+ db.commit()
+
+ # Final notification
+ asyncio.run(
+ manager.send_test_completion(test_run_id, results, user_id)
+ )
+
+ return results
+
+ except Exception as e:
+ # Handle errors
+ test_run.status = TestStatus.FAILED
+ test_run.error_message = str(e)
+ test_run.end_time = datetime.utcnow()
+ db.commit()
+
+ # Notify user of failure
+ asyncio.run(
+ manager.send_personal_message(
+ {
+ "type": "test_failed",
+ "test_run_id": test_run_id,
+ "error": str(e),
+ "timestamp": datetime.utcnow().isoformat()
+ },
+ user_id
+ )
+ )
+
+ raise
+ finally:
+ db.close()
+
+def execute_ui_testing_pipeline(config: dict, test_run_id: int, user_id: int) -> dict:
+ """
+ This function would integrate with your existing UI testing system
+ from plan.md and architecture.md
+ """
+ # Placeholder for integration with existing system
+ # You would call your orchestrator here
+
+ # Mock implementation for demonstration
+ import time
+ import random
+
+ total_pages = config["max_pages"]
+ results = {
+ "completed_pages": 0,
+ "total_tests": 0,
+ "passed_tests": 0,
+ "failed_tests": 0,
+ "test_cases": []
+ }
+
+ for page_num in range(total_pages):
+ # Simulate progress
+ time.sleep(2) # Simulate work
+
+ # Update progress
+ current_task.update_state(
+ state="PROGRESS",
+ meta={"current": page_num + 1, "total": total_pages}
+ )
+
+ # Send real-time progress update (fraction of pages completed so far)
+ progress = (page_num + 1) / total_pages
+ asyncio.run(
+ manager.send_test_progress_update(test_run_id, progress, user_id)
+ )
+
+ # Simulate test results
+ page_tests = random.randint(3, 8)
+ passed = random.randint(int(page_tests * 0.7), page_tests)
+ failed = page_tests - passed
+
+ results["completed_pages"] += 1
+ results["total_tests"] += page_tests
+ results["passed_tests"] += passed
+ results["failed_tests"] += failed
+
+ return results
+```
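+
+For completeness, a hedged sketch of how an API route might hand a test run to this worker via the `test_queue` configured above; the endpoint path and response shape are assumptions.
+
+```python
+# api/endpoints/test_runs.py (illustrative sketch; path and response shape are assumptions)
+from fastapi import APIRouter, Depends
+from sqlalchemy.orm import Session
+
+from config.database import get_db
+from services.auth import get_current_active_user
+from models.test_run import TestRun
+from models.user import User
+from tasks.test_runner import run_ui_tests
+from exceptions import TestRunNotFoundError
+
+router = APIRouter(prefix="/test-runs", tags=["test-runs"])
+
+@router.post("/{test_run_id}/start")
+def start_test_run(
+    test_run_id: int,
+    db: Session = Depends(get_db),
+    current_user: User = Depends(get_current_active_user),
+):
+    test_run = db.query(TestRun).filter(TestRun.id == test_run_id).first()
+    if not test_run:
+        raise TestRunNotFoundError(test_run_id)
+
+    # Route the job to the dedicated queue declared in celery_app.conf.task_routes
+    async_result = run_ui_tests.apply_async(args=[test_run_id, current_user.id], queue="test_queue")
+    return {"test_run_id": test_run_id, "task_id": async_result.id, "status": "queued"}
+```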
+
+### 3.3 File Upload & Processing
+
+#### File Upload Service
+```python
+# api/endpoints/files.py
+from fastapi import APIRouter, UploadFile, File, Depends, HTTPException
+from fastapi.responses import FileResponse
+from typing import List
+from services.file_storage import file_storage
+from services.auth import get_current_active_user
+from models.user import User
+import mimetypes
+
+router = APIRouter()
+
+@router.post("/upload-screenshot/")
+async def upload_screenshot(
+ file: UploadFile = File(...),
+ current_user: User = Depends(get_current_active_user)
+):
+ """Upload a screenshot file"""
+ # Validate file type
+ if not file.content_type.startswith("image/"):
+ raise HTTPException(
+ status_code=400,
+ detail="Only image files are allowed"
+ )
+
+ # Save file
+ file_path = await file_storage.save_file(file, "screenshots")
+
+ return {
+ "filename": file.filename,
+ "file_path": file_path,
+ "content_type": file.content_type,
+ "message": "Screenshot uploaded successfully"
+ }
+
+@router.get("/files/{file_path:path}")
+async def get_file(
+ file_path: str,
+ current_user: User = Depends(get_current_active_user)
+):
+ """Serve uploaded files"""
+ abs_file_path = file_storage.get_file_path(file_path)
+
+ if not abs_file_path.exists():
+ raise HTTPException(status_code=404, detail="File not found")
+
+ # Determine content type
+ content_type, _ = mimetypes.guess_type(str(abs_file_path))
+
+ return FileResponse(
+ path=abs_file_path,
+ media_type=content_type,
+ filename=abs_file_path.name
+ )
+```
+
+---
+
+## 🎨 Layer 4: Presentation - React.js Frontend
+
+### 4.1 Frontend Technology Stack & Setup
+
+#### Project Structure
+```
+frontend/
+├── public/
+│ ├── index.html
+│ └── favicon.ico
+├── src/
+│ ├── components/ # Reusable UI components
+│ │ ├── common/ # Generic components
+│ │ │ ├── Button/
+│ │ │ ├── Modal/
+│ │ │ ├── Table/
+│ │ │ ├── Form/
+│ │ │ ├── Layout/
+│ │ │ └── LoadingSpinner/
+│ │ ├── dashboard/ # Dashboard-specific components
+│ │ │ ├── ProjectCard/
+│ │ │ ├── TestResultCard/
+│ │ │ ├── ProgressBar/
+│ │ │ ├── StatusBadge/
+│ │ │ └── QuickActions/
+│ │ └── charts/ # Data visualization
+│ │ ├── TestResultsChart/
+│ │ ├── PerformanceChart/
+│ │ └── CoverageChart/
+│ ├── pages/ # Route-level components
+│ │ ├── Dashboard/
+│ │ ├── Projects/
+│ │ ├── TestResults/
+│ │ ├── Settings/
+│ │ └── Reports/
+│ ├── hooks/ # Custom React hooks
+│ │ ├── useAuth.ts
+│ │ ├── useWebSocket.ts
+│ │ ├── useLocalStorage.ts
+│ │ ├── useDebounce.ts
+│ │ └── useAsync.ts
+│ ├── services/ # API service layer
+│ │ ├── api.ts
+│ │ ├── auth.ts
+│ │ ├── projects.ts
+│ │ ├── tests.ts
+│ │ └── websocket.ts
+│ ├── store/ # Redux store
+│ │ ├── slices/
+│ │ │ ├── authSlice.ts
+│ │ │ ├── projectsSlice.ts
+│ │ │ ├── testsSlice.ts
+│ │ │ └── uiSlice.ts
+│ │ └── index.ts
+│ ├── types/ # TypeScript definitions
+│ │ ├── api.ts
+│ │ ├── project.ts
+│ │ ├── test.ts
+│ │ └── user.ts
+│ ├── utils/ # Utility functions
+│ │ ├── formatters.ts
+│ │ ├── validators.ts
+│ │ ├── constants.ts
+│ │ └── helpers.ts
+│ ├── styles/ # Global styles
+│ │ ├── bootstrap-custom.scss
+│ │ ├── variables.scss
+│ │ ├── components.scss
+│ │ └── global.scss
+│ ├── App.tsx
+│ ├── index.tsx
+│ └── setupProxy.js
+├── package.json
+├── tsconfig.json
+├── vite.config.ts
+└── tailwind.config.js # If using Tailwind (optional)
+```
+
+#### Package.json Dependencies
+```json
+{
+ "name": "ui-testing-dashboard",
+ "version": "1.0.0",
+ "private": true,
+ "scripts": {
+ "dev": "vite",
+ "build": "tsc && vite build",
+ "preview": "vite preview",
+ "lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0",
+ "test": "jest",
+ "test:watch": "jest --watch"
+ },
+ "dependencies": {
+ "react": "^18.2.0",
+ "react-dom": "^18.2.0",
+ "react-router-dom": "^6.8.0",
+ "@reduxjs/toolkit": "^1.9.1",
+ "react-redux": "^8.0.5",
+ "@tanstack/react-query": "^4.24.6",
+ "axios": "^1.3.3",
+ "bootstrap": "^5.3.0",
+ "react-bootstrap": "^2.7.0",
+ "react-hook-form": "^7.43.0",
+ "@hookform/resolvers": "^2.9.10",
+ "yup": "^0.32.11",
+ "socket.io-client": "^4.6.1",
+ "chart.js": "^4.2.1",
+ "react-chartjs-2": "^5.2.0",
+ "date-fns": "^2.29.3",
+ "react-hot-toast": "^2.4.0",
+ "lucide-react": "^0.105.0"
+ },
+ "devDependencies": {
+ "@types/react": "^18.0.26",
+ "@types/react-dom": "^18.0.9",
+ "@typescript-eslint/eslint-plugin": "^5.52.0",
+ "@typescript-eslint/parser": "^5.52.0",
+ "@vitejs/plugin-react": "^3.1.0",
+ "eslint": "^8.33.0",
+ "eslint-plugin-react-hooks": "^4.6.0",
+ "eslint-plugin-react-refresh": "^0.3.4",
+ "typescript": "^4.9.3",
+ "vite": "^4.1.0",
+ "sass": "^1.58.0",
+ "@testing-library/react": "^13.4.0",
+ "@testing-library/jest-dom": "^5.16.5",
+ "jest": "^29.4.1"
+ }
+}
+```
+
+### 4.2 Core React Components (Bottom-Up)
+
+#### Base Component Infrastructure
+```typescript
+// src/types/api.ts
+export interface ApiResponse<T> {
+ data: T;
+ message?: string;
+ success: boolean;
+}
+
+export interface PaginatedResponse<T> {
+ items: T[];
+ total: number;
+ skip: number;
+ limit: number;
+ has_more: boolean;
+}
+
+export interface ErrorResponse {
+ detail: string;
+ type?: string;
+}
+
+// src/types/user.ts
+export interface User {
+ id: number;
+ email: string;
+ full_name: string;
+ is_active: boolean;
+ preferences: Record<string, any>;
+ created_at: string;
+ updated_at?: string;
+}
+
+export interface LoginRequest {
+ email: string;
+ password: string;
+}
+
+export interface LoginResponse {
+ access_token: string;
+ token_type: string;
+ user: User;
+}
+
+// src/types/project.ts
+export interface Project {
+ id: number;
+ name: string;
+ description?: string;
+ target_url: string;
+ max_pages: number;
+ max_depth: number;
+ frameworks: string[];
+ llm_provider: string;
+ llm_model: string;
+ auth_config: Record<string, any>;
+ is_active: boolean;
+ owner_id: number;
+ created_at: string;
+ updated_at?: string;
+}
+
+export interface CreateProjectRequest {
+ name: string;
+ description?: string;
+ target_url: string;
+ max_pages?: number;
+ max_depth?: number;
+ frameworks?: string[];
+ llm_provider?: string;
+ llm_model?: string;
+ auth_config?: Record<string, any>;
+}
+
+// src/types/test.ts
+export enum TestStatus {
+ PENDING = "pending",
+ RUNNING = "running",
+ COMPLETED = "completed",
+ FAILED = "failed",
+ CANCELLED = "cancelled"
+}
+
+export interface TestRun {
+ id: number;
+ name: string;
+ status: TestStatus;
+ progress: number;
+ total_pages: number;
+ completed_pages: number;
+ total_tests: number;
+ passed_tests: number;
+ failed_tests: number;
+ config: Record<string, any>;
+ results: Record<string, any>;
+ error_message?: string;
+ start_time?: string;
+ end_time?: string;
+ project_id: number;
+ created_by_id: number;
+ created_at: string;
+ updated_at?: string;
+}
+
+export interface TestCase {
+ id: number;
+ page_url: string;
+ test_name: string;
+ test_code: string;
+ framework: string;
+ passed?: boolean;
+ error_message?: string;
+ execution_time?: number;
+ screenshot_path?: string;
+ metadata: Record<string, any>;
+ test_run_id: number;
+ created_at: string;
+}
+```
+
+#### Authentication Service
+```typescript
+// src/services/auth.ts
+import axios from 'axios';
+import { LoginRequest, LoginResponse, User } from '../types/user';
+
+const API_BASE_URL = process.env.REACT_APP_API_BASE_URL || 'http://localhost:8000';
+
+class AuthService {
+ private tokenKey = 'access_token';
+ private userKey = 'user_data';
+
+ async login(credentials: LoginRequest): Promise<LoginResponse> {
+ const formData = new FormData();
+ formData.append('username', credentials.email);
+ formData.append('password', credentials.password);
+
+ const response = await axios.post(`${API_BASE_URL}/auth/login`, formData, {
+ headers: {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ },
+ });
+
+ const data = response.data;
+
+ // Store token and user data
+ localStorage.setItem(this.tokenKey, data.access_token);
+ localStorage.setItem(this.userKey, JSON.stringify(data.user));
+
+ return data;
+ }
+
+ async logout(): Promise<void> {
+ localStorage.removeItem(this.tokenKey);
+ localStorage.removeItem(this.userKey);
+ }
+
+ getToken(): string | null {
+ return localStorage.getItem(this.tokenKey);
+ }
+
+ getUser(): User | null {
+ const userData = localStorage.getItem(this.userKey);
+ return userData ? JSON.parse(userData) : null;
+ }
+
+ isAuthenticated(): boolean {
+ return !!this.getToken();
+ }
+
+ async getCurrentUser(): Promise<User> {
+ const response = await axios.get(`${API_BASE_URL}/auth/me`);
+ const user = response.data;
+ localStorage.setItem(this.userKey, JSON.stringify(user));
+ return user;
+ }
+}
+
+export const authService = new AuthService();
+```
+
+#### API Service with Axios Interceptors
+```typescript
+// src/services/api.ts
+import axios, { AxiosInstance, AxiosRequestConfig, AxiosResponse } from 'axios';
+import { authService } from './auth';
+import toast from 'react-hot-toast';
+
+const API_BASE_URL = process.env.REACT_APP_API_BASE_URL || 'http://localhost:8000';
+
+class ApiService {
+ private api: AxiosInstance;
+
+ constructor() {
+ this.api = axios.create({
+ baseURL: API_BASE_URL,
+ timeout: 30000,
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ });
+
+ this.setupInterceptors();
+ }
+
+ private setupInterceptors(): void {
+ // Request interceptor to add auth token
+ this.api.interceptors.request.use(
+ (config) => {
+ const token = authService.getToken();
+ if (token) {
+ config.headers.Authorization = `Bearer ${token}`;
+ }
+ return config;
+ },
+ (error) => {
+ return Promise.reject(error);
+ }
+ );
+
+ // Response interceptor for error handling
+ this.api.interceptors.response.use(
+ (response: AxiosResponse) => response,
+ (error) => {
+ if (error.response?.status === 401) {
+ // Unauthorized - redirect to login
+ authService.logout();
+ window.location.href = '/login';
+ toast.error('Session expired. Please login again.');
+ } else if (error.response?.status >= 500) {
+ toast.error('Server error. Please try again later.');
+ } else if (error.response?.data?.detail) {
+ toast.error(error.response.data.detail);
+ }
+
+ return Promise.reject(error);
+ }
+ );
+ }
+
+  async get<T>(url: string, config?: AxiosRequestConfig): Promise<T> {
+    const response = await this.api.get<T>(url, config);
+    return response.data;
+  }
+
+  async post<T>(url: string, data?: any, config?: AxiosRequestConfig): Promise<T> {
+    const response = await this.api.post<T>(url, data, config);
+    return response.data;
+  }
+
+  async put<T>(url: string, data?: any, config?: AxiosRequestConfig): Promise<T> {
+    const response = await this.api.put<T>(url, data, config);
+    return response.data;
+  }
+
+  async delete<T>(url: string, config?: AxiosRequestConfig): Promise<T> {
+    const response = await this.api.delete<T>(url, config);
+    return response.data;
+  }
+
+  async patch<T>(url: string, data?: any, config?: AxiosRequestConfig): Promise<T> {
+    const response = await this.api.patch<T>(url, data, config);
+    return response.data;
+  }
+}
+
+export const apiService = new ApiService();
+```
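+
+#### Project & Test Services (Illustrative Sketch)
+
+The dashboard pages later import a `projectsService` and `testsService`, which are not defined in this snippet; they are assumed to be thin wrappers over `apiService`. The sketch below is illustrative only: the `/api/v1/projects` paths and the skip/limit pagination params are assumptions based on the FastAPI router shown later, not confirmed endpoints.
+
+```typescript
+// src/services/projects.ts -- minimal sketch, not part of the original snippet
+import { apiService } from './api';
+import { PaginatedResponse } from '../types/api';
+import { Project, CreateProjectRequest } from '../types/project';
+
+// Assumed REST base path; adjust to whatever the backend router actually registers.
+const BASE = '/api/v1/projects';
+
+export const projectsService = {
+  getProjects(params?: { skip?: number; limit?: number }): Promise<PaginatedResponse<Project>> {
+    return apiService.get<PaginatedResponse<Project>>(BASE, { params });
+  },
+
+  getProject(id: number): Promise<Project> {
+    return apiService.get<Project>(`${BASE}/${id}`);
+  },
+
+  createProject(data: CreateProjectRequest): Promise<Project> {
+    return apiService.post<Project>(BASE, data);
+  },
+
+  deleteProject(id: number): Promise<void> {
+    return apiService.delete<void>(`${BASE}/${id}`);
+  },
+};
+```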
+````
+This is the description of what the code block changes:
+
+Complete the architecture document with Layer 5 (Orchestration), deployment configuration, and implementation timeline
+
+
+This is the code block that represents the suggested code change:
+````markdown
+---
+
+## 🚀 Layer 5: Orchestration - Complete Dashboard Features
+
+### 5.1 FastAPI Main Application & Router Setup
+
+#### Main FastAPI Application
+
+```python
+# main.py
+from fastapi import FastAPI, Depends
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.middleware.trustedhost import TrustedHostMiddleware
+from fastapi.staticfiles import StaticFiles
+from contextlib import asynccontextmanager
+from datetime import datetime
+
+import uvicorn
+
+from config.settings import settings
+from config.database import engine
+from models import Base
+from api.router import api_router
+from api.websocket import websocket_router
+
+# Create tables
+Base.metadata.create_all(bind=engine)
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+ # Startup
+ print("🚀 Starting UI Testing Dashboard API...")
+ yield
+ # Shutdown
+ print("📴 Shutting down...")
+
+# Create FastAPI app
+app = FastAPI(
+ title=settings.app_name,
+ version=settings.app_version,
+ description="FastAPI backend for LLM-powered UI testing dashboard",
+ lifespan=lifespan,
+ docs_url="/docs" if settings.debug else None,
+ redoc_url="/redoc" if settings.debug else None,
+)
+
+# Security middleware
+app.add_middleware(
+ TrustedHostMiddleware,
+ allowed_hosts=["localhost", "127.0.0.1", "*.your-domain.com"]
+)
+
+# CORS middleware
+app.add_middleware(
+ CORSMiddleware,
+ allow_origins=settings.allowed_origins,
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+)
+
+# Static files for uploaded content
+app.mount("/static", StaticFiles(directory="uploads"), name="static")
+
+# Include routers
+app.include_router(api_router, prefix="/api/v1")
+app.include_router(websocket_router, prefix="/ws")
+
+@app.get("/")
+async def root():
+ return {
+ "message": "UI Testing Dashboard API",
+ "version": settings.app_version,
+ "docs": "/docs" if settings.debug else "disabled"
+ }
+
+@app.get("/health")
+async def health_check():
+ return {"status": "healthy", "timestamp": datetime.utcnow()}
+
+if __name__ == "__main__":
+ uvicorn.run(
+ "main:app",
+ host="0.0.0.0",
+ port=8000,
+ reload=settings.debug,
+ workers=1 if settings.debug else 4
+ )
+```
+
+#### API Router Configuration
+
+```python
+# api/router.py
+from fastapi import APIRouter
+from api.endpoints import auth, projects, tests, users, files
+
+api_router = APIRouter()
+
+# Include all endpoint routers
+api_router.include_router(auth.router, prefix="/auth", tags=["authentication"])
+api_router.include_router(users.router, prefix="/users", tags=["users"])
+api_router.include_router(projects.router, prefix="/projects", tags=["projects"])
+api_router.include_router(tests.router, prefix="/tests", tags=["test-runs"])
+api_router.include_router(files.router, prefix="/files", tags=["file-management"])
+```
+
+### 5.2 Complete Frontend Application
+
+#### Main App Component with Routing
+
+```typescript
+// src/App.tsx
+import React from 'react';
+import { BrowserRouter as Router, Routes, Route, Navigate } from 'react-router-dom';
+import { Provider } from 'react-redux';
+import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
+import { Toaster } from 'react-hot-toast';
+
+import { store } from './store';
+import { useAuth } from './hooks/useAuth';
+import Layout from './components/common/Layout/Layout';
+
+// Pages
+import Dashboard from './pages/Dashboard/Dashboard';
+import Projects from './pages/Projects/Projects';
+import ProjectDetail from './pages/Projects/ProjectDetail';
+import TestResults from './pages/TestResults/TestResults';
+import TestDetail from './pages/TestResults/TestDetail';
+import Settings from './pages/Settings/Settings';
+import Login from './pages/Auth/Login';
+
+// Styles
+import 'bootstrap/dist/css/bootstrap.min.css';
+import './styles/global.scss';
+
+const queryClient = new QueryClient({
+ defaultOptions: {
+ queries: {
+ retry: 1,
+ refetchOnWindowFocus: false,
+ },
+ },
+});
+
+function ProtectedRoute({ children }: { children: React.ReactNode }) {
+ const { isAuthenticated } = useAuth();
+ return isAuthenticated ? <>{children}</> : <Navigate to="/login" replace />;
+}
+
+function App() {
+  return (
+    <Provider store={store}>
+      <QueryClientProvider client={queryClient}>
+        <Router>
+          <Routes>
+            {/* Public routes */}
+            <Route path="/login" element={<Login />} />
+
+            {/* Protected routes share the common Layout */}
+            <Route path="/" element={<ProtectedRoute><Layout /></ProtectedRoute>}>
+              <Route index element={<Dashboard />} />
+              <Route path="projects" element={<Projects />} />
+              <Route path="projects/:id" element={<ProjectDetail />} />
+              <Route path="tests" element={<TestResults />} />
+              <Route path="tests/:id" element={<TestDetail />} />
+              <Route path="settings" element={<Settings />} />
+            </Route>
+
+            {/* Fallback */}
+            <Route path="*" element={<Navigate to="/" replace />} />
+          </Routes>
+        </Router>
+        <Toaster position="top-right" />
+      </QueryClientProvider>
+    </Provider>
+  );
+}
+
+export default App;
+```
+
+#### Dashboard Component (Complete Implementation)
+
+```typescript
+// src/pages/Dashboard/Dashboard.tsx
+import React from 'react';
+import { Container, Row, Col, Card, Button } from 'react-bootstrap';
+import { useQuery } from '@tanstack/react-query';
+import { Plus, Activity, CheckCircle, XCircle, Clock } from 'lucide-react';
+
+import { projectsService } from '../../services/projects';
+import { testsService } from '../../services/tests';
+import ProjectCard from '../../components/dashboard/ProjectCard/ProjectCard';
+import TestResultCard from '../../components/dashboard/TestResultCard/TestResultCard';
+import StatsCard from '../../components/dashboard/StatsCard/StatsCard';
+import QuickActions from '../../components/dashboard/QuickActions/QuickActions';
+import RecentActivity from '../../components/dashboard/RecentActivity/RecentActivity';
+
+const Dashboard: React.FC = () => {
+ const { data: projects } = useQuery({
+ queryKey: ['projects', 'dashboard'],
+ queryFn: () => projectsService.getProjects({ limit: 6 }),
+ });
+
+ const { data: recentTests } = useQuery({
+ queryKey: ['tests', 'recent'],
+ queryFn: () => testsService.getTestRuns({ limit: 5 }),
+ });
+
+ const { data: stats } = useQuery({
+ queryKey: ['dashboard', 'stats'],
+ queryFn: () => testsService.getDashboardStats(),
+ });
+
+  return (
+    <Container fluid className="py-4">
+      {/* Page header */}
+      <div className="d-flex justify-content-between align-items-center mb-4">
+        <h1 className="h3 mb-0">Dashboard</h1>
+        <Button variant="primary" href="/projects/new">
+          <Plus size={18} className="me-2" />
+          New Project
+        </Button>
+      </div>
+
+      {/* Stats Overview (stat field names on `stats` are illustrative) */}
+      <Row className="mb-4">
+        <Col md={3}>
+          <StatsCard
+            title="Total Projects"
+            value={stats?.total_projects ?? 0}
+            icon={<Activity size={24} />}
+            color="primary"
+          />
+        </Col>
+        <Col md={3}>
+          <StatsCard
+            title="Running Tests"
+            value={stats?.running_tests ?? 0}
+            icon={<Clock size={24} />}
+            color="warning"
+          />
+        </Col>
+        <Col md={3}>
+          <StatsCard
+            title="Passed Tests"
+            value={stats?.passed_tests ?? 0}
+            icon={<CheckCircle size={24} />}
+            color="success"
+          />
+        </Col>
+        <Col md={3}>
+          <StatsCard
+            title="Failed Tests"
+            value={stats?.failed_tests ?? 0}
+            icon={<XCircle size={24} />}
+            color="danger"
+          />
+        </Col>
+      </Row>
+
+      {/* Main Content */}
+      <Row>
+        <Col lg={8}>
+          {/* Recent Projects */}
+          <Card className="mb-4">
+            <Card.Header>Recent Projects</Card.Header>
+            <Card.Body>
+              {projects?.items?.length ? (
+                <Row>
+                  {projects.items.map((project) => (
+                    <Col md={6} key={project.id} className="mb-3">
+                      <ProjectCard project={project} />
+                    </Col>
+                  ))}
+                </Row>
+              ) : (
+                <div className="text-center py-4">
+                  <p className="text-muted">No projects yet. Create your first project to get started!</p>
+                  <Button variant="outline-primary" href="/projects/new">
+                    Create Project
+                  </Button>
+                </div>
+              )}
+            </Card.Body>
+          </Card>
+
+          {/* Recent Test Results */}
+          <Card>
+            <Card.Header>Recent Test Results</Card.Header>
+            <Card.Body>
+              {recentTests?.items?.length ? (
+                <div className="d-flex flex-column gap-3">
+                  {recentTests.items.map((test) => (
+                    <TestResultCard key={test.id} testRun={test} />
+                  ))}
+                </div>
+              ) : (
+                <p className="text-muted mb-0">No test runs yet.</p>
+              )}
+            </Card.Body>
+          </Card>
+        </Col>
+
+        {/* Sidebar */}
+        <Col lg={4}>
+          <QuickActions />
+          <RecentActivity />
+        </Col>
+      </Row>
+    </Container>
+  );
+};
+
+export default Dashboard;
+```
+
+### 5.3 Real-time Integration with WebSocket
+
+#### WebSocket Service for Frontend
+
+```typescript
+// src/services/websocket.ts
+import { io, Socket } from 'socket.io-client';
+import { authService } from './auth';
+import toast from 'react-hot-toast';
+
+interface WebSocketMessage {
+ type: string;
+ test_run_id?: number;
+ progress?: number;
+ results?: any;
+ error?: string;
+ timestamp: string;
+}
+
+class WebSocketService {
+ private socket: Socket | null = null;
+ private reconnectAttempts = 0;
+ private maxReconnectAttempts = 5;
+
+ connect(): Promise<void> {
+ return new Promise((resolve, reject) => {
+ const token = authService.getToken();
+ if (!token) {
+ reject(new Error('No authentication token'));
+ return;
+ }
+
+ this.socket = io(process.env.REACT_APP_WS_URL || 'ws://localhost:8000', {
+ auth: { token },
+ transports: ['websocket'],
+ timeout: 20000,
+ });
+
+ this.socket.on('connect', () => {
+ console.log('✅ WebSocket connected');
+ this.reconnectAttempts = 0;
+ resolve();
+ });
+
+ this.socket.on('disconnect', (reason) => {
+ console.log('❌ WebSocket disconnected:', reason);
+ if (reason === 'io server disconnect') {
+ // Server initiated disconnect, try to reconnect
+ this.attemptReconnect();
+ }
+ });
+
+ this.socket.on('connect_error', (error) => {
+ console.error('WebSocket connection error:', error);
+ reject(error);
+ });
+
+ this.setupMessageHandlers();
+ });
+ }
+
+ private setupMessageHandlers(): void {
+ if (!this.socket) return;
+
+ this.socket.on('test_progress', (data: WebSocketMessage) => {
+ // Dispatch to Redux store
+ window.dispatchEvent(
+ new CustomEvent('testProgress', { detail: data })
+ );
+ });
+
+ this.socket.on('test_completed', (data: WebSocketMessage) => {
+ toast.success(`Test completed: ${data.test_run_id}`);
+ window.dispatchEvent(
+ new CustomEvent('testCompleted', { detail: data })
+ );
+ });
+
+ this.socket.on('test_failed', (data: WebSocketMessage) => {
+ toast.error(`Test failed: ${data.error}`);
+ window.dispatchEvent(
+ new CustomEvent('testFailed', { detail: data })
+ );
+ });
+ }
+
+ private attemptReconnect(): void {
+ if (this.reconnectAttempts >= this.maxReconnectAttempts) {
+ toast.error('Failed to reconnect to server');
+ return;
+ }
+
+ this.reconnectAttempts++;
+ const delay = Math.pow(2, this.reconnectAttempts) * 1000; // Exponential backoff
+
+ setTimeout(() => {
+ console.log(`Attempting to reconnect (${this.reconnectAttempts}/${this.maxReconnectAttempts})...`);
+ this.connect().catch(() => {
+ // Will try again if this fails
+ });
+ }, delay);
+ }
+
+ disconnect(): void {
+ if (this.socket) {
+ this.socket.disconnect();
+ this.socket = null;
+ }
+ }
+
+ isConnected(): boolean {
+ return this.socket?.connected || false;
+ }
+}
+
+export const webSocketService = new WebSocketService();
+```
+
+#### Custom Hook for Real-time Updates
+
+```typescript
+// src/hooks/useWebSocket.ts
+import { useEffect, useCallback } from 'react';
+import { useDispatch } from 'react-redux';
+import { webSocketService } from '../services/websocket';
+import { updateTestProgress, updateTestStatus } from '../store/slices/testsSlice';
+
+export const useWebSocket = () => {
+ const dispatch = useDispatch();
+
+ const handleTestProgress = useCallback((event: CustomEvent) => {
+ const { test_run_id, progress } = event.detail;
+ dispatch(updateTestProgress({ testRunId: test_run_id, progress }));
+ }, [dispatch]);
+
+ const handleTestCompleted = useCallback((event: CustomEvent) => {
+ const { test_run_id, results } = event.detail;
+ dispatch(updateTestStatus({
+ testRunId: test_run_id,
+ status: 'completed',
+ results
+ }));
+ }, [dispatch]);
+
+ const handleTestFailed = useCallback((event: CustomEvent) => {
+ const { test_run_id, error } = event.detail;
+ dispatch(updateTestStatus({
+ testRunId: test_run_id,
+ status: 'failed',
+ error
+ }));
+ }, [dispatch]);
+
+ useEffect(() => {
+ // Connect to WebSocket
+ webSocketService.connect().catch(console.error);
+
+ // Add event listeners
+ window.addEventListener('testProgress', handleTestProgress as EventListener);
+ window.addEventListener('testCompleted', handleTestCompleted as EventListener);
+ window.addEventListener('testFailed', handleTestFailed as EventListener);
+
+ return () => {
+ // Cleanup
+ window.removeEventListener('testProgress', handleTestProgress as EventListener);
+ window.removeEventListener('testCompleted', handleTestCompleted as EventListener);
+ window.removeEventListener('testFailed', handleTestFailed as EventListener);
+ webSocketService.disconnect();
+ };
+ }, [handleTestProgress, handleTestCompleted, handleTestFailed]);
+
+ return {
+ isConnected: webSocketService.isConnected(),
+ };
+};
+```
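+
+Components can then opt into live updates simply by calling the hook. As a small illustrative usage (not part of the original), a connection indicator might look like this:
+
+```typescript
+// src/components/common/ConnectionStatus.tsx -- illustrative usage of useWebSocket
+import React from 'react';
+import { Badge } from 'react-bootstrap';
+import { useWebSocket } from '../../hooks/useWebSocket';
+
+export const ConnectionStatus: React.FC = () => {
+  const { isConnected } = useWebSocket();
+
+  return (
+    <Badge bg={isConnected ? 'success' : 'secondary'}>
+      {isConnected ? 'Live updates on' : 'Offline'}
+    </Badge>
+  );
+};
+```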
+
+---
+
+## 🐳 Deployment Configuration
+
+### Docker Setup
+
+#### FastAPI Dockerfile
+
+```dockerfile
+# Dockerfile.api
+FROM python:3.11-slim
+
+WORKDIR /app
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+ gcc \
+ postgresql-client \
+ && rm -rf /var/lib/apt/lists/*
+
+# Install Python dependencies
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy application code
+COPY . .
+
+# Create uploads directory
+RUN mkdir -p uploads/screenshots uploads/reports uploads/test_artifacts
+
+# Expose port
+EXPOSE 8000
+
+# Run the application
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
+```
+
+#### React Frontend Dockerfile
+
+```dockerfile
+# Dockerfile.frontend
+FROM node:18-alpine as build
+
+WORKDIR /app
+
+# Copy package files
+COPY package*.json ./
+# Install all dependencies (dev dependencies are required for the Vite build)
+RUN npm ci
+
+# Copy source code
+COPY . .
+
+# Build the app
+RUN npm run build
+
+# Production stage
+FROM nginx:alpine
+
+# Copy built app
+COPY --from=build /app/dist /usr/share/nginx/html
+
+# Copy nginx configuration
+COPY nginx.conf /etc/nginx/nginx.conf
+
+EXPOSE 80
+
+CMD ["nginx", "-g", "daemon off;"]
+```
+
+#### Docker Compose
+
+```yaml
+# docker-compose.yml
+version: '3.8'
+
+services:
+ postgres:
+ image: postgres:15
+ environment:
+ POSTGRES_DB: ui_testing_db
+ POSTGRES_USER: postgres
+ POSTGRES_PASSWORD: password
+ volumes:
+ - postgres_data:/var/lib/postgresql/data
+ ports:
+ - "5432:5432"
+
+ redis:
+ image: redis:7-alpine
+ ports:
+ - "6379:6379"
+
+ api:
+ build:
+ context: .
+ dockerfile: Dockerfile.api
+ environment:
+ DATABASE_URL: postgresql://postgres:password@postgres:5432/ui_testing_db
+ REDIS_URL: redis://redis:6379
+ SECRET_KEY: your-secret-key-here
+ ALLOWED_ORIGINS: '["http://localhost:3000", "http://localhost"]'
+ depends_on:
+ - postgres
+ - redis
+ ports:
+ - "8000:8000"
+ volumes:
+ - ./uploads:/app/uploads
+
+ frontend:
+ build:
+ context: ./frontend
+ dockerfile: Dockerfile.frontend
+ environment:
+ REACT_APP_API_BASE_URL: http://localhost:8000
+ REACT_APP_WS_URL: ws://localhost:8000
+ ports:
+ - "80:80"
+ depends_on:
+ - api
+
+ celery-worker:
+ build:
+ context: .
+ dockerfile: Dockerfile.api
+ command: celery -A celery_app worker --loglevel=info --queues=test_queue,llm_queue
+ environment:
+ DATABASE_URL: postgresql://postgres:password@postgres:5432/ui_testing_db
+ REDIS_URL: redis://redis:6379
+ depends_on:
+ - postgres
+ - redis
+ volumes:
+ - ./uploads:/app/uploads
+
+volumes:
+ postgres_data:
+```
+
+---
+
+## 📋 Implementation Timeline (Bottom-Up Approach)
+
+### Sprint 1: Foundation (Weeks 1-2)
+- ✅ Set up PostgreSQL database with SQLAlchemy models
+- ✅ Configure Redis for caching and sessions
+- ✅ Implement basic FastAPI app structure
+- ✅ Set up file storage service
+- ✅ Create core Pydantic schemas
+- ✅ Implement JWT authentication
+
+### Sprint 2: Service Layer (Weeks 3-4)
+- ✅ Build CRUD services for all entities
+- ✅ Implement comprehensive error handling
+- ✅ Set up Celery for background tasks
+- ✅ Create WebSocket manager for real-time updates
+- ✅ Build file upload/download endpoints
+- ✅ Add API validation and documentation
+
+### Sprint 3: Integration (Weeks 5-6)
+- ✅ Integrate with existing UI testing system
+- ✅ Implement real-time progress tracking
+- ✅ Set up task queue processing
+- ✅ Build notification system
+- ✅ Create comprehensive API endpoints
+- ✅ Add monitoring and logging
+
+### Sprint 4: Frontend & Polish (Weeks 7-8)
+- ✅ Build React components bottom-up
+- ✅ Implement Redux state management
+- ✅ Create real-time dashboard
+- ✅ Add comprehensive error handling
+- ✅ Implement responsive design
+- ✅ Add testing and documentation
+- ✅ Deploy and optimize
+
+---
+
+## 🎯 Key Features Summary
+
+### ✨ Core Capabilities
+1. **Project Management**: Create, configure, and manage UI testing projects
+2. **Real-time Testing**: Monitor test execution with live progress updates
+3. **Result Visualization**: Rich charts and reports for test outcomes
+4. **File Management**: Upload, store, and serve test artifacts
+5. **User Authentication**: Secure JWT-based authentication system
+6. **REST API**: Comprehensive FastAPI-based backend
+7. **Real-time Updates**: WebSocket integration for live notifications
+
+### 🚀 Technical Highlights
+- **Bottom-up Architecture**: Built progressively from foundation to features
+- **FastAPI Backend**: Modern async Python framework
+- **React Frontend**: TypeScript-based SPA with Bootstrap 5.3
+- **Real-time Communication**: WebSocket for live updates
+- **Task Processing**: Celery for background job handling
+- **Database**: PostgreSQL with SQLAlchemy ORM
+- **Caching**: Redis for sessions and task queue
+- **Containerized**: Docker deployment with compose
+
+This architecture provides a solid foundation for building a modern, scalable UI testing dashboard that integrates seamlessly with your existing LLM-powered testing system.
+
+````
+
+=========================END=============================================
+# Frontend Dashboard Architecture - Bottom-Up Design
+## LLM-Powered UI Testing System Web Dashboard
+
+### Executive Summary
+
+This document presents a **comprehensive bottom-up architecture** for a modern web dashboard that interfaces with the LLM-Powered UI Testing System. Following architectural best practices, we build from foundational data structures to complex user interfaces, ensuring each layer is robust before adding the next. The system employs React.js with TypeScript for the frontend and FastAPI for a modern, high-performance Python backend.
+
+**Key Architectural Principles:**
+- 🏗️ **Bottom-Up Construction**: Each component builds upon tested foundations
+- ⚡ **Performance-First**: FastAPI async capabilities + React optimization
+- 🔒 **Security by Design**: JWT authentication, input validation, RBAC
+- 📱 **Mobile-First UI**: Bootstrap 5.3 responsive design
+- 🔄 **Real-time Updates**: WebSocket integration for live test monitoring
+- 🧪 **Test-Driven**: Comprehensive testing at every layer
+
+---
+
+## 🏗️ System Architecture Overview
+
+```
+┌─────────────────────────── FRONTEND LAYER ──────────────────────────┐
+│ │
+│ ┌──────────────────────────────────────────────────────────────┐ │
+│ │ React.js SPA │ │
+│ │ (Port: 3000) │ │
+│ └──────────────────────────────────────────────────────────────┘ │
+│ │ │
+│ ┌─────────────────┐ ┌──────────────────┐ ┌──────────────────┐ │
+│ │ Dashboard Views │ │ Component Lib │ │ State Mgmt │ │
+│ │ • Project Mgmt │ │ • Bootstrap 5 │ │ • Redux Toolkit │ │
+│ │ • Test Results │ │ • Custom Comp │ │ • React Query │ │
+│ │ • Config UI │ │ • Charts/Viz │ │ • Local Storage │ │
+│ └─────────────────┘ └──────────────────┘ └──────────────────┘ │
+│ │
+└──────────────────────────────┬───────────────────────────────────────┘
+ │ REST API Calls (HTTPS)
+ ↓
+┌─────────────────────── API GATEWAY LAYER ────────────────────────────┐
+│ │
+│ ┌──────────────────────────────────────────────────────────────┐ │
+│ │ Django REST Framework │ │
+│ │ (Port: 8000) │ │
+│ └──────────────────────────────────────────────────────────────┘ │
+│ │
+│ ┌─────────────────┐ ┌──────────────────┐ ┌──────────────────┐ │
+│ │ Auth & RBAC │ │ API Endpoints │ │ WebSocket │ │
+│ │ • JWT Auth │ │ • CRUD APIs │ │ • Real-time │ │
+│ │ • Permissions │ │ • File Upload │ │ • Notifications │ │
+│ │ • Rate Limit │ │ • Data Export │ │ • Progress │ │
+│ └─────────────────┘ └──────────────────┘ └──────────────────┘ │
+│ │
+└──────────────────────────────┬───────────────────────────────────────┘
+ │ Internal API Calls
+ ↓
+┌──────────────────── BACKEND INTEGRATION LAYER ──────────────────────┐
+│ │
+│ ┌──────────────────────────────────────────────────────────────┐ │
+│ │ Existing Python Testing System │ │
+│ │ (Orchestrator Integration) │ │
+│ └──────────────────────────────────────────────────────────────┘ │
+│ │
+│ ┌─────────────────┐ ┌──────────────────┐ ┌──────────────────┐ │
+│ │ Task Queue │ │ File Management │ │ Monitoring │ │
+│ │ • Celery │ │ • Test Outputs │ │ • System Health │ │
+│ │ • Redis │ │ • Screenshots │ │ • Performance │ │
+│ │ • Job Status │ │ • Reports │ │ • Error Logs │ │
+│ └─────────────────┘ └──────────────────┘ └──────────────────┘ │
+│ │
+└──────────────────────────────┬───────────────────────────────────────┘
+ │ Data Persistence
+ ↓
+┌─────────────────────── DATABASE LAYER ───────────────────────────────┐
+│ │
+│ ┌──────────────────┐ ┌──────────────────┐ ┌──────────────────┐ │
+│ │ PostgreSQL │ │ Redis │ │ File Storage │ │
+│ │ • User Data │ │ • Cache Layer │ │ • Test Artifacts│ │
+│ │ • Projects │ │ • Sessions │ │ • Screenshots │ │
+│ │ • Test History │ │ • Task Queue │ │ • Reports │ │
+│ └──────────────────┘ └──────────────────┘ └──────────────────┘ │
+│ │
+└──────────────────────────────────────────────────────────────────────┘
+```
+
+---
+
+## 🎨 Frontend Architecture (React.js)
+
+### Technology Stack
+- **Framework**: React 18+ with TypeScript
+- **Styling**: Bootstrap 5.3 + Custom SCSS
+- **State Management**: Redux Toolkit + React Query
+- **Routing**: React Router v6
+- **Build Tool**: Vite
+- **HTTP Client**: Axios with interceptors
+- **Real-time**: Socket.IO client
+- **Charts**: Chart.js with react-chartjs-2
+- **Forms**: React Hook Form with Yup validation
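+
+Because Vite is the build tool, API calls can be proxied to the backend during development from `vite.config.ts` rather than a CRA-style `setupProxy.js`. A minimal sketch, assuming the backend listens on port 8000:
+
+```typescript
+// vite.config.ts -- minimal sketch; backend host/port are assumptions
+import { defineConfig } from 'vite';
+import react from '@vitejs/plugin-react';
+
+export default defineConfig({
+  plugins: [react()],
+  server: {
+    port: 3000,
+    proxy: {
+      // Forward REST and WebSocket traffic to the backend in dev
+      '/api': { target: 'http://localhost:8000', changeOrigin: true },
+      '/ws': { target: 'ws://localhost:8000', ws: true },
+    },
+  },
+});
+```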
+
+### Component Architecture
+
+```
+src/
+├── components/ # Reusable UI components
+│ ├── common/
+│ │ ├── Button/
+│ │ ├── Modal/
+│ │ ├── Table/
+│ │ ├── Form/
+│ │ └── Layout/
+│ ├── dashboard/
+│ │ ├── ProjectCard/
+│ │ ├── TestResultCard/
+│ │ ├── ProgressIndicator/
+│ │ └── StatusBadge/
+│ └── charts/
+│ ├── TestResultsChart/
+│ ├── PerformanceChart/
+│ └── CoverageChart/
+├── pages/ # Route-level components
+│ ├── Dashboard/
+│ ├── Projects/
+│ ├── TestResults/
+│ ├── Configuration/
+│ ├── Reports/
+│ └── Settings/
+├── hooks/ # Custom React hooks
+│ ├── useAuth.ts
+│ ├── useWebSocket.ts
+│ ├── useLocalStorage.ts
+│ └── useDebounce.ts
+├── services/ # API service layer
+│ ├── api.ts
+│ ├── auth.ts
+│ ├── projects.ts
+│ ├── tests.ts
+│ └── websocket.ts
+├── store/ # Redux store configuration
+│ ├── slices/
+│ │ ├── authSlice.ts
+│ │ ├── projectsSlice.ts
+│ │ ├── testsSlice.ts
+│ │ └── uiSlice.ts
+│ └── index.ts
+├── types/ # TypeScript type definitions
+│ ├── api.ts
+│ ├── project.ts
+│ ├── test.ts
+│ └── user.ts
+├── utils/ # Utility functions
+│ ├── formatters.ts
+│ ├── validators.ts
+│ └── constants.ts
+└── styles/ # Global styles
+ ├── bootstrap-custom.scss
+ ├── variables.scss
+ └── global.scss
+```
+
+### Key Features & Components
+
+#### 1. **Dashboard Overview**
+```typescript
+interface DashboardProps {
+ projects: Project[];
+ recentTests: TestRun[];
+ systemStats: SystemStats;
+}
+
+const Dashboard: React.FC<DashboardProps> = ({
+  projects,
+  recentTests,
+  systemStats
+}) => {
+  return (
+    <Container fluid className="py-4">
+      {/* SystemStatsBar is a placeholder name for a stats summary component */}
+      <Row className="mb-4">
+        <Col>
+          <SystemStatsBar stats={systemStats} />
+        </Col>
+      </Row>
+
+      <Row>
+        <Col lg={8}>
+          {projects.map((project) => (
+            <ProjectCard key={project.id} project={project} />
+          ))}
+        </Col>
+        <Col lg={4}>
+          {recentTests.map((test) => (
+            <TestResultCard key={test.id} testRun={test} />
+          ))}
+        </Col>
+      </Row>
+    </Container>
+  );
+};
+```
+
+#### 2. **Project Management**
+- Create/Edit/Delete projects
+- Configure target URLs and authentication
+- Set testing parameters (max pages, depth, frameworks)
+- LLM provider and model selection
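+
+To make the configuration options above concrete, here is a minimal sketch of a project form wired with React Hook Form and Yup (the component and exact field set are illustrative; the wizard below splits these inputs across steps):
+
+```typescript
+// Illustrative project form -- field names mirror the Project model, submit handling is assumed
+import React from 'react';
+import { useForm } from 'react-hook-form';
+import { yupResolver } from '@hookform/resolvers/yup';
+import * as yup from 'yup';
+
+const schema = yup.object({
+  name: yup.string().required(),
+  target_url: yup.string().url().required(),
+  max_pages: yup.number().min(1).max(1000).default(50),
+  llm_provider: yup.string().oneOf(['openai', 'google']).required(),
+});
+
+type ProjectFormValues = yup.InferType<typeof schema>;
+
+export function ProjectForm({ onSubmit }: { onSubmit: (values: ProjectFormValues) => void }) {
+  const { register, handleSubmit, formState: { errors } } = useForm<ProjectFormValues>({
+    resolver: yupResolver(schema),
+  });
+
+  return (
+    <form onSubmit={handleSubmit(onSubmit)}>
+      <input {...register('name')} placeholder="Project name" />
+      <input {...register('target_url')} placeholder="https://app.example.com" />
+      <input type="number" {...register('max_pages')} />
+      <select {...register('llm_provider')}>
+        <option value="openai">OpenAI</option>
+        <option value="google">Google</option>
+      </select>
+      {errors.target_url && <span>{errors.target_url.message}</span>}
+      <button type="submit">Create Project</button>
+    </form>
+  );
+}
+```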
+
+#### 3. **Test Configuration Wizard**
+```typescript
+interface TestConfigurationProps {
+ onSubmit: (config: TestConfig) => void;
+ initialData?: Partial<TestConfig>;
+}
+
+const TestConfiguration: React.FC<TestConfigurationProps> = ({
+  onSubmit,
+  initialData
+}) => {
+  const [currentStep, setCurrentStep] = useState(1);
+
+  return (
+    <Card>
+      <Card.Header>
+        {/* WizardProgress and the step components below are placeholder names */}
+        <WizardProgress currentStep={currentStep} totalSteps={4} />
+      </Card.Header>
+      <Card.Body>
+        {currentStep === 1 && <BasicInfoStep initialData={initialData} onNext={() => setCurrentStep(2)} />}
+        {currentStep === 2 && <AuthConfigStep onNext={() => setCurrentStep(3)} />}
+        {currentStep === 3 && <TestParametersStep onNext={() => setCurrentStep(4)} />}
+        {currentStep === 4 && <ReviewStep onSubmit={onSubmit} />}
+      </Card.Body>
+    </Card>
+  );
+};
+```
+
+#### 4. **Real-time Test Monitoring**
+```typescript
+const TestMonitor: React.FC<{ testId: string }> = ({ testId }) => {
+ const { testStatus, progress } = useWebSocket(`/test/${testId}`);
+
+  return (
+    <Card>
+      <Card.Header className="d-flex justify-content-between align-items-center">
+        <span>Test Execution</span>
+        <StatusBadge status={testStatus} />
+      </Card.Header>
+      <Card.Body>
+        <ProgressBar now={progress} label={`${progress}%`} animated />
+      </Card.Body>
+    </Card>
+  );
+};
+```
+
+#### 5. **Results Visualization**
+- Interactive charts for test results
+- Downloadable reports (PDF, Excel)
+- Screenshot galleries
+- Test script previews
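+
+For example, the `TestResultsChart` component from the component library can be a thin wrapper around react-chartjs-2; a minimal sketch (the pass/fail/skip props are assumptions about the summary shape):
+
+```typescript
+// components/charts/TestResultsChart.tsx -- minimal sketch
+import React from 'react';
+import { Chart as ChartJS, ArcElement, Tooltip, Legend } from 'chart.js';
+import { Doughnut } from 'react-chartjs-2';
+
+ChartJS.register(ArcElement, Tooltip, Legend);
+
+interface TestResultsChartProps {
+  passed: number;
+  failed: number;
+  skipped: number;
+}
+
+export const TestResultsChart: React.FC<TestResultsChartProps> = ({ passed, failed, skipped }) => {
+  const data = {
+    labels: ['Passed', 'Failed', 'Skipped'],
+    datasets: [
+      {
+        data: [passed, failed, skipped],
+        // Colors follow the Bootstrap theme variables used elsewhere in this document
+        backgroundColor: ['#28a745', '#dc3545', '#ffc107'],
+      },
+    ],
+  };
+
+  return <Doughnut data={data} options={{ plugins: { legend: { position: 'bottom' } } }} />;
+};
+```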
+
+---
+
+## 🔧 Backend Architecture (Django)
+
+### Technology Stack
+- **Framework**: Django 4.2+ with Django REST Framework
+- **Database**: PostgreSQL 15+
+- **Cache**: Redis 7+
+- **Task Queue**: Celery with Redis broker
+- **WebSocket**: Django Channels
+- **Authentication**: Django-allauth + JWT
+- **File Storage**: Django-storages (local/cloud)
+- **API Documentation**: drf-spectacular (OpenAPI)
+
+### Project Structure
+
+```
+backend/
+├── config/ # Django settings
+│ ├── settings/
+│ │ ├── base.py
+│ │ ├── development.py
+│ │ ├── production.py
+│ │ └── testing.py
+│ ├── urls.py
+│ ├── wsgi.py
+│ └── asgi.py
+├── apps/
+│ ├── authentication/ # User management & auth
+│ │ ├── models.py
+│ │ ├── serializers.py
+│ │ ├── views.py
+│ │ └── urls.py
+│ ├── projects/ # Project CRUD operations
+│ │ ├── models.py
+│ │ ├── serializers.py
+│ │ ├── views.py
+│ │ └── services.py
+│ ├── tests/ # Test execution & results
+│ │ ├── models.py
+│ │ ├── serializers.py
+│ │ ├── views.py
+│ │ ├── tasks.py # Celery tasks
+│ │ └── consumers.py # WebSocket consumers
+│ ├── files/ # File management
+│ │ ├── models.py
+│ │ ├── views.py
+│ │ └── services.py
+│ └── monitoring/ # System monitoring
+│ ├── models.py
+│ ├── views.py
+│ └── tasks.py
+├── integration/ # Backend system integration
+│ ├── orchestrator_client.py # Python testing system client
+│ ├── task_manager.py # Task execution management
+│ └── file_processor.py # Output file processing
+├── utils/
+│ ├── exceptions.py
+│ ├── permissions.py
+│ ├── pagination.py
+│ └── validators.py
+└── requirements/
+ ├── base.txt
+ ├── development.txt
+ └── production.txt
+```
+
+### API Endpoints Design
+
+#### Authentication & Users
+```python
+# authentication/urls.py
+urlpatterns = [
+ path('auth/login/', LoginView.as_view(), name='login'),
+ path('auth/logout/', LogoutView.as_view(), name='logout'),
+ path('auth/refresh/', RefreshTokenView.as_view(), name='refresh'),
+ path('auth/register/', RegisterView.as_view(), name='register'),
+ path('auth/profile/', ProfileView.as_view(), name='profile'),
+]
+```
+
+#### Projects Management
+```python
+# projects/serializers.py
+class ProjectSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = Project
+ fields = [
+ 'id', 'name', 'description', 'target_url',
+ 'auth_config', 'test_config', 'created_at',
+ 'updated_at', 'last_test_run'
+ ]
+
+ def validate_target_url(self, value):
+ # URL validation logic
+ return value
+
+class TestConfigSerializer(serializers.Serializer):
+ llm_provider = serializers.ChoiceField(choices=['openai', 'google'])
+ llm_model = serializers.CharField(max_length=100)
+ max_pages = serializers.IntegerField(min_value=1, max_value=1000)
+ max_depth = serializers.IntegerField(min_value=1, max_value=10)
+ headless = serializers.BooleanField(default=True)
+ test_framework = serializers.ChoiceField(choices=['playwright', 'selenium'])
+```
+
+#### Test Execution & Results
+```python
+# tests/models.py
+class TestRun(models.Model):
+ project = models.ForeignKey(Project, on_delete=models.CASCADE)
+ status = models.CharField(
+ max_length=20,
+ choices=[
+ ('pending', 'Pending'),
+ ('running', 'Running'),
+ ('completed', 'Completed'),
+ ('failed', 'Failed'),
+ ('cancelled', 'Cancelled'),
+ ]
+ )
+ progress = models.JSONField(default=dict)
+ results = models.JSONField(default=dict)
+ artifacts_path = models.CharField(max_length=500, null=True)
+ started_at = models.DateTimeField(null=True)
+ completed_at = models.DateTimeField(null=True)
+ error_message = models.TextField(null=True)
+ created_by = models.ForeignKey(User, on_delete=models.CASCADE)
+
+class TestResult(models.Model):
+ test_run = models.ForeignKey(TestRun, on_delete=models.CASCADE)
+ page_url = models.URLField()
+ elements_found = models.IntegerField(default=0)
+ test_cases_generated = models.IntegerField(default=0)
+ screenshot_path = models.CharField(max_length=500, null=True)
+ metadata = models.JSONField(default=dict)
+```
+
+#### WebSocket Consumers
+```python
+# tests/consumers.py
+class TestProgressConsumer(AsyncWebsocketConsumer):
+ async def connect(self):
+ self.test_id = self.scope['url_route']['kwargs']['test_id']
+ self.group_name = f'test_{self.test_id}'
+
+ await self.channel_layer.group_add(
+ self.group_name,
+ self.channel_name
+ )
+ await self.accept()
+
+ async def test_progress_update(self, event):
+ await self.send(text_data=json.dumps({
+ 'type': 'progress_update',
+ 'data': event['data']
+ }))
+```
+
+### Integration with Existing Python System
+
+#### Orchestrator Client
+```python
+# integration/orchestrator_client.py
+class OrchestratorClient:
+ def __init__(self, config_path: str = None):
+ self.config = Config(config_path)
+
+ async def start_test_run(self, project_config: dict) -> str:
+ """Start a new test run and return the task ID."""
+ task = run_ui_tests.delay(project_config)
+ return task.id
+
+ def get_task_status(self, task_id: str) -> dict:
+ """Get the current status of a running task."""
+ task = AsyncResult(task_id)
+ return {
+ 'status': task.status,
+ 'progress': task.info if task.info else {},
+ 'result': task.result if task.successful() else None
+ }
+```
+
+#### Celery Tasks
+```python
+# tests/tasks.py
+@shared_task(bind=True)
+def run_ui_tests(self, project_config: dict):
+ """Execute UI tests using the existing Python system."""
+ try:
+ # Update task status
+ self.update_state(
+ state='PROGRESS',
+ meta={'current': 0, 'total': 100, 'status': 'Initializing...'}
+ )
+
+ # Initialize orchestrator
+ orchestrator = Orchestrator(project_config, project_config['target_url'])
+
+ # Run the test suite with progress callbacks
+ results = orchestrator.run_comprehensive_test_suite(
+ progress_callback=lambda progress: self.update_state(
+ state='PROGRESS',
+ meta=progress
+ )
+ )
+
+ return {
+ 'status': 'completed',
+ 'results': results,
+ 'artifacts_path': results.get('output_dir')
+ }
+
+ except Exception as exc:
+ self.update_state(
+ state='FAILURE',
+ meta={'error': str(exc)}
+ )
+ raise
+```
+
+---
+
+## 📊 Data Models & Relationships
+
+```mermaid
+erDiagram
+ User ||--o{ Project : owns
+ User ||--o{ TestRun : creates
+ Project ||--o{ TestRun : has
+ TestRun ||--o{ TestResult : contains
+ TestRun ||--o{ TestArtifact : generates
+
+ User {
+ int id
+ string username
+ string email
+ string first_name
+ string last_name
+ datetime created_at
+ boolean is_active
+ }
+
+ Project {
+ int id
+ string name
+ text description
+ string target_url
+ json auth_config
+ json test_config
+ datetime created_at
+ datetime updated_at
+ int created_by_id
+ }
+
+ TestRun {
+ int id
+ string status
+ json progress
+ json results
+ string artifacts_path
+ datetime started_at
+ datetime completed_at
+ text error_message
+ int project_id
+ int created_by_id
+ }
+
+ TestResult {
+ int id
+ string page_url
+ int elements_found
+ int test_cases_generated
+ string screenshot_path
+ json metadata
+ int test_run_id
+ }
+
+ TestArtifact {
+ int id
+ string file_type
+ string file_path
+ string file_name
+ int file_size
+ datetime created_at
+ int test_run_id
+ }
+```
+
+---
+
+## 🔐 Security Architecture
+
+### Authentication & Authorization
+```python
+# authentication/permissions.py
+class ProjectPermission(BasePermission):
+ def has_object_permission(self, request, view, obj):
+ # Only project owner can modify
+ if request.method in ['PUT', 'PATCH', 'DELETE']:
+ return obj.created_by == request.user
+
+ # Read access for team members
+ return True
+
+# JWT Configuration
+SIMPLE_JWT = {
+ 'ACCESS_TOKEN_LIFETIME': timedelta(minutes=60),
+ 'REFRESH_TOKEN_LIFETIME': timedelta(days=7),
+ 'ROTATE_REFRESH_TOKENS': True,
+ 'BLACKLIST_AFTER_ROTATION': True,
+}
+```
+
+### API Rate Limiting
+```python
+# settings.py
+REST_FRAMEWORK = {
+ 'DEFAULT_THROTTLE_CLASSES': [
+ 'rest_framework.throttling.AnonRateThrottle',
+ 'rest_framework.throttling.UserRateThrottle'
+ ],
+ 'DEFAULT_THROTTLE_RATES': {
+ 'anon': '100/hour',
+ 'user': '1000/hour',
+ 'test_execution': '10/hour' # Limit test runs
+ }
+}
+```
+
+### Environment Variables
+```bash
+# .env
+DEBUG=False
+SECRET_KEY=your-secret-key
+DATABASE_URL=postgresql://user:pass@localhost/dbname
+REDIS_URL=redis://localhost:6379/0
+CELERY_BROKER_URL=redis://localhost:6379/0
+
+# LLM API Keys
+OPENAI_API_KEY=your-openai-key
+GOOGLE_API_KEY=your-google-key
+
+# File Storage
+MEDIA_ROOT=/path/to/media
+STATIC_ROOT=/path/to/static
+
+# CORS Settings
+CORS_ALLOWED_ORIGINS=["http://localhost:3000"]
+```
+
+---
+
+## 🚀 Deployment Architecture
+
+### Development Environment
+```yaml
+# docker-compose.yml
+version: '3.8'
+services:
+ frontend:
+ build: ./frontend
+ ports:
+ - "3000:3000"
+ volumes:
+ - ./frontend:/app
+ environment:
+ - REACT_APP_API_URL=http://localhost:8000/api
+
+ backend:
+ build: ./backend
+ ports:
+ - "8000:8000"
+ volumes:
+ - ./backend:/app
+ environment:
+ - DEBUG=True
+ - DATABASE_URL=postgresql://postgres:password@db:5432/testdb
+ depends_on:
+ - db
+ - redis
+
+ db:
+ image: postgres:15
+ environment:
+ - POSTGRES_DB=testdb
+ - POSTGRES_USER=postgres
+ - POSTGRES_PASSWORD=password
+ volumes:
+ - postgres_data:/var/lib/postgresql/data
+
+ redis:
+ image: redis:7-alpine
+ ports:
+ - "6379:6379"
+
+ celery:
+ build: ./backend
+ command: celery -A config worker -l info
+ depends_on:
+ - db
+ - redis
+ volumes:
+ - ./backend:/app
+
+volumes:
+ postgres_data:
+```
+
+### Production Considerations
+- **Load Balancing**: nginx reverse proxy
+- **SSL/TLS**: Let's Encrypt certificates
+- **Static Files**: CDN for media and static files
+- **Database**: PostgreSQL with connection pooling
+- **Caching**: Redis cluster for high availability
+- **Monitoring**: Prometheus + Grafana
+- **Logging**: ELK stack or centralized logging
+
+---
+
+## 📱 Responsive Design & UX
+
+### Bootstrap 5.3 Integration
+```scss
+// styles/bootstrap-custom.scss
+@import "~bootstrap/scss/functions";
+@import "~bootstrap/scss/variables";
+
+// Custom theme variables
+$primary: #007bff;
+$secondary: #6c757d;
+$success: #28a745;
+$info: #17a2b8;
+$warning: #ffc107;
+$danger: #dc3545;
+
+// Dark mode support
+$enable-dark-mode: true;
+
+@import "~bootstrap/scss/bootstrap";
+```
+
+### Mobile-First Approach
+```typescript
+// components/dashboard/ProjectCard.tsx
+const ProjectCard: React.FC<{ project: Project }> = ({ project }) => {
+  return (
+    <Card className="h-100 shadow-sm">
+      <Card.Body>
+        <div className="d-flex justify-content-between align-items-start mb-2">
+          <Card.Title className="mb-0">{project.name}</Card.Title>
+          <Badge bg="secondary">{project.status}</Badge>
+        </div>
+        <Card.Text className="text-muted small text-truncate">
+          {project.target_url}
+        </Card.Text>
+        <div className="d-grid gap-2 d-md-flex">
+          <Button variant="outline-primary" size="sm">
+            View Results
+          </Button>
+          <Button variant="primary" size="sm">
+            Run Tests
+          </Button>
+        </div>
+      </Card.Body>
+    </Card>
+  );
+};
+```
+
+### Dark Mode Support
+```typescript
+// hooks/useTheme.ts
+export const useTheme = () => {
+ const [isDark, setIsDark] = useLocalStorage('theme-dark', false);
+
+ useEffect(() => {
+ document.documentElement.setAttribute(
+ 'data-bs-theme',
+ isDark ? 'dark' : 'light'
+ );
+ }, [isDark]);
+
+ return { isDark, toggleTheme: () => setIsDark(!isDark) };
+};
+```
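+
+A toggle wired to this hook (illustrative usage, not part of the original):
+
+```typescript
+// Illustrative navbar toggle built on useTheme
+import React from 'react';
+import { Button } from 'react-bootstrap';
+import { useTheme } from '../../hooks/useTheme';
+
+export const ThemeToggle: React.FC = () => {
+  const { isDark, toggleTheme } = useTheme();
+
+  return (
+    <Button variant="outline-secondary" size="sm" onClick={toggleTheme}>
+      {isDark ? 'Switch to light mode' : 'Switch to dark mode'}
+    </Button>
+  );
+};
+```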
+
+---
+
+## 🔄 Real-time Features
+
+### WebSocket Implementation
+```typescript
+// services/websocket.ts
+import { io, Socket } from 'socket.io-client';
+
+class WebSocketService {
+ private socket: Socket | null = null;
+
+ connect(token: string) {
+ this.socket = io('ws://localhost:8000', {
+ auth: { token },
+ transports: ['websocket']
+ });
+
+ this.socket.on('connect', () => {
+ console.log('Connected to WebSocket');
+ });
+
+ return this.socket;
+ }
+
+ subscribeToTestProgress(testId: string, callback: (data: any) => void) {
+ if (this.socket) {
+ this.socket.emit('join', `test_${testId}`);
+ this.socket.on('test_progress', callback);
+ }
+ }
+}
+```
+
+### Real-time Notifications
+```typescript
+// components/common/NotificationSystem.tsx
+const NotificationSystem: React.FC = () => {
+ const { notifications } = useSelector(state => state.ui);
+ const dispatch = useDispatch();
+
+  return (
+    <ToastContainer position="top-end" className="p-3">
+      {notifications.map(notification => (
+        <Toast
+          key={notification.id}
+          onClose={() => dispatch(removeNotification(notification.id))}
+          bg={notification.type}
+          autohide
+          delay={5000}
+        >
+          <Toast.Header>
+            <strong className="me-auto">{notification.title}</strong>
+          </Toast.Header>
+          <Toast.Body>{notification.message}</Toast.Body>
+        </Toast>
+      ))}
+    </ToastContainer>
+  );
+};
+```
+
+---
+
+## 📈 Performance Optimization
+
+### Frontend Optimizations
+- **Code Splitting**: Route-based and component-based splitting
+- **Lazy Loading**: Components and images
+- **Memoization**: React.memo and useMemo for expensive computations
+- **Bundle Analysis**: Webpack Bundle Analyzer
+- **CDN**: Static assets served from CDN
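+
+A minimal example of route-based splitting with `React.lazy` (page names are taken from the pages listed earlier; this is a sketch, not the full router):
+
+```typescript
+// Route-based code splitting -- illustrative sketch
+import React, { lazy, Suspense } from 'react';
+import { Routes, Route } from 'react-router-dom';
+
+const Dashboard = lazy(() => import('./pages/Dashboard/Dashboard'));
+const TestResults = lazy(() => import('./pages/TestResults/TestResults'));
+
+export function AppRoutes() {
+  return (
+    <Suspense fallback={<div className="text-center py-5">Loading…</div>}>
+      <Routes>
+        <Route path="/" element={<Dashboard />} />
+        <Route path="/results" element={<TestResults />} />
+      </Routes>
+    </Suspense>
+  );
+}
+```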
+
+### Backend Optimizations
+- **Database Indexing**: Proper indexing on frequently queried fields
+- **Query Optimization**: Select_related and prefetch_related
+- **Caching Strategy**: Redis for session, API responses, and computed data
+- **Connection Pooling**: Database connection pooling
+- **Background Tasks**: Celery for long-running operations
+
+### API Response Optimization
+```python
+# projects/views.py
+class ProjectViewSet(viewsets.ModelViewSet):
+ queryset = Project.objects.select_related('created_by').prefetch_related('test_runs')
+ serializer_class = ProjectSerializer
+
+ @action(detail=True, methods=['get'])
+ @cache_page(300) # Cache for 5 minutes
+ def test_history(self, request, pk=None):
+ project = self.get_object()
+ test_runs = project.test_runs.order_by('-created_at')[:10]
+ serializer = TestRunSerializer(test_runs, many=True)
+ return Response(serializer.data)
+```
+
+---
+
+## 🧪 Testing Strategy
+
+### Frontend Testing
+```typescript
+// components/__tests__/ProjectCard.test.tsx
+import { render, screen } from '@testing-library/react';
+import { ProjectCard } from '../ProjectCard';
+
+const mockProject = {
+ id: 1,
+ name: 'Test Project',
+ target_url: 'https://example.com',
+ status: 'active'
+};
+
+test('renders project card with correct information', () => {
+ render(<ProjectCard project={mockProject} />);
+
+ expect(screen.getByText('Test Project')).toBeInTheDocument();
+ expect(screen.getByText('https://example.com')).toBeInTheDocument();
+ expect(screen.getByText('active')).toBeInTheDocument();
+});
+```
+
+### Backend Testing
+```python
+# tests/test_projects.py
+class ProjectAPITestCase(APITestCase):
+ def setUp(self):
+ self.user = User.objects.create_user(
+ username='testuser',
+ email='test@example.com',
+ password='testpass123'
+ )
+ self.client.force_authenticate(user=self.user)
+
+ def test_create_project(self):
+ data = {
+ 'name': 'Test Project',
+ 'target_url': 'https://example.com',
+ 'test_config': {'max_pages': 10}
+ }
+ response = self.client.post('/api/projects/', data, format='json')
+ self.assertEqual(response.status_code, 201)
+ self.assertEqual(Project.objects.count(), 1)
+```
+
+---
+
+## 📚 Documentation & API
+
+### API Documentation
+```python
+# config/urls.py
+from drf_spectacular.views import SpectacularAPIView, SpectacularSwaggerView
+
+urlpatterns = [
+ path('api/schema/', SpectacularAPIView.as_view(), name='schema'),
+ path('api/docs/', SpectacularSwaggerView.as_view(url_name='schema'), name='swagger-ui'),
+]
+```
+
+### Component Documentation (Storybook)
+```typescript
+// components/Button/Button.stories.tsx
+export default {
+ title: 'Components/Button',
+ component: Button,
+ argTypes: {
+ variant: {
+ control: { type: 'select' },
+ options: ['primary', 'secondary', 'success', 'danger']
+ }
+ }
+} as ComponentMeta<typeof Button>;
+
+export const Primary: ComponentStory<typeof Button> = (args) => (
+ <Button {...args}>Primary Button</Button>
+);
+```
+
+---
+
+## 🔮 Future Enhancements
+
+### Phase 2 Features
+- **Team Collaboration**: Multi-user projects and role-based permissions
+- **CI/CD Integration**: GitHub Actions, Jenkins, GitLab CI integration
+- **Advanced Analytics**: Test trend analysis, performance metrics
+- **Custom Templates**: User-defined test templates and patterns
+- **API Testing**: REST API endpoint testing capabilities
+
+### Phase 3 Features
+- **Mobile App Testing**: React Native, Flutter app testing
+- **Visual Regression**: Screenshot comparison and visual diff detection
+- **AI Insights**: Predictive analytics for test failures
+- **Multi-tenant Architecture**: SaaS deployment model
+- **Enterprise SSO**: SAML, LDAP integration
+
+---
+
+## 📝 Implementation Timeline
+
+### Sprint 1 (2 weeks): Foundation
+- [ ] Django project setup and basic models
+- [ ] React project setup with TypeScript
+- [ ] Authentication system (JWT)
+- [ ] Basic project CRUD operations
+
+### Sprint 2 (2 weeks): Core Features
+- [ ] Test execution integration
+- [ ] WebSocket real-time updates
+- [ ] File upload and management
+- [ ] Basic dashboard views
+
+### Sprint 3 (2 weeks): UI Polish
+- [ ] Bootstrap 5 integration
+- [ ] Responsive design implementation
+- [ ] Charts and data visualization
+- [ ] Form validation and error handling
+
+### Sprint 4 (2 weeks): Testing & Deployment
+- [ ] Unit and integration tests
+- [ ] Docker containerization
+- [ ] CI/CD pipeline setup
+- [ ] Documentation and API docs
+
+---
+
+
+
+=============================================================================================================
+
+LLM-Powered UI Testing System - Technical Architecture
+┌────────────────────────────────────── INPUT ──────────────────────────────────────┐
+│ │
+│ ┌───────────────────────────┐ │
+│ │ Target Web Application │ │
+│ │ Root URL + Auth Credentials │ │
+│ └───────────────────────────┘ │
+│ │ │
+└────────────────────────────────────────┼──────────────────────────────────────────┘
+ ↓
+┌───────────────────────── INTERFACE LAYER ─────────────────────────┐
+│ │
+│ ┌────────────────────────┐ ┌─────────────────────────┐ │
+│ │ Command Line Interface │ │ Configuration Manager │ │
+│ │ Parameter Parsing │ │ Settings Validation │ │
+│ │ Config Loading │ │ Environment Variables │ │
+│ └────────────────────────┘ └─────────────────────────┘ │
+│ │
+│ config.py: Handles API keys, browser settings, output options │
+│ │
+└─────────────────────────────────┬─────────────────────────────────┘
+ │ Validated Configuration
+ ↓
+┌───────────────────── ORCHESTRATION LAYER ────────────────────────┐
+│ │
+│ ┌────────────────────────────────────────────────────────────┐ │
+│ │ Orchestrator │ │
+│ │ Central Coordination • Process Management │ │
+│ └────────────────────────────────────────────────────────────┘ │
+│ │
+│ ┌─────────────────────────┐ ┌─────────────────────────┐ │
+│ │ State Tracker │ │ Error Handler │ │
+│ │ Visit History • Forms │ │ Recovery • Retry Logic │ │
+│ └─────────────────────────┘ └─────────────────────────┘ │
+│ │
+│ orchestrator.py: Controls workflow and manages process flow │
+│ │
+└────────────────────────────────┬─────────────────────────────────┘
+ │ Discovery Commands
+ ↓
+┌─────────────────────── CRAWLING LAYER ───────────────────────────┐
+│ │
+│ ┌─────────────────────────┐ ┌─────────────────────────┐ │
+│ │ Browser Controller │ │ Crawl4AI Integration │ │
+│ │ Playwright • DOM Access│ │ Async • Deep Crawling │ │
+│ └─────────────────────────┘ └─────────────────────────┘ │
+│ │
+│ ┌─────────────────────────┐ ┌─────────────────────────┐ │
+│ │ Authentication Handler │ │ Dynamic Content Process │ │
+│ │ Login Flows • Cookies │ │ JavaScript • Loading │ │
+│ └─────────────────────────┘ └─────────────────────────┘ │
+│ │
+│ browser_controller.py + Crawl4AI: Navigation and DOM capture │
+│ │
+└────────────────────────────────┬─────────────────────────────────┘
+ │ DOM Content + Screenshots
+ ↓
+┌─────────────────────── ANALYSIS LAYER ───────────────────────────┐
+│ │
+│ ┌─────────────────────────┐ ┌─────────────────────────┐ │
+│ │ LLM Interface │ │ Element Extractor │ │
+│ │ API • Prompt Management│ │ Form Detection • IDs │ │
+│ └─────────────────────────┘ └─────────────────────────┘ │
+│ │
+│ ┌─────────────────────────┐ ┌─────────────────────────┐ │
+│ │ DOM Parser │ │ Visual Analyzer │ │
+│ │ HTML • Shadow DOM │ │ Screenshots • Layout │ │
+│ └─────────────────────────┘ └─────────────────────────┘ │
+│ │
+│ llm_interface.py + element_extractor.py: LLM-powered analysis │
+│ │
+└────────────────────────────────┬─────────────────────────────────┘
+ │ Structured Element Metadata
+ ↓
+┌────────────────────── GENERATION LAYER ──────────────────────────┐
+│ │
+│ ┌─────────────────────────┐ ┌─────────────────────────┐ │
+│ │ Test Generator │ │ Code Generator │ │
+│ │ Test Cases • Scenarios │ │ POM Classes • Scripts │ │
+│ └─────────────────────────┘ └─────────────────────────┘ │
+│ │
+│ ┌─────────────────────────┐ ┌─────────────────────────┐ │
+│ │ Template Engine │ │ Validator │ │
+│ │ Code Templates • Style │ │ Syntax • Test Logic │ │
+│ └─────────────────────────┘ └─────────────────────────┘ │
+│ │
+│ test_generator.py + code_generator.py: LLM-driven generation │
+│ │
+└────────────────────────────────┬─────────────────────────────────┘
+ │ Test Artifacts
+ ↓
+┌──────────────────────── OUTPUT LAYER ────────────────────────────┐
+│ │
+│ ┌─────────────┐ ┌─────────────┐ ┌─────────────────┐ │
+│ │ JSON │ │ Test Cases │ │ Test Scripts │ │
+│ │ Metadata │ │ Gherkin │ │ Python Code │ │
+│ └─────────────┘ └─────────────┘ └─────────────────┘ │
+│ │
+│ ┌─────────────────────────────┐ │
+│ │ Summary Report │ │
+│ └─────────────────────────────┘ │
+│ │
+│ outputs/: metadata/, test_cases/, test_scripts/ directories │
+│ │
+└────────────────────────────────┬─────────────────────────────────┘
+ │
+ ↓
+ ┌─────────────────────────┐
+ │ PyTest Runner │
+ │ Executes Tests │
+ └─────────────────────────┘
+Layer-by-Layer Process Flow
+1. Interface Layer
+Receives initial inputs and configuration parameters:
+
+Command Line Interface: Processes arguments from users
+Configuration Manager: Loads and validates settings
+
+2. Orchestration Layer
+Central coordination and decision-making:
+
+Orchestrator: Main workflow controller
+State Tracker: Maintains crawl state and history
+Error Handler: Manages failures and retries
+
+3. Crawling Layer
+Web interaction and content discovery:
+
+Browser Controller: Playwright-based browser automation
+Crawl4AI Integration: Enhanced crawling capabilities
+Authentication Handler: Manages login processes
+Dynamic Content Processor: Handles JavaScript-heavy sites
+
+4. Analysis Layer
+Content understanding and element extraction:
+
+LLM Interface: Communicates with AI models
+Element Extractor: Identifies UI components
+DOM Parser: Analyzes HTML structure
+Visual Analyzer: Processes screenshots
+
+5. Generation Layer
+Creates test artifacts:
+
+Test Generator: Produces human-readable test cases
+Code Generator: Builds executable test scripts
+Template Engine: Manages code patterns
+Validator: Ensures quality and correctness
+
+6. Output Layer
+Organizes final deliverables:
+
+JSON Metadata: Structured element data
+Test Cases: Human-readable scenarios
+Test Scripts: Executable test code
+Summary Report: Overall testing results
+
+Crawl4AI Integration Points
+The Crawl4AI integration enhances the Crawling Layer by:
+
+Replacing basic navigation with high-performance async crawling
+Providing BFS/DFS strategies for comprehensive site exploration
+Handling complex dynamic content and JavaScript execution
+Managing browser sessions across multiple interactions
+
+Data flows unidirectionally down through the layers, with each layer performing its specific logical function before passing processed information to the next layer.
diff --git a/notes b/notes
new file mode 100644
index 0000000000..9c57dc7084
--- /dev/null
+++ b/notes
@@ -0,0 +1,58 @@
+The responsibilities of the Lead, Data and Analytics are multifaceted and require a strong understanding of both technical and leadership domains:
+
+Technical Leadership and Strategy: The Lead will be at the forefront of designing and developing data and analytics solutions that are in complete alignment with Company's overarching technology strategy. This involves establishing and rigorously enforcing data engineering standards, championing best practices, and defining robust architectural patterns to ensure consistency and efficiency across all data initiatives. A key aspect of this responsibility includes the continuous evaluation of new and emerging technologies, with the expectation to provide well-reasoned recommendations for their adoption when they offer significant benefits to Company's data ecosystem . Furthermore, the Lead will serve as a technical expert, providing invaluable guidance and mentorship to other team members, fostering their growth and ensuring the team remains at the cutting edge of data and analytics practices . The expectation is that this individual will not only possess deep technical knowledge but also the ability to translate that knowledge into actionable strategies that drive the team forward and enhance Company's data capabilities.
+
+Data Engineering and Pipeline Development: A core responsibility involves the hands-on design, construction, and maintenance of robust and scalable data pipelines. These pipelines are critical for the efficient ingestion, seamless processing, and accurate transformation of data from a diverse range of sources. These sources include cutting-edge big data platforms, flexible cloud storage solutions, and established traditional databases, reflecting the complex data landscape of a global financial institution . The Lead will be tasked with optimizing these data pipelines to achieve peak performance, ensuring unwavering reliability, and enabling seamless scalability to handle the ever-increasing volumes of data. Implementing comprehensive data quality checks and proactive monitoring processes will also fall under their purview, guaranteeing the integrity and trustworthiness of the data that powers Company's critical operations and decision-making . The ability to convert legacy SAS-based pipelines into modern languages like PySpark and Scala for execution on both Hadoop and non-Hadoop ecosystems is also a significant aspect of this role, highlighting the need to modernize existing data infrastructure .
+
+Project Management and Delivery: The Lead will be expected to take ownership of data and analytics projects from their initial conceptualization all the way through to their successful completion. This includes ensuring that all projects are delivered not only on time but also within the allocated budget, demonstrating strong financial and resource management skills . A crucial first step involves clearly defining the project scope, establishing measurable objectives, and outlining specific deliverables in close collaboration with all relevant stakeholders, ensuring that everyone is aligned on the goals and expectations. The Lead will also be responsible for proactively identifying and effectively managing any project risks, potential issues, and critical dependencies that could impact the project's trajectory. Adherence to established project management methodologies, such as Agile and SDLC, will be essential to ensure a structured and efficient approach to project execution . A proven track record of successfully managing and implementing data-related projects is a key requirement for this role .
+
+Team Leadership and Mentorship: This role involves providing strong leadership and effective mentorship to a team of talented data engineers and skilled analysts. The Lead will play a crucial role in fostering their professional growth and continuous development, creating an environment where team members can learn, innovate, and excel . Supervising day-to-day staff management responsibilities, including the efficient allocation of resources and the strategic assignment of work, will be a key part of ensuring the team's productivity and effectiveness . The Lead will be instrumental in promoting a collaborative and high-performing team culture, where open communication, mutual respect, and a shared commitment to excellence are paramount . Providing constructive coaching, valuable guidance, and serving as a knowledgeable advisor to junior team members will be essential for nurturing talent and building a strong, capable data and analytics team . This aspect of the role requires not only technical expertise but also strong interpersonal skills and a genuine commitment to the growth and success of the team.
+
+Collaboration and Stakeholder Management: The Lead will be expected to forge strong partnerships with a diverse group of stakeholders, including domain experts who possess deep business knowledge, product managers who define the vision for data products, experienced analysts who derive insights from data, and skilled data scientists who build advanced analytical models . A key responsibility will be to thoroughly understand their unique data needs and to effectively deliver tailored data and analytics solutions that meet those requirements. Excellent communication skills, both written and verbal, are essential for effectively interacting with both technical and non-technical stakeholders, ensuring that complex information is conveyed clearly and concisely . Furthermore, the Lead will collaborate closely with various cross-functional teams across the organization to ensure seamless alignment and effective integration of data and analytics initiatives with broader business objectives, fostering a cohesive and data-driven culture throughout Company . The ability to influence and negotiate with senior leaders will also be important in driving the adoption of data-driven strategies .
+
+Data Quality and Governance: Ensuring the accuracy, integrity, and unwavering reliability of data is a fundamental responsibility of this role. The Lead will be tasked with defining and implementing robust data quality standards and comprehensive processes to maintain the highest levels of data integrity . Adherence to Company's established data governance policies and procedures will be strictly required, ensuring compliance with internal regulations and industry best practices . This involves defining needs around maintainability, testability, performance, security, quality, and overall usability of the data platform . The Lead will play a critical role in establishing a culture of data quality within the team and across the organization, ensuring that data is treated as a valuable asset and that decisions are based on trustworthy information.
+
+Research and Innovation: In the rapidly evolving field of data and analytics, it is crucial to stay informed about the latest trends and technological advancements. The Lead will be expected to proactively research and thoroughly evaluate new tools, innovative techniques, and emerging methodologies that have the potential to significantly improve Company's data and analytics capabilities. This includes driving a culture of innovation within the data and analytics function, encouraging experimentation, and championing the adoption of cutting-edge solutions that can provide a competitive edge to the organization. This role requires a forward-thinking individual who is passionate about exploring new possibilities and continuously seeking ways to enhance Company's data-driven capabilities. The evaluation of new IT developments and evolving business requirements, leading to recommendations for appropriate system alternatives or enhancements, is a key aspect of this responsibility.
+
+Risk Management and Compliance: Given that Company operates within the highly regulated financial services industry, a strong understanding of risk management principles and strict adherence to compliance regulations are paramount. The Lead will be responsible for appropriately assessing risk in all business decisions, demonstrating a deep consideration for the firm's reputation and actively safeguarding Company, its clients, and its valuable assets. This involves diligently driving compliance with all applicable laws, relevant rules, and internal regulations, consistently adhering to Company's established policies, and applying sound ethical judgment in all professional conduct.
+
+To be successful in this challenging and rewarding role, candidates should possess the following qualifications:
+
+Technical Skills: A candidate must have extensive hands-on experience with a wide range of big data technologies. This includes a strong understanding of the Hadoop ecosystem, with a preference for experience with Cloudera. Proficiency in Spark, Hive, Impala, Kafka, Kudu, Solr, and Elasticsearch is essential. Furthermore, the role requires strong programming skills in languages critical for data engineering, such as Python, Scala, and Java. Practical experience with major cloud platforms, including AWS, Azure, or GCP, is highly desirable, with a preference for candidates who have actively supported deployments on these platforms. Expertise in building robust data pipelines using ETL/ELT tools and frameworks, such as Apache NiFi, Talend, or Apache Airflow, is a key requirement. A solid understanding of both relational and NoSQL databases is necessary, along with proven experience in data modeling, schema design, and performance optimization. Familiarity with containerization technologies like Docker and Kubernetes, as well as CI/CD practices and automation, is also expected. Finally, a strong grasp of data warehousing concepts and data lake architectures is fundamental, and, while not strictly an engineering requirement, experience with data visualization tools like Tableau or Power BI could be beneficial given the "Analytics" component of the role.
+
+Experience: Candidates must possess a minimum of 12 years of progressive experience in the IT field, with at least 8 years specifically focused on hands-on work with Hadoop and other big data technologies. A significant portion of their experience should be in the design, development, and successful implementation of complex data pipelines for data ingestion and transformation. Proven experience in leading large-scale data and analytics projects is essential, demonstrating the ability to manage all aspects of the project lifecycle from initiation to completion. Prior experience in team leadership and effective mentorship of technical teams is a critical requirement. Experience within the banking, capital markets, or broader financial services industry is strongly preferred, as it provides valuable context for the specific data challenges and regulatory requirements within Company. Experience with enterprise-scale, multi-region project development is also highly desirable. Prior experience in the conversion of SAS-based pipelines to modern big data technologies would be considered a significant plus, as would any experience in Hadoop administration.
+
+Education: A Bachelor's degree in Computer Science, Engineering, Data Science, Analytics, or a closely related field is a mandatory requirement. A Master's degree in one of these fields is highly preferred, as it often indicates a deeper theoretical understanding and advanced analytical capabilities that are valuable for this senior-level role. Equivalent practical experience may be considered in lieu of a Master's degree in exceptional circumstances.
+
+Certifications (Optional but desirable): While not explicitly required, relevant industry certifications can be a significant advantage. These include certifications in leading cloud platforms such as AWS Certified Data Engineer, Azure Data Engineer Associate, or Google Cloud Professional Data Engineer. Similarly, big data certifications, such as Cloudera Certified Data Engineer, can further demonstrate a candidate's specialized knowledge and commitment to the field.
+
+In addition to the necessary technical expertise and professional experience, certain soft skills are crucial for success in this leadership role:
+
+Exceptional written and verbal communication skills are paramount, with the proven ability to clearly and effectively articulate complex technical concepts to a diverse range of audiences, including both technical peers and non-technical business stakeholders.
+Strong problem-solving and analytical skills are essential, with a demonstrated ability to thoroughly analyze intricate issues, identify root causes, and develop practical and effective solutions.
+Proven leadership and team management skills are required, including demonstrable experience in effectively leading, mentoring, and coaching team members to achieve their full potential and contribute to a high-performing team environment. The ability to influence and negotiate with senior leaders is also important.
+Strategic thinking and the capability to align data and analytics initiatives with broader business goals and objectives are critical for driving impactful outcomes and contributing to the overall success of the organization.
+Adaptability and a strong learning agility are necessary to thrive in the ever-evolving landscape of data and analytics technologies, with a proactive willingness to continuously learn and stay updated on the latest trends and advancements.
+Strong collaboration and interpersonal skills are vital for building effective working relationships with diverse teams across the organization, fostering a positive and productive working environment.
+A keen attention to detail and an unwavering commitment to maintaining high standards of data quality and accuracy are essential for ensuring the reliability and trustworthiness of data used for critical decision-making.
+The ability to effectively work under pressure and manage multiple priorities while consistently meeting deadlines is crucial for navigating the demands of a senior-level role in a fast-paced environment.
+Strong organizational and project management skills are necessary for effectively planning, executing, and delivering complex data and analytics projects on time and within budget.
+
+Company offers a comprehensive and competitive benefits package designed to support the well-being and professional growth of our employees. While specific details for this role in Farmers Branch, TX should be confirmed on Company's career website, typical benefits for senior-level technology roles often include comprehensive medical, dental, and vision coverage, along with a Health Savings Account option. Employees can also expect life insurance, short-term and long-term disability protection, and paid parental leave. Company provides paid holidays and generous vacation time to ensure employees have ample opportunity for rest and rejuvenation. Retirement savings are supported through a 401(k) plan with a company match, and opportunities for career development and tuition reimbursement are often available to encourage continuous learning and advancement. Exploring Company's specific offerings will provide a complete picture of the rewards and benefits associated with this exciting opportunity.
+
+Company is an equal opportunity employer committed to diversity in the workplace. All qualified applicants will receive consideration for employment without regard to race, color, religion, sex, sexual orientation, gender identity, national origin, disability, or status as a protected veteran.
+
+Interested candidates who meet the qualifications outlined above are encouraged to apply through Company's career website. Please refer to the specific job posting for "Lead, Data and Analytics (C13)" in Farmers Branch, TX for detailed application instructions and any relevant deadlines. We look forward to receiving your application.
+
+Summary of Key Technical Skills and Experience Levels
+
+Skill/Technology                   | Minimum Years of Experience | Level of Proficiency   | Frequency of Mention in Company Snippets
+Hadoop                             | 8+                          | Hands-on, Advanced     | 3
+Spark                              | -                           | Hands-on, Expert       | 4
+Python                             | -                           | Proficient             | 3
+Scala                              | -                           | Proficient             | 3
+Java                               | -                           | Proficient             | 3
+Cloud Platforms (AWS, Azure, GCP)  | -                           | Hands-on               | 1
+ETL/ELT Tools                      | -                           | Expertise              | 1
+SQL                                | -                           | Strong Understanding   | 1
+NoSQL Databases                    | -                           | Solid Understanding    | 1
+Data Pipelines                     | -                           | Significant Experience | 3