diff --git a/.claude/commands/README.md b/.claude/commands/README.md
new file mode 100644
index 00000000..994bdc7f
--- /dev/null
+++ b/.claude/commands/README.md
@@ -0,0 +1,162 @@
+# 🔧 Command Templates
+
+Orchestration templates that enable Claude Code to coordinate multi-agent workflows for different development tasks.
+
+## Overview
+
+After reading the [main kit documentation](../README.md), you'll understand how these commands fit into the integrated system. Each command:
+
+- **Auto-loads** the appropriate documentation tier for its task
+- **Spawns specialized agents** based on complexity
+- **Integrates MCP servers** when external expertise helps
+- **Maintains documentation** to keep AI context current
+
+### 🚀 Automatic Context Injection
+
+All commands benefit from automatic context injection via the `subagent-context-injector.sh` hook:
+
+- **Core documentation auto-loaded**: Every command and sub-agent automatically receives `@/docs/CLAUDE.md`, `@/docs/ai-context/project-structure.md`, and `@/docs/ai-context/docs-overview.md`
+- **No manual context loading**: Sub-agents spawned by commands automatically have access to essential project documentation
+- **Consistent knowledge**: All agents start with the same foundational understanding
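+
+For illustration, a minimal sketch of how such a hook could work, assuming it receives the pending `Task` tool call as JSON on stdin and prints a modified version (the payload field names and output contract here are assumptions, not a verified interface):
+
+```bash
+#!/usr/bin/env bash
+# Illustrative sketch only: tool_name and tool_input.prompt are assumed
+# payload fields, and the modified-JSON output contract is an assumption.
+input=$(cat)
+
+if [ "$(echo "$input" | jq -r '.tool_name')" = "Task" ]; then
+  ctx="@/docs/CLAUDE.md @/docs/ai-context/project-structure.md @/docs/ai-context/docs-overview.md"
+  # Prepend core documentation references to the sub-agent prompt.
+  echo "$input" | jq --arg ctx "$ctx" '.tool_input.prompt = $ctx + "\n\n" + .tool_input.prompt'
+else
+  echo "$input"  # pass other tool calls through untouched
+fi
+```
+
+## Available Commands
+
+### 📊 `/full-context`
+**Purpose**: Comprehensive context gathering and analysis when you need deep understanding or plan to execute code changes.
+
+**When to use**:
+- Starting work on a new feature or bug
+- Need to understand how systems interconnect
+- Planning architectural changes
+- Any task requiring thorough analysis before implementation
+
+**How it works**: Adaptively scales from direct analysis to multi-agent orchestration based on request complexity. Agents read documentation, analyze code, map dependencies, and consult MCP servers as needed.
+
+### 🔍 `/code-review`
+**Purpose**: Get multiple expert perspectives on code quality, focusing on high-impact findings rather than nitpicks.
+
+**When to use**:
+- After implementing new features
+- Before merging important changes
+- When you want security, performance, and architecture insights
+- Need confidence in code quality
+
+**How it works**: Spawns specialized agents (security, performance, architecture) that analyze in parallel. Each agent focuses on critical issues that matter for production code.
+
+### 🧠 `/gemini-consult` *(Requires Gemini MCP Server)*
+**Purpose**: Engage in deep, iterative conversations with Gemini for complex problem-solving and architectural guidance.
+
+**When to use**:
+- Tackling complex architectural decisions
+- Need expert guidance on implementation approaches
+- Debugging intricate issues across multiple files
+- Exploring optimization strategies
+- When you need a thinking partner for difficult problems
+
+**How it works**: Creates persistent conversation sessions with Gemini, automatically attaching project context and MCP-ASSISTANT-RULES.md. Supports iterative refinement through follow-up questions and implementation feedback.
+
+**Key features**:
+- Context-aware problem detection when no arguments provided
+- Persistent sessions maintained throughout problem lifecycle
+- Automatic attachment of foundational project documentation
+- Support for follow-up questions with session continuity
+
+### 📝 `/update-docs`
+**Purpose**: Keep documentation synchronized with code changes, ensuring AI context remains current.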
+ +**When to use**: +- After modifying code +- After adding new features +- When project structure changes +- Following any significant implementation + +**How it works**: Analyzes what changed and updates the appropriate CLAUDE.md files across all tiers. Maintains the context that future AI sessions will rely on. + +### 📄 `/create-docs` +**Purpose**: Generate initial documentation structure for existing projects that lack AI-optimized documentation. + +**When to use**: +- Adopting the framework in an existing project +- Starting documentation from scratch +- Need to document legacy code +- Setting up the 3-tier structure + +**How it works**: Analyzes your project structure and creates appropriate CLAUDE.md files at each tier, establishing the foundation for AI-assisted development. + +### ♻️ `/refactor` +**Purpose**: Intelligently restructure code while maintaining functionality and updating all dependencies. + +**When to use**: +- Breaking up large files +- Improving code organization +- Extracting reusable components +- Cleaning up technical debt + +**How it works**: Analyzes file structure, maps dependencies, identifies logical split points, and handles all import/export updates across the codebase. + +### 🤝 `/handoff` +**Purpose**: Preserve context when ending a session or when the conversation becomes too long. + +**When to use**: +- Ending a work session +- Context limit approaching +- Switching between major tasks +- Supplementing `/compact` with permanent storage + +**How it works**: Updates the handoff documentation with session achievements, current state, and next steps. Ensures smooth continuation in future sessions. + +## Integration Patterns + +### Typical Workflow +```bash +/full-context "implement user notifications" # Understand +# ... implement the feature ... +/code-review "review notification system" # Validate +/update-docs "document notification feature" # Synchronize +/handoff "completed notification system" # Preserve +``` + +### Quick Analysis +```bash +/full-context "why is the API slow?" # Investigate +# ... apply fixes ... +/update-docs "document performance fixes" # Update context +``` + +### Major Refactoring +```bash +/full-context "analyze authentication module" # Understand current state +/refactor "@auth/large-auth-file.ts" # Restructure +/code-review "review refactored auth" # Verify quality +/update-docs "document new auth structure" # Keep docs current +``` + +### Complex Problem Solving +```bash +/gemini-consult "optimize real-time data pipeline" # Start consultation +# ... implement suggested approach ... +/gemini-consult # Follow up with results +/update-docs "document optimization approach" # Capture insights +``` + +## Customization + +Each command template can be adapted: + +- **Adjust agent strategies** - Modify how many agents spawn and their specializations +- **Change context loading** - Customize which documentation tiers load +- **Tune MCP integration** - Adjust when to consult external services +- **Modify output formats** - Tailor results to your preferences + +Commands are stored in `.claude/commands/` and can be edited directly. + +## Key Principles + +1. **Commands work together** - Each command builds on others' outputs +2. **Documentation stays current** - Commands maintain their own context +3. **Complexity scales naturally** - Simple tasks stay simple, complex tasks get sophisticated analysis +4. 
**Context is continuous** - Information flows between sessions through documentation + +--- + +*For detailed implementation of each command, see the individual command files in this directory.* \ No newline at end of file diff --git a/.claude/commands/code-review.md b/.claude/commands/code-review.md new file mode 100644 index 00000000..1af045da --- /dev/null +++ b/.claude/commands/code-review.md @@ -0,0 +1,322 @@ +# /code-review + +*Performs focused multi-agent code review that surfaces only critical, high-impact findings for solo developers using AI tools.* + +## Core Philosophy + +This command prioritizes **needle-moving discoveries** over exhaustive lists. Every finding must demonstrate significant impact on: +- System reliability & stability +- Security vulnerabilities with real exploitation risk +- Performance bottlenecks affecting user experience +- Architectural decisions blocking future scalability +- Critical technical debt threatening maintainability + +### 🚨 Critical Findings Only +Issues that could cause production failures, security breaches, or severe user impact within 48 hours. + +### 🔥 High-Value Improvements +Changes that unlock new capabilities, remove significant constraints, or improve metrics by >25%. + +### ❌ Excluded from Reports +Minor style issues, micro-optimizations (<10%), theoretical best practices, edge cases affecting <1% of users. + + +## Auto-Loaded Project Context: +@/CLAUDE.md +@/docs/ai-context/project-structure.md +@/docs/ai-context/docs-overview.md + + +## Command Execution + +User provided context: "$ARGUMENTS" + +### Step 1: Understand User Intent & Gather Context + +#### Parse the Request +Analyze the natural language input to determine: +1. **What to review**: Parse file paths, component names, feature descriptions, or commit references +2. **Review focus**: Identify any specific concerns mentioned (security, performance, etc.) +3. **Scope inference**: Intelligently determine the breadth of review needed + +Examples of intent parsing: +- "the authentication flow" → Find all files related to auth across the codebase +- "voice pipeline implementation" → Locate voice processing components +- "recent changes" → Parse git history for relevant commits +- "the API routes" → Identify all API endpoint files + +#### Read Relevant Documentation +Before allocating agents, **read the documentation** to understand: +1. Use `/docs/ai-context/docs-overview.md` to identify relevant docs +2. Read documentation related to the code being reviewed: + - Architecture docs for subsystem understanding + - API documentation for integration points + - Security guidelines for sensitive areas + - Performance considerations for critical paths +3. Build a mental model of risks, constraints, and priorities + +This context ensures intelligent agent allocation based on actual project knowledge. + +### Step 2: Define Mandatory Coverage Areas + +Every code review MUST analyze these core areas, with depth determined by scope: + +#### 🎯 Mandatory Coverage Areas: + +1. **Critical Path Analysis** + - User-facing functionality that could break + - Data integrity and state management + - Error handling and recovery mechanisms + +2. **Security Surface** + - Input validation and sanitization + - Authentication/authorization flows + - Data exposure and API security + +3. **Performance Impact** + - Real-time processing bottlenecks + - Resource consumption (memory, CPU) + - Scalability constraints + +4. 
**Integration Points** + - API contracts and boundaries + - Service dependencies + - External system interactions + +#### 📊 Dynamic Agent Allocation: + +Based on review scope, allocate agents proportionally: + +**Small to medium Scope (small set of files or small feature)** +- 2-3 agents covering mandatory areas +- Each agent handles 1-2 coverage areas +- Focus on highest-risk aspects + +**Large Scope (many files, major feature or subsystem)** +- 4-6 agents with specialized focus +- Each mandatory area gets dedicated coverage +- Additional agents for cross-cutting concerns + +### Step 3: Dynamic Agent Generation + +Based on scope analysis and mandatory coverage areas, dynamically create specialized agents: + +#### Agent Generation Strategy: + +**With your documentation knowledge from Step 1, think deeply** about optimal agent allocation: +- Leverage your understanding of the project architecture and risks +- Consider the specific documentation you read about this subsystem +- Apply insights about critical paths and security considerations +- Use documented boundaries and integration points to partition work +- Factor in any performance or scalability concerns from the docs + +Use your understanding of the project to intuitively determine: +1. **How many agents are needed** - Let the code's complexity and criticality guide you +2. **How to partition the work** - Follow natural architectural boundaries +3. **Which specializations matter most** - Focus agents where risk is highest + +**Generate Specialized Agents** + + For each allocated agent, create a focused role: + + **Example for 6-agent allocation:** + - Agent 1: Critical_Path_Validator (user flows + error handling) + - Agent 2: Security_Scanner (input validation + auth) + - Agent 3: API_Security_Auditor (data exposure + boundaries) + - Agent 4: Performance_Profiler (bottlenecks + resource usage) + - Agent 5: Scalability_Analyst (constraints + growth paths) + - Agent 6: Integration_Verifier (dependencies + contracts) + + **Example for 3-agent allocation:** + - Agent 1: Security_Performance_Analyst (security + performance areas) + - Agent 2: Critical_Path_Guardian (functionality + integrations) + - Agent 3: Risk_Quality_Assessor (technical debt + code quality) + +#### Dynamic Focus Areas: + +Each agent receives specialized instructions based on: +- **File characteristics**: API endpoints → security focus +- **Code patterns**: Loops/algorithms → performance focus +- **Dependencies**: External services → integration focus +- **User touchpoints**: UI/voice → critical path focus + +### Step 4: Execute Dynamic Multi-Agent Review + +**Before launching agents, pause and think deeply:** +- What are the real risks in this code? +- Which areas could cause the most damage if they fail? +- Where would a solo developer need the most help? + +Generate and launch agents based on your thoughtful analysis: + +``` +For each dynamically generated agent: + Task: "As [Agent_Role], analyze [assigned_coverage_areas] in [target_scope]. + + MANDATORY COVERAGE CHECKLIST: + ☐ Critical Path: [assigned aspects] + ☐ Security: [assigned aspects] + ☐ Performance: [assigned aspects] + ☐ Integration: [assigned aspects] + + HIGH-IMPACT REVIEW MANDATE: + Focus ONLY on findings that significantly move the needle for a solo developer. + + Review workflow: + 1. Review auto-loaded project context (CLAUDE.md, project-structure.md, docs-overview.md) + 2. Analyze your assigned coverage areas with deep focus + 3. 
For complex issues, use: + - mcp__gemini__consult_gemini for architectural analysis + - mcp__context7__get-library-docs for framework best practices + 4. Cross-reference with other coverage areas for systemic issues + 5. Document ONLY high-impact findings: + + ## [Coverage_Area] Analysis by [Agent_Role] + + ### 🚨 Critical Issues (Production Risk) + - Issue: [description] + - Location: [file:line_number] + - Impact: [quantified - downtime hours, users affected, data at risk] + - Fix: [specific code snippet] + - Consequence if ignored: [what happens in 48 hours] + + ### 🎯 Strategic Improvements (Capability Unlocks) + - Limitation: [what's currently blocked] + - Solution: [architectural change or implementation] + - Unlocks: [new capability or scale] + - ROI: [effort hours vs benefit quantified] + + ### ⚡ Quick Wins (Optional) + - Only include if <2 hours for >20% improvement + - Must show measurable impact + + REMEMBER: Every finding must pass the 'so what?' test for a solo developer." +``` + +#### Parallel Execution Strategy: + +**Launch all agents simultaneously** for maximum efficiency + + +### Step 5: Synthesize Findings with Maximum Analysis Power + +After all sub-agents complete their analysis: + +**ultrathink** + +Activate maximum cognitive capabilities to: + +1. **Filter for Impact** + - Discard all low-priority findings + - Quantify real-world impact of each issue + - Focus on production risks and capability unlocks + +2. **Deep Pattern Analysis** + - Identify systemic issues vs isolated problems + - Find root causes across agent reports + - Detect subtle security vulnerabilities + +3. **Strategic Prioritization** + - Calculate ROI for each improvement + - Consider solo developer constraints + - Create actionable fix sequence + ```markdown + # Code Review Summary + + **Reviewed**: [scope description] + **Date**: [current date] + **Overall Quality Score**: [A-F grade with justification] + + ## Key Metrics + - Security Risk Level: [Critical/High/Medium/Low] + - Performance Impact: [description] + - Technical Debt: [assessment] + - Test Coverage: [if applicable] + ``` + +### Step 6: Present Comprehensive Review + +Structure the final output as: + +```markdown +# 🔍 Code Review Report + +## Executive Summary +[High-level findings and overall assessment] + +## 🚨 Production Risks (Fix Within 48 Hours) +[Only issues that could cause downtime, data loss, or security breaches] + +## 🎯 Strategic Improvements (High ROI) +[Only changes that unlock capabilities or improve metrics >25%] + +## ⚡ Quick Wins (Optional) +[Only if <2 hours effort for significant improvement] + +## Detailed Analysis + +### Security Assessment +[Detailed security findings from Security_Auditor] + +### Performance Analysis +[Detailed performance findings from Performance_Analyzer] + +### Architecture Review +[Detailed architecture findings from Architecture_Validator] + +### Code Quality Evaluation +[Detailed quality findings from Quality_Inspector] + +[Additional sections based on sub-agents used] + +## Action Plan +1. Critical fixes preventing production failures +2. High-ROI improvements unlocking capabilities + +## Impact Matrix +| Issue | User Impact | Effort | ROI | +|-------|-------------|--------|-----| +| [Only high-impact issues with quantified metrics] | +``` + +### Step 7: Interactive Follow-up + +After presenting the review, offer interactive follow-ups. For example: +- "Would you like me to fix any of the critical issues?" +- "Should I create a detailed refactoring plan for any component?" 
+- "Do you want me to generate tests for uncovered code?"
+- "Should I create GitHub issues for tracking these improvements?"
+
+## Implementation Notes
+
+1. **Use parallel Task execution** for all sub-agents to minimize review time
+2. **Include file:line_number references** for easy navigation
+3. **Balance criticism with recognition** of good practices
+4. **Provide actionable fixes**, not just problem identification
+5. **Consider project phase** and priorities when recommending changes
+6. **Use MCP servers** for specialized analysis when beneficial
+7. **Keep security findings sensitive** - don't expose vulnerabilities publicly
+
+## Coverage Verification
+
+Before presenting results, verify complete coverage:
+
+```
+☑ Critical Path Analysis: [Covered by agents X, Y]
+☑ Security Surface: [Covered by agents Y, Z]
+☑ Performance Impact: [Covered by agents X, Z]
+☑ Integration Points: [Covered by agents W, X]
+```
+
+If any area lacks coverage, deploy additional focused agents.
+
+## Error Handling
+
+If issues occur during review:
+- **Ambiguous input**: Use search tools to find relevant files before asking for clarification
+- **File not found**: Search for similar names or components across the codebase
+- **Large scope detected**: Dynamically scale agents based on calculated complexity
+- **No files found**: Provide helpful suggestions based on project structure
+- **Coverage gaps**: Deploy supplementary agents for missed areas
diff --git a/.claude/commands/create-docs.md b/.claude/commands/create-docs.md
new file mode 100644
index 00000000..77f359e5
--- /dev/null
+++ b/.claude/commands/create-docs.md
@@ -0,0 +1,309 @@
+You are working on the VR Language Learning App project. The user has requested to create or regenerate documentation with the arguments: "$ARGUMENTS"
+
+## Auto-Loaded Project Context:
+@/CLAUDE.md
+@/docs/ai-context/project-structure.md
+@/docs/ai-context/docs-overview.md
+
+## CRITICAL: AI-Optimized Documentation Principles
+All documentation must be optimized for AI consumption and future-proofing:
+- **Structured & Concise**: Use clear sections, lists, and hierarchies. Provide essential information only.
+- **Contextually Complete**: Include necessary context, decision rationale ("why"), and cross-references.
+- **Pattern-Oriented**: Make architectural patterns, conventions, and data flow explicit.
+- **Modular & Scalable**: Structure for partial updates and project growth.
+- **Cross-references**: Link related concepts with file paths, function names, and stable identifiers.
+
+---
+
+## Step 1: Analyze & Strategize
+
+Using the auto-loaded project context, analyze the user's request and determine the optimal documentation strategy.
+
+### 1.1. Parse Target & Assess Complexity
+**Action**: Analyze `$ARGUMENTS` to identify the `target_path` and its documentation tier.
+
+**Target Classification:**
+- **Tier 3 (Feature-Specific)**: Paths containing `/src/` and ending in `/CONTEXT.md`
+- **Tier 2 (Component-Level)**: Paths ending in component root `/CONTEXT.md`
+
+**Complexity Assessment Criteria:**
+- **Codebase Size**: File count and lines of code in target directory
+- **Technology Mix**: Diversity of languages and frameworks (Python, TypeScript, etc.)
+- **Architectural Complexity**: Dependency graph and cross-component imports
+- **Existing Documentation**: Presence and state of any CLAUDE.md files in the area
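+
+For illustration, a rough shell probe of these criteria before choosing a strategy (the target path is hypothetical; substitute the parsed `target_path`):
+
+```bash
+#!/usr/bin/env bash
+# Quick complexity probe for the thresholds used in 1.2 (<15, 15-75, >75 files).
+TARGET="src/feature-x"  # hypothetical target_path
+echo "File count:    $(find "$TARGET" -type f | wc -l)"
+echo "Lines of code: $(find "$TARGET" -type f -exec cat {} + | wc -l)"
+echo "Technology mix (file extensions):"
+find "$TARGET" -type f -name '*.*' | sed 's/.*\.//' | sort | uniq -c | sort -rn
+echo "Import statements (dependency signal):"
+grep -rE '^[[:space:]]*(import|from)[[:space:]]' "$TARGET" | wc -l
+echo "Existing docs:"
+find "$TARGET" \( -name 'CLAUDE.md' -o -name 'CONTEXT.md' \) -print
+```
+
+### 1.2. 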
Select Strategy +Think deeply about this documentation generation task and strategy based on the auto-loaded project context. Based on the assessment, select and announce the strategy. + +**Strategy Logic:** +- **Direct Creation**: Simple targets (< 15 files, single tech, standard patterns) +- **Focused Analysis**: Moderate complexity (15-75 files, 2-3 techs, some novel patterns) +- **Comprehensive Analysis**: High complexity (> 75 files, 3+ techs, significant architectural depth) + +--- + +## Step 2: Information Gathering (Analysis Phase) + +Based on the chosen strategy, gather the necessary information. + +### Strategy A: Direct Creation +Proceed directly to **Step 3.1**. Perform lightweight analysis during content generation. + +### Strategy B: Focused or Comprehensive Analysis (Sub-Agent Orchestration) + +#### 2.1. Sub-Agent Roles +Select from these specialized roles based on complexity assessment: +- **`Code_Analyzer`**: File structure, implementation patterns, logic flow, coding conventions +- **`Tech_Stack_Identifier`**: Frameworks, libraries, dependencies, technology-specific patterns +- **`Architecture_Mapper`**: Cross-component dependencies, integration points, data flow +- **`Doc_Validator`**: Existing documentation accuracy, gaps, valuable insights, content overlap analysis + +#### 2.2. Launch Sub-Agents +**Execution Plan:** +- **Focused Analysis (2-3 agents)**: `Code_Analyzer` + `Tech_Stack_Identifier` + `Doc_Validator` (if existing docs) +- **Comprehensive Analysis (3-4 agents)**: All agents as needed + +**CRITICAL: Launch agents in parallel using a single message with multiple Task tool invocations for optimal performance.** + +**Task Template:** +``` +Task: "As the [Agent_Role], analyze the codebase at `[target_path]` to support documentation generation. + +Your focus: [role-specific goal, e.g., 'identifying all architectural patterns and dependencies'] + +Standard workflow: +1. Review auto-loaded project context (CLAUDE.md, project-structure.md, docs-overview.md) +2. Analyze the target path for your specialized area +3. Return structured findings for documentation generation + +Return a comprehensive summary of your findings for this role." +``` + +--- + +## Step 3: Documentation Generation + +Think deeply about synthesizing findings and generating comprehensive documentation. Using gathered information, intelligently synthesize and generate the documentation content. + +### 3.1. Content Synthesis & Generation + +#### For Direct Creation (No Sub-Agents) +**Code-First Analysis Methodology:** +1. **Directory Structure Analysis**: Map file organization and purposes using Glob/LS +2. **Import Dependency Analysis**: Use Grep to identify integration patterns and dependencies +3. **Pattern Extraction**: Read key files to identify architectural patterns and coding conventions +4. **Technology Usage Analysis**: Detect frameworks, libraries, and technology-specific patterns +5. **Existing Documentation Assessment**: Read any current CLAUDE.md files for valuable insights + +#### For Sub-Agent Strategies +**Synthesis Integration Process:** +1. **Compile Core Findings**: Merge agent findings for immediate documentation generation +2. **Extract Cross-Tier Patterns**: Identify system-wide patterns that may impact foundational documentation +3. **Resolve Information Conflicts**: When code contradicts existing docs, use code as source of truth +4. **Identify Content Gaps**: Find areas needing new documentation based on analysis +5. 
**Apply Project Conventions**: Use coding standards and naming conventions from the auto-loaded /CLAUDE.md +6. **Content Overlap Identification**: From Doc_Validator findings, identify existing documentation that overlaps with target content for later migration analysis + +#### Content Generation Process +**For Both Approaches:** +1. **Select Template**: Choose Tier 2 or Tier 3 based on target classification +2. **Apply Content Treatment Strategy**: + - **Preserve**: Validated architectural insights from existing documentation + - **Enhance**: Extend existing patterns with newly discovered implementation details + - **Replace**: Outdated content that conflicts with current code reality + - **Create**: New documentation for undocumented patterns and decisions +3. **Populate Sections**: Fill template sections with synthesized findings +4. **Ensure Completeness**: Include architectural decisions, patterns, dependencies, and integration points +5. **Follow AI-Optimized Principles**: Structure for AI consumption with clear cross-references + +### 3.2. Template Guidelines + +**Tier 2 (Component-Level):** +```markdown +# [Component Name] - Component Context + +## Purpose +[Component purpose and key responsibilities] + +## Current Status: [Status] +[Status with evolution context and rationale] + +## Component-Specific Development Guidelines +[Technology-specific patterns and conventions] + +## Major Subsystem Organization +[High-level structure based on actual code organization] + +## Architectural Patterns +[Core patterns and design decisions] + +## Integration Points +[Dependencies and connections with other components] +``` + +**Tier 3 (Feature-Specific):** +```markdown +# [Feature Area] Documentation + +## [Area] Architecture +[Key architectural elements and integration patterns] + +## Implementation Patterns +[Core patterns and error handling strategies] + +## Key Files and Structure +[File organization with purposes] + +## Integration Points +[How this integrates with other parts of the system] + +## Development Patterns +[Testing approaches and debugging strategies] +``` + +--- + +## Step 4: Finalization & Housekeeping + +### 4.1. Write Documentation File +**Action**: Write the generated content to the target path. + +### 4.2. Update Documentation Registry + +#### Update docs-overview.md +**For new documentation files:** +- Add to appropriate tier section (Feature-Specific or Component-Level) +- Follow established entry format with path and description +- Maintain alphabetical ordering within sections + +**For updated existing files:** +- Verify entry exists and description is current +- Update any changed purposes or scopes + +#### Update Project Structure (if needed) +**If new directories were created:** +- Update file tree in `/docs/ai-context/project-structure.md` +- Add directory comments explaining purpose +- Maintain tree structure formatting and organization + +### 4.3. Quality Validation +**Action**: Verify tier appropriateness, code accuracy, cross-reference validity, and consistency with existing documentation patterns. + +### 4.4. Tier 1 Validation & Recommendations + +**Action**: Compare discovered code patterns against foundational documentation to identify inconsistencies and improvement opportunities. + +#### Process +1. **Discover Tier 1 Files**: Read `/docs/ai-context/docs-overview.md` to identify all foundational documentation files +2. **Read Foundational Docs**: Load discovered Tier 1 files to understand documented architecture +3. 
**Cross-Tier Analysis**: Using analysis findings from previous steps, compare: + - **Technology Stack**: Discovered frameworks/tools vs documented stack + - **Architecture Patterns**: Implementation reality vs documented decisions + - **Integration Points**: Actual dependencies vs documented integrations +4. **Generate Recommendations**: Output evidence-based suggestions for foundational documentation updates + +### 4.5. Content Migration & Redundancy Management + +**Action**: Intelligently manage content hierarchy and eliminate redundancy across documentation tiers. + +#### Cross-Reference Analysis +1. **Identify Related Documentation**: Using Doc_Validator findings from Step 3.1 synthesis and target tier classification, identify existing documentation that may contain overlapping content +2. **Content Overlap Detection**: Compare new documentation content with existing files to identify: + - **Duplicate Information**: Identical content that should exist in only one location + - **Hierarchical Overlaps**: Content that exists at wrong tier level (implementation details in architectural docs) + - **Cross-Reference Opportunities**: Content that should be linked rather than duplicated + +#### Smart Content Migration Strategy +**Content Classification Framework:** +- **Tier-Appropriate Duplication**: High-level architectural context can exist at both Tier 2 and Tier 3 with different detail levels +- **Migration Candidates**: Detailed implementation patterns, specific code examples, feature-specific technical details +- **Reference Targets**: Stable architectural decisions, design rationale, cross-cutting concerns + +**Migration Decision Logic:** +1. **For Tier 3 Creation (Feature-Specific)**: + - **Extract from Tier 2**: Move feature-specific implementation details to new Tier 3 file + - **Preserve in Tier 2**: Keep high-level architectural overview and design decisions + - **Add Cross-References**: Link Tier 2 overview to detailed Tier 3 implementation + +2. **For Tier 2 Creation (Component-Level)**: + - **Consolidate from Multiple Tier 3**: Aggregate architectural insights from existing feature docs + - **Preserve Tier 3 Details**: Keep implementation specifics in feature documentation + - **Create Navigation Structure**: Add references to relevant Tier 3 documentation + +#### Content Migration Execution +**Migration Process:** +1. **Identify Source Content**: Extract content that should migrate from existing files +2. **Content Transformation**: Adapt content to appropriate tier level (architectural vs implementation focus) +3. **Update Source Files**: Remove migrated content and add cross-references to new location +4. **Preserve Context**: Ensure source files maintain coherence after content removal +5. 
**Validate Migrations**: Confirm no broken references or lost information + +**Safety Framework:** +- **Conservative Defaults**: When uncertain, preserve content in original location and add references +- **Content Preservation**: Never delete content without creating it elsewhere first +- **Migration Reversibility**: Document all migrations to enable rollback if needed + +--- + +## Step 5: Generate Summary + +Provide a comprehensive summary including: + +### Documentation Creation Results +- **Documentation type and location** (Tier 2 or Tier 3) +- **Strategy used** (Direct Creation, Focused Analysis, or Comprehensive Analysis) +- **Key patterns documented** (architectural decisions, implementation patterns) +- **Registry updates made** (docs-overview.md, project-structure.md entries) + +### Tier 1 Architectural Intelligence +**Based on Step 4.4 analysis, provide structured recommendations:** + +#### Critical Updates Needed +- **File**: [specific foundational doc path] +- **Issue**: [specific inconsistency with evidence] +- **Recommendation**: [specific update needed] +- **Evidence**: [code references supporting the recommendation] + +#### Architecture Enhancement Opportunities +- **Gap Identified**: [missing foundational documentation area] +- **Scope**: [what should be documented] +- **Rationale**: [why this deserves foundational documentation] +- **Implementation Evidence**: [code patterns discovered] + +#### Documentation Health Assessment +- **Alignment Score**: [overall consistency between code and docs] +- **Most Accurate Areas**: [foundational docs that match implementation well] +- **Areas Needing Attention**: [foundational docs with significant gaps/inconsistencies] +- **Systematic Improvement Priority**: [recommended order for addressing issues] + +#### Content Migration Results +**Document all content hierarchy changes and redundancy eliminations:** + +- **Content Migrated From**: [source file path] → [target file path] + - **Content Type**: [e.g., "implementation patterns", "technical details", "architectural decisions"] + - **Rationale**: [why this content belongs at the target tier] + - **Cross-References Added**: [navigation links created between tiers] + +- **Content Preserved At**: [broader tier file] + - **Content Type**: [e.g., "architectural overview", "design decisions", "integration patterns"] + - **Rationale**: [why this content remains at the broader tier] + +- **Redundancies Eliminated**: + - **Duplicate Content Removed**: [specific duplications eliminated] + - **Hierarchical Corrections**: [content moved to appropriate tier level] + - **Reference Consolidations**: [areas where links replaced duplication] + +- **Migration Safety**: + - **Content Preserved**: [confirmation that no information was lost] + - **Rollback Information**: [documentation of changes for potential reversal] + - **Validation Results**: [confirmation of no broken references] + +#### Next Documentation Steps (Optional Recommendations) +- **Feature-Specific Documentation Candidates**: [suggest additional Tier 3 docs that would be valuable] +- **Cross-Component Documentation Needs**: [identify other components needing similar analysis] +- **Documentation Debt Eliminated**: [summary of redundancies and inconsistencies resolved] + +--- + +Now proceed to create/regenerate documentation based on the request: $ARGUMENTS diff --git a/.claude/commands/full-context.md b/.claude/commands/full-context.md new file mode 100644 index 00000000..92f12d90 --- /dev/null +++ b/.claude/commands/full-context.md @@ 
-0,0 +1,121 @@ +You are working on the VR Language Learning App project. Before proceeding with the user's request "$ARGUMENTS", you need to intelligently gather relevant project context using an adaptive sub-agent strategy. + +## Auto-Loaded Project Context: +@/CLAUDE.md +@/docs/ai-context/project-structure.md +@/docs/ai-context/docs-overview.md + +## Step 1: Intelligent Analysis Strategy Decision +Think deeply about the optimal approach based on the project context that has been auto-loaded above. Based on the user's request "$ARGUMENTS" and the project structure/documentation overview, intelligently decide the optimal approach: + +### Strategy Options: +**Direct Approach** (0-1 sub-agents): +- When the request can be handled efficiently with targeted documentation reading and direct analysis +- Simple questions about existing code or straightforward tasks + +**Focused Investigation** (2-3 sub-agents): +- When deep analysis of a specific area would benefit the response +- For complex single-domain questions or tasks requiring thorough exploration +- When dependencies and impacts need careful assessment + +**Multi-Perspective Analysis** (3+ sub-agents): +- When the request involves multiple areas, components, or technical domains +- When comprehensive understanding requires different analytical perspectives +- For tasks requiring careful dependency mapping and impact assessment +- Scale the number of agents based on actual complexity, not predetermined patterns + +## Step 2: Autonomous Sub-Agent Design + +### For Sub-Agent Approach: +You have complete freedom to design sub-agent tasks based on: +- **Project structure discovered** from the auto-loaded `/docs/ai-context/project-structure.md` file tree +- **Documentation architecture** from the auto-loaded `/docs/ai-context/docs-overview.md` +- **Specific user request requirements** +- **Your assessment** of what investigation approach would be most effective + +**CRITICAL: When using sub-agents, always launch them in parallel using a single message with multiple Task tool invocations. Never launch sequentially.** + +### Sub-Agent Autonomy Principles: +- **Custom Specialization**: Define agent focus areas based on the specific request and project structure +- **Flexible Scope**: Agents can analyze any combination of documentation, code files, and architectural patterns +- **Adaptive Coverage**: Ensure all relevant aspects of the user's request are covered without overlap +- **Documentation + Code**: Each agent should read relevant documentation files AND examine actual implementation code +- **Dependency Mapping**: For tasks involving code changes, analyze import/export relationships and identify all files that would be affected +- **Impact Assessment**: Consider ripple effects across the codebase, including tests, configurations, and related components +- **Pattern Compliance**: Ensure solutions follow existing project conventions for naming, structure, and architecture +- **Cleanup Planning**: For structural changes, identify obsolete code, unused imports, and deprecated files that should be removed to prevent code accumulation +- **Web Research**: Consider, optionally, deploying sub-agents for web searches when current best practices, security advisories, or external compatibility research would enhance the response + +### Sub-Agent Task Design Template: +``` +Task: "Analyze [SPECIFIC_COMPONENT(S)] for [TASK_OBJECTIVE] related to user request '$ARGUMENTS'" + +Standard Investigation Workflow: +1. 
Review auto-loaded project context (CLAUDE.md, project-structure.md, docs-overview.md) +2. (Optionally) Read additional relevant documentation files for architectural context +3. Analyze actual code files in [COMPONENT(S)] for implementation reality +4. For code-related tasks: Map import/export dependencies and identify affected files +5. Assess impact on tests, configurations, and related components +6. Verify alignment with project patterns and conventions +7. For structural changes: Identify obsolete code, unused imports, and files that should be removed + +Return comprehensive findings that address the user's request from this component perspective, including architectural insights, implementation details, dependency mapping, and practical considerations for safe execution." +``` + +Example Usage: +``` +Analysis Task: "Analyze web-dashboard audio processing components to understand current visualization capabilities and identify integration points for user request about adding waveform display" + +Implementation Task: "Analyze agents/tutor-server voice pipeline components for latency optimization related to user request about improving response times, including dependency mapping and impact assessment" + +Cross-Component Task: "Analyze Socket.IO integration patterns across web-dashboard and tutor-server to plan streaming enhancement for user request about adding live transcription, focusing on import/export changes, affected test files, and cleanup of deprecated socket handlers" +``` + +## Step 3: Execution and Synthesis + +### For Sub-Agent Approach: +Think deeply about integrating findings from all investigation perspectives. +1. **Design and launch custom sub-agents** based on your strategic analysis +2. **Collect findings** from all successfully completed agents +3. **Synthesize comprehensive understanding** by combining all perspectives +4. **Handle partial failures** by working with available agent findings +5. **Create implementation plan** (for code changes): Include dependency updates, affected files, cleanup tasks, and verification steps +6. **Execute user request** using the integrated knowledge from all agents + +### For Direct Approach: +1. **Load relevant documentation and code** based on request analysis +2. **Proceed directly** with user request using targeted context + +## Step 4: Consider MCP Server Usage (Optional) + +After gathering context, you may leverage MCP servers for complex technical questions as specified in the auto-loaded `/CLAUDE.md` Section 4: +- **Gemini Consultation**: Deep analysis of complex coding problems +- **Context7**: Up-to-date documentation for external libraries + +## Step 5: Context Summary and Implementation Plan + +After gathering context using your chosen approach: +1. **Provide concise status update** summarizing findings and approach: + - Brief description of what was discovered through your analysis + - Your planned implementation strategy based on the findings + - Keep it informative but concise (2-4 sentences max) + +Example status updates: +``` +"Analysis revealed the voice pipelines use Socket.IO for real-time communication with separate endpoints for each pipeline type. I'll implement the new transcription feature by extending the existing Socket.IO event handling in both the FastAPI backend and SvelteKit frontend, following the established pattern used in the Gemini Live pipeline. This will require updating 3 import statements and adding exports to the socket handler module." 
+ +"Found that audio processing currently uses a modular client architecture with separate recorder, processor, and stream-player components. I'll add the requested audio visualization by creating a new component that taps into the existing audio stream data and integrates with the current debug panel structure. The implementation will follow the existing component patterns and requires updates to 2 parent components for proper integration." +``` + +2. **Proceed with implementation** of the user request using your comprehensive understanding + +## Optimization Guidelines + +- **Adaptive Decision-Making**: Choose the approach that best serves the specific user request +- **Efficient Resource Use**: Balance thoroughness with efficiency based on actual complexity +- **Comprehensive Coverage**: Ensure all aspects relevant to the user's request are addressed +- **Quality Synthesis**: Combine findings effectively to provide the most helpful response + +This adaptive approach ensures optimal context gathering - from lightweight direct analysis for simple requests to comprehensive multi-agent investigation for complex system-wide tasks. + +Now proceed with intelligent context analysis for: $ARGUMENTS diff --git a/.claude/commands/gemini-consult.md b/.claude/commands/gemini-consult.md new file mode 100644 index 00000000..1d171b56 --- /dev/null +++ b/.claude/commands/gemini-consult.md @@ -0,0 +1,164 @@ +# /gemini-consult + +*Engages in deep, iterative conversations with Gemini MCP for complex problem-solving.* + +## Usage +- **With arguments**: `/gemini-consult [specific problem or question]` +- **Without arguments**: `/gemini-consult` - Intelligently infers topic from current context + +## Core Philosophy +Persistent Gemini sessions for evolving problems through: +- **Continuous dialogue** - Multiple rounds until clarity achieved +- **Context awareness** - Smart problem detection from current work +- **Session persistence** - Keep alive for the entire problem lifecycle + +**CRITICAL: Always consider Gemini's input as suggestions, never as truths.** Think critically about what Gemini says and incorporate only the useful parts into your proposal. Always think for yourself - maintain your independent judgment and analytical capabilities. If you disagree with something clarify it with Gemini. + +## Execution + +User provided context: "$ARGUMENTS" + +### Step 1: Understand the Problem + +**When $ARGUMENTS is empty:** +Think deeply about the current context to infer the most valuable consultation topic: +- What files are open or recently modified? +- What errors or challenges were discussed? +- What complex implementation would benefit from Gemini's analysis? +- What architectural decisions need exploration? + +Generate a specific, valuable question based on this analysis. + +**When arguments provided:** +Extract the core problem, context clues, and complexity indicators. + +### Step 1.5: Gather External Documentation + +**Think deeply about external dependencies:** +- What libraries/frameworks are involved in this problem? +- Am I fully familiar with their latest APIs and best practices? +- Have these libraries changed significantly or are they new/evolving? 
+ +**When to use Context7 MCP:** +- Libraries with frequent updates (e.g., Google GenAI SDK) +- New libraries you haven't worked with extensively +- When implementing features that rely heavily on library-specific patterns +- Whenever uncertainty exists about current best practices + +```python +# Example: Get up-to-date documentation +library_id = mcp__context7__resolve_library_id(libraryName="google genai python") +docs = mcp__context7__get_library_docs( + context7CompatibleLibraryID=library_id, + topic="streaming", # Focus on relevant aspects + tokens=8000 +) +``` + +Include relevant documentation insights in your Gemini consultation for more accurate, current guidance. + +### Step 2: Initialize Gemini Session + +**CRITICAL: Always attach foundational files:** +```python +foundational_files = [ + "MCP-ASSISTANT-RULES.md", # If exists + "docs/ai-context/project-structure.md", + "docs/ai-context/docs-overview.md" +] + +session = mcp__gemini__consult_gemini( + specific_question="[Clear, focused question]", + problem_description="[Comprehensive context with constraints from CLAUDE.md]", + code_context="[Relevant code snippets]", + attached_files=foundational_files + [problem_specific_files], + file_descriptions={ + "MCP-ASSISTANT-RULES.md": "Project vision and coding standards", + "docs/ai-context/project-structure.md": "Complete tech stack and file structure", + "docs/ai-context/docs-overview.md": "Documentation architecture", + # Add problem-specific descriptions + }, + preferred_approach="[solution/review/debug/optimize/explain]" +) +``` + +### Step 3: Engage in Deep Dialogue + +**Think deeply about how to maximize value from the conversation:** + +1. **Active Analysis** + - What assumptions did Gemini make? + - What needs clarification or deeper exploration? + - What edge cases or alternatives should be discussed? + - **If Gemini mentions external libraries:** Check Context7 MCP for current documentation to verify or supplement Gemini's guidance + +2. **Iterative Refinement** + ```python + follow_up = mcp__gemini__consult_gemini( + specific_question="[Targeted follow-up]", + session_id=session["session_id"], + additional_context="[New insights, questions, or implementation feedback]", + attached_files=[newly_relevant_files] + ) + ``` + +3. **Implementation Feedback Loop** + Share actual code changes and real-world results to refine the approach. + +### Step 4: Session Management + +**Keep Sessions Open** - Don't close immediately. Maintain for the entire problem lifecycle. + +**Only close when:** +- Problem is definitively solved and tested +- Topic is no longer relevant +- Fresh start would be more beneficial + +**Monitor sessions:** +```python +active = mcp__gemini__list_sessions() +requests = mcp__gemini__get_gemini_requests(session_id="...") +``` + +## Key Patterns + +### Clarification Pattern +"You mentioned [X]. In our context of [project specifics], how does this apply to [specific concern]?" + +### Deep Dive Pattern +"Let's explore [aspect] further. What are the trade-offs given our [constraints]?" + +### Alternative Pattern +"What if we approached this as [alternative]? How would that affect [concern]?" + +### Progress Check Pattern +"I've implemented [changes]. Here's what happened: [results]. Should I adjust the approach?" + +## Best Practices + +1. **Think deeply** before each interaction - what will extract maximum insight? +2. **Be specific** - Vague questions get vague answers +3. **Show actual code** - Not descriptions +4. 
**Challenge assumptions** - Don't accept unclear guidance +5. **Document decisions** - Capture the "why" for future reference +6. **Stay curious** - Explore alternatives and edge cases +7. **Trust but verify** - Test all suggestions thoroughly + +## Implementation Approach + +When implementing Gemini's suggestions: +1. Start with the highest-impact changes +2. Test incrementally +3. Share results back with Gemini +4. Iterate based on real-world feedback +5. Document key insights in appropriate CONTEXT.md files + +## Remember + +- This is a **conversation**, not a query service +- **Context is king** - More context yields better guidance +- **Gemini sees patterns you might miss** - Be open to unexpected insights +- **Implementation reveals truth** - Share what actually happens +- Treat Gemini as a **collaborative thinking partner**, not an oracle + +The goal is deep understanding and optimal solutions through iterative refinement, not quick answers. \ No newline at end of file diff --git a/.claude/commands/handoff.md b/.claude/commands/handoff.md new file mode 100644 index 00000000..0312e730 --- /dev/null +++ b/.claude/commands/handoff.md @@ -0,0 +1,146 @@ +You are concluding work on the VR Language Learning App project and need to create a comprehensive handoff for the next AI session. This command intelligently analyzes your current session achievements and updates the handoff document with both auto-detected progress and user-provided context. + +## Auto-Loaded Project Context: +@docs/ai-context/HANDOFF.md +@/CLAUDE.md + +## Step 1: Process User Arguments + +Handle the arguments flexibly: +- **With Arguments**: `$ARGUMENTS` provides user context about what was accomplished or attempted +- **Without Arguments**: Focus purely on auto-detection from session analysis + +User provided context: "$ARGUMENTS" + +## Step 2: Analyze Current Session Achievements + +Think about what was accomplished in this session and how to best capture it for handoff. Review your recent conversation and tool usage to identify significant work: + +**Auto-Detect Evidence of:** +- **File Operations** (Write, Edit, MultiEdit tools) - what files were modified and why +- **New Features** - functionality added or implemented +- **Bug Fixes** - issues resolved or debugging attempts +- **Architecture Changes** - structural improvements or refactoring +- **Configuration Updates** - settings, dependencies, or environment changes +- **Documentation Work** - updates to documentation files +- **Incomplete Work** - attempts that didn't reach completion +- **Blockers Encountered** - issues that prevented completion + +**Generate Session Summary:** +``` +Session Analysis: +- Primary work area: [component/domain affected] +- Main accomplishments: [key achievements] +- Files modified: [list of changed files] +- Status: [completed/in-progress/blocked] +- User context: [if $ARGUMENTS provided] +``` + +## Step 3: Analyze Auto-Loaded HANDOFF.md + +Analyze the auto-loaded `docs/ai-context/HANDOFF.md` to understand: +- **Existing sections** and their current status +- **Related ongoing work** that might connect to your session +- **Structure and formatting** patterns to maintain consistency +- **Unrelated content** that should be preserved + +## Step 4: Determine Update Strategy + +Think about how to best update the handoff based on this session's work. 
Based on your session analysis and the auto-loaded existing handoff content, decide: + +**If Current Work Relates to Existing Task:** +- Update the existing section with new progress +- Add accomplishments to "What Was Accomplished" +- Update "Current Status" and "Current Issue" if resolved +- Modify "Next Steps" based on new state + +**If Current Work is New/Unrelated:** +- Create a new section with descriptive title +- Include timestamp for session identification +- Follow existing document structure and formatting + +**If Work Completed an Existing Task:** +- Mark the task as completed +- Summarize final outcome +- Consider archiving or removing if fully resolved + +## Step 5: Update HANDOFF.md Intelligently + +Make targeted updates to the auto-loaded HANDOFF.md: + +### For New Sections, Include: +```markdown +## [Task Title] - [Status] + +### Current Status +[Brief description of current state] + +### What Was Accomplished +[Bulleted list of concrete achievements with file paths] + +### Current Issue (if applicable) +[Any blockers or unresolved problems] + +### Next Steps to [Objective] +[Actionable items for continuation] + +### Key Files to Review +[List of relevant files organized by category] + +### Context for Next Session +[Important notes for continuity] +``` + +### For Updates to Existing Sections: +- **Add to accomplishments** without duplicating existing content +- **Update status** if progress changed the situation +- **Modify current issues** if problems were resolved or new ones discovered +- **Refresh next steps** based on new progress + +## Step 6: Maintain Document Quality + +Ensure your updates follow these guidelines: + +**Content Quality:** +- **Specific**: Include exact file paths and technical details +- **Actionable**: Provide clear next steps for continuation +- **Contextual**: Explain the reasoning behind decisions +- **Current**: Reflect the actual state after your session + +**Formatting Consistency:** +- Follow existing markdown structure and patterns +- Use consistent heading levels and formatting +- Maintain bullet point styles and organization +- Preserve the document's overall structure + +**Information Management:** +- **Don't duplicate** existing information unless updating it +- **Preserve unrelated** sections that weren't part of your work +- **Consolidate** related information rather than fragmenting it +- **Archive completed** work appropriately + +## Step 7: Final Verification + +Before completing, verify that your handoff: +- **Accurately reflects** what was accomplished in the session +- **Combines** auto-detected technical changes with user-provided context +- **Provides clear direction** for the next AI session +- **Maintains continuity** with existing handoff content +- **Is immediately actionable** for someone picking up the work + +## Quality Standards + +**Be Comprehensive But Concise:** +- Include all relevant technical details +- Focus on actionable information +- Avoid redundancy with existing content + +**Maintain Professional Handoff Quality:** +- Clear problem statements and current status +- Specific file references and technical context +- Logical next steps that build on current progress +- Helpful context that speeds up the next session + +This intelligent handoff approach ensures smooth continuity between AI sessions while capturing both the technical reality of what was accomplished and the user's perspective on the work. 
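+
+For illustration, a hypothetical filled-in entry (task, names, and paths invented) that meets these standards:
+
+```markdown
+## Fix Socket Reconnect Loop - In Progress
+
+### Current Status
+Reconnect storms reduced but not eliminated; exponential backoff now in place.
+
+### What Was Accomplished
+- Added exponential backoff to `web-dashboard/src/lib/socket.ts`
+- Captured reproduction steps and timing data in session notes
+
+### Current Issue
+Server still drops the session after ~5 minutes of idle audio streaming.
+
+### Next Steps to Stable Reconnects
+1. Inspect keepalive settings in the `agents/tutor-server` Socket.IO config
+2. Add an integration test covering idle-timeout reconnects
+
+### Context for Next Session
+Backoff constants were chosen provisionally; revisit after the server-side fix.
+```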
+ +Now analyze your session, combine it with the user context "$ARGUMENTS", and update the handoff document accordingly. diff --git a/.claude/commands/refactor.md b/.claude/commands/refactor.md new file mode 100644 index 00000000..2377d91e --- /dev/null +++ b/.claude/commands/refactor.md @@ -0,0 +1,188 @@ +You are working on the VR Language Learning App project. The user has requested to refactor specific files tagged with @ symbols in their arguments: "$ARGUMENTS" + +## Auto-Loaded Project Context: +@/CLAUDE.md +@/docs/ai-context/project-structure.md +@/docs/ai-context/docs-overview.md + +## Step 1: Parse Tagged Files +Extract all @ tagged file paths from the user's arguments. Only process files that are explicitly tagged with @ symbols. + +**Example parsing:** +- Input: "refactor @src/big-file.ts @components/Large.svelte" +- Extract: ["src/big-file.ts", "components/Large.svelte"] + +## Step 2: Validate and Analyze Files +For each tagged file: +1. **Verify file exists** - If file doesn't exist, inform user and skip +2. **Read file contents** - Understand the structure and dependencies +3. **Analyze current directory structure** - Map existing patterns around the file + +## Step 3: Intelligent Analysis Strategy Decision +Think deeply about the safest and most effective refactoring approach based on the auto-loaded project context. Based on the initial analysis from Step 2 and the auto-loaded project context, intelligently decide the optimal approach for each file: + +### Strategy Options: + +**Direct Refactoring** (0-1 sub-agents): +- Simple files with clear, obvious split points +- Files with minimal external dependencies +- Standard refactoring patterns (e.g., extract utils, split large classes) +- Low risk of breaking changes + +**Focused Analysis** (2-3 sub-agents): +- Moderate complexity with specific concerns +- Files with moderate dependency footprint +- When one aspect needs deep analysis (e.g., complex dependencies OR intricate file structure) + +**Comprehensive Analysis** (3+ sub-agents): +- High complexity files with multiple concerns +- Extensive dependency networks +- Novel refactoring patterns not seen in project +- High risk of breaking changes +- Files that are central to multiple systems + +## Step 4: Execute Chosen Strategy + +### For Direct Refactoring: +Proceed with straightforward refactoring using the initial analysis and project context. + +### For Sub-Agent Approaches: +You have complete autonomy to design and launch sub-agents based on the specific refactoring needs identified. 
Consider these key investigation areas and design custom agents to cover what's most relevant: + +**Core Investigation Areas to Consider:** +- **File Structure Analysis**: Logical component boundaries, split points, cohesion assessment +- **Dependency Network Mapping**: Import/export analysis, usage patterns, circular dependency risks +- **Project Pattern Compliance**: Directory structures, naming conventions, organizational patterns +- **Impact Assessment**: Test files, configuration files, build scripts that need updates +- **Import Update Analysis**: All files that import from the target file and need updated import paths +- **Technology Stack Considerations**: Language-specific patterns, framework conventions + +**Autonomous Sub-Agent Design Principles:** +- **Custom Specialization**: Define agents based on the specific file's complexity and risks +- **Flexible Agent Count**: Use as many agents as needed - scale based on actual complexity +- **Adaptive Coverage**: Ensure critical aspects are covered without unnecessary overlap +- **Risk-Focused Analysis**: Prioritize investigation of the highest-risk refactoring aspects + +**Sub-Agent Task Template:** +``` +Task: "Analyze [SPECIFIC_INVESTIGATION_AREA] for safe refactoring of [TARGET_FILE] related to user request '$ARGUMENTS'" + +Standard Investigation Workflow: +1. Review auto-loaded project context (CLAUDE.md, project-structure.md, docs-overview.md) +2. [CUSTOM_ANALYSIS_STEPS] - Investigate the specific area thoroughly +3. Return actionable findings that support safe and effective refactoring + +Return comprehensive findings addressing this investigation area." +``` + +**CRITICAL: When launching sub-agents, always use parallel execution with a single message containing multiple Task tool invocations.** + + +## Step 5: Synthesize Analysis and Plan Refactoring + +Think deeply about integrating findings from all sub-agent investigations for safe and effective refactoring. 
Combine findings from all agents to create optimal refactoring strategy: + +### Integration Analysis +- **File Structure**: Use File Analysis Agent's component breakdown +- **Organization**: Apply Pattern Recognition Agent's directory recommendations +- **Safety**: Implement Dependency Analysis Agent's import/export strategy +- **Completeness**: Address Impact Assessment Agent's broader concerns + +### Refactoring Strategy Decision +Based on synthesized analysis, determine: +- **Split granularity**: How many files and what logical divisions +- **Directory structure**: Same-level, subdirectory, or existing directory placement +- **Import/export strategy**: How to restructure exports and update all consuming files +- **File naming**: Following project conventions and clarity + +### Risk Assessment +- **Breaking changes**: Identify and mitigate potential issues +- **Dependency conflicts**: Plan import/export restructuring +- **Test impacts**: Plan for test file updates +- **Documentation needs**: Identify doc updates required + +## Step 6: Refactoring Value Assessment + +### Evaluate Refactoring Worth +After synthesizing all analysis, critically evaluate whether the proposed refactoring will actually improve the codebase: + +**Positive Indicators (Worth Refactoring):** +- File significantly exceeds reasonable size limits (500+ lines for components, 1000+ for utilities) +- Clear separation of concerns violations (UI mixed with business logic, multiple unrelated features) +- High cyclomatic complexity that would be reduced +- Repeated code patterns that could be abstracted +- Poor testability that would improve with modularization +- Dependencies would become cleaner and more maintainable +- Aligns with project's architectural patterns + +**Negative Indicators (Not Worth Refactoring):** +- File is already well-organized despite its size +- Splitting would create artificial boundaries that reduce clarity +- Would introduce unnecessary complexity or abstraction +- Dependencies would become more convoluted +- File serves a single, cohesive purpose effectively +- Refactoring would violate project conventions +- Minimal actual improvement in maintainability + +### Decision Point +Based on the assessment: + +**If Refactoring IS Worth It:** +- Print clear summary of benefits: "✅ This refactoring will improve the codebase by: [specific benefits]" +- Proceed automatically to Step 7 (Execute Refactoring) + +**If Refactoring IS NOT Worth It:** +- Be brutally honest about why: "❌ This refactoring is not recommended because: [specific reasons]" +- Explain what makes the current structure acceptable +- Ask user explicitly: "The file is currently well-structured for its purpose. Do you still want to proceed with the refactoring? (yes/no)" +- Only continue if user confirms + +## Step 7: Execute Refactoring + +Implement the refactoring based on the synthesized analysis: + +### File Creation Order +1. **Create directories** - Create any new subdirectories needed +2. **Create core files** - Start with main/index files +3. **Create supporting files** - Types, utils, constants +4. **Update imports** - Fix all import/export statements +5. 
**Update original file** - Replace with new modular structure + +### Import/Export Management +- **Update all consuming files** - Modify import statements to point to new file locations +- **Restructure exports** - Organize exports in the new file structure +- **Update relative imports** - Fix paths throughout the codebase +- **Follow naming conventions** - Use project's established patterns + +### Quality Assurance +- **Preserve functionality** - Ensure no breaking changes +- **Maintain type safety** - Keep all TypeScript types intact +- **Follow coding standards** - Apply project's style guidelines +- **Test compatibility** - Verify imports work correctly + + +## Step 8: Quality Verification + +For each refactored file: +- **Check imports** - Verify all imports resolve correctly +- **Run type checks** - Ensure TypeScript compilation passes +- **Test functionality** - Confirm no breaking changes +- **Validate structure** - Ensure new organization follows project patterns + + +## Error Handling +- **File not found** - Skip and inform user +- **Not worth refactoring** - Skip files that are good as is and give users an explanation. +- **Parse errors** - Report syntax issues and skip +- **Import conflicts** - Resolve or report issues + +## Summary Format +Provide a comprehensive summary of: +- **Analysis Results**: Key findings from each sub-agent +- **Refactoring Strategy**: Chosen approach and rationale +- **Value Assessment**: Whether refactoring improves the code (from Step 6) +- **Files Created**: New structure with explanations (if refactoring proceeded) +- **Dependencies Fixed**: Import/export changes made (if refactoring proceeded) +- **Issues Encountered**: Any problems and resolutions + +Now proceed with multi-agent analysis and refactoring of the tagged files: $ARGUMENTS diff --git a/.claude/commands/update-docs.md b/.claude/commands/update-docs.md new file mode 100644 index 00000000..1e1ddd35 --- /dev/null +++ b/.claude/commands/update-docs.md @@ -0,0 +1,314 @@ +You have just completed work on the VR Language Learning App project. Analyze changes based on the provided context and automatically update relevant documentation. + +## Auto-Loaded Project Context: +@/CLAUDE.md +@/docs/ai-context/project-structure.md +@/docs/ai-context/docs-overview.md + +## Core Documentation Principle: Document Current State Only + +**CRITICAL: Always document the current "is" state of the system. Never reference legacy implementations, describe improvements made, or explain what changed. Documentation should read as if the current implementation has always existed.** + +### Documentation Anti-Patterns to Avoid: +- ❌ "Refactored the voice pipeline to use streaming instead of batch processing" +- ❌ "Improved performance by implementing caching" +- ❌ "Previously used X, now uses Y for better results" +- ❌ "Legacy implementation has been replaced with..." + +### Documentation Best Practices: +- ✅ "The voice pipeline uses streaming for real-time processing" +- ✅ "Implements caching for frequently accessed data" +- ✅ "Uses Y for optimal results" +- ✅ "The system architecture follows..." 
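+
+One way to catch the anti-patterns above mechanically - a minimal sketch (the phrase list is illustrative and intentionally incomplete; tune it per project):
+
+```bash
+#!/bin/bash
+# Flag documentation lines that describe history instead of current state.
+grep -rnEi 'refactored|previously|legacy|now uses|has been replaced|improved performance' \
+    docs/ --include='*.md' \
+    && echo "Review the matches above: document the current state only." \
+    || echo "No temporal language found."
+```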
+ +## Step 1: Analyze Changes Based on Input + +### Determine Analysis Mode: +- **No input (default)**: Analyze recent conversation context +- **Git commit ID** (e.g., "3b8d24e" or full hash): Analyze specific commit +- **"uncommitted"/"staged"/"working"**: Analyze uncommitted changes +- **"last N commits"** (e.g., "last 3 commits"): Analyze recent commits + +### Execute Analysis: +Based on the input parameter: + +#### For Git Commit Analysis: +```bash +# Get commit details +git show --name-status [COMMIT_ID] +git diff [COMMIT_ID]^ [COMMIT_ID] +``` + +#### For Uncommitted Changes: +```bash +# Get staged and unstaged changes +git status --porcelain +git diff HEAD +git diff --cached +``` + +#### For Recent Commits: +```bash +# Get recent commit history +git log --oneline -n [N] +git diff HEAD~[N] HEAD +``` + +#### For Session Context (default): +Review your recent conversation and tool usage for significant changes. + +**Look for Evidence of Documentation-Relevant Changes:** +- **New features or components** (functionality that needs documenting) +- **Architecture decisions** (new patterns, structural changes, design decisions) +- **Technology stack changes** (new dependencies, framework additions, integration changes) +- **API changes** (new endpoints, modified interfaces, breaking changes) +- **Configuration changes** (new environment variables, settings, deployment requirements) +- **File structure changes** (new directories, moved components, reorganized code) + +**Exclude from Documentation Updates:** +- Performance optimizations without architectural impact +- Bug fixes that don't change interfaces or patterns +- Code cleanup, refactoring that doesn't affect usage +- Logging improvements, debugging enhancements +- Test additions without new functionality + +**Generate a brief summary** of what was accomplished: +``` +Analysis source: [session context/commit ID/uncommitted changes] +Detected changes: [1-2 sentence summary of main work done] +``` + +## Step 2: Understand Project Context and Documentation Structure + +Analyze the auto-loaded foundational files: +1. `/CLAUDE.md` - **CRITICAL:** Understand AI instructions, coding standards, and development protocols that govern the project +2. `/docs/ai-context/project-structure.md` - **FOUNDATION:** Technology stack, complete file tree and architecture overview +3. `/docs/ai-context/docs-overview.md` - Understand: + - What documentation files exist and their purposes + - How the documentation is organized + - Which types of changes map to which documentation + +**AI-First Documentation Principle**: Remember that documentation is primarily for AI consumption - optimize for file path references, clear structure markers, and machine-readable patterns that enable efficient context loading. + +## Step 3: Intelligent Update Strategy Decision + +Think deeply about the documentation updates needed based on the auto-loaded project context and detected changes. 
Based on the detected changes from Step 1 AND the auto-loaded project context, intelligently decide the optimal approach: + +### Strategy Options: + +**Direct Update** (0-1 sub-agents): +- Simple file modifications with clear documentation mapping +- Bug fixes or minor enhancements that don't affect architecture +- Changes confined to a single component or feature area +- Standard patterns already well-documented in the project + +**Focused Analysis** (2-3 sub-agents): +- Moderate complexity changes affecting multiple files +- New features that introduce novel patterns +- Changes that span 2-3 components or documentation tiers +- Technology stack updates requiring validation across docs + +**Comprehensive Analysis** (3+ sub-agents): +- Complex architectural changes affecting multiple system areas +- Major refactoring that restructures component relationships +- New integrations that create cross-system dependencies +- Changes that require extensive documentation cascade updates + +## Step 4: Execute Chosen Strategy + +### For Direct Update: +Proceed with straightforward documentation updates using the detected changes and auto-loaded foundational context. Continue with Step 5 (Final Decision Making). + +### For Sub-Agent Approaches: +You have complete autonomy to design sub-agents based on the specific changes detected. Consider these investigation areas and design custom agents to cover what's most relevant: + +**Core Investigation Areas to Consider:** +- **Change Impact Analysis**: Map file modifications to affected documentation across all tiers +- **Architecture Validation**: Verify existing architectural docs still reflect current implementation +- **Cross-Component Dependency Mapping**: Identify documentation updates needed due to integration changes +- **Documentation Accuracy Assessment**: Validate current docs against modified code patterns +- **Tier Cascade Requirements**: Determine which documentation levels need updates based on change scope +- **Technology Stack Verification**: Ensure tech stack changes are reflected across relevant documentation + +**Autonomous Sub-Agent Design Principles:** +- **Custom Specialization**: Define agents based on the specific change complexity and documentation impact +- **Flexible Agent Count**: Use as many agents as needed - scale based on actual change scope +- **Adaptive Coverage**: Ensure all affected documentation areas are covered without unnecessary overlap +- **Update-Focused Analysis**: Prioritize investigation that directly supports accurate documentation updates + +**Sub-Agent Task Template:** +``` +Task: "Analyze [SPECIFIC_INVESTIGATION_AREA] for documentation updates based on changes from [SOURCE]: [DETECTED_CHANGES]" + +Standard Investigation Workflow: +1. Review auto-loaded project context (CLAUDE.md, project-structure.md, docs-overview.md) +2. [CUSTOM_ANALYSIS_STEPS] - Investigate the specific area thoroughly +3. Return actionable findings that identify required documentation updates + +Return comprehensive findings addressing this investigation area for documentation updates. +``` + +**CRITICAL: When using sub-agents, always launch them in parallel using a single message with multiple Task tool invocations.** + +## Step 5: Synthesize Analysis and Plan Updates + +### For Sub-Agent Approaches: +Think deeply about integrating findings from all sub-agent investigations for optimal documentation updates. 
Combine findings from all agents to create optimal documentation update strategy: + +**Integration Analysis:** +- **Change Impact**: Use Change Impact Agent's mapping of modifications to documentation +- **Architecture Validation**: Apply Architecture Validation Agent's findings on outdated information +- **Dependency Updates**: Implement Cross-Component Agent's integration change requirements +- **Accuracy Corrections**: Address Documentation Accuracy Agent's identified inconsistencies +- **Cascade Planning**: Execute Tier Cascade Agent's multi-level update requirements + +**Update Strategy Decision:** +Based on synthesized analysis, determine: +- **Documentation scope**: Which files need updates and at what detail level +- **Update priority**: Critical architectural changes vs. minor pattern updates +- **Cascade requirements**: Which tier levels need coordinated updates +- **New file creation**: Whether new documentation files are warranted + +## Step 6: Final Decision Making + +Based on your context analysis and the auto-loaded documentation structure (either direct or synthesized from sub-agents), decide: +- **Which documents need updates** (match changes to appropriate documentation) +- **What type of updates** (component changes, architecture decisions, new patterns, etc.) +- **Update scope** (major changes get more detail, minor changes get brief updates) +- **Whether new documentation files are needed** (see Smart File Creation guidelines below) + +## Step 7: Smart File Creation (If Needed) + +Before updating existing documentation, assess if new documentation files should be created based on the 3-tier system: + +### Guidelines for Creating New Documentation Files + +**Create new Component CONTEXT.md when:** +- You detect an entirely new top-level component (new directory under `agents/`, `unity-client/`, `supabase-functions/`, etc.) +- The component has significant functionality (5+ meaningful files) +- Example: Adding `agents/lesson-generator/` → Create `agents/lesson-generator/CONTEXT.md` + +**Create new Feature-Specific CONTEXT.md when:** +- You detect a new complex subsystem within an existing component +- The subsystem has 3+ files and represents a distinct functional area +- No existing granular CONTEXT.md file covers this area +- Example: Adding `agents/tutor-server/src/features/translation/` with multiple files → Create `agents/tutor-server/src/features/CONTEXT.md` + +**When NOT to create new files:** +- Small additions (1-2 files) that fit existing documentation scope +- Bug fixes or minor modifications +- Temporary or experimental code + +**File Creation Process:** +1. **Create the new CONTEXT.md file** with placeholder content following the pattern of existing granular docs +2. **Update `/docs/ai-context/docs-overview.md`** to include the new file in the appropriate tier +3. **Document the addition** in the current update process + +### File Content Template for New Granular CONTEXT.md: +```markdown +# [Feature Area] Documentation + +*This file documents [specific area] patterns and implementations within [component].* + +## [Area] Architecture +- [Key architectural elements] + +## Implementation Patterns +- [Key patterns used] + +## Integration Points +- [How this integrates with other parts] + +--- + +*This file was created as part of the 3-tier documentation system to document [brief reason].* +``` + +## Step 8: Tier-First Documentation Updates + +**CRITICAL: Always start with Tier 3 (feature-specific) documentation and work upward through the tiers. 
Never skip tiers.** + +### Tier 3 (Feature-Specific) - START HERE +**Always begin with the most granular documentation closest to your changes:** +- **Identify affected Tier 3 files** (feature-specific CONTEXT.md files in subdirectories) +- **Update these granular files first** with specific implementation details, patterns, and integration points +- **Examples**: `agents/tutor-server/src/core/pipelines/CONTEXT.md`, `web-dashboard/src/lib/api/CONTEXT.md`, `agents/tutor-server/src/features/*/CONTEXT.md` +- **Update guidelines**: Be specific about file names, technologies, implementation patterns + +### Tier 2 (Component-Level) - CASCADE UP +**After completing Tier 3 updates, evaluate if component-level changes are needed:** +- **Check parent component CONTEXT.md files** (e.g., `agents/tutor-server/CONTEXT.md` for changes in `agents/tutor-server/src/*/`) +- **Update if changes represent significant architectural shifts** affecting the overall component +- **Focus on**: How granular changes affect component architecture, new integration patterns, major feature additions +- **Examples**: `agents/tutor-server/CONTEXT.md`, `web-dashboard/CONTEXT.md`, `unity-client/CONTEXT.md` + +### Tier 1 (Foundational) - CASCADE UP +**Finally, check if foundational documentation needs updates for system-wide impacts:** + +#### Project Structure Updates (`/docs/ai-context/project-structure.md`) +Update for any of these changes: +- **File tree changes**: Created, moved, deleted files/directories; renamed components; restructured organization +- **Technology stack updates**: New dependencies (check pyproject.toml, package.json), major version updates, new frameworks, AI service changes, development tool modifications + +#### Other Foundational Documentation +Update other `/docs/ai-context/` files if changes affect: +- **System-wide architectural patterns** +- **Cross-component integration approaches** +- **Development workflow or standards** + +### Cascade Decision Logic +**What Constitutes "Significant Updates" Requiring Cascade:** +- **New major feature areas** (not just bug fixes or minor enhancements) +- **Architectural pattern changes** that affect how components integrate with others +- **New technologies or frameworks** introduced to a component +- **Major refactoring** that changes component structure or responsibilities +- **New integration points** between components or external systems + +### Update Quality Guidelines (All Tiers) +- **Be concise** (max 3 sentences unless major architectural change) +- **Be specific** (include file names, technologies, key benefits) +- **Follow existing patterns** in each document +- **Avoid redundancy** (don't repeat what's already documented) +- **Co-locate knowledge** (keep documentation near relevant code) + +## Step 9: Update Documentation Overview + +**IMPORTANT:** After updating any documentation files in steps 1-8, check if the documentation overview needs updates: +- Reference the auto-loaded `/docs/ai-context/docs-overview.md` +- If you added new documentation files (especially new CONTEXT.md files), update the overview to include them in the appropriate tier +- If you significantly changed the structure/purpose of existing documentation, update the overview to reflect these changes +- Keep the overview accurate and current so it serves as a reliable guide to the documentation architecture + +### Special Note for New CONTEXT.md Files: +When you create new granular CONTEXT.md files, you MUST add them to the appropriate section in docs-overview.md: +- **Tier 2 
(Component-Level)**: For new top-level components +- **Tier 3 (Feature-Specific)**: For new subsystem documentation within existing components + +## Quality Guidelines + +- **Concise:** Keep updates brief and focused +- **Specific:** Include file names, technologies, key benefits +- **Accurate:** Based on actual changes made, not assumptions +- **Helpful:** Information that would be useful to another developer +- **Current:** Ensure file tree reflects actual project structure +- **Organized:** Follow the 3-tier documentation system principles + +## When Not to Update or Create Documentation + +Skip documentation updates/creation for: +- Bug fixes (unless they change architecture) +- Minor tweaks or cleanup +- Debugging or temporary changes +- Code formatting or comments +- Trivial modifications +- Single-file additions that fit existing documentation scope + +## 3-Tier System Benefits + +This enhanced approach leverages the 3-tier documentation system to: +- **Minimize cascade effects**: Most changes update 1-2 granular files +- **Scale intelligently**: New documentation created only when warranted +- **Co-locate knowledge**: Documentation lives near relevant code +- **Maintain consistency**: Clear guidelines for when and how to extend documentation + +Now analyze the specified changes and update the relevant documentation accordingly. \ No newline at end of file diff --git a/.claude/hooks/README.md b/.claude/hooks/README.md new file mode 100644 index 00000000..6d760bb9 --- /dev/null +++ b/.claude/hooks/README.md @@ -0,0 +1,270 @@ +# Claude Code Hooks + +This directory contains battle-tested hooks that enhance your Claude Code development experience with automated security scanning, intelligent context injection, and pleasant audio feedback. + +## Architecture + +``` +Claude Code Lifecycle + │ + ├── PreToolUse ──────► Security Scanner + │ ├── Context Injector (Gemini) + │ └── Context Injector (Subagents) + │ + ├── Tool Execution + │ + ├── PostToolUse + │ + ├── Notification ────────► Audio Feedback + │ + └── Stop/SubagentStop ───► Completion Sound +``` + +These hooks execute at specific points in Claude Code's lifecycle, providing deterministic control over AI behavior. + +## Available Hooks + +### 1. Gemini Context Injector (`gemini-context-injector.sh`) + +**Purpose**: Automatically includes your project documentation and assistant rules when starting new Gemini consultation sessions, ensuring the AI has complete context about your codebase and project standards. + +**Trigger**: `PreToolUse` for `mcp__gemini__consult_gemini` + +**Features**: +- Detects new Gemini consultation sessions (no session_id) +- Automatically attaches two key files: + - `docs/ai-context/project-structure.md` - Complete project structure and tech stack + - `MCP-ASSISTANT-RULES.md` - Project-specific coding standards and guidelines +- Preserves existing file attachments +- Session-aware (only injects on new sessions) +- Logs all injection events for debugging +- Fails gracefully if either file is missing +- Handles partial availability (will attach whichever files exist) + +**Customization**: +- Copy `docs/MCP-ASSISTANT-RULES.md` template to your project root +- Customize it with your project-specific standards, principles, and constraints +- The hook will automatically include it in Gemini consultations + +### 2. MCP Security Scanner (`mcp-security-scan.sh`) + +**Purpose**: Prevents accidental exposure of secrets, API keys, and sensitive data when using MCP servers like Gemini or Context7. 
+ +**Trigger**: `PreToolUse` for all MCP tools (`mcp__.*`) + +**Features**: +- Pattern-based detection for API keys, passwords, and secrets +- Scans code context, problem descriptions, and attached files +- File content scanning with size limits +- Configurable pattern matching via `config/sensitive-patterns.json` +- Whitelisting for placeholder values +- Command injection protection for Context7 +- Comprehensive logging of security events to `.claude/logs/` + +**Customization**: Edit `config/sensitive-patterns.json` to: +- Add custom API key patterns +- Modify credential detection rules +- Update sensitive file patterns +- Extend the whitelist for your placeholders + +### 3. Subagent Context Injector (`subagent-context-injector.sh`) + +**Purpose**: Automatically includes core project documentation in all sub-agent Task prompts, ensuring consistent context across multi-agent workflows. + +**Trigger**: `PreToolUse` for `Task` tool + +**Features**: +- Intercepts all Task tool calls before execution +- Prepends references to three core documentation files: + - `docs/CLAUDE.md` - Project overview, coding standards, AI instructions + - `docs/ai-context/project-structure.md` - Complete file tree and tech stack + - `docs/ai-context/docs-overview.md` - Documentation architecture +- Passes through non-Task tools unchanged +- Preserves original task prompt by prepending context +- Enables consistent knowledge across all sub-agents +- Eliminates need for manual context inclusion in Task prompts + +**Benefits**: +- Every sub-agent starts with the same foundational knowledge +- No manual context specification needed in each Task prompt +- Token-efficient through @ references instead of content duplication +- Update context in one place, affects all sub-agents +- Clean operation with simple pass-through for non-Task tools + +### 4. Notification System (`notify.sh`) + +**Purpose**: Provides pleasant audio feedback when Claude Code needs your attention or completes tasks. + +**Triggers**: +- `Notification` events (all notifications including input needed) +- `Stop` events (main task completion) + +**Features**: +- Cross-platform audio support (macOS, Linux, Windows) +- Non-blocking audio playback (runs in background) +- Multiple audio playback fallbacks +- Pleasant notification sounds +- Two notification types: + - `input`: When Claude needs user input + - `complete`: When Claude completes tasks + +## Installation + +1. **Copy the hooks to your project**: + ```bash + cp -r hooks your-project/.claude/ + ``` + +2. **Configure hooks in your project**: + ```bash + cp hooks/setup/settings.json.template your-project/.claude/settings.json + ``` + Then edit the WORKSPACE path in the settings file. + +3. 
**Test the hooks**: + ```bash + # Test notification + .claude/hooks/notify.sh input + .claude/hooks/notify.sh complete + + # View logs + tail -f .claude/logs/context-injection.log + tail -f .claude/logs/security-scan.log + ``` + +## Hook Configuration + +Add to your Claude Code `settings.json`: + +```json +{ + "hooks": { + "PreToolUse": [ + { + "matcher": "mcp__gemini__consult_gemini", + "hooks": [ + { + "type": "command", + "command": "${WORKSPACE}/.claude/hooks/gemini-context-injector.sh" + } + ] + }, + { + "matcher": "mcp__.*", + "hooks": [ + { + "type": "command", + "command": "${WORKSPACE}/.claude/hooks/mcp-security-scan.sh" + } + ] + }, + { + "matcher": "Task", + "hooks": [ + { + "type": "command", + "command": "${WORKSPACE}/.claude/hooks/subagent-context-injector.sh" + } + ] + } + ], + "Notification": [ + { + "matcher": ".*", + "hooks": [ + { + "type": "command", + "command": "${WORKSPACE}/.claude/hooks/notify.sh input" + } + ] + } + ], + "Stop": [ + { + "matcher": ".*", + "hooks": [ + { + "type": "command", + "command": "${WORKSPACE}/.claude/hooks/notify.sh complete" + } + ] + } + ] + } +} +``` + +See `hooks/setup/settings.json.template` for the complete configuration including all hooks and MCP servers. + +## Security Model + +1. **Execution Context**: Hooks run with full user permissions +2. **Blocking Behavior**: Exit code 2 blocks tool execution +3. **Data Flow**: Hooks can modify tool inputs via JSON transformation +4. **Isolation**: Each hook runs in its own process +5. **Logging**: All security events logged to `.claude/logs/` + +## Integration with MCP Servers + +The hooks system complements MCP server integrations: + +- **Gemini Consultation**: Context injector ensures both project structure and MCP assistant rules are included +- **Context7 Documentation**: Security scanner protects library ID inputs +- **All MCP Tools**: Universal security scanning before external calls + +## Best Practices + +1. **Hook Design**: + - Fail gracefully - never break the main workflow + - Log important events for debugging + - Use exit codes appropriately (0=success, 2=block) + - Keep execution time minimal + +2. **Security**: + - Regularly update sensitive patterns + - Review security logs periodically + - Test hooks in safe environments first + - Never log sensitive data in hooks + +3. **Configuration**: + - Use `${WORKSPACE}` variable for portability + - Keep hooks executable (`chmod +x`) + - Version control hook configurations + - Document custom modifications + +## Troubleshooting + +### Hooks not executing +- Check file permissions: `chmod +x *.sh` +- Verify paths in settings.json +- Check Claude Code logs for errors + +### Security scanner too restrictive +- Review patterns in `config/sensitive-patterns.json` +- Add legitimate patterns to the whitelist +- Check logs for what triggered the block + +### No sound playing +- Verify sound files exist in `sounds/` directory +- Test audio playback: `.claude/hooks/notify.sh input` +- Check system audio settings +- Ensure you have an audio player installed (afplay, paplay, aplay, pw-play, play, ffplay, or PowerShell on Windows) + +## Hook Setup Command + +For comprehensive setup verification and testing, use: + +``` +/hook-setup +``` + +This command uses multi-agent orchestration to verify installation, check configuration, and run comprehensive tests. See [hook-setup.md](setup/hook-setup.md) for details. + +## Extension Points + +The kit is designed for extensibility: + +1. **Custom Hooks**: Add new scripts following the existing patterns +2. 
**Event Handlers**: Configure hooks for any Claude Code event +3. **Pattern Updates**: Modify security patterns for your needs +4. **Sound Customization**: Replace audio files with your preferences \ No newline at end of file diff --git a/.claude/hooks/config/sensitive-patterns.json b/.claude/hooks/config/sensitive-patterns.json new file mode 100644 index 00000000..95f2ef85 --- /dev/null +++ b/.claude/hooks/config/sensitive-patterns.json @@ -0,0 +1,86 @@ +{ + "patterns": { + "api_keys": [], + "credentials": [ + "password\\s*[:=]\\s*[\"']?[^\\s\"']+[\"']?", + "passwd\\s*[:=]\\s*[\"']?[^\\s\"']+[\"']?", + "secret\\s*[:=]\\s*[\"']?[^\\s\"']+[\"']?", + "private[_-]?key\\s*[:=]\\s*[\"']?[^\\s\"']+[\"']?", + "access[_-]?key\\s*[:=]\\s*[\"']?[^\\s\"']+[\"']?", + "auth[_-]?token\\s*[:=]\\s*[\"']?[^\\s\"']+[\"']?", + "api[_-]?key\\s*[:=]\\s*[\"']?[^\\s\"']+[\"']?", + "client[_-]?secret\\s*[:=]\\s*[\"']?[^\\s\"']+[\"']?" + ], + "sensitive_files": [ + ".env", + ".env.local", + ".env.production", + ".env.development", + ".env.staging", + "credentials.json", + "google-credentials.json", + "service-account.json", + "private.key", + "id_rsa", + "id_ed25519", + "id_dsa", + "id_ecdsa", + ".pem", + ".key", + ".p12", + ".pfx", + "keystore", + ".jks" + ], + "regex_patterns": [ + "sk-[a-zA-Z0-9]{32,}", + "sk-proj-[a-zA-Z0-9]{32,}", + "AIza[0-9A-Za-z\\-_]{35}", + "gsk_[a-zA-Z0-9]{32,}", + "AKIA[0-9A-Z]{16}", + "aws_secret_access_key\\s*=\\s*[a-zA-Z0-9/+=]{40}", + "(postgres|postgresql|mysql|mongodb|redis)://[^:]+:[^@]+@[^/]+", + "-----BEGIN.*(RSA|DSA|EC|OPENSSH|PGP).*PRIVATE KEY-----", + "Bearer\\s+[a-zA-Z0-9\\-_]+\\.[a-zA-Z0-9\\-_]+\\.[a-zA-Z0-9\\-_]+", + "[A-Z_]+(KEY|TOKEN|SECRET|PASSWORD|PASSWD)\\s*=\\s*[\"']?[^\\s\"']+[\"']?", + "(api[_-]?key|access[_-]?token|auth[_-]?token)\\s*[:=]\\s*[\"']?[a-zA-Z0-9_\\-]{20,}[\"']?", + "(password|secret|key)\\s*[:=]\\s*[\"']?[A-Za-z0-9+/]{40,}={0,2}[\"']?", + "ghp_[a-zA-Z0-9]{36}", + "gho_[a-zA-Z0-9]{36}", + "github_pat_[a-zA-Z0-9]{22}_[a-zA-Z0-9]{59}", + "glpat-[a-zA-Z0-9\\-_]{20}", + "xoxb-[0-9]{10,13}-[0-9]{10,13}-[a-zA-Z0-9]{24}", + "sq0atp-[0-9A-Za-z\\-_]{22}", + "sq0csp-[0-9A-Za-z\\-_]{43}", + "SK[a-z0-9]{32}" + ] + }, + "whitelist": { + "allowed_mentions": [ + "GOOGLE_API_KEY=your_google_api_key_here", + "GROQ_API_KEY=your_groq_api_key_here", + "OPENAI_API_KEY=your_openai_api_key_here", + "GEMINI_API_KEY=your_gemini_api_key_here", + "AWS_ACCESS_KEY_ID=your_access_key_here", + "api_key=settings.api_key", + "api_key=process.env.API_KEY", + "api_key=YOUR_API_KEY", + "password=your_password_here", + "password=", + "password=${PASSWORD}", + "token=your_token_here", + "token=", + "secret=your_secret_here", + "Bearer YOUR_TOKEN_HERE", + "Bearer ", + "postgres://user:password@localhost", + "mysql://root:password@localhost", + "mongodb://user:password@localhost", + "redis://user:password@localhost", + "connection_string = \"your_connection_string_here\"", + "API_KEY=", + "SECRET_KEY=", + "DATABASE_URL=" + ] + } +} \ No newline at end of file diff --git a/.claude/hooks/gemini-context-injector.sh b/.claude/hooks/gemini-context-injector.sh new file mode 100755 index 00000000..95781966 --- /dev/null +++ b/.claude/hooks/gemini-context-injector.sh @@ -0,0 +1,129 @@ +#!/bin/bash +# Gemini Context Injector Hook +# Automatically adds project context files to new Gemini consultation sessions: +# - docs/ai-context/project-structure.md +# - MCP-ASSISTANT-RULES.md +# +# This hook enhances Gemini consultations by automatically including your project's +# structure documentation and 
assistant rules, ensuring the AI has complete context. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +PROJECT_STRUCTURE_FILE="$PROJECT_ROOT/docs/ai-context/project-structure.md" +MCP_RULES_FILE="$PROJECT_ROOT/MCP-ASSISTANT-RULES.md" +LOG_FILE="$SCRIPT_DIR/../logs/context-injection.log" + +# Ensure log directory exists +mkdir -p "$(dirname "$LOG_FILE")" + +# Read input from stdin +INPUT_JSON=$(cat) + +# Function to log injection events +log_injection_event() { + local event_type="$1" + local details="$2" + local timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ") + echo "{\"timestamp\": \"$timestamp\", \"event\": \"$event_type\", \"details\": \"$details\"}" >> "$LOG_FILE" +} + +# Main logic +main() { + # Extract tool information from stdin + local tool_name=$(echo "$INPUT_JSON" | jq -r '.tool_name // ""') + + # Only process Gemini consultation requests + if [[ "$tool_name" != "mcp__gemini__consult_gemini" ]]; then + echo '{"continue": true}' + exit 0 + fi + + # Extract tool arguments + local tool_args=$(echo "$INPUT_JSON" | jq -r '.tool_input // "{}"') + + # Check if this is a new session (no session_id provided) + local session_id=$(echo "$tool_args" | jq -r '.session_id // ""' 2>/dev/null || echo "") + + if [[ -z "$session_id" || "$session_id" == "null" ]]; then + log_injection_event "new_session_detected" "preparing_context_injection" + + # Check if required files exist + local missing_files="" + if [[ ! -f "$PROJECT_STRUCTURE_FILE" ]]; then + missing_files="$missing_files project_structure.md" + fi + if [[ ! -f "$MCP_RULES_FILE" ]]; then + missing_files="$missing_files MCP-ASSISTANT-RULES.md" + fi + + # If either file is missing, log warning but continue + if [[ -n "$missing_files" ]]; then + log_injection_event "warning" "missing_files:$missing_files" + fi + + # If both files are missing, exit early + if [[ ! -f "$PROJECT_STRUCTURE_FILE" ]] && [[ ! -f "$MCP_RULES_FILE" ]]; then + echo '{"continue": true}' + exit 0 + fi + + # Extract current attached_files if any + local current_files=$(echo "$tool_args" | jq -c '.attached_files // []' 2>/dev/null || echo "[]") + + # Check if files are already included + local has_project_structure=$(echo "$current_files" | jq -e ".[] | select(. == \"$PROJECT_STRUCTURE_FILE\")" > /dev/null 2>&1 && echo "true" || echo "false") + local has_mcp_rules=$(echo "$current_files" | jq -e ".[] | select(. 
== \"$MCP_RULES_FILE\")" > /dev/null 2>&1 && echo "true" || echo "false") + + # If both files exist and are already included, skip + if [[ -f "$PROJECT_STRUCTURE_FILE" ]] && [[ "$has_project_structure" == "true" ]] && \ + [[ -f "$MCP_RULES_FILE" ]] && [[ "$has_mcp_rules" == "true" ]]; then + log_injection_event "skipped" "all_required_files_already_included" + echo '{"continue": true}' + exit 0 + fi + + # Add missing files to attached_files + local modified_args="$tool_args" + local files_added="" + + if [[ -f "$PROJECT_STRUCTURE_FILE" ]] && [[ "$has_project_structure" == "false" ]]; then + modified_args=$(echo "$modified_args" | jq --arg file "$PROJECT_STRUCTURE_FILE" ' + .attached_files = ((.attached_files // []) + [$file]) + ' 2>/dev/null) + files_added="$files_added project_structure.md" + fi + + if [[ -f "$MCP_RULES_FILE" ]] && [[ "$has_mcp_rules" == "false" ]]; then + modified_args=$(echo "$modified_args" | jq --arg file "$MCP_RULES_FILE" ' + .attached_files = ((.attached_files // []) + [$file]) + ' 2>/dev/null) + files_added="$files_added MCP-ASSISTANT-RULES.md" + fi + + if [[ -n "$modified_args" ]] && [[ "$modified_args" != "$tool_args" ]]; then + log_injection_event "context_injected" "added_files:$files_added" + + # Update the input JSON with modified tool_input + local output_json=$(echo "$INPUT_JSON" | jq --argjson new_args "$modified_args" '.tool_input = $new_args') + + # Return the modified input to stdout + echo "$output_json" + exit 0 + else + log_injection_event "error" "failed_to_modify_arguments" + # Continue without modification on error + echo '{"continue": true}' + exit 0 + fi + else + log_injection_event "existing_session" "session_id:$session_id" + # For existing sessions, continue without modification + echo '{"continue": true}' + exit 0 + fi +} + +# Run main function +main \ No newline at end of file diff --git a/.claude/hooks/mcp-security-scan.sh b/.claude/hooks/mcp-security-scan.sh new file mode 100755 index 00000000..c21b28ba --- /dev/null +++ b/.claude/hooks/mcp-security-scan.sh @@ -0,0 +1,147 @@ +#!/bin/bash +# MCP Security Scanner Hook +# Scans MCP requests for sensitive data before sending to external services +# +# This hook protects against accidental exposure of secrets, API keys, and other +# sensitive information when using MCP servers like Gemini or Context7. 
+
+set -euo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PATTERNS_FILE="$SCRIPT_DIR/config/sensitive-patterns.json"
+LOG_FILE="$SCRIPT_DIR/../logs/security-scan.log"
+
+# Ensure log directory exists
+mkdir -p "$(dirname "$LOG_FILE")"
+
+# Read input from stdin
+INPUT_JSON=$(cat)
+
+# Function to log security events
+log_security_event() {
+    local event_type="$1"
+    local details="$2"
+    local tool_name="${3:-unknown}"
+    local timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
+    echo "{\"timestamp\": \"$timestamp\", \"tool\": \"$tool_name\", \"event\": \"$event_type\", \"details\": \"$details\"}" >> "$LOG_FILE"
+}
+
+# Function to check if content matches sensitive patterns
+check_sensitive_content() {
+    local content="$1"
+    local pattern_type="$2"
+
+    # Get patterns from JSON config (one per line)
+    local patterns=$(jq -r ".patterns.$pattern_type[]" "$PATTERNS_FILE" 2>/dev/null || echo "")
+
+    # Read patterns line by line; plain word splitting would break any
+    # pattern or whitelist entry that contains spaces
+    while IFS= read -r pattern; do
+        [[ -z "$pattern" ]] && continue
+        if echo "$content" | grep -qiE "$pattern"; then
+            # Check whitelist
+            local whitelisted=false
+            local whitelist_patterns=$(jq -r '.whitelist.allowed_mentions[]' "$PATTERNS_FILE" 2>/dev/null || echo "")
+
+            # Whitelist entries such as "Bearer YOUR_TOKEN_HERE" contain
+            # spaces, so they must also be read line by line
+            while IFS= read -r whitelist; do
+                [[ -z "$whitelist" ]] && continue
+                if echo "$content" | grep -qF "$whitelist"; then
+                    whitelisted=true
+                    break
+                fi
+            done <<< "$whitelist_patterns"
+
+            if [[ "$whitelisted" == "false" ]]; then
+                return 0 # Found sensitive data
+            fi
+        fi
+    done <<< "$patterns"
+
+    return 1 # No sensitive data found
+}
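+
+# Usage note (illustrative): check_sensitive_content returns 0 when a
+# non-whitelisted match is found, so callers branch on its exit status:
+#
+#   if check_sensitive_content "$code_context" "credentials"; then
+#       ... block the request ...
+#   fi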
+
+# Function to scan file content
+scan_file_content() {
+    local file_path="$1"
+
+    # Check if file name itself is sensitive
+    local filename=$(basename "$file_path")
+    if check_sensitive_content "$filename" "sensitive_files"; then
+        return 0 # Sensitive file
+    fi
+
+    # Don't scan files that don't exist or are too large
+    if [[ ! -f "$file_path" ]] || [[ $(stat -f%z "$file_path" 2>/dev/null || stat -c%s "$file_path" 2>/dev/null || echo "999999999") -gt 1048576 ]]; then
+        return 1
+    fi
+
+    # Read and scan file content
+    local content=$(cat "$file_path" 2>/dev/null || echo "")
+
+    # Check all pattern types
+    for pattern_type in api_keys credentials regex_patterns; do
+        if check_sensitive_content "$content" "$pattern_type"; then
+            return 0 # Found sensitive data
+        fi
+    done
+
+    return 1
+}
+
+# Main scanning logic
+main() {
+    # Extract tool information from stdin
+    local tool_name=$(echo "$INPUT_JSON" | jq -r '.tool_name // ""')
+    local tool_args=$(echo "$INPUT_JSON" | jq -r '.tool_input // "{}"')
+
+    log_security_event "scan_started" "$tool_name" "$tool_name"
+
+    # Check code_context for sensitive data
+    local code_context=$(echo "$tool_args" | jq -r '.code_context // ""' 2>/dev/null || echo "")
+    if [[ -n "$code_context" ]]; then
+        for pattern_type in api_keys credentials regex_patterns; do
+            if check_sensitive_content "$code_context" "$pattern_type"; then
+                log_security_event "blocked" "sensitive_data_in_code_context" "$tool_name"
+                echo '{"decision": "block", "reason": "Security Alert: Detected sensitive data in code_context. Found patterns matching actual credentials (API keys, passwords, or secrets with values). For discussions about security topics, use placeholders like YOUR_API_KEY or example values instead of real credentials."}'
+                exit 2
+            fi
+        done
+    fi
+
+    # Check problem_description for sensitive data
+    local problem_desc=$(echo "$tool_args" | jq -r '.problem_description // ""' 2>/dev/null || echo "")
+    if [[ -n "$problem_desc" ]]; then
+        for pattern_type in api_keys credentials regex_patterns; do
+            if check_sensitive_content "$problem_desc" "$pattern_type"; then
+                log_security_event "blocked" "sensitive_data_in_problem_description" "$tool_name"
+                echo '{"decision": "block", "reason": "Security Alert: Detected sensitive data in problem description. Found patterns matching actual credentials (API keys, passwords, connection strings, or tokens with values). For security discussions, use placeholders: YOUR_API_KEY, postgres://user:password@localhost, or example-token-here."}'
+                exit 2
+            fi
+        done
+    fi
+
+    # Check attached files
+    local attached_files=$(echo "$tool_args" | jq -r '.attached_files[]?' 2>/dev/null || echo "")
+    # File paths may contain spaces, so read them line by line
+    while IFS= read -r file; do
+        [[ -z "$file" ]] && continue
+        if scan_file_content "$file"; then
+            log_security_event "blocked" "sensitive_file_attached:$file" "$tool_name"
+            echo "{\"decision\": \"block\", \"reason\": \"Security Alert: Detected sensitive content in attached file $file. Found credentials, private keys, or environment files. Remove actual secrets and use placeholders like YOUR_SECRET_HERE or example values for demonstrations.\"}"
+            exit 2
+        fi
+    done <<< "$attached_files"
+
+    # Check specific question for Context7
+    if [[ "$tool_name" == "mcp__context7__get-library-docs" ]]; then
+        local library_id=$(echo "$tool_args" | jq -r '.context7CompatibleLibraryID // ""' 2>/dev/null || echo "")
+        # Basic check to prevent injection attacks
+        if echo "$library_id" | grep -qE '(\$|`|;|&&|\|\||>|<)'; then
+            log_security_event "blocked" "suspicious_library_id" "$tool_name"
+            echo '{"decision": "block", "reason": "Security Alert: Detected suspicious characters in library ID that could indicate command injection. Please use only alphanumeric characters, hyphens, underscores, and forward slashes."}'
+            exit 2
+        fi
+    fi
+
+    log_security_event "scan_completed" "no_sensitive_data_found" "$tool_name"
+
+    # All checks passed, allow the tool to continue
+    # No output needed when allowing - just exit 0
+}
+
+# Run main function
+main
\ No newline at end of file
diff --git a/.claude/hooks/notify.sh b/.claude/hooks/notify.sh
new file mode 100755
index 00000000..a41fea88
--- /dev/null
+++ b/.claude/hooks/notify.sh
@@ -0,0 +1,103 @@
+#!/bin/bash
+# Claude Code notification hook script
+# Plays pleasant sounds when Claude needs input or completes tasks
+
+# Get the directory where this script is located
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+SOUNDS_DIR="$SCRIPT_DIR/sounds"
+
+# Function to play a sound file with cross-platform support
+play_sound_file() {
+    local sound_file="$1"
+
+    # Check if file exists
+    if [[ !
-f "$sound_file" ]]; then + echo "Warning: Sound file not found: $sound_file" >&2 + return 1 + fi + + # Detect OS and use appropriate command-line audio player + local os_type="$(uname -s)" + + case "$os_type" in + Darwin*) # macOS + if command -v afplay &> /dev/null; then + afplay "$sound_file" 2>/dev/null & + return 0 # Exit immediately after starting playback + fi + ;; + + Linux*) # Linux + # Try PulseAudio first (most common on modern desktop Linux) + if command -v paplay &> /dev/null; then + paplay "$sound_file" 2>/dev/null & + return 0 # Exit immediately after starting playback + fi + + # Try ALSA + if command -v aplay &> /dev/null; then + aplay -q "$sound_file" 2>/dev/null & + return 0 # Exit immediately after starting playback + fi + + # Try PipeWire (newer systems) + if command -v pw-play &> /dev/null; then + pw-play "$sound_file" 2>/dev/null & + return 0 # Exit immediately after starting playback + fi + + # Try sox play command + if command -v play &> /dev/null; then + play -q "$sound_file" 2>/dev/null & + return 0 # Exit immediately after starting playback + fi + ;; + + MINGW*|CYGWIN*|MSYS*) # Windows (Git Bash, WSL, etc.) + # Try PowerShell + if command -v powershell.exe &> /dev/null; then + # Use Windows Media Player COM object for better compatibility + # Run in background and exit immediately + powershell.exe -NoProfile -Command " + Start-Job -ScriptBlock { + \$player = New-Object -ComObject WMPlayer.OCX + \$player.URL = '$sound_file' + \$player.controls.play() + Start-Sleep -Milliseconds 1000 + \$player.close() + } + " 2>/dev/null + return 0 # Exit immediately after starting playback + fi + ;; + esac + + # If we have ffplay (cross-platform) + if command -v ffplay &> /dev/null; then + ffplay -nodisp -autoexit -loglevel quiet "$sound_file" 2>/dev/null & + return 0 # Exit immediately after starting playback + fi + + # No audio player found - fail silently + return 1 +} + +# Main script logic +case "$1" in + "input") + play_sound_file "$SOUNDS_DIR/input-needed.wav" + ;; + + "complete") + play_sound_file "$SOUNDS_DIR/complete.wav" + ;; + + *) + echo "Usage: $0 {input|complete}" >&2 + echo " input - Play sound when Claude needs user input" >&2 + echo " complete - Play sound when Claude completes tasks" >&2 + exit 1 + ;; +esac + +exit 0 \ No newline at end of file diff --git a/.claude/hooks/setup/hook-setup.md b/.claude/hooks/setup/hook-setup.md new file mode 100644 index 00000000..d525a783 --- /dev/null +++ b/.claude/hooks/setup/hook-setup.md @@ -0,0 +1,96 @@ +## Description + +This command uses specialized agents to verify, configure, and test your Claude Code hooks installation. It ensures everything is properly set up and working correctly. + +## Process + +### Phase 1: Multi-Agent Setup Verification + +The command spawns specialized agents to handle different aspects: + +1. **Installation Agent** + - Verifies `.claude/hooks/` directory exists + - Checks all hook scripts are present + - Ensures executable permissions (`chmod +x`) + - Validates sound files and configuration files + +2. **Configuration Agent** + - Locates Claude Code settings.json for your OS + - Verifies hook configurations in settings + - Checks WORKSPACE environment variable + - Validates MCP server configurations + +3. **Documentation Agent** + - Ensures project structure documentation exists + - Verifies paths used by context injector + - Checks log directory setup + +### Phase 2: Comprehensive Testing + +After setup verification, the main agent runs comprehensive tests: + +1. 
**Security Scanner Tests** + - API key detection patterns + - Password and secret detection + - Whitelist functionality + - Command injection protection + - File scanning capabilities + +2. **Context Injector Tests** + - New session detection + - File attachment logic + - Path resolution + - Error handling scenarios + +3. **Notification Tests** + - Audio playback on current platform + - Fallback mechanism verification + - Both input and complete sounds + +## Expected Output + +``` +Starting multi-agent hook setup verification... + +[Installation Agent] +✓ Hooks directory found: .claude/hooks/ +✓ All hook scripts present and executable +✓ Configuration files valid +✓ Sound files present + +[Configuration Agent] +✓ Project settings found: .claude/settings.json +✓ Hook configurations verified +✓ WORKSPACE environment variable set correctly + +[Documentation Agent] +✓ Project structure documentation found +✓ Log directories configured + +Running comprehensive tests... + +[Security Scanner] +✓ Detected: sk-1234567890abcdef (API key) +✓ Detected: password=mysecret123 +✓ Allowed: YOUR_API_KEY (whitelisted) +✓ Blocked: $(malicious) (injection attempt) + +[Context Injector] +✓ New session handling correct +✓ File attachment working +✓ Error handling graceful + +[Notifications] +✓ Audio playback successful +✓ Platform: darwin (macOS) + +All hooks configured and tested successfully! +``` + +## Troubleshooting + +The command provides specific guidance for any issues found: +- Missing files or permissions +- Configuration problems +- Test failures with debugging steps +- Platform-specific audio issues \ No newline at end of file diff --git a/.claude/hooks/setup/settings.json.template b/.claude/hooks/setup/settings.json.template new file mode 100644 index 00000000..678606c6 --- /dev/null +++ b/.claude/hooks/setup/settings.json.template @@ -0,0 +1,63 @@ +{ + "hooks": { + "PreToolUse": [ + { + "matcher": "mcp__gemini__consult_gemini", + "hooks": [ + { + "type": "command", + "command": "${WORKSPACE}/.claude/hooks/gemini-context-injector.sh", + "description": "Automatically adds project structure to new Gemini sessions" + } + ] + }, + { + "matcher": "mcp__.*", + "hooks": [ + { + "type": "command", + "command": "${WORKSPACE}/.claude/hooks/mcp-security-scan.sh", + "description": "Scans for sensitive data before sending to external services" + } + ] + }, + { + "matcher": "Task", + "hooks": [ + { + "type": "command", + "command": "${WORKSPACE}/.claude/hooks/subagent-context-injector.sh", + "description": "Automatically adds project context to sub-agent prompts" + } + ] + } + ], + "Notification": [ + { + "matcher": ".*", + "hooks": [ + { + "type": "command", + "command": "${WORKSPACE}/.claude/hooks/notify.sh input", + "description": "Plays sound when Claude needs user input" + } + ] + } + ], + "Stop": [ + { + "matcher": ".*", + "hooks": [ + { + "type": "command", + "command": "${WORKSPACE}/.claude/hooks/notify.sh complete", + "description": "Plays sound when Claude completes tasks" + } + ] + } + ] + }, + "environment": { + "WORKSPACE": "/path/to/your/project" + } +} \ No newline at end of file diff --git a/.claude/hooks/sounds/complete.wav b/.claude/hooks/sounds/complete.wav new file mode 100644 index 00000000..9d3f11f6 Binary files /dev/null and b/.claude/hooks/sounds/complete.wav differ diff --git a/.claude/hooks/sounds/input-needed.wav b/.claude/hooks/sounds/input-needed.wav new file mode 100644 index 00000000..a282acfb Binary files /dev/null and b/.claude/hooks/sounds/input-needed.wav differ diff --git 
a/.claude/hooks/subagent-context-injector.sh b/.claude/hooks/subagent-context-injector.sh new file mode 100755 index 00000000..c0e61f8c --- /dev/null +++ b/.claude/hooks/subagent-context-injector.sh @@ -0,0 +1,65 @@ +#!/bin/bash +# Sub-Agent Context Auto-Loader +# Automatically enhances Task tool prompts with essential project context +# +# This hook ensures every sub-agent spawned via the Task tool automatically +# receives core project documentation, eliminating the need to manually +# include context in each Task prompt. +# +# IMPLEMENTATION OVERVIEW: +# - Registered as a PreToolUse hook in .claude/settings.json +# - Intercepts all Task tool calls before execution +# - Injects references to CLAUDE.md, project-structure.md, and docs-overview.md +# - Preserves original prompt by prepending context, not replacing +# - Passes through non-Task tools unchanged with {"continue": true} + + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" + +# Read input from stdin +INPUT_JSON=$(cat) + +# Extract tool information +tool_name=$(echo "$INPUT_JSON" | jq -r '.tool_name // ""') + +# Only process Task tool calls - pass through all other tools unchanged +if [[ "$tool_name" != "Task" ]]; then + echo '{"continue": true}' + exit 0 +fi + +# Extract current prompt from the Task tool input +current_prompt=$(echo "$INPUT_JSON" | jq -r '.tool_input.prompt // ""') + +# Build context injection header with project documentation references +# These files are automatically available to all sub-agents via @ references +context_injection="## Auto-Loaded Project Context + +This sub-agent has automatic access to the following project documentation: +- @$PROJECT_ROOT/docs/CLAUDE.md (Project overview, coding standards, and AI instructions) +- @$PROJECT_ROOT/docs/ai-context/project-structure.md (Complete file tree and tech stack) +- @$PROJECT_ROOT/docs/ai-context/docs-overview.md (Documentation architecture) + +These files provide essential context about the project structure, +conventions, and development patterns. Reference them as needed for your task. 
+ +--- + +## Your Task + +" + +# Combine context injection with original prompt +# The context is prepended to preserve the original task instructions +modified_prompt="${context_injection}${current_prompt}" + +# Update the input JSON with the modified prompt +# This maintains all other tool input fields unchanged +output_json=$(echo "$INPUT_JSON" | jq --arg new_prompt "$modified_prompt" '.tool_input.prompt = $new_prompt') + +# Output the modified JSON for Claude Code to process +# The Task tool will receive the enhanced prompt with context +echo "$output_json" \ No newline at end of file diff --git a/.gitignore b/.gitignore index beaeaf3e..2d7ec26b 100644 --- a/.gitignore +++ b/.gitignore @@ -44,4 +44,6 @@ yarn-error.log* *.sln *.sw* -.pnpm-store \ No newline at end of file +.pnpm-store +*storybook.log +storybook-static diff --git a/.storybook/main.ts b/.storybook/main.ts new file mode 100644 index 00000000..e096e85e --- /dev/null +++ b/.storybook/main.ts @@ -0,0 +1,25 @@ +import type { StorybookConfig } from '@storybook/react-vite'; + +const config: StorybookConfig = { + "stories": [ + "../src/**/*.mdx", + "../src/**/*.stories.@(js|jsx|mjs|ts|tsx)" + ], + "addons": [ + "@storybook/addon-docs", + "@storybook/addon-onboarding" + ], + "framework": { + "name": "@storybook/react-vite", + "options": {} + }, + "typescript": { + "check": false, + "reactDocgen": "react-docgen-typescript", + "reactDocgenTypescriptOptions": { + "shouldExtractLiteralValuesFromEnum": true, + "propFilter": (prop) => (prop.parent ? !/node_modules/.test(prop.parent.fileName) : true), + }, + }, +}; +export default config; \ No newline at end of file diff --git a/.storybook/preview.ts b/.storybook/preview.ts new file mode 100644 index 00000000..d94deb5c --- /dev/null +++ b/.storybook/preview.ts @@ -0,0 +1,29 @@ +import type { Preview } from '@storybook/react-vite' +import '../src/index.css' + +const preview: Preview = { + parameters: { + controls: { + matchers: { + color: /(background|color)$/i, + date: /Date$/i, + }, + }, + backgrounds: { + default: 'light', + values: [ + { + name: 'light', + value: '#ffffff', + }, + { + name: 'dark', + value: '#333333', + }, + ], + }, + actions: { argTypesRegex: '^on[A-Z].*' }, + }, +}; + +export default preview; \ No newline at end of file diff --git a/MCP-ASSISTANT-RULES.md b/MCP-ASSISTANT-RULES.md new file mode 100644 index 00000000..ab0d0ed4 --- /dev/null +++ b/MCP-ASSISTANT-RULES.md @@ -0,0 +1,85 @@ +# MCP Assistant Rules - [Project Name] + +## Project Context +[Brief description of what your project does and its main purpose. Keep it concise - 2-3 sentences max.] + +### Core Vision & Architecture +- **Product Goal**: [Primary goal of your product] +- **Target Platform**: [Primary platform(s) - web, mobile, desktop, etc.] +- **Architecture**: [High-level architecture overview] +- **Key Technologies**: [Main technologies/frameworks used] + +### Key Technical Principles +[List 4-6 core technical principles that guide your project] +- **Example**: Session-based architecture with clear boundaries +- **Example**: API-first design with versioning from day one +- **Example**: Security by default - validate all inputs at boundaries +- **Example**: Observable systems with structured logging + +**Note:** The complete project structure and technology stack are provided in the attached `project-structure.md` file. 
+ +## Key Project Standards + +### Core Principles +[List your fundamental development principles] +- Follow KISS, YAGNI, and DRY - prefer proven solutions over custom implementations +- Never mock, use placeholders, or omit code - always implement fully +- Be brutally honest about whether an idea is good or bad +- [Add project-specific principles] + +### Code Organization +[Define your code organization standards] +- Keep files under [X] lines - split by extracting utilities, constants, types +- Single responsibility per file with clear purpose +- Prefer composition over inheritance +- [Add language/framework specific organization rules] + +### [Language] Standards +[Replace with your primary language and its standards] +- Type safety requirements +- Naming conventions (classes, functions, constants) +- Documentation requirements (docstring style, required elements) +- Error handling patterns + +### Error Handling & Logging +- Use specific exceptions with helpful messages +- Structured logging only - define your logging approach +- [Specify logging categories or patterns] +- Every request needs correlation ID for tracing + +### API Design +[If applicable - define API standards] +- RESTful with consistent URL patterns +- Version from day one (/v1/, /v2/) +- Consistent response format +- Proper HTTP status codes + +### Security & State +- Never trust external inputs - validate at boundaries +- [Define session/state management approach] +- [Specify data retention policies] +- Keep secrets in environment variables only + +## Project-Specific Guidelines +[Add any project-specific guidelines that AI assistants should know] + +### Domain-Specific Rules +[Add rules specific to your problem domain] + +### Integration Points +[List key integration points or external services] + +### Performance Considerations +[Add any performance-critical aspects] + +## Important Constraints +- You cannot create, modify, or execute code +- You operate in a read-only support capacity +- Your suggestions are for the primary AI (Claude Code) to implement +- Focus on analysis, understanding, and advisory support + +## Quick Reference +[Add frequently needed information] +- Key commands: [List common commands] +- Important paths: [List critical file paths] +- Documentation links: [Add links to detailed docs] \ No newline at end of file diff --git a/docs/CONTEXT-tier2-component.md b/docs/CONTEXT-tier2-component.md new file mode 100644 index 00000000..ae10e011 --- /dev/null +++ b/docs/CONTEXT-tier2-component.md @@ -0,0 +1,96 @@ +# [COMPONENT NAME] - Component Context (Tier 2) + +> **Note**: This is component-specific context. See root **CLAUDE.md** for master project context and coding standards. + +## Purpose +[Brief description of this component's role in the system. What problem does it solve and how does it fit into the overall architecture?] 
+ +## Current Status: [Status Description] ✅/🚧/📋 +[Current implementation state, what's working, what's in progress, and key milestones achieved] + +## Component-Specific Development Guidelines +- **[Technology/Framework]**: [Specific technology requirements for this component] +- **[Architecture Pattern]**: [Component-specific architectural approach] +- **[Code Organization]**: [How code should be structured within this component] +- **[Integration Patterns]**: [How this component integrates with others] +- **[Quality Standards]**: [Component-specific quality requirements] + +## Key Component Structure + +### Core Modules (`[path]/`) +- **[module1]/** - [Purpose and key functionality] + - **[file1].[ext]** - [Specific file purpose and key features] + - **[file2].[ext]** - [Specific file purpose and key features] +- **[module2]/** - [Purpose and key functionality] +- **[module3]/** - [Purpose and key functionality] + +### [Secondary Structure] (`[path]/`) +- **[component].[ext]** - [Component purpose and architecture pattern] +- **[utilities].[ext]** - [Utility functions and helpers] +- **[config].[ext]** - [Configuration and settings management] + +### [Integration Layer] (`[path]/`) +- **[integration1].[ext]** - [External service integration patterns] +- **[integration2].[ext]** - [Inter-component communication] + +## Implementation Highlights + +### [Key Feature 1] +- **[Technical Implementation]**: [How this feature is implemented] +- **[Architecture Decision]**: [Why this approach was chosen] +- **[Performance Considerations]**: [Optimization details] +- **[Integration Points]**: [How it connects to other components] + +### [Key Feature 2] +- **[Implementation Pattern]**: [Technical implementation approach] +- **[Quality Measures]**: [Testing, monitoring, error handling] +- **[Scalability Considerations]**: [How it handles growth/load] + +### [Key Feature 3] +- **[Technical Details]**: [Implementation specifics] +- **[Dependencies]**: [External dependencies and integration points] +- **[Configuration]**: [How it's configured and customized] + +## Critical Implementation Details + +### [Technical Pattern 1] +**[Pattern Description]**: [What problem this pattern solves] + +```[language] +// Example implementation showing the pattern +[code example demonstrating the critical implementation] +``` + +### [Technical Pattern 2] +**[Architecture Decision]**: [Why this approach was chosen] + +```[language] +// Code example showing architecture implementation +[code example demonstrating the architecture] +``` + +### [Integration Pattern] +**[Integration Description]**: [How this component integrates with others] + +```[language] +// Integration implementation example +[code example showing integration patterns] +``` + +## Development Notes + +### [Current Challenges] +- **[Challenge 1]**: [Description and current approach] +- **[Challenge 2]**: [Description and mitigation strategy] + +### [Future Considerations] +- **[Enhancement 1]**: [Planned improvement and rationale] +- **[Enhancement 2]**: [Future architectural evolution] + +### [Performance Metrics] +- **[Key Metric 1]**: [Current performance and targets] +- **[Key Metric 2]**: [Monitoring and optimization approach] + +--- + +*This component documentation provides context for AI-assisted development within [COMPONENT NAME]. 
For system-wide patterns and standards, reference the master CLAUDE.md file.* \ No newline at end of file diff --git a/docs/CONTEXT-tier3-feature.md b/docs/CONTEXT-tier3-feature.md new file mode 100644 index 00000000..9711ecd4 --- /dev/null +++ b/docs/CONTEXT-tier3-feature.md @@ -0,0 +1,162 @@ +# [FEATURE NAME] Documentation (Tier 3) + +*This file documents [feature/module] patterns, architectural decisions, and implementations within [component name].* + +## [Feature] Architecture Overview + +### [Architecture Decision Title] + +**Context**: [Situation that led to this architectural decision] + +**Decision**: [What was decided and implemented] + +**Reasoning**: +- **[Benefit 1]**: [Why this approach provides this benefit] +- **[Benefit 2]**: [Technical or business advantage] +- **[Benefit 3]**: [Performance or maintainability benefit] +- **[Benefit 4]**: [Developer experience or operational benefit] + +**Consequences**: +- [Positive outcome from this decision] +- [Technical improvement achieved] +- [Operational or maintenance benefit] +- [User experience enhancement] + +## [Feature] Implementation Patterns + +### [Implementation Pattern 1] + +**File Organization**: +``` +[feature-directory]/ +├── [file1].[ext] # [Purpose and responsibility] +├── [file2].[ext] # [Purpose and responsibility] +├── [file3].[ext] # [Purpose and responsibility] +└── [file4].[ext] # [Purpose and responsibility] +``` + +**Architecture Benefits**: +- **[Benefit 1]**: [How this organization provides this benefit] +- **[Benefit 2]**: [Technical advantage of this structure] +- **[Benefit 3]**: [Maintainability or scalability benefit] +- **[Benefit 4]**: [Developer experience improvement] + +### [Implementation Pattern 2] + +**Architecture Decision**: [Technical approach taken] + +**Context**: [Background and requirements that led to this approach] + +**Decision**: [Specific implementation choice made] + +**Reasoning**: +- **[Technical Reason]**: [Why this was the best technical choice] +- **[Performance Reason]**: [Performance benefits] +- **[Maintainability Reason]**: [Long-term maintenance benefits] +- **[Integration Reason]**: [How it integrates with other components] + +**Implementation Details**: +```[language] +// [Description of what this code demonstrates] +[detailed code example showing the implementation pattern] +``` + +### [Implementation Pattern 3] + +**[Pattern Name]**: [Description of the pattern] + +```[language] +// [Code example title] +[comprehensive code example showing the pattern in action] +``` + +**Implementation Benefits**: +- **[Benefit 1]**: [Specific advantage this implementation provides] +- **[Benefit 2]**: [Performance or reliability improvement] +- **[Benefit 3]**: [Developer experience enhancement] + +## [Technical Domain] Implementation + +### [Technical Feature 1] + +**[Feature Description]**: [What this feature does and why it's important] + +**Architecture Pattern**: +```[language] +// [Description of the architectural approach] +[code example demonstrating the architecture] +``` + +**Key Implementation Details**: +- **[Detail 1]**: [Important implementation consideration] +- **[Detail 2]**: [Technical constraint or optimization] +- **[Detail 3]**: [Integration or performance consideration] + +### [Technical Feature 2] + +**Implementation Approach**: [How this feature is implemented] + +```[language] +// [Code example description] +[detailed implementation example] +``` + +**Technical Considerations**: +- **[Consideration 1]**: [Important technical factor] +- **[Consideration 
2]**: [Performance or scalability factor] +- **[Consideration 3]**: [Maintenance or testing consideration] + +## [Integration/Communication] Patterns + +### [Integration Pattern 1] + +**Context**: [When and why this integration pattern is used] + +**Implementation**: +```[language] +// [Integration example description] +[code showing integration implementation] +``` + +**Benefits**: +- **[Integration Benefit 1]**: [How this improves system integration] +- **[Integration Benefit 2]**: [Performance or reliability improvement] + +### [Integration Pattern 2] + +**Pattern Description**: [What problem this integration pattern solves] + +```[language] +// [Integration code example] +[implementation showing integration pattern] +``` + +## Performance & Optimization Details + +### [Performance Optimization 1] +**Optimization**: [What was optimized and how] +- **Before**: [Previous performance characteristics] +- **After**: [Improved performance metrics] +- **Implementation**: [How the optimization was achieved] + +### [Performance Optimization 2] +**Technical Improvement**: [Specific performance enhancement] +- **Impact**: [Measurable improvement achieved] +- **Method**: [Technical approach used] +- **Trade-offs**: [Any compromises made for the optimization] + +## Error Handling & Edge Cases + +### [Error Scenario 1] +**Scenario**: [What error condition this handles] +**Handling**: [How the error is detected and managed] +**Recovery**: [How the system recovers from this error] + +### [Error Scenario 2] +**Edge Case**: [Unusual condition that needs handling] +**Solution**: [How the implementation handles this case] +**Validation**: [How this handling is tested or verified] + +--- + +*This feature documentation provides detailed implementation context for AI-assisted development. For broader component context, see the component-level CONTEXT.md file.* \ No newline at end of file diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 00000000..238da18e --- /dev/null +++ b/docs/README.md @@ -0,0 +1,207 @@ +# Documentation System Guide + +This guide explains how the 3-tier documentation architecture powers the Claude Code Development Kit and why it provides superior results compared to traditional documentation approaches. + +## Critical Foundation Files + +Two files form the cornerstone of the entire documentation system: + +1. **docs-overview.md** - The central routing guide that directs AI agents to appropriate documentation based on task complexity. This file maps your entire documentation structure and enables intelligent context loading. + +2. **project-structure.md** - The comprehensive overview of your project's complete file structure and technology stack. This file is required reading for all AI agents and must be attached to Gemini consultations. + +These foundation files ensure AI agents always have the essential context needed to understand your project and navigate to relevant documentation. 
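+
+In practice, commands and hooks load these files through `@` references. For example, the context block that `subagent-context-injector.sh` prepends to every sub-agent prompt points at exactly this foundation set:
+
+```
+@/docs/CLAUDE.md
+@/docs/ai-context/project-structure.md
+@/docs/ai-context/docs-overview.md
+```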
+ +## Why the 3-Tier System + +### Traditional Documentation Problems + +Standard documentation approaches create friction for AI-assisted development: + +- **Context Overload** - AI agents must process entire documentation sets for simple tasks +- **Maintenance Burden** - Every code change cascades to multiple documentation locations +- **Stale Content** - Documentation diverges from implementation reality +- **No AI Optimization** - Human-readable formats lack structure for machine processing + +### The 3-Tier Solution + +The kit solves these problems through hierarchical organization: + +**Tier 1: Foundation (Rarely Changes)** +- Project-wide standards, architecture decisions, technology stack +- Auto-loads for every AI session +- Provides consistent baseline without redundancy +- Uses CLAUDE.md as the master context file + +**Tier 2: Component (Occasionally Changes)** +- Component boundaries, architectural patterns, integration points +- Loads only when working within specific components +- Isolates architectural decisions from implementation details +- Uses CONTEXT.md files at component roots + +**Tier 3: Feature (Frequently Changes)** +- Implementation specifics, technical details, local patterns +- Co-located with code for immediate updates +- Minimizes documentation cascade when code changes +- Uses CONTEXT.md files within feature directories + +## Benefits vs Traditional Systems + +### 1. Intelligent Context Loading + +**Traditional**: AI loads entire documentation corpus regardless of task +**3-Tier**: Commands load only relevant tiers based on complexity + +Example: +- Simple query → Tier 1 only (minimal tokens) +- Component work → Tier 1 + relevant Tier 2 +- Deep implementation → All relevant tiers + +### 2. Maintenance Efficiency + +**Traditional**: Update multiple documents for each change +**3-Tier**: Updates isolated to appropriate tier + +Example: +- API endpoint change → Update only Tier 3 API documentation +- New component → Add Tier 2 documentation, Tier 1 unchanged +- Coding standard → Update only Tier 1, applies everywhere + +### 3. 
AI Performance Optimization + +**Traditional**: AI struggles to find relevant information +**3-Tier**: Structured hierarchy guides AI to precise context + +The system provides: +- Clear routing logic for agent navigation +- Predictable documentation locations +- Efficient token usage through targeted loading + +## Integration with Kit Components + +### Command Integration + +Commands leverage the 3-tier structure for intelligent operation: + +``` +Command Execution → Analyze Task Complexity → Load Appropriate Tiers + ↓ + Simple: Tier 1 only + Component: Tiers 1-2 + Complex: All relevant tiers +``` + +### MCP Server Integration + +External AI services receive proper context through the tier system: + +- **Gemini Consultations** - Auto-attach `project-structure.md` (Tier 1) +- **Context7 Lookups** - Happen within established project context +- **Recommendations** - Align with documented architecture + +### Multi-Agent Routing + +The documentation structure determines agent behavior: + +- Number of agents spawned based on tiers involved +- Each agent receives targeted documentation subset +- Parallel analysis without context overlap + +## Key Files and Their Roles + +### Foundation Files (ai-context/) + +**docs-overview.md** +- Template for implementing 3-tier documentation +- Maps documentation structure for AI navigation +- [View Template](ai-context/docs-overview.md) + +**project-structure.md** +- Complete technology stack and file organization +- Required reading for all AI agents +- Auto-attaches to Gemini consultations +- [View Template](ai-context/project-structure.md) + +**system-integration.md** +- Cross-component communication patterns +- Integration architectures for multi-agent analysis +- [View Template](ai-context/system-integration.md) + +**deployment-infrastructure.md** +- Infrastructure patterns and constraints +- Deployment context for AI recommendations +- [View Template](ai-context/deployment-infrastructure.md) + +**handoff.md** +- Session continuity between AI interactions +- Task state preservation +- [View Template](ai-context/handoff.md) + +### Context Templates + +**CLAUDE.md** (Tier 1) +- Master AI context with coding standards +- Project-wide instructions and patterns +- [View Template](CLAUDE.md) + +**CONTEXT-tier2-component.md** +- Component-level architectural context +- [View Template](CONTEXT-tier2-component.md) + +**CONTEXT-tier3-feature.md** +- Feature-specific implementation details +- [View Template](CONTEXT-tier3-feature.md) + +## Implementation Strategy + +### 1. Start with Templates + +Use provided templates as foundation: +- Copy and customize for your project +- Maintain consistent structure +- Focus on AI-consumable formatting + +### 2. Follow Natural Boundaries + +Let your architecture guide tier placement: +- Stable decisions → Tier 1 +- Component design → Tier 2 +- Implementation details → Tier 3 + +### 3. Co-locate Documentation + +Place CONTEXT.md files with related code: +``` +backend/ +├── CONTEXT.md # Backend architecture (Tier 2) +└── src/ + └── api/ + └── CONTEXT.md # API implementation (Tier 3) +``` + +### 4. Maintain Hierarchy + +Ensure clear relationships: +- Tier 3 references Tier 2 patterns +- Tier 2 follows Tier 1 standards +- No circular dependencies + +### 5. 
Use Documentation Commands + +The kit provides commands to manage documentation: +- **`/create-docs`** - Generate initial documentation structure for projects without existing docs +- **`/update-docs`** - Regenerate and update documentation after code changes to keep everything current + +## Measuring Success + +The 3-tier system succeeds when: + +1. **AI agents find context quickly** - No searching through irrelevant documentation +2. **Updates stay localized** - Changes don't cascade unnecessarily +3. **Documentation stays current** - Co-location ensures updates happen +4. **Commands work efficiently** - Appropriate context loads automatically +5. **MCP servers provide relevant advice** - External AI understands your project + +--- + +*Part of the Claude Code Development Kit - see [main documentation](../README.md) for complete system overview.* \ No newline at end of file diff --git a/docs/ai-context/deployment-infrastructure.md b/docs/ai-context/deployment-infrastructure.md new file mode 100644 index 00000000..c5669bbd --- /dev/null +++ b/docs/ai-context/deployment-infrastructure.md @@ -0,0 +1,21 @@ +# Deployment & Infrastructure Documentation + +This document contains deployment and infrastructure-related documentation for the project. + +## Purpose + +This template serves as a placeholder for documenting: +- Deployment strategies and procedures +- Infrastructure architecture and configuration +- CI/CD pipelines and automation +- Environment management +- Monitoring and observability setup +- Scaling strategies and considerations + +## Implementation Note + +Replace this template with your actual deployment and infrastructure documentation as your project develops. Focus on patterns and decisions that AI agents need to understand when working with infrastructure-related code or making architectural recommendations. + +--- + +*Customize this template based on your specific deployment and infrastructure requirements.* \ No newline at end of file diff --git a/docs/ai-context/docs-overview.md b/docs/ai-context/docs-overview.md new file mode 100644 index 00000000..1f2d4c5f --- /dev/null +++ b/docs/ai-context/docs-overview.md @@ -0,0 +1,89 @@ +# Documentation Architecture + +This project uses a **3-tier documentation system** that organizes knowledge by stability and scope, enabling efficient AI context loading and scalable development. + +## How the 3-Tier System Works + +**Tier 1 (Foundation)**: Stable, system-wide documentation that rarely changes - architectural principles, technology decisions, cross-component patterns, and core development protocols. + +**Tier 2 (Component)**: Architectural charters for major components - high-level design principles, integration patterns, and component-wide conventions without feature-specific details. + +**Tier 3 (Feature-Specific)**: Granular documentation co-located with code - specific implementation patterns, technical details, and local architectural decisions that evolve with features. + +This hierarchy allows AI agents to load targeted context efficiently while maintaining a stable foundation of core knowledge. 
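+
+As a minimal sketch, assuming a project with one backend and one frontend component, the three tiers map onto the file tree like this:
+
+```
+project-root/
+├── CLAUDE.md                    # Tier 1: master context, loaded every session
+├── docs/
+│   └── ai-context/              # Tier 1: project structure, integration docs
+├── backend/
+│   ├── CONTEXT.md               # Tier 2: backend architectural charter
+│   └── src/
+│       └── api/
+│           └── CONTEXT.md       # Tier 3: API implementation patterns
+└── frontend/
+    └── CONTEXT.md               # Tier 2: frontend architectural charter
+```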
+ +## Documentation Principles +- **Co-location**: Documentation lives near relevant code +- **Smart Extension**: New documentation files created automatically when warranted +- **AI-First**: Optimized for efficient AI context loading and machine-readable patterns + +## Tier 1: Foundational Documentation (System-Wide) + +- **[Master Context](/CLAUDE.md)** - *Essential for every session.* Coding standards, security requirements, MCP server integration patterns, and development protocols +- **[Project Structure](/docs/ai-context/project-structure.md)** - *REQUIRED reading.* Complete technology stack, file tree, and system architecture. Must be attached to Gemini consultations +- **[System Integration](/docs/ai-context/system-integration.md)** - *For cross-component work.* Communication patterns, data flow, testing strategies, and performance optimization +- **[Deployment Infrastructure](/docs/ai-context/deployment-infrastructure.md)** - *Infrastructure patterns.* Containerization, monitoring, CI/CD workflows, and scaling strategies +- **[Task Management](/docs/ai-context/handoff.md)** - *Session continuity.* Current tasks, documentation system progress, and next session goals + +## Tier 2: Component-Level Documentation + +### Backend Components +- **[Backend Context](/backend/CONTEXT.md)** - *Server implementation.* API patterns, database integration, service architecture, and performance considerations +- **[Worker Services](/workers/CONTEXT.md)** - *Background processing.* Job queue patterns, scheduling, and async task management +- **[Shared Libraries](/shared/CONTEXT.md)** - *Reusable code.* Common utilities, shared types, and cross-component functionality + +### Frontend Components +- **[Web Application](/frontend/CONTEXT.md)** - *Client implementation.* UI patterns, state management, routing, and user interaction patterns +- **[Mobile Application](/mobile/CONTEXT.md)** - *Mobile implementation.* Platform-specific patterns, native integrations, and mobile optimizations +- **[Admin Dashboard](/admin/CONTEXT.md)** - *Administrative interface.* Permission patterns, admin workflows, and management tools + +### Infrastructure Components +- **[Infrastructure Code](/infrastructure/CONTEXT.md)** - *IaC patterns.* Terraform/CloudFormation templates, resource definitions, and deployment automation +- **[Monitoring Setup](/monitoring/CONTEXT.md)** - *Observability patterns.* Metrics collection, alerting rules, and dashboard configurations + +## Tier 3: Feature-Specific Documentation + +Granular CONTEXT.md files co-located with code for minimal cascade effects: + +### Backend Feature Documentation +- **[Core Services](/backend/src/core/services/CONTEXT.md)** - *Business logic patterns.* Service architecture, data processing, integration patterns, and error handling +- **[API Layer](/backend/src/api/CONTEXT.md)** - *API patterns.* Endpoint design, validation, middleware, and request/response handling +- **[Data Layer](/backend/src/data/CONTEXT.md)** - *Data patterns.* Database models, queries, migrations, and data access patterns +- **[Authentication](/backend/src/auth/CONTEXT.md)** - *Auth patterns.* Authentication flows, authorization rules, session management, and security +- **[Integrations](/backend/src/integrations/CONTEXT.md)** - *External services.* Third-party API clients, webhook handlers, and service adapters + +### Frontend Feature Documentation +- **[UI Components](/frontend/src/components/CONTEXT.md)** - *Component patterns.* Reusable components, styling patterns, accessibility, and 
composition strategies +- **[State Management](/frontend/src/store/CONTEXT.md)** - *State patterns.* Global state, local state, data flow, and persistence strategies +- **[API Client](/frontend/src/api/CONTEXT.md)** - *Client patterns.* HTTP clients, error handling, caching, and data synchronization +- **[Routing](/frontend/src/routes/CONTEXT.md)** - *Navigation patterns.* Route definitions, guards, lazy loading, and deep linking +- **[Utilities](/frontend/src/utils/CONTEXT.md)** - *Helper functions.* Formatters, validators, transformers, and common utilities + +### Shared Feature Documentation +- **[Common Types](/shared/src/types/CONTEXT.md)** - *Type definitions.* Shared interfaces, enums, and type utilities +- **[Validation Rules](/shared/src/validation/CONTEXT.md)** - *Validation patterns.* Schema definitions, custom validators, and error messages +- **[Constants](/shared/src/constants/CONTEXT.md)** - *Shared constants.* Configuration values, enums, and magic numbers +- **[Utilities](/shared/src/utils/CONTEXT.md)** - *Shared utilities.* Cross-platform helpers, formatters, and common functions + + + +## Adding New Documentation + +### New Component +1. Create `/new-component/CONTEXT.md` (Tier 2) +2. Add entry to this file under appropriate section +3. Create feature-specific Tier 3 docs as features develop + +### New Feature +1. Create `/component/src/feature/CONTEXT.md` (Tier 3) +2. Reference parent component patterns +3. Add entry to this file under component's features + +### Deprecating Documentation +1. Remove obsolete CONTEXT.md files +2. Update this mapping document +3. Check for broken references in other docs + +--- + +*This documentation architecture template should be customized to match your project's actual structure and components. Add or remove sections based on your architecture.* \ No newline at end of file diff --git a/docs/ai-context/handoff.md b/docs/ai-context/handoff.md new file mode 100644 index 00000000..e09570b2 --- /dev/null +++ b/docs/ai-context/handoff.md @@ -0,0 +1,174 @@ +# Task Management & Handoff Template + +This file manages task continuity, session transitions, and knowledge transfer for AI-assisted development sessions. 
+ +## Purpose + +This template helps maintain: +- **Session continuity** between AI development sessions +- **Task status tracking** for complex, multi-session work +- **Context preservation** when switching between team members +- **Knowledge transfer** for project handoffs +- **Progress documentation** for ongoing development efforts + +## Current Session Status + +### Active Tasks +Document currently in-progress work: + +```markdown +## In Progress +- [ ] Task 1: [Brief description] + - Status: [Started/Blocked/Awaiting review] + - Context: [Relevant files, decisions made] + - Next steps: [What needs to be done next] + - Dependencies: [What this task depends on] + +- [ ] Task 2: [Brief description] + - Status: [Current status] + - Files modified: [List of files changed] + - Challenges: [Any issues encountered] + - Notes: [Important context for continuation] +``` + +### Pending Tasks +Document queued work: + +```markdown +## Pending +- [ ] Task A: [Description] + - Priority: [High/Medium/Low] + - Dependencies: [What must be completed first] + - Estimated effort: [Time estimate] + - Context: [Background information] + +- [ ] Task B: [Description] + - Priority: [Priority level] + - Requirements: [Specific requirements or constraints] + - Resources needed: [Tools, access, information needed] +``` + +### Completed Tasks +Track completed work for context: + +```markdown +## Completed This Session +- [x] Task X: [Description] + - Completed: [Date] + - Outcome: [What was accomplished] + - Files changed: [Modified files] + - Notes: [Important decisions or lessons learned] + +- [x] Task Y: [Description] + - Completed: [Date] + - Impact: [How this affects other tasks] + - Follow-up needed: [Any follow-up actions required] +``` + +## Architecture & Design Decisions + +### Recent Decisions +Document architectural decisions made during development: + +```markdown +## Design Decisions Made +- **Decision**: [What was decided] + - Date: [When decision was made] + - Rationale: [Why this approach was chosen] + - Alternatives considered: [Other options evaluated] + - Impact: [How this affects the system] + - Validation: [How to verify this was the right choice] + +- **Decision**: [Another decision] + - Context: [Situation that led to this decision] + - Trade-offs: [What was gained/lost with this choice] + - Dependencies: [What this decision depends on] +``` + +### Technical Debt & Issues +Track technical debt and known issues: + +```markdown +## Technical Debt Identified +- **Issue**: [Description of technical debt] + - Location: [Where in codebase] + - Impact: [How it affects development/performance] + - Proposed solution: [How to address it] + - Priority: [When should this be addressed] + +- **Issue**: [Another issue] + - Root cause: [Why this debt exists] + - Workaround: [Current mitigation strategy] + - Long-term fix: [Proper solution approach] +``` + +## Next Session Goals + +### Immediate Priorities +Define what should be tackled next: + +```markdown +## Next Session Priorities +1. **Primary Goal**: [Main objective for next session] + - Success criteria: [How to know this is complete] + - Prerequisites: [What must be ready beforehand] + - Estimated effort: [Time estimate] + +2. **Secondary Goal**: [Secondary objective] + - Dependencies: [What this depends on] + - Resources needed: [Tools, information, access required] + +3. 
**If Time Permits**: [Optional tasks] + - Context: [Background on why these are valuable] + - Preparation: [What needs to be done to start these] +``` + +### Knowledge Gaps +Document areas needing research or clarification: + +```markdown +## Knowledge Gaps to Address +- **Question**: [What needs to be clarified] + - Impact: [How this affects current work] + - Research needed: [What investigation is required] + - Decision maker: [Who can answer this] + +- **Unknown**: [Technical uncertainty] + - Options: [Possible approaches to explore] + - Experiments: [What should be tested] + - Timeline: [When this needs to be resolved] +``` + +## Context for Continuation + +### Key Files & Components +Document important files for session continuity: + +```markdown +## Files Currently Being Modified +- `[file-path]`: [Purpose and current changes] +- `[file-path]`: [What's being implemented here] +- `[file-path]`: [Status and next steps] + +## Important Context Files +- `[context-file]`: [Why this is relevant] +- `[documentation]`: [What information this contains] +- `[reference]`: [How this relates to current work] +``` + +### Development Environment +Document environment and setup considerations: + +```markdown +## Environment Status +- **Development setup**: [Current state of dev environment] +- **Database**: [Schema changes, migrations, data state] +- **External services**: [API keys, service configurations] +- **Testing**: [Test suite status, coverage, failing tests] +- **Build/Deploy**: [Build status, deployment considerations] +``` + + +--- + +*This template provides a comprehensive framework for managing task continuity and knowledge transfer. Customize it based on your team's workflow, project complexity, and communication needs.* \ No newline at end of file diff --git a/docs/ai-context/project-structure.md b/docs/ai-context/project-structure.md new file mode 100644 index 00000000..803f0a14 --- /dev/null +++ b/docs/ai-context/project-structure.md @@ -0,0 +1,160 @@ +# Project Structure Template + +This document provides a template for documenting the complete technology stack and file tree structure for your project. **AI agents MUST read this file to understand the project organization before making any changes.** + +## Technology Stack Template + +### Backend Technologies +Document your backend technology choices: +- **[Language] [Version]** with **[Package Manager]** - Dependency management and packaging +- **[Web Framework] [Version]** - Web framework with specific features (async, type hints, etc.) 
+- **[Server] [Version]** - Application server configuration +- **[Configuration] [Version]** - Configuration management approach + +Example: +``` +- Python 3.11+ with Poetry - Dependency management and packaging +- FastAPI 0.115.0+ - Web framework with type hints and async support +- Uvicorn 0.32.0+ - ASGI server with standard extras +- Pydantic Settings 2.5.2+ - Configuration management with type validation +``` + +### Integration Services & APIs +Document external services and integrations: +- **[Service Name] [API/SDK Version]** - Purpose and usage pattern +- **[AI Service] [Version]** - AI/ML service integration details +- **[Database] [Version]** - Data storage and management +- **[Monitoring] [Version]** - Observability and logging + +### Real-time Communication +Document real-time features: +- **[WebSocket Library]** - Real-time communication patterns +- **[HTTP Client]** - Async HTTP communication +- **[Message Queue]** - Event processing (if applicable) + +### Development & Quality Tools +Document development toolchain: +- **[Formatter] [Version]** - Code formatting +- **[Linter] [Version]** - Code quality and linting +- **[Type Checker] [Version]** - Static type checking +- **[Testing Framework] [Version]** - Testing approach +- **[Task Runner]** - Build automation and task orchestration + +### Frontend Technologies (if applicable) +Document frontend technology stack: +- **[Language] [Version]** - Frontend development language +- **[Framework] [Version]** - UI framework +- **[Build Tool] [Version]** - Development and build tooling +- **[Deployment] [Version]** - Deployment and hosting approach + +### Future Technologies +Document planned technology additions: +- **[Planned Technology]** - Future integration plans +- **[Platform]** - Target platform expansion +- **[Service]** - Planned service integrations + +## Complete Project Structure Template + +``` +[PROJECT-NAME]/ +├── README.md # Project overview and setup +├── CLAUDE.md # Master AI context file +├── [BUILD-FILE] # Build configuration (Makefile, package.json, etc.) 
+├── .gitignore # Git ignore patterns +├── .[IDE-CONFIG]/ # IDE workspace configuration +│ ├── settings.[ext] # IDE settings +│ ├── extensions.[ext] # Recommended extensions +│ └── launch.[ext] # Debug configurations +├── [BACKEND-DIR]/ # Backend application +│ ├── CONTEXT.md # Backend-specific AI context +│ ├── src/ # Source code +│ │ ├── config/ # Configuration management +│ │ │ └── settings.[ext] # Application settings +│ │ ├── core/ # Core business logic +│ │ │ ├── CONTEXT.md # Core logic patterns +│ │ │ ├── services/ # Business services +│ │ │ │ ├── [service1].[ext] # Service implementations +│ │ │ │ └── [service2].[ext] +│ │ │ ├── models/ # Data models +│ │ │ │ ├── [model1].[ext] # Model definitions +│ │ │ │ └── [model2].[ext] +│ │ │ └── utils/ # Utility functions +│ │ │ ├── logging.[ext] # Structured logging +│ │ │ ├── validation.[ext] # Input validation +│ │ │ └── helpers.[ext] # Helper functions +│ │ ├── api/ # API layer +│ │ │ ├── CONTEXT.md # API patterns and conventions +│ │ │ ├── routes/ # API route definitions +│ │ │ │ ├── [resource1].[ext] # Resource-specific routes +│ │ │ │ └── [resource2].[ext] +│ │ │ ├── middleware/ # API middleware +│ │ │ │ ├── auth.[ext] # Authentication middleware +│ │ │ │ ├── logging.[ext] # Request logging +│ │ │ │ └── validation.[ext] # Request validation +│ │ │ └── schemas/ # Request/response schemas +│ │ │ ├── [schema1].[ext] # Data schemas +│ │ │ └── [schema2].[ext] +│ │ └── integrations/ # External service integrations +│ │ ├── CONTEXT.md # Integration patterns +│ │ ├── [service1]/ # Service-specific integration +│ │ │ ├── client.[ext] # API client +│ │ │ ├── models.[ext] # Integration models +│ │ │ └── handlers.[ext] # Response handlers +│ │ └── [service2]/ +│ ├── tests/ # Test suite +│ │ ├── unit/ # Unit tests +│ │ ├── integration/ # Integration tests +│ │ └── fixtures/ # Test fixtures and data +│ ├── [PACKAGE-FILE] # Package configuration +│ └── [ENV-FILE] # Environment configuration +├── [FRONTEND-DIR]/ # Frontend application (if applicable) +│ ├── CONTEXT.md # Frontend-specific AI context +│ ├── src/ # Source code +│ │ ├── components/ # UI components +│ │ │ ├── CONTEXT.md # Component patterns +│ │ │ ├── common/ # Shared components +│ │ │ └── [feature]/ # Feature-specific components +│ │ ├── pages/ # Page components/routes +│ │ │ ├── [page1].[ext] # Page implementations +│ │ │ └── [page2].[ext] +│ │ ├── stores/ # State management +│ │ │ ├── CONTEXT.md # State management patterns +│ │ │ ├── [store1].[ext] # Store implementations +│ │ │ └── [store2].[ext] +│ │ ├── api/ # API client layer +│ │ │ ├── CONTEXT.md # Client patterns +│ │ │ ├── client.[ext] # HTTP client setup +│ │ │ └── endpoints/ # API endpoint definitions +│ │ ├── utils/ # Utility functions +│ │ │ ├── logging.[ext] # Client-side logging +│ │ │ ├── validation.[ext] # Form validation +│ │ │ └── helpers.[ext] # Helper functions +│ │ └── assets/ # Static assets +│ ├── tests/ # Frontend tests +│ ├── [BUILD-CONFIG] # Build configuration +│ └── [PACKAGE-FILE] # Package configuration +├── docs/ # Documentation +│ ├── ai-context/ # AI-specific documentation +│ │ ├── project-structure.md # This file +│ │ ├── docs-overview.md # Documentation architecture +│ │ ├── system-integration.md # Integration patterns +│ │ ├── deployment-infrastructure.md # Infrastructure docs +│ │ └── handoff.md # Task management +│ ├── api/ # API documentation +│ ├── deployment/ # Deployment guides +│ └── development/ # Development guides +├── scripts/ # Automation scripts +│ ├── setup.[ext] # Environment setup +│ 
├── deploy.[ext] # Deployment scripts +│ └── maintenance/ # Maintenance scripts +├── [INFRASTRUCTURE-DIR]/ # Infrastructure as code (if applicable) +│ ├── [PROVIDER]/ # Cloud provider configurations +│ ├── docker/ # Container configurations +│ └── monitoring/ # Monitoring and alerting +└── [CONFIG-FILES] # Root-level configuration files +``` + + +--- + +*This template provides a comprehensive foundation for documenting project structure. Adapt it based on your specific technology stack, architecture decisions, and organizational requirements.* \ No newline at end of file diff --git a/docs/ai-context/system-integration.md b/docs/ai-context/system-integration.md new file mode 100644 index 00000000..86d5f881 --- /dev/null +++ b/docs/ai-context/system-integration.md @@ -0,0 +1,21 @@ +# System Integration Documentation + +This document contains cross-component integration patterns and system-wide architectural decisions. + +## Purpose + +This template serves as a placeholder for documenting: +- Cross-component communication patterns +- Data flow architectures between services +- Integration strategies with external systems +- Performance optimization patterns +- Testing strategies for integrated systems +- Error handling across service boundaries + +## Implementation Note + +Replace this template with your actual system integration documentation as your project develops. Focus on patterns that AI agents need to understand when working across component boundaries or implementing features that span multiple services. + +--- + +*Customize this template based on your specific integration patterns and architectural requirements.* \ No newline at end of file diff --git a/docs/open-issues/example-api-performance-issue.md b/docs/open-issues/example-api-performance-issue.md new file mode 100644 index 00000000..841f160e --- /dev/null +++ b/docs/open-issues/example-api-performance-issue.md @@ -0,0 +1,79 @@ +# API Performance Issue - Example Template + +## Issue Description + +Describe the specific performance issue or bug that needs to be addressed. Include symptoms, affected features, and user impact. + +Example: "API endpoint `/api/data/process` has intermittent high latency (>5 seconds) under normal load conditions, causing user timeout errors." + +## Root Cause + +Detailed analysis of what's causing the issue. Include: +- Technical root cause +- Contributing factors +- System conditions that trigger the issue + +Example: "Database query optimization needed for complex joins. Current query scans entire table without proper indexing." + +## Impact Assessment + +- **Severity**: Critical/High/Medium/Low +- **Affected Users**: Percentage or number of users impacted +- **Business Impact**: Revenue, user experience, or operational impact +- **Workarounds**: Any temporary solutions currently in place + +## Proposed Solution + +### Technical Approach +Detailed technical solution including: +- Code changes required +- Architecture modifications +- Database schema updates +- Performance improvements expected + +### Implementation Plan +1. **Phase 1**: Initial fixes (timeline) +2. **Phase 2**: Optimization improvements (timeline) +3. 
**Phase 3**: Monitoring and validation (timeline) + +## Testing Strategy + +- **Unit Tests**: Specific test cases to validate the fix +- **Integration Tests**: End-to-end testing scenarios +- **Performance Tests**: Load testing and benchmarking +- **Regression Tests**: Ensure no existing functionality breaks + +## Related Files + +List all files that need to be modified: +- `src/api/routes/data.py` - Main endpoint logic +- `src/database/models.py` - Database model updates +- `src/utils/query_optimizer.py` - Query optimization utilities +- `tests/test_api_performance.py` - Performance test suite + +## References + +- [External documentation or APIs] +- [Related GitHub issues or PRs] +- [Performance benchmarking results] +- [Stack Overflow discussions or solutions] + +## Status + +- [ ] **Open** - Issue identified and documented +- [ ] **In Progress** - Solution being implemented +- [ ] **Testing** - Fix implemented, undergoing testing +- [ ] **Fixed** - Issue resolved and deployed +- [ ] **Closed** - Issue confirmed resolved in production + +## Implementation Notes + +Track progress and implementation details: +- Date started: [DATE] +- Key decisions made: [DECISIONS] +- Challenges encountered: [CHALLENGES] +- Performance improvements achieved: [METRICS] + +--- + +*This template provides a structured approach to documenting and tracking technical issues. Customize sections based on your project's specific needs and workflow.* \ No newline at end of file diff --git a/docs/specs/example-api-integration-spec.md b/docs/specs/example-api-integration-spec.md new file mode 100644 index 00000000..1285daa7 --- /dev/null +++ b/docs/specs/example-api-integration-spec.md @@ -0,0 +1,397 @@ +# API Integration Specification: Example External Service + +## Overview + +This document outlines the integration with an external service API. This serves as a template for documenting API integration specifications. + +### Integration Objectives +- Connect to external service for data processing +- Implement error handling and retry logic +- Ensure secure API communication +- Maintain performance and reliability + +### External Service Details +- **Service**: Example Data Processing API +- **Version**: v2.1 +- **Authentication**: API Key + OAuth2 +- **Rate Limits**: 1000 requests/minute +- **Documentation**: https://api.example.com/docs + +## Architecture + +### Integration Flow +``` +Client Request → Input Validation → External API Call → Response Processing → Client Response +``` + +### Error Handling Flow +``` +API Error → Retry Logic → Fallback Strategy → Error Logging → Client Error Response +``` + +## Technical Specifications + +### 1. 
API Client Implementation
+
+#### Configuration
+```python
+# Configuration settings
+API_BASE_URL = "https://api.example.com/v2"
+API_KEY = "your-api-key"
+OAUTH_CLIENT_ID = "your-client-id"
+OAUTH_CLIENT_SECRET = "your-client-secret"
+REQUEST_TIMEOUT = 30  # seconds
+MAX_RETRIES = 3
+RETRY_DELAY = 1  # seconds
+```
+
+#### Client Class
+```python
+import aiohttp
+import asyncio
+from typing import Dict, Any, Optional
+
+class ExternalAPIClient:
+    def __init__(self, api_key: str, base_url: str):
+        self.api_key = api_key
+        self.base_url = base_url
+        self.session: Optional[aiohttp.ClientSession] = None
+        self.access_token: Optional[str] = None
+
+    async def __aenter__(self):
+        self.session = aiohttp.ClientSession(
+            timeout=aiohttp.ClientTimeout(total=REQUEST_TIMEOUT)
+        )
+        await self.authenticate()
+        return self
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        if self.session:
+            await self.session.close()
+
+    async def authenticate(self) -> None:
+        """Authenticate with OAuth2 to get an access token"""
+        auth_url = f"{self.base_url}/oauth/token"
+        auth_data = {
+            "grant_type": "client_credentials",
+            "client_id": OAUTH_CLIENT_ID,
+            "client_secret": OAUTH_CLIENT_SECRET
+        }
+
+        async with self.session.post(auth_url, data=auth_data) as response:
+            if response.status == 200:
+                auth_response = await response.json()
+                self.access_token = auth_response["access_token"]
+            else:
+                raise APIAuthenticationError("Failed to authenticate with external API")
+```
+
+### 2. API Operations
+
+#### Data Processing Operation
+```python
+async def process_data(
+    self,
+    data: Dict[str, Any],
+    options: Optional[Dict[str, Any]] = None
+) -> Dict[str, Any]:
+    """Process data using the external API"""
+    endpoint = f"{self.base_url}/process"
+    headers = {
+        "Authorization": f"Bearer {self.access_token}",
+        "X-API-Key": self.api_key,
+        "Content-Type": "application/json"
+    }
+
+    payload = {
+        "data": data,
+        "options": options or {}
+    }
+
+    for attempt in range(MAX_RETRIES):
+        try:
+            async with self.session.post(
+                endpoint,
+                json=payload,
+                headers=headers
+            ) as response:
+
+                if response.status == 200:
+                    return await response.json()
+                elif response.status == 429:
+                    # Rate limit exceeded - back off exponentially
+                    await asyncio.sleep(RETRY_DELAY * (2 ** attempt))
+                    continue
+                elif response.status == 401:
+                    # Token expired - re-authenticate and refresh the header
+                    # before retrying (otherwise the retry reuses the stale token)
+                    await self.authenticate()
+                    headers["Authorization"] = f"Bearer {self.access_token}"
+                    continue
+                else:
+                    response.raise_for_status()
+
+        except aiohttp.ClientError as e:
+            if attempt == MAX_RETRIES - 1:
+                raise ExternalAPIError(f"API request failed after {MAX_RETRIES} attempts: {str(e)}")
+            await asyncio.sleep(RETRY_DELAY * (2 ** attempt))
+
+    raise ExternalAPIError("Maximum retry attempts exceeded")
+```
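+
+Because the documented limit is 1000 requests/minute, the client can also throttle itself before sending rather than relying only on 429 retries. Below is a minimal sketch of the client-side rate limiting mentioned under Performance Considerations; the `RateLimiter` helper is illustrative, not part of the client above:
+
+```python
+import asyncio
+import time
+
+class RateLimiter:
+    """Client-side token bucket: at most `rate` requests per `period` seconds."""
+
+    def __init__(self, rate: int = 1000, period: float = 60.0):
+        self.rate = rate
+        self.period = period
+        self.tokens = float(rate)
+        self.updated_at = time.monotonic()
+        self._lock = asyncio.Lock()
+
+    async def acquire(self) -> None:
+        async with self._lock:
+            now = time.monotonic()
+            # Refill tokens in proportion to the time elapsed since the last call
+            self.tokens = min(float(self.rate),
+                              self.tokens + (now - self.updated_at) * self.rate / self.period)
+            self.updated_at = now
+            if self.tokens < 1.0:
+                # Wait just long enough for one token to accrue, then spend it
+                await asyncio.sleep((1.0 - self.tokens) * self.period / self.rate)
+                self.updated_at = time.monotonic()
+                self.tokens = 0.0
+            else:
+                self.tokens -= 1.0
+```
+
+Calling `await limiter.acquire()` immediately before each `session.post(...)` keeps request volume under the limit proactively.
+
+### 3.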
Error Handling + +#### Custom Exceptions +```python +class ExternalAPIError(Exception): + """Base exception for external API errors""" + pass + +class APIAuthenticationError(ExternalAPIError): + """Authentication with external API failed""" + pass + +class APIRateLimitError(ExternalAPIError): + """Rate limit exceeded""" + pass + +class APITimeoutError(ExternalAPIError): + """Request timeout""" + pass +``` + +#### Error Response Mapping +```python +def map_api_error(status_code: int, response_data: Dict[str, Any]) -> ExternalAPIError: + """Map API error responses to custom exceptions""" + error_mapping = { + 400: ("Bad Request", ExternalAPIError), + 401: ("Unauthorized", APIAuthenticationError), + 429: ("Rate Limit Exceeded", APIRateLimitError), + 500: ("Internal Server Error", ExternalAPIError), + 503: ("Service Unavailable", ExternalAPIError) + } + + error_message, exception_class = error_mapping.get( + status_code, + (f"Unknown error (status: {status_code})", ExternalAPIError) + ) + + # Include API error details if available + if "error" in response_data: + error_message += f": {response_data['error']}" + + return exception_class(error_message) +``` + +## Implementation Plan + +### Phase 1: Basic Integration (Week 1) +- [ ] API client implementation +- [ ] Authentication flow +- [ ] Basic data processing endpoint +- [ ] Error handling structure +- [ ] Configuration management + +### Phase 2: Advanced Features (Week 2) +- [ ] Retry logic with exponential backoff +- [ ] Rate limiting handling +- [ ] Connection pooling +- [ ] Response caching +- [ ] Monitoring and logging + +### Phase 3: Testing & Optimization (Week 3) +- [ ] Unit tests for API client +- [ ] Integration tests with mocked API +- [ ] Performance testing +- [ ] Error scenario testing +- [ ] Documentation updates + +### Phase 4: Production Deployment (Week 4) +- [ ] Production configuration +- [ ] Monitoring setup +- [ ] Performance optimization +- [ ] Security audit +- [ ] Deployment and rollout + +## API Endpoints + +### Process Data +```http +POST /api/external/process +Content-Type: application/json +Authorization: Bearer {access_token} + +{ + "data": { + "input": "data to process", + "format": "json" + }, + "options": { + "async": false, + "callback_url": "https://yourapp.com/callback" + } +} +``` + +**Response:** +```json +{ + "success": true, + "result": { + "processed_data": "...", + "metadata": { + "processing_time": 1.5, + "version": "v2.1" + } + }, + "request_id": "req_abc123" +} +``` + +### Get Processing Status +```http +GET /api/external/status/{request_id} +Authorization: Bearer {access_token} +``` + +**Response:** +```json +{ + "request_id": "req_abc123", + "status": "completed", + "result": { + "processed_data": "...", + "metadata": {} + }, + "created_at": "2024-01-15T10:30:00Z", + "completed_at": "2024-01-15T10:30:15Z" +} +``` + +## Performance Considerations + +### Connection Management +- Use connection pooling for multiple requests +- Implement connection timeouts +- Monitor connection health + +### Caching Strategy +- Cache authentication tokens +- Cache frequently requested data +- Implement cache invalidation + +### Rate Limiting +- Implement client-side rate limiting +- Queue requests during rate limit periods +- Monitor rate limit status + +## Security Considerations + +### Authentication +- Secure storage of API keys and secrets +- Token refresh mechanism +- Regular credential rotation + +### Data Protection +- Encrypt sensitive data in transit +- Validate all input data +- Sanitize API responses + 
+### Monitoring
+- Log all API interactions
+- Monitor for suspicious activity
+- Track error rates and patterns
+
+## Testing Strategy
+
+### Unit Tests
+```python
+import pytest
+from unittest.mock import AsyncMock, MagicMock, patch
+
+def make_post_response(payload, status=200):
+    """Build a mock usable as `async with session.post(...) as response`."""
+    response = AsyncMock()
+    response.status = status
+    response.json.return_value = payload
+    post_cm = MagicMock()  # MagicMock supports the async context manager protocol
+    post_cm.__aenter__.return_value = response
+    return post_cm
+
+@pytest.mark.asyncio
+async def test_successful_data_processing():
+    with patch("aiohttp.ClientSession") as mock_session_cls:
+        session = mock_session_cls.return_value
+        session.close = AsyncMock()
+        # First post() is the OAuth handshake in authenticate(),
+        # the second is the /process request itself
+        session.post.side_effect = [
+            make_post_response({"access_token": "test-token"}),
+            make_post_response({"result": "processed"}),
+        ]
+
+        # Use the client as an async context manager so __aenter__
+        # creates the session and authenticates
+        async with ExternalAPIClient("test-key", "https://api.test.com") as client:
+            result = await client.process_data({"input": "test"})
+
+        assert result["result"] == "processed"
+```
+
+### Integration Tests
+- Test with actual API endpoints (staging environment)
+- Test error scenarios and recovery
+- Test rate limiting behavior
+- Test authentication flow
+
+## Monitoring and Logging
+
+### Metrics to Track
+- Request success/failure rates
+- Response times
+- Rate limit status
+- Authentication failures
+- Error distribution
+
+### Logging Format
+```json
+{
+  "timestamp": "2024-01-15T10:30:00Z",
+  "level": "INFO",
+  "event": "external_api_request",
+  "request_id": "req_abc123",
+  "endpoint": "/process",
+  "method": "POST",
+  "status_code": 200,
+  "response_time": 1.5,
+  "retry_count": 0
+}
+```
+
+## Configuration
+
+### Environment Variables
+```bash
+# External API Configuration
+EXTERNAL_API_BASE_URL=https://api.example.com/v2
+EXTERNAL_API_KEY=your-api-key
+EXTERNAL_OAUTH_CLIENT_ID=your-client-id
+EXTERNAL_OAUTH_CLIENT_SECRET=your-client-secret
+
+# Request Configuration
+EXTERNAL_API_TIMEOUT=30
+EXTERNAL_API_MAX_RETRIES=3
+EXTERNAL_API_RETRY_DELAY=1
+
+# Caching
+EXTERNAL_API_CACHE_TTL=300
+REDIS_URL=redis://localhost:6379
+```
+
+## Related Files
+
+After implementation, update this list with actual file paths:
+- `src/integrations/external_api.py` - Main API client
+- `src/integrations/exceptions.py` - Custom exceptions
+- `src/api/routes/external.py` - Integration endpoints
+- `tests/test_external_api.py` - Integration tests
+- `config/external_api.py` - Configuration settings
+
+## Success Criteria
+
+- [ ] All API operations functional
+- [ ] Error handling robust
+- [ ] Performance requirements met
+- [ ] Security requirements satisfied
+- [ ] Monitoring and logging implemented
+- [ ] Tests passing (unit and integration)
+- [ ] Documentation complete
+
+---
+
+*This API integration specification template provides a comprehensive approach to documenting external service integrations. Customize based on your specific API requirements and integration needs.*
\ No newline at end of file
diff --git a/docs/specs/example-feature-specification.md b/docs/specs/example-feature-specification.md
new file mode 100644
index 00000000..d6708564
--- /dev/null
+++ b/docs/specs/example-feature-specification.md
@@ -0,0 +1,277 @@
+# Feature Specification: Example Authentication System
+
+## Overview
+
+This document outlines the implementation of a user authentication system for the application. This serves as a template for documenting feature specifications.
+ +### Objectives +- Implement secure user authentication +- Support multiple authentication methods +- Ensure scalable session management +- Maintain security best practices + +### Key Technologies +- **Backend**: FastAPI with JWT tokens +- **Database**: PostgreSQL with user management +- **Frontend**: React/Svelte with secure token storage +- **Security**: bcrypt for password hashing, OAuth2 for third-party auth + +## Architecture + +### Data Flow +``` +User Registration → Input Validation → Password Hashing → Database Storage → JWT Token Generation → Client Storage +``` + +### Authentication Flow +``` +Login Request → Credential Validation → Database Lookup → Password Verification → JWT Token → Secure Cookie/Storage +``` + +## Technical Specifications + +### 1. Database Schema + +#### Users Table +```sql +CREATE TABLE users ( + id SERIAL PRIMARY KEY, + email VARCHAR(255) UNIQUE NOT NULL, + password_hash VARCHAR(255) NOT NULL, + first_name VARCHAR(100), + last_name VARCHAR(100), + is_active BOOLEAN DEFAULT TRUE, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); +``` + +#### Sessions Table +```sql +CREATE TABLE user_sessions ( + id SERIAL PRIMARY KEY, + user_id INTEGER REFERENCES users(id), + session_token VARCHAR(255) UNIQUE NOT NULL, + expires_at TIMESTAMP NOT NULL, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); +``` + +### 2. API Endpoints + +#### Authentication Endpoints +- `POST /auth/register` - User registration +- `POST /auth/login` - User login +- `POST /auth/logout` - User logout +- `POST /auth/refresh` - Token refresh +- `GET /auth/me` - Get current user profile + +#### User Management Endpoints +- `GET /users/profile` - Get user profile +- `PUT /users/profile` - Update user profile +- `DELETE /users/account` - Delete user account + +### 3. 
Security Requirements + +#### Password Security +- Minimum 8 characters +- Must include uppercase, lowercase, number, and special character +- Hashed using bcrypt with salt rounds >= 12 + +#### Token Security +- JWT tokens with 15-minute expiration +- Refresh tokens with 7-day expiration +- Secure HTTP-only cookies for token storage +- CSRF protection for state-changing operations + +#### Rate Limiting +- Login attempts: 5 per minute per IP +- Registration: 3 per minute per IP +- Password reset: 1 per minute per email + +## Implementation Plan + +### Phase 1: Core Authentication (Week 1) +- [ ] Database schema setup +- [ ] User registration endpoint +- [ ] Login/logout endpoints +- [ ] JWT token generation and validation +- [ ] Basic password hashing + +### Phase 2: Security Enhancements (Week 2) +- [ ] Rate limiting implementation +- [ ] CSRF protection +- [ ] Session management +- [ ] Password strength validation +- [ ] Account lockout after failed attempts + +### Phase 3: Advanced Features (Week 3) +- [ ] OAuth2 integration (Google, GitHub) +- [ ] Two-factor authentication +- [ ] Password reset functionality +- [ ] Email verification +- [ ] Account recovery + +### Phase 4: Testing & Deployment (Week 4) +- [ ] Unit tests for all endpoints +- [ ] Integration tests for auth flows +- [ ] Security testing and penetration testing +- [ ] Performance testing +- [ ] Production deployment + +## API Documentation + +### Registration Endpoint +```http +POST /auth/register +Content-Type: application/json + +{ + "email": "user@example.com", + "password": "SecurePass123!", + "first_name": "John", + "last_name": "Doe" +} +``` + +**Response:** +```json +{ + "success": true, + "user": { + "id": 1, + "email": "user@example.com", + "first_name": "John", + "last_name": "Doe" + }, + "access_token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9...", + "refresh_token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9..." +} +``` + +### Login Endpoint +```http +POST /auth/login +Content-Type: application/json + +{ + "email": "user@example.com", + "password": "SecurePass123!" +} +``` + +**Response:** +```json +{ + "success": true, + "access_token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9...", + "refresh_token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9..." 
+} +``` + +## Testing Strategy + +### Unit Tests +- Password hashing and verification +- JWT token generation and validation +- Input validation and sanitization +- Database operations + +### Integration Tests +- Complete authentication flows +- Session management +- Rate limiting functionality +- CSRF protection + +### Security Tests +- SQL injection prevention +- XSS protection +- CSRF protection +- Password strength validation +- Rate limiting effectiveness + +## Deployment Considerations + +### Environment Variables +```bash +# Database +DATABASE_URL=postgresql://user:pass@localhost/dbname + +# JWT Configuration +JWT_SECRET_KEY=your-secret-key +JWT_ALGORITHM=HS256 +JWT_ACCESS_TOKEN_EXPIRE_MINUTES=15 +JWT_REFRESH_TOKEN_EXPIRE_DAYS=7 + +# Rate Limiting +RATE_LIMIT_ENABLED=true +REDIS_URL=redis://localhost:6379 + +# Email Configuration +SMTP_SERVER=smtp.gmail.com +SMTP_PORT=587 +SMTP_USERNAME=your-email@gmail.com +SMTP_PASSWORD=your-app-password +``` + +### Database Migrations +```bash +# Create migration +alembic revision --autogenerate -m "Add user authentication tables" + +# Apply migration +alembic upgrade head +``` + +## Performance Considerations + +### Database Optimization +- Index on email field for fast user lookups +- Index on session_token for session validation +- Regular cleanup of expired sessions + +### Caching Strategy +- Cache user profiles in Redis +- Cache JWT blacklist for logout +- Cache rate limiting counters + +### Monitoring +- Track authentication success/failure rates +- Monitor session creation and cleanup +- Alert on unusual login patterns + +## Security Compliance + +### OWASP Guidelines +- Secure password storage (bcrypt) +- Protection against common attacks (CSRF, XSS, SQL injection) +- Secure session management +- Rate limiting and account lockout + +### Data Protection +- Minimal data collection +- Secure data transmission (HTTPS) +- Regular security audits +- Compliance with privacy regulations + +## Related Files + +After implementation, update this list with actual file paths: +- `src/api/routes/auth.py` - Authentication endpoints +- `src/core/security.py` - Security utilities +- `src/database/models/user.py` - User database models +- `src/core/auth.py` - Authentication logic +- `tests/test_auth.py` - Authentication tests + +## Success Criteria + +- [ ] All authentication endpoints functional +- [ ] Security requirements met +- [ ] Performance benchmarks achieved +- [ ] All tests passing +- [ ] Documentation complete +- [ ] Production deployment successful + +--- + +*This specification template provides a comprehensive approach to documenting feature requirements. 
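+
+As a quick, illustrative sketch of the password-hashing and token-expiry rules above (assuming `passlib` and `python-jose` on the FastAPI backend; the helper names are placeholders, not part of this spec):
+
+```python
+from datetime import datetime, timedelta, timezone
+
+from jose import jwt  # assumption: python-jose
+from passlib.context import CryptContext  # assumption: passlib[bcrypt]
+
+# bcrypt with salt rounds >= 12, per the password security requirements
+pwd_context = CryptContext(schemes=["bcrypt"], bcrypt__rounds=12)
+
+def hash_password(plain: str) -> str:
+    return pwd_context.hash(plain)
+
+def verify_password(plain: str, hashed: str) -> bool:
+    return pwd_context.verify(plain, hashed)
+
+def create_access_token(user_id: int, secret_key: str) -> str:
+    # 15-minute expiry, matching JWT_ACCESS_TOKEN_EXPIRE_MINUTES
+    expire = datetime.now(timezone.utc) + timedelta(minutes=15)
+    return jwt.encode({"sub": str(user_id), "exp": expire}, secret_key, algorithm="HS256")
+```
+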
Adapt sections and details based on your specific feature requirements and project needs.* \ No newline at end of file diff --git a/eslint.config.mjs b/eslint.config.mjs index 0594e3af..658c5ad1 100644 --- a/eslint.config.mjs +++ b/eslint.config.mjs @@ -1,3 +1,6 @@ +// For more info, see https://github.com/storybookjs/eslint-plugin-storybook#configuration-flat-config-format +import storybook from "eslint-plugin-storybook"; + import js from "@eslint/js"; import globals from "globals"; import reactHooks from "eslint-plugin-react-hooks"; @@ -5,31 +8,28 @@ import reactRefresh from "eslint-plugin-react-refresh"; import tseslint from "typescript-eslint"; import eslintConfigPrettier from "eslint-config-prettier/flat"; -export default tseslint.config( - { ignores: ["dist"] }, - { - extends: [ - js.configs.recommended, - ...tseslint.configs.recommended, - eslintConfigPrettier, +export default tseslint.config({ ignores: ["dist"] }, { + extends: [ + js.configs.recommended, + ...tseslint.configs.recommended, + eslintConfigPrettier, + ], + files: ["**/*.{ts,tsx}"], + languageOptions: { + ecmaVersion: 2020, + globals: globals.browser, + }, + plugins: { + "react-hooks": reactHooks, + "react-refresh": reactRefresh, + }, + rules: { + ...reactHooks.configs.recommended.rules, + "react-refresh/only-export-components": [ + "warn", + { allowConstantExport: true }, ], - files: ["**/*.{ts,tsx}"], - languageOptions: { - ecmaVersion: 2020, - globals: globals.browser, - }, - plugins: { - "react-hooks": reactHooks, - "react-refresh": reactRefresh, - }, - rules: { - ...reactHooks.configs.recommended.rules, - "react-refresh/only-export-components": [ - "warn", - { allowConstantExport: true }, - ], - "@typescript-eslint/no-unsafe-function-type": "off", - "@typescript-eslint/no-explicit-any": "off", - }, + "@typescript-eslint/no-unsafe-function-type": "off", + "@typescript-eslint/no-explicit-any": "off", }, -); +}, storybook.configs["flat/recommended"]); diff --git a/package.json b/package.json index a5a5e586..1893b55b 100644 --- a/package.json +++ b/package.json @@ -34,7 +34,9 @@ "git:branch:clean:merged": "git branch --merged|egrep -v \"(\\*|master|main|dev|skip_branch_name)\" | xargs git branch -d", "git:branch:safe-delete": "echo '> git log --graph --left-right --cherry --oneline another-branch...main'", "git:forget": "git rm -r --cached . && git add . 
&& git commit -m \"Forget all ignored files\"", - "test:specs": "echo \"Error: test:specs is not supported\"" + "test:specs": "echo \"Error: test:specs is not supported\"", + "storybook": "storybook dev -p 6006", + "build-storybook": "storybook build" }, "main": "./dist/zenuml.js", "module": "./dist/zenuml.esm.mjs", @@ -79,6 +81,9 @@ "devDependencies": { "@eslint/js": "^9.21.0", "@playwright/test": "^1.54.1", + "@storybook/addon-docs": "^9.0.16", + "@storybook/addon-onboarding": "^9.0.16", + "@storybook/react-vite": "^9.0.16", "@testing-library/jest-dom": "^6.6.3", "@testing-library/react": "^16.3.0", "@types/antlr4": "~4.11.2", @@ -96,12 +101,14 @@ "eslint-config-prettier": "^10.1.1", "eslint-plugin-react-hooks": "^5.1.0", "eslint-plugin-react-refresh": "^0.4.19", + "eslint-plugin-storybook": "^9.0.16", "globals": "^15.15.0", "jsdom": "^26.1.0", "less": "^4.3.0", "postcss": "^8.5.3", "prettier": "3.5.3", "sass": "^1.86.3", + "storybook": "^9.0.16", "typescript": "~5.7.2", "typescript-eslint": "^8.24.1", "vite": "^6.2.0", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 028f624c..c92e3e91 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -79,6 +79,15 @@ importers: '@playwright/test': specifier: ^1.54.1 version: 1.54.1 + '@storybook/addon-docs': + specifier: ^9.0.16 + version: 9.0.16(@types/react@19.1.0)(storybook@9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3)) + '@storybook/addon-onboarding': + specifier: ^9.0.16 + version: 9.0.16(storybook@9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3)) + '@storybook/react-vite': + specifier: ^9.0.16 + version: 9.0.16(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(rollup@4.39.0)(storybook@9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3))(typescript@5.7.3)(vite@6.2.5(@types/node@22.14.0)(jiti@1.21.7)(less@4.3.0)(sass@1.86.3)(yaml@2.7.1)) '@testing-library/jest-dom': specifier: ^6.6.3 version: 6.6.3 @@ -130,6 +139,9 @@ importers: eslint-plugin-react-refresh: specifier: ^0.4.19 version: 0.4.19(eslint@9.24.0(jiti@1.21.7)) + eslint-plugin-storybook: + specifier: ^9.0.16 + version: 9.0.16(eslint@9.24.0(jiti@1.21.7))(storybook@9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3))(typescript@5.7.3) globals: specifier: ^15.15.0 version: 15.15.0 @@ -148,6 +160,9 @@ importers: sass: specifier: ^1.86.3 version: 1.86.3 + storybook: + specifier: ^9.0.16 + version: 9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3) typescript: specifier: ~5.7.2 version: 5.7.3 @@ -553,6 +568,15 @@ packages: resolution: {integrity: sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==} engines: {node: '>=12'} + '@joshwooding/vite-plugin-react-docgen-typescript@0.6.1': + resolution: {integrity: sha512-J4BaTocTOYFkMHIra1JDWrMWpNmBl4EkplIwHEsV8aeUOtdWjwSnln9U7twjMFTAEB7mptNtSKyVi1Y2W9sDJw==} + peerDependencies: + typescript: '>= 4.3.x' + vite: ^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 + peerDependenciesMeta: + typescript: + optional: true + '@jridgewell/gen-mapping@0.3.8': resolution: {integrity: sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==} engines: {node: '>=6.0.0'} @@ -571,6 +595,12 @@ packages: '@jridgewell/trace-mapping@0.3.25': resolution: {integrity: sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==} + '@mdx-js/react@3.1.0': + resolution: {integrity: sha512-QjHtSaoameoalGnKDT3FoIl4+9RwyTmo9ZJGBdLOks/YOiWHoRDI3PUwEzOE7kEmGcV3AFcp9K6dYu9rEuKLAQ==} + peerDependencies: + '@types/react': '>=16' + react: ^19.0.0 + 
'@nodelib/fs.scandir@2.1.5': resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==} engines: {node: '>= 8'} @@ -820,6 +850,65 @@ packages: cpu: [x64] os: [win32] + '@storybook/addon-docs@9.0.16': + resolution: {integrity: sha512-/ZXaxMC/JqL0cnVuyPHXdJhNvgCrKvxcnM3ACdgBLsEIGcIqegPF+Ahkb2f9sjU36sR7ihT81cL/7cUvQwzd4Q==} + peerDependencies: + storybook: ^9.0.16 + + '@storybook/addon-onboarding@9.0.16': + resolution: {integrity: sha512-69BPJ9fGNGpDAcGvNJ58V5uQOmpHkQMLcgp/ON3NepoCHiSReUzojB6wV8Ag13PUZmvWXVnE14SWKBZp93xTFQ==} + peerDependencies: + storybook: ^9.0.16 + + '@storybook/builder-vite@9.0.16': + resolution: {integrity: sha512-zXockUexeRy3ABG7DFLEvJqJe4mGL0JkI7FMrpwiKaHCQNaD87vR0xkRVeh0a3B8GKUNxoYtpYKvdzc9DobQHQ==} + peerDependencies: + storybook: ^9.0.16 + vite: ^5.0.0 || ^6.0.0 || ^7.0.0 + + '@storybook/csf-plugin@9.0.16': + resolution: {integrity: sha512-MSmfPwI0j1mMAc+R3DVkVBQf2KLzaVn2SLdEwweesx63Nh9j3zu9CqKEa0zOuDX1lR2M0DZU0lV6K4sc2EYI4A==} + peerDependencies: + storybook: ^9.0.16 + + '@storybook/global@5.0.0': + resolution: {integrity: sha512-FcOqPAXACP0I3oJ/ws6/rrPT9WGhu915Cg8D02a9YxLo0DE9zI+a9A5gRGvmQ09fiWPukqI8ZAEoQEdWUKMQdQ==} + + '@storybook/icons@1.4.0': + resolution: {integrity: sha512-Td73IeJxOyalzvjQL+JXx72jlIYHgs+REaHiREOqfpo3A2AYYG71AUbcv+lg7mEDIweKVCxsMQ0UKo634c8XeA==} + engines: {node: '>=14.0.0'} + peerDependencies: + react: ^19.0.0 + react-dom: ^19.0.0 + + '@storybook/react-dom-shim@9.0.16': + resolution: {integrity: sha512-5aIK+31R41mRUvDB4vmBv8hwh3IVHIk/Zbs6kkWF2a+swOsB2+a06aLX21lma4/0T/AuFVXHWat0+inQ4nrXRg==} + peerDependencies: + react: ^19.0.0 + react-dom: ^19.0.0 + storybook: ^9.0.16 + + '@storybook/react-vite@9.0.16': + resolution: {integrity: sha512-a+UsoymyvPH4bJJVI+asj02N8U2wlkGyzhUqF6LUM9gXzixRMxoRHkchCKLdqLhE+//STrwC0YFF3GG6Y5oMEg==} + engines: {node: '>=20.0.0'} + peerDependencies: + react: ^19.0.0 + react-dom: ^19.0.0 + storybook: ^9.0.16 + vite: ^5.0.0 || ^6.0.0 || ^7.0.0 + + '@storybook/react@9.0.16': + resolution: {integrity: sha512-1jk9fBe8vEoZrba9cK19ZDdZgYMXUNl3Egjj5RsTMYMc1L2mtIu9o56VyK/1V4Q52N9IyawHvmIIuxc5pCZHkQ==} + engines: {node: '>=20.0.0'} + peerDependencies: + react: ^19.0.0 + react-dom: ^19.0.0 + storybook: ^9.0.16 + typescript: '>= 4.9.x' + peerDependenciesMeta: + typescript: + optional: true + '@svgr/babel-plugin-add-jsx-attribute@8.0.0': resolution: {integrity: sha512-b9MIk7yhdS1pMCZM8VeNfUlSKVRhsHZNMl5O9SfaX0l0t5wjdgu4IDzGB8bpnGBBOjGST3rRFVsaaEtI4W6f7g==} engines: {node: '>=14'} @@ -923,6 +1012,12 @@ packages: '@types/react-dom': optional: true + '@testing-library/user-event@14.6.1': + resolution: {integrity: sha512-vq7fv0rnt+QTXgPxr5Hjc210p6YKq2kmdziLgnsZGgLJ9e6VAShx1pACLuRjd/AS/sr7phAR58OIIpf0LlmQNw==} + engines: {node: '>=12', npm: '>=6'} + peerDependencies: + '@testing-library/dom': '>=7.21.4' + '@trysound/sax@0.2.0': resolution: {integrity: sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==} engines: {node: '>=10.13.0'} @@ -945,9 +1040,18 @@ packages: '@types/babel__traverse@7.20.7': resolution: {integrity: sha512-dkO5fhS7+/oos4ciWxyEyjWe48zmG6wbCheo/G2ZnHx4fs3EU6YC6UM8rk56gAjNJ9P3MTH2jo5jb92/K6wbng==} + '@types/chai@5.2.2': + resolution: {integrity: sha512-8kB30R7Hwqf40JPiKhVzodJs2Qc1ZJ5zuT3uzw5Hq/dhNCl3G3l83jfpdI1e20BP348+fV7VIL/+FxaXkqBmWg==} + '@types/color-string@1.5.5': resolution: {integrity: sha512-p9+C1ssJsjnHV8nn96rkimm2h90LclLIwgBfiMCHW0oUr6jLmB+wzZUEGJPduB/D2RzI2Ahoe69xKNOawX6jgw==} + 
'@types/deep-eql@4.0.2': + resolution: {integrity: sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==} + + '@types/doctrine@0.0.9': + resolution: {integrity: sha512-eOIHzCUSH7SMfonMG1LsC2f8vxBFtho6NGBznK41R84YzPuvSBzrhEps33IsQiOW9+VL6NQ9DbjQJznk/S4uRA==} + '@types/estree@1.0.7': resolution: {integrity: sha512-w28IoSUCJpidD/TGviZwwMJckNESJZXFu7NBZ5YJ4mEUnNraUn9Pm8HSZm/jDF1pDWYKspWE7oVphigUPRakIQ==} @@ -964,6 +1068,9 @@ packages: '@types/marked@4.3.2': resolution: {integrity: sha512-a79Yc3TOk6dGdituy8hmTTJXjOkZ7zsFYV10L337ttq/rec8lRMDBpV7fL3uLx6TgbFCa5DU/h8FmIBQPSbU0w==} + '@types/mdx@2.0.13': + resolution: {integrity: sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==} + '@types/node@22.14.0': resolution: {integrity: sha512-Kmpl+z84ILoG+3T/zQFyAJsU6EPTmOCj8/2+83fSN6djd6I4o7uOuGIH6vq3PrjY5BGitSbFuMN18j3iknubbA==} @@ -978,6 +1085,9 @@ packages: '@types/react@19.1.0': resolution: {integrity: sha512-UaicktuQI+9UKyA4njtDOGBD/67t8YEBt2xdfqu8+gP9hqPUPsiXlNPcpS2gVdjmis5GKPG3fCxbQLVgxsQZ8w==} + '@types/resolve@1.20.6': + resolution: {integrity: sha512-A4STmOXPhMUtHH+S6ymgE2GiBSMqf4oTvcQZMcHzokuTLVYzXTB8ttjcgxOVaAp2lGwEdzZ0J+cRbbeevQj1UQ==} + '@types/tough-cookie@4.0.5': resolution: {integrity: sha512-/Ad8+nIOV7Rl++6f1BdKxFSMgmoqEoYbHRpPcx3JEfv8VRsQe9Z4mCXeJBzxs7mbHY/XOZZuXlRNfhpVPbs6ZA==} @@ -1040,6 +1150,9 @@ packages: '@vitest/expect@3.1.1': resolution: {integrity: sha512-q/zjrW9lgynctNbwvFtQkGK9+vvHA5UzVi2V8APrp1C6fG6/MuYYkmlx4FubuqLycCeSdHD5aadWfua/Vr0EUA==} + '@vitest/expect@3.2.4': + resolution: {integrity: sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==} + '@vitest/mocker@3.1.1': resolution: {integrity: sha512-bmpJJm7Y7i9BBELlLuuM1J1Q6EQ6K5Ye4wcyOpOMXMcePYKSIYlpcrCm4l/O6ja4VJA5G2aMJiuZkZdnxlC3SA==} peerDependencies: @@ -1054,6 +1167,9 @@ packages: '@vitest/pretty-format@3.1.1': resolution: {integrity: sha512-dg0CIzNx+hMMYfNmSqJlLSXEmnNhMswcn3sXO7Tpldr0LiGmg3eXdLLhwkv2ZqgHb/d5xg5F7ezNFRA1fA13yA==} + '@vitest/pretty-format@3.2.4': + resolution: {integrity: sha512-IVNZik8IVRJRTr9fxlitMKeJeXFFFN0JaB9PHPGQ8NKQbGpfjlTx9zO4RefN8gp7eqjNy8nyK3NZmBzOPeIxtA==} + '@vitest/runner@3.1.1': resolution: {integrity: sha512-X/d46qzJuEDO8ueyjtKfxffiXraPRfmYasoC4i5+mlLEJ10UvPb0XH5M9C3gWuxd7BAQhpK42cJgJtq53YnWVA==} @@ -1063,9 +1179,15 @@ packages: '@vitest/spy@3.1.1': resolution: {integrity: sha512-+EmrUOOXbKzLkTDwlsc/xrwOlPDXyVk3Z6P6K4oiCndxz7YLpp/0R0UsWVOKT0IXWjjBJuSMk6D27qipaupcvQ==} + '@vitest/spy@3.2.4': + resolution: {integrity: sha512-vAfasCOe6AIK70iP5UD11Ac4siNUNJ9i/9PZ3NKx07sG6sUxeag1LWdNrMWeKKYBLlzuK+Gn65Yd5nyL6ds+nw==} + '@vitest/utils@3.1.1': resolution: {integrity: sha512-1XIjflyaU2k3HMArJ50bwSh3wKWPD6Q47wz/NUSmRV0zNywPc4w79ARjg/i/aNINHwA+mIALhUVqD9/aUvZNgg==} + '@vitest/utils@3.2.4': + resolution: {integrity: sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==} + '@vue/compiler-core@3.5.13': resolution: {integrity: sha512-oOdAkwqUfW1WqpwSYJce06wvt6HljgY3fGeM9NcVA1HaYOij3mZG9Rkysn0OHuyUAGMbEbARIpsG+LPVlBJ5/Q==} @@ -1160,6 +1282,10 @@ packages: resolution: {integrity: sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==} engines: {node: '>=12'} + ast-types@0.16.1: + resolution: {integrity: sha512-6t10qk83GOG8p0vKmaCr8eiilZwO171AvbROMtvvNiwrTly62t+7XkA8RdIIVbpMhCASAsxgAzdRSwh6nw/5Dg==} + engines: {node: '>=4'} + atomic-sleep@1.0.0: resolution: {integrity: 
sha512-kNOjDqAh7px0XWNI+4QbzoiR/nTkHAWNud2uvnJquD1/x5a7EQZMJT0AczqK0Qn67oY/TTQ1LbUKajZpp3I9tQ==} engines: {node: '>=8.0.0'} @@ -1177,6 +1303,10 @@ packages: base64-js@1.5.1: resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==} + better-opn@3.0.2: + resolution: {integrity: sha512-aVNobHnJqLiUelTaHat9DZ1qM2w0C0Eym4LPI/3JxOnSokGVdsl1T1kN7TFvsEAD8G47A6VKQ0TVHqbBnYMJlQ==} + engines: {node: '>=12.0.0'} + binary-extensions@2.3.0: resolution: {integrity: sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==} engines: {node: '>=8'} @@ -1354,6 +1484,10 @@ packages: deep-is@0.1.4: resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} + define-lazy-prop@2.0.0: + resolution: {integrity: sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==} + engines: {node: '>=8'} + dequal@2.0.3: resolution: {integrity: sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==} engines: {node: '>=6'} @@ -1369,6 +1503,10 @@ packages: dlv@1.1.3: resolution: {integrity: sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==} + doctrine@3.0.0: + resolution: {integrity: sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==} + engines: {node: '>=6.0.0'} + dom-accessibility-api@0.5.16: resolution: {integrity: sha512-X7BJ2yElsnOJ30pZF4uIIDfBEVgF4XEBxL9Bxhy6dnrm5hkzqmsWHGTiHqRiITNhMyFLyAiWndIJP7Z1NTteDg==} @@ -1424,6 +1562,11 @@ packages: es-module-lexer@1.6.0: resolution: {integrity: sha512-qqnD1yMU6tk/jnaMosogGySTZP8YtUgAffA9nMN+E/rjxcfRQ6IEk7IiozUjgxKoFHBGjTLnrHB/YC45r/59EQ==} + esbuild-register@3.6.0: + resolution: {integrity: sha512-H2/S7Pm8a9CL1uhp9OvjwrBh5Pvx0H8qVOxNu8Wed9Y7qv56MPtq+GGM8RJpq6glYJn9Wspr8uw7l55uyinNeg==} + peerDependencies: + esbuild: '>=0.12 <1' + esbuild@0.25.2: resolution: {integrity: sha512-16854zccKPnC+toMywC+uKNeYSv+/eXkevRAfwRD/G9Cleq66m8XFIrigkbvauLLlCfDL45Q2cWegSg53gGBnQ==} engines: {node: '>=18'} @@ -1454,6 +1597,13 @@ packages: peerDependencies: eslint: '>=8.40' + eslint-plugin-storybook@9.0.16: + resolution: {integrity: sha512-A9kJaYBGYswo11t9coo1rpY5i8qPJx9JX5/6YWK3L3zT9lCxJWkYFAed/1Jt92yk7EkOzLrwrIIjMj/+7erlgw==} + engines: {node: '>=20.0.0'} + peerDependencies: + eslint: '>=8' + storybook: ^9.0.16 + eslint-scope@8.3.0: resolution: {integrity: sha512-pUNxi75F8MJ/GdeKtVLSbYg4ZI34J6C0C7sbL4YOp2exGwen7ZsuBqKzUhXd0qMQ362yET3z+uPwKeg/0C2XCQ==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -1480,6 +1630,11 @@ packages: resolution: {integrity: sha512-0QYC8b24HWY8zjRnDTL6RiHfDbAWn63qb4LMj1Z4b076A4une81+z03Kg7l7mn/48PUTqoLptSXez8oknU8Clg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + esprima@4.0.1: + resolution: {integrity: sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==} + engines: {node: '>=4'} + hasBin: true + esquery@1.6.0: resolution: {integrity: sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==} engines: {node: '>=0.10'} @@ -1546,6 +1701,10 @@ packages: resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} engines: {node: '>=10'} + find-up@7.0.0: + resolution: {integrity: sha512-YyZM99iHrqLKjmt4LJDj58KI+fYyufRLBSYcqycxf//KpBk9FoewoGX0450m9nB44qrZnovzC2oeP5hUibxc/g==} + engines: {node: 
'>=18'} + flat-cache@4.0.1: resolution: {integrity: sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==} engines: {node: '>=16'} @@ -1678,6 +1837,11 @@ packages: resolution: {integrity: sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==} engines: {node: '>= 0.4'} + is-docker@2.2.1: + resolution: {integrity: sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==} + engines: {node: '>=8'} + hasBin: true + is-extglob@2.1.1: resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} engines: {node: '>=0.10.0'} @@ -1700,6 +1864,10 @@ packages: is-what@3.14.1: resolution: {integrity: sha512-sNxgpk9793nzSs7bA6JQJGeIuRBQhAaNGG77kzYQgMkrID+lS6SlK07K5LaptscDlSaIgH+GPFzf+d75FVxozA==} + is-wsl@2.2.0: + resolution: {integrity: sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==} + engines: {node: '>=8'} + isexe@2.0.0: resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} @@ -1783,6 +1951,10 @@ packages: resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} engines: {node: '>=10'} + locate-path@7.2.0: + resolution: {integrity: sha512-gvVijfZvn7R+2qyPX8mAuKcFGDf6Nc61GdvGafQsHL0sBIxfKzA+usWn4GFC/bk+QdwPUD4kWFJLhElipq+0VA==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + lodash.merge@4.6.2: resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} @@ -1792,6 +1964,9 @@ packages: loupe@3.1.3: resolution: {integrity: sha512-kkIp7XSkP78ZxJEsSxW3712C6teJVoeHHwgo9zJ380de7IYyJ2ISlxojcH2pC5OFLewESmnRi/+XCDIEEVyoug==} + loupe@3.1.4: + resolution: {integrity: sha512-wJzkKwJrheKtknCOKNEtDK4iqg/MxmZheEMtSTYvnzRdEYaZzmgH976nenp8WdJRdx5Vc1X/9MO0Oszl6ezeXg==} + lower-case@2.0.2: resolution: {integrity: sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==} @@ -1847,6 +2022,9 @@ packages: resolution: {integrity: sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==} engines: {node: '>=16 || 14 >=14.17'} + minimist@1.2.8: + resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==} + minipass@7.1.2: resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==} engines: {node: '>=16 || 14 >=14.17'} @@ -1905,6 +2083,10 @@ packages: resolution: {integrity: sha512-0eJJY6hXLGf1udHwfNftBqH+g73EU4B504nZeKpz1sYRKafAghwxEJunB2O7rDZkL4PGfsMVnTXZ2EjibbqcsA==} engines: {node: '>=14.0.0'} + open@8.4.2: + resolution: {integrity: sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==} + engines: {node: '>=12'} + optionator@0.9.4: resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==} engines: {node: '>= 0.8.0'} @@ -1913,10 +2095,18 @@ packages: resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} engines: {node: '>=10'} + p-limit@4.0.0: + resolution: {integrity: sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + p-locate@5.0.0: 
resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} engines: {node: '>=10'} + p-locate@6.0.0: + resolution: {integrity: sha512-wPrq66Llhl7/4AGC6I+cqxT07LhXvWL08LNXz1fENOw0Ap4sRZZ/gZpTTJ5jpurzzzfS2W/Ge9BY3LgLjCShcw==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + package-json-from-dist@1.0.1: resolution: {integrity: sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==} @@ -1939,6 +2129,10 @@ packages: resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} engines: {node: '>=8'} + path-exists@5.0.0: + resolution: {integrity: sha512-RjhtfwJOxzcFmNOi6ltcbcu4Iu+FL3zEj83dk4kAS+fVpTxXLO1b38RvJgT/0QwvV/L3aY9TAnyv0EOqW4GoMQ==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + path-key@3.1.1: resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} engines: {node: '>=8'} @@ -2085,6 +2279,15 @@ packages: ramda@0.28.0: resolution: {integrity: sha512-9QnLuG/kPVgWvMQ4aODhsBUFKOUmnbUnsSXACv+NCQZcHbeb+v8Lodp8OVxtRULN1/xOyYLLaL6npE6dMq5QTA==} + react-docgen-typescript@2.4.0: + resolution: {integrity: sha512-ZtAp5XTO5HRzQctjPU0ybY0RRCQO19X/8fxn3w7y2VVTUbGHDKULPTL4ky3vB05euSgG5NpALhEhDPvQ56wvXg==} + peerDependencies: + typescript: '>= 4.3.x' + + react-docgen@8.0.0: + resolution: {integrity: sha512-kmob/FOTwep7DUWf9KjuenKX0vyvChr3oTdvvPt09V60Iz75FJp+T/0ZeHMbAfJj2WaVWqAPP5Hmm3PYzSPPKg==} + engines: {node: ^20.9.0 || >=22} + react-dom@19.1.0: resolution: {integrity: sha512-Xs1hdnE+DyKgeHJeJznQmYMIBG3TKIHJJT95Q58nHLSrElKlGQqDTR2HQ9fx5CN/Gk6Vh/kupBTDLU11/nDk/g==} peerDependencies: @@ -2120,6 +2323,10 @@ packages: resolution: {integrity: sha512-57frrGM/OCTLqLOAh0mhVA9VBMHd+9U7Zb2THMGdBUoZVOtGbJzjxsYGDJ3A9AYYCP4hn6y1TVbaOfzWtm5GFg==} engines: {node: '>= 12.13.0'} + recast@0.23.11: + resolution: {integrity: sha512-YTUo+Flmw4ZXiWfQKGcwwc11KnoRAYgzAE2E7mXKCjSviTKShtxBsN6YUUBB2gtaBzKzeKunxhUwNHQuRryhWA==} + engines: {node: '>= 4'} + redent@3.0.0: resolution: {integrity: sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==} engines: {node: '>=8'} @@ -2228,6 +2435,15 @@ packages: std-env@3.9.0: resolution: {integrity: sha512-UGvjygr6F6tpH7o2qyqR6QYpwraIjKSdtzyBdyytFOHmPZY917kwdwLG0RbOjWOnKmnm3PeHjaoLLMie7kPLQw==} + storybook@9.0.16: + resolution: {integrity: sha512-DzjzeggdzlXKKBK1L9iqNKqqNpyfeaL1hxxeAOmqgeMezwy5d5mCJmjNcZEmx+prsRmvj1OWm4ZZAg6iP/wABg==} + hasBin: true + peerDependencies: + prettier: ^2 || ^3 + peerDependenciesMeta: + prettier: + optional: true + string-width@4.2.3: resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} engines: {node: '>=8'} @@ -2247,10 +2463,18 @@ packages: resolution: {integrity: sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==} engines: {node: '>=12'} + strip-bom@3.0.0: + resolution: {integrity: sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==} + engines: {node: '>=4'} + strip-indent@3.0.0: resolution: {integrity: sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==} engines: {node: '>=8'} + strip-indent@4.0.0: + resolution: {integrity: sha512-mnVSV2l+Zv6BLpSD/8V87CW/y9EmmbYzGCIavsnsI6/nwn26DwffM/yztm30Z/I2DY9wdS3vXVCMnHDgZaVNoA==} + engines: {node: '>=12'} + strip-json-comments@3.1.1: 
resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} engines: {node: '>=8'} @@ -2300,6 +2524,9 @@ packages: thread-stream@2.7.0: resolution: {integrity: sha512-qQiRWsU/wvNolI6tbbCKd9iKaTnCXsTwVxhhKM6nctPdujTyztjlbUkUTUymidWcMnZ5pWR0ej4a0tjsW021vw==} + tiny-invariant@1.3.3: + resolution: {integrity: sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==} + tinybench@2.9.0: resolution: {integrity: sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==} @@ -2318,6 +2545,10 @@ packages: resolution: {integrity: sha512-n1cw8k1k0x4pgA2+9XrOkFydTerNcJ1zWCO5Nn9scWHTD+5tp8dghT2x1uduQePZTZgd3Tupf+x9BxJjeJi77Q==} engines: {node: '>=14.0.0'} + tinyspy@4.0.3: + resolution: {integrity: sha512-t2T/WLB2WRgZ9EpE4jgPJ9w+i66UZfDc8wHh0xrwiRNN+UwH98GIJkTeZqX9rg0i0ptwzqW+uYeIF0T4F8LR7A==} + engines: {node: '>=14.0.0'} + tldts-core@6.1.86: resolution: {integrity: sha512-Je6p7pkk+KMzMv2XXKmAE3McmolOQFdxkKw0R8EYNr7sELW46JqnNeTX8ybPiQgvg1ymCoF8LXs5fzFaZvJPTA==} @@ -2343,12 +2574,20 @@ packages: peerDependencies: typescript: '>=4.8.4' + ts-dedent@2.2.0: + resolution: {integrity: sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==} + engines: {node: '>=6.10'} + ts-interface-checker@0.1.13: resolution: {integrity: sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==} ts-toolbelt@6.15.5: resolution: {integrity: sha512-FZIXf1ksVyLcfr7M317jbB67XFJhOO1YqdTcuGaq9q5jLUoTikukZ+98TPjKiP2jC5CgmYdWWYs0s2nLSU0/1A==} + tsconfig-paths@4.2.0: + resolution: {integrity: sha512-NoZ4roiN7LnbKn9QqE1amc9DJfzvZXxF4xDavcOWt1BPkdx+m+0gJuPM+S0vCe7zTJMYUP0R8pO2XMr+Y8oLIg==} + engines: {node: '>=6'} + tslib@2.8.1: resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==} @@ -2371,6 +2610,14 @@ packages: undici-types@6.21.0: resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==} + unicorn-magic@0.1.0: + resolution: {integrity: sha512-lRfVq8fE8gz6QMBuDM6a+LO3IAzTi05H6gCVaUpir2E1Rwpo4ZUog45KpNXKC/Mn3Yb9UDuHumeFTo9iV/D9FQ==} + engines: {node: '>=18'} + + unplugin@1.16.1: + resolution: {integrity: sha512-4/u/j4FrCKdi17jaxuJA0jClGxB1AvU2hw/IuayPc4ay1XGaJs/rbb4v5WKwAjNifjmXK9PIFyuPiaK8azyR9w==} + engines: {node: '>=14.0.0'} + update-browserslist-db@1.1.3: resolution: {integrity: sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw==} hasBin: true @@ -2492,6 +2739,9 @@ packages: resolution: {integrity: sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==} engines: {node: '>=12'} + webpack-virtual-modules@0.6.2: + resolution: {integrity: sha512-66/V2i5hQanC51vBQKPH4aI8NMAcBW59FVBs+rC7eGHupMyfn34q7rZIE+ETlJ+XTevqfUhVVBgSUNSW2flEUQ==} + whatwg-encoding@3.1.1: resolution: {integrity: sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==} engines: {node: '>=18'} @@ -2557,6 +2807,10 @@ packages: resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} engines: {node: '>=10'} + yocto-queue@1.2.1: + resolution: {integrity: sha512-AyeEbWOu/TAXdxlV9wmGcR0+yh2j3vYPGOECcIj2S7MkrLyC7ne+oye2BKTItt0ii2PHk4cDy+95+LshzbXnGg==} + engines: {node: '>=12.20'} + snapshots: '@adobe/css-tools@4.4.2': {} @@ -2902,6 +3156,15 @@ 
snapshots: wrap-ansi: 8.1.0 wrap-ansi-cjs: wrap-ansi@7.0.0 + '@joshwooding/vite-plugin-react-docgen-typescript@0.6.1(typescript@5.7.3)(vite@6.2.5(@types/node@22.14.0)(jiti@1.21.7)(less@4.3.0)(sass@1.86.3)(yaml@2.7.1))': + dependencies: + glob: 10.4.5 + magic-string: 0.30.17 + react-docgen-typescript: 2.4.0(typescript@5.7.3) + vite: 6.2.5(@types/node@22.14.0)(jiti@1.21.7)(less@4.3.0)(sass@1.86.3)(yaml@2.7.1) + optionalDependencies: + typescript: 5.7.3 + '@jridgewell/gen-mapping@0.3.8': dependencies: '@jridgewell/set-array': 1.2.1 @@ -2919,6 +3182,12 @@ snapshots: '@jridgewell/resolve-uri': 3.1.2 '@jridgewell/sourcemap-codec': 1.5.0 + '@mdx-js/react@3.1.0(@types/react@19.1.0)(react@19.1.0)': + dependencies: + '@types/mdx': 2.0.13 + '@types/react': 19.1.0 + react: 19.1.0 + '@nodelib/fs.scandir@2.1.5': dependencies: '@nodelib/fs.stat': 2.0.5 @@ -3116,6 +3385,78 @@ snapshots: '@rollup/rollup-win32-x64-msvc@4.39.0': optional: true + '@storybook/addon-docs@9.0.16(@types/react@19.1.0)(storybook@9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3))': + dependencies: + '@mdx-js/react': 3.1.0(@types/react@19.1.0)(react@19.1.0) + '@storybook/csf-plugin': 9.0.16(storybook@9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3)) + '@storybook/icons': 1.4.0(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@storybook/react-dom-shim': 9.0.16(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(storybook@9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3)) + react: 19.1.0 + react-dom: 19.1.0(react@19.1.0) + storybook: 9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3) + ts-dedent: 2.2.0 + transitivePeerDependencies: + - '@types/react' + + '@storybook/addon-onboarding@9.0.16(storybook@9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3))': + dependencies: + storybook: 9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3) + + '@storybook/builder-vite@9.0.16(storybook@9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3))(vite@6.2.5(@types/node@22.14.0)(jiti@1.21.7)(less@4.3.0)(sass@1.86.3)(yaml@2.7.1))': + dependencies: + '@storybook/csf-plugin': 9.0.16(storybook@9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3)) + storybook: 9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3) + ts-dedent: 2.2.0 + vite: 6.2.5(@types/node@22.14.0)(jiti@1.21.7)(less@4.3.0)(sass@1.86.3)(yaml@2.7.1) + + '@storybook/csf-plugin@9.0.16(storybook@9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3))': + dependencies: + storybook: 9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3) + unplugin: 1.16.1 + + '@storybook/global@5.0.0': {} + + '@storybook/icons@1.4.0(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + dependencies: + react: 19.1.0 + react-dom: 19.1.0(react@19.1.0) + + '@storybook/react-dom-shim@9.0.16(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(storybook@9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3))': + dependencies: + react: 19.1.0 + react-dom: 19.1.0(react@19.1.0) + storybook: 9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3) + + '@storybook/react-vite@9.0.16(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(rollup@4.39.0)(storybook@9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3))(typescript@5.7.3)(vite@6.2.5(@types/node@22.14.0)(jiti@1.21.7)(less@4.3.0)(sass@1.86.3)(yaml@2.7.1))': + dependencies: + '@joshwooding/vite-plugin-react-docgen-typescript': 0.6.1(typescript@5.7.3)(vite@6.2.5(@types/node@22.14.0)(jiti@1.21.7)(less@4.3.0)(sass@1.86.3)(yaml@2.7.1)) + '@rollup/pluginutils': 5.1.4(rollup@4.39.0) + '@storybook/builder-vite': 
9.0.16(storybook@9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3))(vite@6.2.5(@types/node@22.14.0)(jiti@1.21.7)(less@4.3.0)(sass@1.86.3)(yaml@2.7.1)) + '@storybook/react': 9.0.16(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(storybook@9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3))(typescript@5.7.3) + find-up: 7.0.0 + magic-string: 0.30.17 + react: 19.1.0 + react-docgen: 8.0.0 + react-dom: 19.1.0(react@19.1.0) + resolve: 1.22.10 + storybook: 9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3) + tsconfig-paths: 4.2.0 + vite: 6.2.5(@types/node@22.14.0)(jiti@1.21.7)(less@4.3.0)(sass@1.86.3)(yaml@2.7.1) + transitivePeerDependencies: + - rollup + - supports-color + - typescript + + '@storybook/react@9.0.16(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(storybook@9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3))(typescript@5.7.3)': + dependencies: + '@storybook/global': 5.0.0 + '@storybook/react-dom-shim': 9.0.16(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(storybook@9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3)) + react: 19.1.0 + react-dom: 19.1.0(react@19.1.0) + storybook: 9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3) + optionalDependencies: + typescript: 5.7.3 + '@svgr/babel-plugin-add-jsx-attribute@8.0.0(@babel/core@7.26.10)': dependencies: '@babel/core': 7.26.10 @@ -3229,6 +3570,10 @@ snapshots: '@types/react': 19.1.0 '@types/react-dom': 19.1.1(@types/react@19.1.0) + '@testing-library/user-event@14.6.1(@testing-library/dom@10.4.0)': + dependencies: + '@testing-library/dom': 10.4.0 + '@trysound/sax@0.2.0': {} '@types/antlr4@4.11.6': {} @@ -3256,8 +3601,16 @@ snapshots: dependencies: '@babel/types': 7.27.0 + '@types/chai@5.2.2': + dependencies: + '@types/deep-eql': 4.0.2 + '@types/color-string@1.5.5': {} + '@types/deep-eql@4.0.2': {} + + '@types/doctrine@0.0.9': {} + '@types/estree@1.0.7': {} '@types/highlight.js@10.1.0': @@ -3274,6 +3627,8 @@ snapshots: '@types/marked@4.3.2': {} + '@types/mdx@2.0.13': {} + '@types/node@22.14.0': dependencies: undici-types: 6.21.0 @@ -3290,6 +3645,8 @@ snapshots: dependencies: csstype: 3.1.3 + '@types/resolve@1.20.6': {} + '@types/tough-cookie@4.0.5': {} '@types/trusted-types@2.0.7': @@ -3390,6 +3747,14 @@ snapshots: chai: 5.2.0 tinyrainbow: 2.0.0 + '@vitest/expect@3.2.4': + dependencies: + '@types/chai': 5.2.2 + '@vitest/spy': 3.2.4 + '@vitest/utils': 3.2.4 + chai: 5.2.0 + tinyrainbow: 2.0.0 + '@vitest/mocker@3.1.1(vite@6.2.5(@types/node@22.14.0)(jiti@1.21.7)(less@4.3.0)(sass@1.86.3)(yaml@2.7.1))': dependencies: '@vitest/spy': 3.1.1 @@ -3402,6 +3767,10 @@ snapshots: dependencies: tinyrainbow: 2.0.0 + '@vitest/pretty-format@3.2.4': + dependencies: + tinyrainbow: 2.0.0 + '@vitest/runner@3.1.1': dependencies: '@vitest/utils': 3.1.1 @@ -3417,12 +3786,22 @@ snapshots: dependencies: tinyspy: 3.0.2 + '@vitest/spy@3.2.4': + dependencies: + tinyspy: 4.0.3 + '@vitest/utils@3.1.1': dependencies: '@vitest/pretty-format': 3.1.1 loupe: 3.1.3 tinyrainbow: 2.0.0 + '@vitest/utils@3.2.4': + dependencies: + '@vitest/pretty-format': 3.2.4 + loupe: 3.1.4 + tinyrainbow: 2.0.0 + '@vue/compiler-core@3.5.13': dependencies: '@babel/parser': 7.27.0 @@ -3527,6 +3906,10 @@ snapshots: assertion-error@2.0.1: {} + ast-types@0.16.1: + dependencies: + tslib: 2.8.1 + atomic-sleep@1.0.0: {} autoprefixer@10.4.21(postcss@8.5.3): @@ -3543,6 +3926,10 @@ snapshots: base64-js@1.5.1: {} + better-opn@3.0.2: + dependencies: + open: 8.4.2 + binary-extensions@2.3.0: {} boolbase@1.0.0: {} @@ -3713,6 +4100,8 @@ snapshots: deep-is@0.1.4: {} + define-lazy-prop@2.0.0: {} + dequal@2.0.3: 
{} detect-libc@1.0.3: @@ -3722,6 +4111,10 @@ snapshots: dlv@1.1.3: {} + doctrine@3.0.0: + dependencies: + esutils: 2.0.3 + dom-accessibility-api@0.5.16: {} dom-accessibility-api@0.6.3: {} @@ -3776,6 +4169,13 @@ snapshots: es-module-lexer@1.6.0: {} + esbuild-register@3.6.0(esbuild@0.25.2): + dependencies: + debug: 4.4.0 + esbuild: 0.25.2 + transitivePeerDependencies: + - supports-color + esbuild@0.25.2: optionalDependencies: '@esbuild/aix-ppc64': 0.25.2 @@ -3820,6 +4220,15 @@ snapshots: dependencies: eslint: 9.24.0(jiti@1.21.7) + eslint-plugin-storybook@9.0.16(eslint@9.24.0(jiti@1.21.7))(storybook@9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3))(typescript@5.7.3): + dependencies: + '@typescript-eslint/utils': 8.29.0(eslint@9.24.0(jiti@1.21.7))(typescript@5.7.3) + eslint: 9.24.0(jiti@1.21.7) + storybook: 9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3) + transitivePeerDependencies: + - supports-color + - typescript + eslint-scope@8.3.0: dependencies: esrecurse: 4.3.0 @@ -3877,6 +4286,8 @@ snapshots: acorn-jsx: 5.3.2(acorn@8.14.1) eslint-visitor-keys: 4.2.0 + esprima@4.0.1: {} + esquery@1.6.0: dependencies: estraverse: 5.3.0 @@ -3934,6 +4345,12 @@ snapshots: locate-path: 6.0.0 path-exists: 4.0.0 + find-up@7.0.0: + dependencies: + locate-path: 7.2.0 + path-exists: 5.0.0 + unicorn-magic: 0.1.0 + flat-cache@4.0.1: dependencies: flatted: 3.3.3 @@ -4048,6 +4465,8 @@ snapshots: dependencies: hasown: 2.0.2 + is-docker@2.2.1: {} + is-extglob@2.1.1: {} is-fullwidth-code-point@3.0.0: {} @@ -4062,6 +4481,10 @@ snapshots: is-what@3.14.1: {} + is-wsl@2.2.0: + dependencies: + is-docker: 2.2.1 + isexe@2.0.0: {} jackspeak@3.4.3: @@ -4153,12 +4576,18 @@ snapshots: dependencies: p-locate: 5.0.0 + locate-path@7.2.0: + dependencies: + p-locate: 6.0.0 + lodash.merge@4.6.2: {} lodash@4.17.21: {} loupe@3.1.3: {} + loupe@3.1.4: {} + lower-case@2.0.2: dependencies: tslib: 2.8.1 @@ -4207,6 +4636,8 @@ snapshots: dependencies: brace-expansion: 2.0.1 + minimist@1.2.8: {} + minipass@7.1.2: {} ms@2.1.3: {} @@ -4253,6 +4684,12 @@ snapshots: on-exit-leak-free@2.1.2: {} + open@8.4.2: + dependencies: + define-lazy-prop: 2.0.0 + is-docker: 2.2.1 + is-wsl: 2.2.0 + optionator@0.9.4: dependencies: deep-is: 0.1.4 @@ -4266,10 +4703,18 @@ snapshots: dependencies: yocto-queue: 0.1.0 + p-limit@4.0.0: + dependencies: + yocto-queue: 1.2.1 + p-locate@5.0.0: dependencies: p-limit: 3.1.0 + p-locate@6.0.0: + dependencies: + p-limit: 4.0.0 + package-json-from-dist@1.0.1: {} parent-module@1.0.1: @@ -4291,6 +4736,8 @@ snapshots: path-exists@4.0.0: {} + path-exists@5.0.0: {} + path-key@3.1.1: {} path-parse@1.0.7: {} @@ -4412,6 +4859,25 @@ snapshots: ramda@0.28.0: {} + react-docgen-typescript@2.4.0(typescript@5.7.3): + dependencies: + typescript: 5.7.3 + + react-docgen@8.0.0: + dependencies: + '@babel/core': 7.26.10 + '@babel/traverse': 7.27.0 + '@babel/types': 7.27.0 + '@types/babel__core': 7.20.5 + '@types/babel__traverse': 7.20.7 + '@types/doctrine': 0.0.9 + '@types/resolve': 1.20.6 + doctrine: 3.0.0 + resolve: 1.22.10 + strip-indent: 4.0.0 + transitivePeerDependencies: + - supports-color + react-dom@19.1.0(react@19.1.0): dependencies: react: 19.1.0 @@ -4443,6 +4909,14 @@ snapshots: real-require@0.2.0: {} + recast@0.23.11: + dependencies: + ast-types: 0.16.1 + esprima: 4.0.1 + source-map: 0.6.1 + tiny-invariant: 1.3.3 + tslib: 2.8.1 + redent@3.0.0: dependencies: indent-string: 4.0.0 @@ -4543,8 +5017,7 @@ snapshots: source-map-js@1.2.1: {} - source-map@0.6.1: - optional: true + source-map@0.6.1: {} split2@4.2.0: {} @@ -4552,6 +5025,27 @@ 
snapshots: std-env@3.9.0: {} + storybook@9.0.16(@testing-library/dom@10.4.0)(prettier@3.5.3): + dependencies: + '@storybook/global': 5.0.0 + '@testing-library/jest-dom': 6.6.3 + '@testing-library/user-event': 14.6.1(@testing-library/dom@10.4.0) + '@vitest/expect': 3.2.4 + '@vitest/spy': 3.2.4 + better-opn: 3.0.2 + esbuild: 0.25.2 + esbuild-register: 3.6.0(esbuild@0.25.2) + recast: 0.23.11 + semver: 7.7.1 + ws: 8.18.1 + optionalDependencies: + prettier: 3.5.3 + transitivePeerDependencies: + - '@testing-library/dom' + - bufferutil + - supports-color + - utf-8-validate + string-width@4.2.3: dependencies: emoji-regex: 8.0.0 @@ -4576,10 +5070,16 @@ snapshots: dependencies: ansi-regex: 6.1.0 + strip-bom@3.0.0: {} + strip-indent@3.0.0: dependencies: min-indent: 1.0.1 + strip-indent@4.0.0: + dependencies: + min-indent: 1.0.1 + strip-json-comments@3.1.1: {} sucrase@3.35.0: @@ -4655,6 +5155,8 @@ snapshots: dependencies: real-require: 0.2.0 + tiny-invariant@1.3.3: {} + tinybench@2.9.0: {} tinyexec@0.3.2: {} @@ -4665,6 +5167,8 @@ snapshots: tinyspy@3.0.2: {} + tinyspy@4.0.3: {} + tldts-core@6.1.86: {} tldts@6.1.86: @@ -4687,10 +5191,18 @@ snapshots: dependencies: typescript: 5.7.3 + ts-dedent@2.2.0: {} + ts-interface-checker@0.1.13: {} ts-toolbelt@6.15.5: {} + tsconfig-paths@4.2.0: + dependencies: + json5: 2.2.3 + minimist: 1.2.8 + strip-bom: 3.0.0 + tslib@2.8.1: {} type-check@0.4.0: @@ -4711,6 +5223,13 @@ snapshots: undici-types@6.21.0: {} + unicorn-magic@0.1.0: {} + + unplugin@1.16.1: + dependencies: + acorn: 8.14.1 + webpack-virtual-modules: 0.6.2 + update-browserslist-db@1.1.3(browserslist@4.24.4): dependencies: browserslist: 4.24.4 @@ -4836,6 +5355,8 @@ snapshots: webidl-conversions@7.0.0: {} + webpack-virtual-modules@0.6.2: {} + whatwg-encoding@3.1.1: dependencies: iconv-lite: 0.6.3 @@ -4881,3 +5402,5 @@ snapshots: yaml@2.7.1: {} yocto-queue@0.1.0: {} + + yocto-queue@1.2.1: {} diff --git a/src/ZenUml.stories.tsx b/src/ZenUml.stories.tsx new file mode 100644 index 00000000..3adad6ec --- /dev/null +++ b/src/ZenUml.stories.tsx @@ -0,0 +1,149 @@ +import type { Meta, StoryObj } from '@storybook/react' +import { useEffect, useRef } from 'react' +import ZenUml from './core' + +const ZenUmlWrapper = ({ + code, + theme = 'default', + enableScopedTheming = false, + mode = 'Dynamic' as any +}: { + code: string + theme?: string + enableScopedTheming?: boolean + mode?: 'Static' | 'Dynamic' +}) => { + const containerRef = useRef(null) + const zenUmlRef = useRef(null) + + useEffect(() => { + if (containerRef.current && !zenUmlRef.current) { + zenUmlRef.current = new ZenUml(containerRef.current) + } + }, []) + + useEffect(() => { + if (zenUmlRef.current) { + zenUmlRef.current.render(code, { + theme, + enableScopedTheming, + mode: mode as any, + }) + } + }, [code, theme, enableScopedTheming, mode]) + + return
 <div ref={containerRef} /> // mount point the ZenUml instance renders into
+} + +const meta: Meta = { + title: 'ZenUML/Complete Integration', + component: ZenUmlWrapper, + parameters: { + layout: 'fullscreen', + }, + argTypes: { + code: { control: 'text' }, + theme: { + control: 'select', + options: ['default', 'blue', 'black-white', 'star-uml', 'blue-river'], + }, + enableScopedTheming: { control: 'boolean' }, + mode: { + control: 'select', + options: ['Static', 'Dynamic'], + }, + }, +} + +export default meta +type Story = StoryObj + +export const SimpleSequence: Story = { + args: { + code: `Alice -> Bob: Hello Bob +Bob -> Alice: Hello Alice +Alice -> Bob: How are you? +Bob -> Alice: I'm fine, thank you!`, + theme: 'default', + enableScopedTheming: false, + mode: 'Static', + }, +} + +export const ComplexInteraction: Story = { + args: { + code: `@Actor Client #FFAAAA +@Database Database #FFFFAA +@Boundary WebServer #AAFFAA + +Client->WebServer.authenticate() { + WebServer->Database.checkCredentials() { + alt valid credentials { + Database->WebServer: User data + WebServer->Client: Login successful + } else invalid credentials { + Database->WebServer: Error + WebServer->Client: Login failed + opt retry { + Client->WebServer: Retry login + } + } + } +}`, + theme: 'default', + enableScopedTheming: false, + mode: 'Static', + }, +} + +export const AsyncMessaging: Story = { + args: { + code: `Frontend -> Backend: Sync request +Frontend ->> MessageQueue: Async message +MessageQueue ->> Worker: Process job +Worker -->> MessageQueue: Job completed +MessageQueue -->> Frontend: Notification +Backend --> Frontend: Response`, + theme: 'blue', + enableScopedTheming: false, + mode: 'Static', + }, +} + +export const NestedFragments: Story = { + args: { + code: `User -> System: Login request + +alt authentication { + System -> Database: Validate credentials + + alt valid { + Database -> System: Success + + opt remember me { + System -> CacheService: Store session + CacheService -> System: Cached + } + + System -> User: Login successful + } else invalid { + Database -> System: Failure + System -> User: Login failed + + critical rate limiting { + System -> RateLimiter: Check attempts + + alt too many attempts { + RateLimiter -> System: Block user + System -> User: Account locked + } else within limits { + RateLimiter -> System: Allow retry + } + } + } +}`, + theme: 'default', + enableScopedTheming: false, + mode: 'Static', + }, +} \ No newline at end of file diff --git a/src/components/DiagramFrame/DiagramFrame.stories.tsx b/src/components/DiagramFrame/DiagramFrame.stories.tsx new file mode 100644 index 00000000..3542f39e --- /dev/null +++ b/src/components/DiagramFrame/DiagramFrame.stories.tsx @@ -0,0 +1,88 @@ +import type { Meta, StoryObj } from '@storybook/react' +import { DiagramFrame } from './DiagramFrame' +import { Provider } from 'jotai' +import store, { codeAtom, modeAtom, RenderMode } from '../../store/Store' + +const meta: Meta = { + title: 'Components/DiagramFrame', + component: DiagramFrame, + parameters: { + layout: 'fullscreen', + }, + decorators: [ + (Story) => ( + +
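+      // Wrap each story in the shared Jotai store so DiagramFrame reads the atoms set below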
+      <Provider store={store}>
+        <Story />
+      </Provider>
+ ), + ], +} + +export default meta +type Story = StoryObj + +export const Default: Story = { + render: () => { + store.set(codeAtom, `Alice -> Bob: Hello Bob +Bob -> Alice: Hello Alice`) + store.set(modeAtom, RenderMode.Static) + return + }, +} + +export const ComplexSequence: Story = { + render: () => { + store.set(codeAtom, `@Actor Client #FFAAAA +@Database Database #FFFFAA +@Boundary WebServer #AAFFAA + +Client->WebServer.doPost() { + WebServer->Database.load() { + alt success { + Database->WebServer: Data + } else { + Database->WebServer: Error + } + } + + WebServer->Client: Response +}`) + store.set(modeAtom, RenderMode.Static) + return + }, +} + +export const WithFragments: Story = { + render: () => { + store.set(codeAtom, `Alice -> Bob: Authentication Request + +alt successful case { + Bob -> Alice: Authentication Accepted +} else failure case { + Bob -> Alice: Authentication Failure + opt { + loop 1000 times { + Alice -> Bob: DNS Attack + } + } +} + +Alice -> Bob: Another authentication Request +Alice <- Bob: another authentication Response`) + store.set(modeAtom, RenderMode.Static) + return + }, +} + +export const AsyncMessages: Story = { + render: () => { + store.set(codeAtom, `A -> B: Sync message +A ->> B: Async message +B -->> A: Async response +B --> A: Sync response`) + store.set(modeAtom, RenderMode.Static) + return + }, +} \ No newline at end of file diff --git a/src/components/DiagramFrame/SeqDiagram/LifeLineLayer/EditableLabel.css b/src/components/DiagramFrame/SeqDiagram/LifeLineLayer/EditableLabel.css new file mode 100644 index 00000000..df626373 --- /dev/null +++ b/src/components/DiagramFrame/SeqDiagram/LifeLineLayer/EditableLabel.css @@ -0,0 +1,55 @@ +/* Completely remove browser focus ring for editable labels */ +[contenteditable="true"] { + outline: none !important; + -webkit-appearance: none !important; + -moz-appearance: none !important; + box-shadow: none !important; +} + +[contenteditable="true"]:focus { + outline: none !important; + -webkit-focus-ring-color: transparent !important; + box-shadow: none !important; + border-color: transparent !important; +} + +/* Firefox specific */ +[contenteditable="true"]:focus { + -moz-outline: none !important; +} + +/* WebKit specific */ +[contenteditable="true"]:focus { + -webkit-focus-ring-color: transparent !important; + -webkit-tap-highlight-color: transparent !important; +} + +/* Remove any webkit focus ring */ +[contenteditable="true"]::-webkit-focus-ring-color { + outline-color: transparent !important; +} + +/* Base styling for all editable labels to prevent layout shifts */ +.editable-label-base { + border: 2px solid transparent !important; + padding: 2px 4px !important; + margin: -2px -4px !important; + border-radius: 4px !important; + transition: all 0.15s ease-in-out !important; +} + +/* Editing state - just change colors, keep dimensions */ +.editable-label-editing { + outline: none !important; + border-color: #93c5fd !important; + border-style: dashed !important; + background-color: #eff6ff !important; +} + +/* Hover state - just change colors, keep dimensions */ +.editable-label-hover { + background-color: #f3f4f6 !important; + border-color: #d1d5db !important; + border-style: dashed !important; + cursor: text !important; +} \ No newline at end of file diff --git a/src/components/DiagramFrame/SeqDiagram/LifeLineLayer/ParticipantLabel.stories.tsx b/src/components/DiagramFrame/SeqDiagram/LifeLineLayer/ParticipantLabel.stories.tsx new file mode 100644 index 00000000..f1a8ec1f --- /dev/null +++ 
b/src/components/DiagramFrame/SeqDiagram/LifeLineLayer/ParticipantLabel.stories.tsx @@ -0,0 +1,149 @@ +import type { Meta, StoryObj } from '@storybook/react' +import { ParticipantLabel } from './ParticipantLabel' +import { Provider } from 'jotai' +import store, { modeAtom, RenderMode } from '../../../../store/Store' + +const meta: Meta = { + title: 'Components/ParticipantLabel', + component: ParticipantLabel, + parameters: { + layout: 'centered', + }, + decorators: [ + (Story) => ( + +
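+      // Same pattern as the DiagramFrame stories: share the Jotai store with every render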
+      <Provider store={store}>
+        <Story />
+      </Provider>
+ ), + ], + parameters: { + layout: 'centered', + docs: { + description: { + component: 'Enhanced ParticipantLabel with single-click editing, hover hints, and improved focus styling.', + }, + }, + }, + argTypes: { + labelText: { + control: 'text', + description: 'The participant label text' + }, + assignee: { + control: 'text', + description: 'Optional assignee (variable name)' + }, + editable: { + control: 'boolean', + description: 'Enable editing mode (Dynamic vs Static)', + defaultValue: true, + }, + }, +} + +export default meta +type Story = StoryObj + +export const Default: Story = { + render: (args) => { + const { editable, ...participantProps } = args as any + store.set(modeAtom, editable ? RenderMode.Dynamic : RenderMode.Static) + return + }, + args: { + labelText: 'Alice', + labelPositions: [[0, 5]], + editable: true, + }, +} + +export const WithAssignee: Story = { + render: (args) => { + const { editable, ...participantProps } = args as any + store.set(modeAtom, editable ? RenderMode.Dynamic : RenderMode.Static) + return + }, + args: { + labelText: 'WebServer', + assignee: 'server', + labelPositions: [[0, 9]], + assigneePositions: [[0, 6]], + editable: true, + }, +} + +export const LongText: Story = { + render: (args) => { + const { editable, ...participantProps } = args as any + store.set(modeAtom, editable ? RenderMode.Dynamic : RenderMode.Static) + return + }, + args: { + labelText: 'DatabaseConnectionPool', + labelPositions: [[0, 22]], + editable: true, + }, +} + +export const SpecialCharacters: Story = { + render: (args) => { + const { editable, ...participantProps } = args as any + store.set(modeAtom, editable ? RenderMode.Dynamic : RenderMode.Static) + return + }, + args: { + labelText: '"User Account"', + labelPositions: [[0, 14]], + editable: true, + }, +} + +export const ImprovedEditingDemo: Story = { + render: (args) => { + const { editable, ...participantProps } = args as any + store.set(modeAtom, editable ? RenderMode.Dynamic : RenderMode.Static) + return ( +
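+      // Hint panel summarizing the improved editing UX, rendered above the label under test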
+      <div>
+        <div>
+          <strong>✨ Enhanced Editing Experience:</strong>
+          <div>Single-click to edit (no more double-click!)</div>
+          <div>Hover to see visual hints</div>
+          <div>Clean focus styling without cursor-hiding ring</div>
+          <div>Press Enter/Escape to finish editing</div>
+        </div>
+        <ParticipantLabel {...participantProps} />
+      </div>
+ ) + }, + args: { + labelText: 'UserService', + assignee: 'service', + labelPositions: [[0, 11]], + assigneePositions: [[0, 7]], + editable: true, + }, +} + +export const EditableExample: Story = { + render: (args) => { + const { editable, ...participantProps } = args as any + store.set(modeAtom, editable ? RenderMode.Dynamic : RenderMode.Static) + return ( +
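+      // Brief usage hint rendered above the editable label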
+      <div>
+        <div>Single-click labels to edit when editable mode is enabled</div>
+        <ParticipantLabel {...participantProps} />
+      </div>
+ ) + }, + args: { + labelText: 'EditableParticipant', + assignee: 'variable', + labelPositions: [[0, 18]], + assigneePositions: [[0, 8]], + editable: true, + }, +} \ No newline at end of file diff --git a/src/components/DiagramFrame/SeqDiagram/LifeLineLayer/ParticipantLabel.tsx b/src/components/DiagramFrame/SeqDiagram/LifeLineLayer/ParticipantLabel.tsx index 4a5a856d..04962203 100644 --- a/src/components/DiagramFrame/SeqDiagram/LifeLineLayer/ParticipantLabel.tsx +++ b/src/components/DiagramFrame/SeqDiagram/LifeLineLayer/ParticipantLabel.tsx @@ -6,9 +6,10 @@ import { } from "@/store/Store"; import { useAtom, useAtomValue } from "jotai"; import { Position } from "@/parser/Participants"; -import { useEditLabel, specialCharRegex } from "@/functions/useEditLabel"; +import { useEditLabelImproved, specialCharRegex } from "@/functions/useEditLabel"; import { SyntheticEvent } from "react"; import { cn } from "@/utils"; +import "./EditableLabel.css"; const UneditableText = ["Missing Constructor", "ZenUML"]; @@ -61,11 +62,13 @@ export const ParticipantLabel = (props: { }; }; - const participantLabelHandler = useEditLabel( + const participantLabelHandler = useEditLabelImproved( replaceLabelTextWithaPositions(props.labelPositions ?? []), + { singleClick: true, showHoverHint: true } ); - const assigneeLabelHandler = useEditLabel( + const assigneeLabelHandler = useEditLabelImproved( replaceLabelTextWithaPositions(props.assigneePositions ?? []), + { singleClick: true, showHoverHint: true } ); return ( @@ -73,17 +76,18 @@ export const ParticipantLabel = (props: { {props.assignee && ( <>