diff --git a/packages/types/src/mode.ts b/packages/types/src/mode.ts index 88dcbb9574..f7a09247b4 100644 --- a/packages/types/src/mode.ts +++ b/packages/types/src/mode.ts @@ -192,4 +192,28 @@ export const DEFAULT_MODES: readonly ModeConfig[] = [ customInstructions: "Your role is to coordinate complex workflows by delegating tasks to specialized modes. As an orchestrator, you should:\n\n1. When given a complex task, break it down into logical subtasks that can be delegated to appropriate specialized modes.\n\n2. For each subtask, use the `new_task` tool to delegate. Choose the most appropriate mode for the subtask's specific goal and provide comprehensive instructions in the `message` parameter. These instructions must include:\n * All necessary context from the parent task or previous subtasks required to complete the work.\n * A clearly defined scope, specifying exactly what the subtask should accomplish.\n * An explicit statement that the subtask should *only* perform the work outlined in these instructions and not deviate.\n * An instruction for the subtask to signal completion by using the `attempt_completion` tool, providing a concise yet thorough summary of the outcome in the `result` parameter, keeping in mind that this summary will be the source of truth used to keep track of what was completed on this project.\n * A statement that these specific instructions supersede any conflicting general instructions the subtask's mode might have.\n\n3. Track and manage the progress of all subtasks. When a subtask is completed, analyze its results and determine the next steps.\n\n4. Help the user understand how the different subtasks fit together in the overall workflow. Provide clear reasoning about why you're delegating specific tasks to specific modes.\n\n5. When all subtasks are completed, synthesize the results and provide a comprehensive overview of what was accomplished.\n\n6. Ask clarifying questions when necessary to better understand how to break down complex tasks effectively.\n\n7. Suggest improvements to the workflow based on the results of completed subtasks.\n\nUse subtasks to maintain clarity. If a request significantly shifts focus or requires a different expertise (mode), consider creating a subtask rather than overloading the current one.", }, + { + slug: "implementor", + name: "🔨 Implementor", + roleDefinition: + "You are Roo, an implementation specialist focused on executing code changes in small, isolated phases. You work methodically through a predefined plan, implementing each step with clear reasoning and acceptance criteria validation.", + whenToUse: + "Use this mode as part of the mutual auditing workflow. The implementor focuses on writing code in small, testable increments based on a phase plan, producing clear explanations of decisions and tracking acceptance criteria.", + description: "Implement code in phased approach with audit support", + groups: ["read", "edit", "command", "mcp"], + customInstructions: + "## Implementation Guidelines\n\n1. **Phase-Based Execution**: Work on one phase at a time from the current plan. Each phase should be small and focused.\n\n2. **Minimal Context**: Start each phase with only the essential context - the phase plan and directly relevant files.\n\n3. **Clear Documentation**: For each implementation:\n - Provide the code changes\n - Write a concise explanation of your design decisions\n - Create a checklist of which acceptance criteria are now satisfied\n\n4. 
**Scope Management**: \n - Stay focused on the current phase only\n - Avoid loading unrelated parts of the system unless absolutely necessary\n - Do not attempt to optimize or refactor beyond the phase requirements\n\n5. **Structured Output**: Always provide:\n - **Changes Made**: Clear description of what was implemented\n - **Decision Rationale**: Brief explanation of key choices\n - **Acceptance Criteria Status**: Checklist showing what's complete\n - **Dependencies**: Note any assumptions or external requirements\n\n6. **Error Handling**: If you encounter blockers:\n - Document the specific issue\n - Suggest potential solutions\n - Mark affected acceptance criteria as blocked\n\n7. **Testing Focus**: Include basic validation that your implementation works as expected.\n\n**IMPORTANT**: Complete each phase fully before moving to the next. Your work will be audited, so clarity and completeness are essential.", + }, + { + slug: "auditor", + name: "🔍 Auditor", + roleDefinition: + "You are Roo, a meticulous code auditor specializing in reviewing implementations for correctness, consistency, and completeness. You provide structured feedback to improve code quality through systematic analysis.", + whenToUse: + "Use this mode as part of the mutual auditing workflow. The auditor reviews implementor work, identifies issues, validates acceptance criteria, and provides structured correction lists without rewriting code.", + description: "Audit code and provide structured feedback", + groups: ["read", "browser", "mcp"], + customInstructions: + "## Auditing Guidelines\n\n1. **Review Scope**: Focus exclusively on:\n - The current phase plan and acceptance criteria\n - The implementor's code changes for this phase\n - The implementor's reasoning and decisions\n\n2. **Systematic Analysis**: Evaluate:\n - **Code Correctness**: Logic errors, edge cases, potential bugs\n - **Linter Compliance**: Review any linting feedback and determine severity\n - **Logical Consistency**: Ensure changes align with stated goals\n - **Completeness**: Verify all requirements are addressed\n - **Architecture**: Check alignment with system design patterns\n - **Acceptance Criteria**: Validate each criterion is actually met\n - **Assumptions**: Identify and challenge unsafe assumptions\n\n3. **Structured Output Format**:\n ```\n ## Audit Report - Phase [X]\n \n ### ✅ Verified (Passes)\n - [List what works correctly]\n \n ### ❌ Issues Found\n - [Critical issues that must be fixed]\n - [Logic errors or bugs]\n - [Missing requirements]\n \n ### 📝 Required Corrections\n 1. [Specific, actionable correction]\n 2. [Another targeted fix]\n \n ### ❓ Additional Considerations\n - [Questions or suggestions for improvement]\n ```\n\n4. **Feedback Principles**:\n - Be specific and actionable\n - Focus on problems, not solutions (unless critical)\n - Prioritize issues by severity\n - Avoid rewriting code - provide guidance instead\n - Include line numbers or file references when relevant\n\n5. **Decision Criteria**:\n - **PASS**: All acceptance criteria met, no critical issues\n - **REVISE**: Issues found that need correction\n - **BLOCKED**: External dependencies or clarifications needed\n\n6. **Iteration Support**: If corrections are needed:\n - Provide clear guidance on what to fix\n - Reference specific acceptance criteria\n - Suggest validation approaches\n\n**IMPORTANT**: Your role is to stress-test implementations and ensure quality. 
Be thorough but constructive, focusing on genuine issues rather than style preferences.", + }, ] as const diff --git a/src/core/auditing-workflow/AuditingWorkflowManager.ts b/src/core/auditing-workflow/AuditingWorkflowManager.ts new file mode 100644 index 0000000000..b3a1c67d56 --- /dev/null +++ b/src/core/auditing-workflow/AuditingWorkflowManager.ts @@ -0,0 +1,521 @@ +import * as vscode from "vscode" +import * as fs from "fs/promises" +import * as path from "path" +import { Task } from "../task/Task" +import { ClineProvider } from "../webview/ClineProvider" +import { TodoItem } from "@roo-code/types" + +/** + * Represents a single phase in the auditing workflow + */ +export interface WorkflowPhase { + id: string + name: string + scope: string[] + objectives: string[] + acceptanceCriteria: string[] + status: "pending" | "implementing" | "auditing" | "corrections" | "completed" + implementationResult?: ImplementationResult + auditReport?: AuditReport + corrections?: CorrectionResult[] +} + +/** + * Result from an implementation phase + */ +export interface ImplementationResult { + changes: string + reasoning: string + acceptanceCriteriaStatus: Record<string, boolean> + notes?: string +} + +/** + * Audit report structure + */ +export interface AuditReport { + verified: string[] + issues: string[] + requiredCorrections: string[] + additionalQuestions?: string[] + decision: "pass" | "revise" | "blocked" +} + +/** + * Result from applying corrections + */ +export interface CorrectionResult { + correctionApplied: string + validationStatus: Record<string, boolean> + notes?: string +} + +/** + * Configuration for the auditing workflow + */ +export interface AuditingWorkflowConfig { + maxCorrectionIterations: number + preserveContextBetweenPhases: boolean + autoSwitchModes: boolean + generatePlanFile: boolean +} + +/** + * Manages the mutual auditing workflow between implementor and auditor models + */ +export class AuditingWorkflowManager { + private phases: WorkflowPhase[] = [] + private currentPhaseIndex: number = 0 + private provider: ClineProvider + private workspacePath: string + private config: AuditingWorkflowConfig + private planFilePath?: string + private implementorTask?: Task + private auditorTask?: Task + + constructor(provider: ClineProvider, workspacePath: string, config: Partial<AuditingWorkflowConfig> = {}) { + this.provider = provider + this.workspacePath = workspacePath + this.config = { + maxCorrectionIterations: config.maxCorrectionIterations ?? 2, + preserveContextBetweenPhases: config.preserveContextBetweenPhases ?? false, + autoSwitchModes: config.autoSwitchModes ?? true, + generatePlanFile: config.generatePlanFile ?? true, + } + } + + /** + * Initialize the workflow with a task description + */ + async initialize(taskDescription: string): Promise<void> { + // Generate the phase plan + await this.generatePhasePlan(taskDescription) + + // Save plan to file if configured + if (this.config.generatePlanFile) { + await this.savePlanToFile() + } + } + + /** + * Generate a phase plan for the given task + */ + private async generatePhasePlan(taskDescription: string): Promise<void> { + // Switch to architect mode to create the plan + await this.provider.setMode("architect") + + // Create a planning task + const planningMessage = `Create a detailed phase plan for implementing the following task using the mutual auditing workflow: + +${taskDescription} + +Break down the implementation into small, focused phases. 
Each phase should: +- Have a clear scope (which files/modules to touch) +- Define specific objectives +- Include measurable acceptance criteria +- Be completable in a single focused session + +Format the plan as a structured list of phases.` + + // In a real implementation, this would use the Task system to get the plan + // For now, we'll create a sample plan structure + this.phases = [ + { + id: "phase-1", + name: "Setup and Configuration", + scope: ["Configuration files", "Initial structure"], + objectives: ["Create necessary configuration", "Set up basic structure"], + acceptanceCriteria: ["Configuration is valid", "Structure follows patterns"], + status: "pending", + }, + // Additional phases would be dynamically generated + ] + } + + /** + * Save the current plan to a markdown file + */ + private async savePlanToFile(): Promise<void> { + const planContent = this.generatePlanMarkdown() + this.planFilePath = path.join(this.workspacePath, "auditing-workflow-plan.md") + await fs.writeFile(this.planFilePath, planContent, "utf-8") + } + + /** + * Generate markdown representation of the plan + */ + private generatePlanMarkdown(): string { + let content = "# Auditing Workflow Plan\n\n" + + for (const phase of this.phases) { + content += `## ${phase.name}\n\n` + content += `**Status:** ${phase.status}\n\n` + content += `### Scope\n` + phase.scope.forEach((item) => { + content += `- ${item}\n` + }) + content += `\n### Objectives\n` + phase.objectives.forEach((item) => { + content += `- ${item}\n` + }) + content += `\n### Acceptance Criteria\n` + phase.acceptanceCriteria.forEach((item) => { + content += `- [ ] ${item}\n` + }) + + if (phase.implementationResult) { + content += `\n### Implementation Result\n` + content += `**Changes:** ${phase.implementationResult.changes}\n` + content += `**Reasoning:** ${phase.implementationResult.reasoning}\n` + } + + if (phase.auditReport) { + content += `\n### Audit Report\n` + content += this.formatAuditReport(phase.auditReport) + } + + content += "\n---\n\n" + } + + return content + } + + /** + * Format an audit report as markdown + */ + private formatAuditReport(report: AuditReport): string { + let content = "" + + if (report.verified.length > 0) { + content += "#### ✅ Verified\n" + report.verified.forEach((item) => { + content += `- ${item}\n` + }) + content += "\n" + } + + if (report.issues.length > 0) { + content += "#### ❌ Issues Found\n" + report.issues.forEach((item) => { + content += `- ${item}\n` + }) + content += "\n" + } + + if (report.requiredCorrections.length > 0) { + content += "#### 📝 Required Corrections\n" + report.requiredCorrections.forEach((item, index) => { + content += `${index + 1}. 
${item}\n` + }) + content += "\n" + } + + if (report.additionalQuestions && report.additionalQuestions.length > 0) { + content += "#### ❓ Additional Questions\n" + report.additionalQuestions.forEach((item) => { + content += `- ${item}\n` + }) + content += "\n" + } + + content += `**Decision:** ${report.decision}\n` + + return content + } + + /** + * Execute the next phase in the workflow + */ + async executeNextPhase(): Promise<boolean> { + if (this.currentPhaseIndex >= this.phases.length) { + return false // All phases completed + } + + const currentPhase = this.phases[this.currentPhaseIndex] + + switch (currentPhase.status) { + case "pending": + await this.startImplementation(currentPhase) + break + case "implementing": + await this.startAudit(currentPhase) + break + case "auditing": + await this.processAuditResult(currentPhase) + break + case "corrections": + await this.applyCorrections(currentPhase) + break + case "completed": + this.currentPhaseIndex++ + if (this.currentPhaseIndex < this.phases.length) { + await this.resetContext() + } + break + } + + // Update plan file + if (this.config.generatePlanFile) { + await this.savePlanToFile() + } + + return this.currentPhaseIndex < this.phases.length + } + + /** + * Start implementation for a phase + */ + private async startImplementation(phase: WorkflowPhase): Promise<void> { + if (this.config.autoSwitchModes) { + await this.provider.setMode("implementor") + } + + const implementationMessage = this.buildImplementationMessage(phase) + + // Create implementation task with phase-specific todos + const todos: TodoItem[] = phase.acceptanceCriteria.map((criteria, index) => ({ + id: `phase-${phase.id}-criteria-${index}`, + content: criteria, + status: "pending" as const, + })) + + this.implementorTask = await this.provider.createTask(implementationMessage, undefined, undefined, { + initialTodos: todos, + }) + + phase.status = "implementing" + } + + /** + * Build the implementation message for a phase + */ + private buildImplementationMessage(phase: WorkflowPhase): string { + return `## Implementation Phase: ${phase.name} + +### Scope +${phase.scope.map((s) => `- ${s}`).join("\n")} + +### Objectives +${phase.objectives.map((o) => `- ${o}`).join("\n")} + +### Acceptance Criteria +${phase.acceptanceCriteria.map((ac) => `- [ ] ${ac}`).join("\n")} + +Please implement this phase following the implementor mode guidelines: +1. Focus only on the scope defined above +2. Provide clear reasoning for your implementation decisions +3. Track which acceptance criteria are satisfied +4. Document any assumptions or blockers + +When complete, provide a summary of: +- What was implemented +- Key design decisions and rationale +- Status of each acceptance criterion +- Any issues or dependencies discovered` + } + + /** + * Start audit for a completed implementation + */ + private async startAudit(phase: WorkflowPhase): Promise<void> { + if (this.config.autoSwitchModes) { + await this.provider.setMode("auditor") + } + + const auditMessage = this.buildAuditMessage(phase) + + this.auditorTask = await this.provider.createTask(auditMessage) + + phase.status = "auditing" + } + + /** + * Build the audit message for a phase + */ + private buildAuditMessage(phase: WorkflowPhase): string { + return `## Audit Phase: ${phase.name} + +### Phase Plan +**Scope:** ${phase.scope.join(", ")} +**Objectives:** ${phase.objectives.join(", ")} + +### Acceptance Criteria +${phase.acceptanceCriteria.map((ac) => `- ${ac}`).join("\n")} + +### Implementation Result +${ + phase.implementationResult + ? 
` +**Changes:** ${phase.implementationResult.changes} +**Reasoning:** ${phase.implementationResult.reasoning} +**Criteria Status:** ${JSON.stringify(phase.implementationResult.acceptanceCriteriaStatus, null, 2)} +` + : "No implementation result available" +} + +Please audit this implementation following the auditor mode guidelines: +1. Review the code changes for correctness +2. Validate that acceptance criteria are actually met +3. Identify any issues, bugs, or missing requirements +4. Provide a structured audit report with your decision (pass/revise/blocked) + +Format your response as a structured audit report.` + } + + /** + * Process the audit result and determine next steps + */ + private async processAuditResult(phase: WorkflowPhase): Promise<void> { + if (!phase.auditReport) { + // In real implementation, this would parse the auditor's response + phase.auditReport = { + verified: [], + issues: [], + requiredCorrections: [], + decision: "pass", + } + } + + switch (phase.auditReport.decision) { + case "pass": + phase.status = "completed" + break + case "revise": + phase.status = "corrections" + break + case "blocked": + // Handle blocked state - might need user intervention + await this.handleBlockedPhase(phase) + break + } + } + + /** + * Apply corrections based on audit feedback + */ + private async applyCorrections(phase: WorkflowPhase): Promise<void> { + if (!phase.auditReport || phase.auditReport.requiredCorrections.length === 0) { + phase.status = "completed" + return + } + + if (this.config.autoSwitchModes) { + await this.provider.setMode("implementor") + } + + const correctionMessage = this.buildCorrectionMessage(phase) + + // Create correction task + const correctionTask = await this.provider.createTask(correctionMessage) + + // After corrections, go back to audit + phase.status = "auditing" + } + + /** + * Build the correction message for a phase + */ + private buildCorrectionMessage(phase: WorkflowPhase): string { + return `## Apply Corrections: ${phase.name} + +### Audit Feedback +${phase.auditReport ? this.formatAuditReport(phase.auditReport) : "No audit report available"} + +### Required Corrections +${phase.auditReport?.requiredCorrections.map((c, i) => `${i + 1}. ${c}`).join("\n") || "None"} + +Please apply the required corrections: +1. Address each correction item systematically +2. Validate that the corrections resolve the identified issues +3. Re-check the affected acceptance criteria +4. Document what was changed and why + +Provide a summary of: +- Which corrections were applied +- How each correction addresses the audit feedback +- Updated status of acceptance criteria` + } + + /** + * Handle a blocked phase + */ + private async handleBlockedPhase(phase: WorkflowPhase): Promise<void> { + const message = `Phase "${phase.name}" is blocked. + +Audit Report: +${phase.auditReport ? this.formatAuditReport(phase.auditReport) : "No audit report"} + +This phase requires external clarification or dependencies to be resolved before continuing. 
+Please review the audit report and provide guidance on how to proceed.` + + await vscode.window.showWarningMessage(message, "Continue", "Skip Phase") + // Handle user response appropriately + } + + /** + * Reset context between phases + */ + private async resetContext(): Promise<void> { + if (!this.config.preserveContextBetweenPhases) { + // Clear task instances to release memory + this.implementorTask = undefined + this.auditorTask = undefined + + // Could implement more sophisticated context management here + } + } + + /** + * Get the current phase + */ + getCurrentPhase(): WorkflowPhase | undefined { + return this.phases[this.currentPhaseIndex] + } + + /** + * Get workflow progress + */ + getProgress(): { + totalPhases: number + completedPhases: number + currentPhase: string + percentComplete: number + } { + const completedPhases = this.phases.filter((p) => p.status === "completed").length + const currentPhase = this.getCurrentPhase() + + return { + totalPhases: this.phases.length, + completedPhases, + currentPhase: currentPhase?.name || "None", + percentComplete: this.phases.length > 0 ? Math.round((completedPhases / this.phases.length) * 100) : 0, + } + } + + /** + * Export the workflow state for persistence + */ + exportState(): string { + return JSON.stringify( + { + phases: this.phases, + currentPhaseIndex: this.currentPhaseIndex, + config: this.config, + planFilePath: this.planFilePath, + }, + null, + 2, + ) + } + + /** + * Import workflow state from persistence + */ + importState(state: string): void { + const parsed = JSON.parse(state) + this.phases = parsed.phases || [] + this.currentPhaseIndex = parsed.currentPhaseIndex || 0 + this.config = parsed.config || this.config + this.planFilePath = parsed.planFilePath + } +} diff --git a/src/core/auditing-workflow/__tests__/AuditingWorkflowManager.spec.ts b/src/core/auditing-workflow/__tests__/AuditingWorkflowManager.spec.ts new file mode 100644 index 0000000000..78234bf165 --- /dev/null +++ b/src/core/auditing-workflow/__tests__/AuditingWorkflowManager.spec.ts @@ -0,0 +1,276 @@ +import { describe, it, expect, beforeEach, vi } from "vitest" +import * as fs from "fs/promises" +import { AuditingWorkflowManager, WorkflowPhase, AuditReport } from "../AuditingWorkflowManager" +import { ClineProvider } from "../../webview/ClineProvider" + +// Mock fs/promises module +vi.mock("fs/promises", () => ({ + writeFile: vi.fn().mockResolvedValue(undefined), + readFile: vi.fn().mockResolvedValue("{}"), + mkdir: vi.fn().mockResolvedValue(undefined), +})) + +describe("AuditingWorkflowManager", () => { + let mockProvider: any + let workflowManager: AuditingWorkflowManager + const testWorkspacePath = "/test/workspace" + + beforeEach(() => { + // Mock the ClineProvider + mockProvider = { + setMode: vi.fn().mockResolvedValue(undefined), + createTask: vi.fn().mockResolvedValue({}), + getMcpHub: vi.fn().mockReturnValue(undefined), + log: vi.fn(), + } + + workflowManager = new AuditingWorkflowManager( + mockProvider as ClineProvider, + testWorkspacePath, + { generatePlanFile: false }, // Disable file generation for most tests + ) + }) + + describe("initialization", () => { + it("should initialize with default configuration", () => { + // Create a manager without overriding defaults + const defaultManager = new AuditingWorkflowManager(mockProvider as ClineProvider, testWorkspacePath) + const config = (defaultManager as any).config + expect(config.maxCorrectionIterations).toBe(2) + expect(config.preserveContextBetweenPhases).toBe(false) + 
expect(config.autoSwitchModes).toBe(true) + expect(config.generatePlanFile).toBe(true) + }) + + it("should accept custom configuration", () => { + const customManager = new AuditingWorkflowManager(mockProvider, testWorkspacePath, { + maxCorrectionIterations: 3, + preserveContextBetweenPhases: true, + autoSwitchModes: false, + generatePlanFile: false, + }) + + const config = (customManager as any).config + expect(config.maxCorrectionIterations).toBe(3) + expect(config.preserveContextBetweenPhases).toBe(true) + expect(config.autoSwitchModes).toBe(false) + expect(config.generatePlanFile).toBe(false) + }) + }) + + describe("workflow initialization", () => { + it("should generate a phase plan", async () => { + const taskDescription = "Implement user authentication with JWT" + + // Create manager with file generation disabled + const testManager = new AuditingWorkflowManager(mockProvider as ClineProvider, testWorkspacePath, { + generatePlanFile: false, + }) + + await testManager.initialize(taskDescription) + + expect(mockProvider.setMode).toHaveBeenCalledWith("architect") + const phases = (testManager as any).phases + expect(phases).toBeDefined() + expect(phases.length).toBeGreaterThan(0) + expect(phases[0].status).toBe("pending") + }) + + it("should save plan to file when configured", async () => { + // Create manager with file generation enabled + const fileManager = new AuditingWorkflowManager(mockProvider as ClineProvider, testWorkspacePath, { + generatePlanFile: true, + }) + + await fileManager.initialize("Test task") + + expect(fs.writeFile).toHaveBeenCalled() + }) + }) + + describe("phase execution", () => { + beforeEach(async () => { + // Create manager with file generation disabled for tests + workflowManager = new AuditingWorkflowManager(mockProvider as ClineProvider, testWorkspacePath, { + generatePlanFile: false, + }) + await workflowManager.initialize("Test task") + }) + + it("should start implementation for pending phase", async () => { + const hasMore = await workflowManager.executeNextPhase() + + expect(hasMore).toBe(true) + expect(mockProvider.setMode).toHaveBeenCalledWith("implementor") + expect(mockProvider.createTask).toHaveBeenCalled() + + const currentPhase = workflowManager.getCurrentPhase() + expect(currentPhase?.status).toBe("implementing") + }) + + it("should handle audit phase after implementation", async () => { + // Set up phase with implementation result + const phases = (workflowManager as any).phases as WorkflowPhase[] + phases[0].status = "implementing" + phases[0].implementationResult = { + changes: "Added authentication module", + reasoning: "Used JWT for stateless auth", + acceptanceCriteriaStatus: { + "Auth module created": true, + "JWT integration": true, + }, + } + + const hasMore = await workflowManager.executeNextPhase() + + expect(hasMore).toBe(true) + expect(mockProvider.setMode).toHaveBeenCalledWith("auditor") + expect(mockProvider.createTask).toHaveBeenCalled() + + const currentPhase = workflowManager.getCurrentPhase() + expect(currentPhase?.status).toBe("auditing") + }) + + it("should handle corrections when audit requires revision", async () => { + // Set up phase with audit report requiring corrections + const phases = (workflowManager as any).phases as WorkflowPhase[] + phases[0].status = "auditing" + phases[0].auditReport = { + verified: ["Basic structure is correct"], + issues: ["Missing error handling"], + requiredCorrections: ["Add try-catch blocks"], + decision: "revise", + } + + const hasMore = await workflowManager.executeNextPhase() + + 
expect(hasMore).toBe(true) + + const currentPhase = workflowManager.getCurrentPhase() + expect(currentPhase?.status).toBe("corrections") + }) + + it("should complete phase when audit passes", async () => { + // Set up phase with passing audit + const phases = (workflowManager as any).phases as WorkflowPhase[] + phases[0].status = "auditing" + phases[0].auditReport = { + verified: ["All criteria met"], + issues: [], + requiredCorrections: [], + decision: "pass", + } + + await workflowManager.executeNextPhase() + + // Should mark phase as completed + expect(phases[0].status).toBe("completed") + + // Check if there are more phases to process + const progress = workflowManager.getProgress() + expect(progress.completedPhases).toBe(1) + expect(progress.totalPhases).toBe(1) + }) + }) + + describe("progress tracking", () => { + it("should calculate progress correctly", () => { + // Create manager with file generation disabled for tests + const progressManager: any = new AuditingWorkflowManager(mockProvider, testWorkspacePath, { + generatePlanFile: false, + }) + + // Directly set phases and currentPhaseIndex for testing + progressManager.phases = [ + { + id: "phase-1", + name: "Setup and Configuration", + scope: ["Configuration files", "Initial structure"], + objectives: ["Create necessary configuration", "Set up basic structure"], + acceptanceCriteria: ["Configuration is valid", "Structure follows patterns"], + status: "completed", + }, + { + id: "phase-2", + name: "Phase 2", + scope: ["Module B"], + objectives: ["Objective B"], + acceptanceCriteria: ["Criteria B"], + status: "completed", + }, + { + id: "phase-3", + name: "Phase 3", + scope: ["Module C"], + objectives: ["Objective C"], + acceptanceCriteria: ["Criteria C"], + status: "pending", + }, + ] + + // Set current phase index to point to Phase 2 (index 1) + progressManager.currentPhaseIndex = 1 + + const progress = progressManager.getProgress() + + expect(progress.totalPhases).toBe(3) + expect(progress.completedPhases).toBe(2) + expect(progress.percentComplete).toBe(67) + expect(progress.currentPhase).toBe("Phase 2") + }) + }) + + describe("state persistence", () => { + it("should export and import state correctly", async () => { + // Create manager with file generation disabled + const persistManager = new AuditingWorkflowManager(mockProvider as ClineProvider, testWorkspacePath, { + generatePlanFile: false, + }) + await persistManager.initialize("Test task") + + // Modify state + const phases = (persistManager as any).phases as WorkflowPhase[] + phases[0].status = "completed" + phases[0].implementationResult = { + changes: "Test changes", + reasoning: "Test reasoning", + acceptanceCriteriaStatus: { Test: true }, + } + + // Export state + const exportedState = persistManager.exportState() + + // Create new manager and import state + const newManager = new AuditingWorkflowManager(mockProvider, testWorkspacePath) + newManager.importState(exportedState) + + // Verify state was restored + const newPhases = (newManager as any).phases + expect(newPhases[0].status).toBe("completed") + expect(newPhases[0].implementationResult?.changes).toBe("Test changes") + }) + }) + + describe("audit report formatting", () => { + it("should format audit report as markdown", () => { + const report: AuditReport = { + verified: ["Feature A works", "Feature B works"], + issues: ["Missing validation", "No error handling"], + requiredCorrections: ["Add input validation", "Add try-catch blocks"], + additionalQuestions: ["Should we add logging?"], + decision: "revise", + 
} + + const formatted = (workflowManager as any).formatAuditReport(report) + + expect(formatted).toContain("✅ Verified") + expect(formatted).toContain("Feature A works") + expect(formatted).toContain("❌ Issues Found") + expect(formatted).toContain("Missing validation") + expect(formatted).toContain("📝 Required Corrections") + expect(formatted).toContain("1. Add input validation") + expect(formatted).toContain("❓ Additional Questions") + expect(formatted).toContain("**Decision:** revise") + }) + }) +})
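
Reviewer sketch (not part of the diff): the new `AuditingWorkflowManager` is only exercised through unit tests here, so the snippet below illustrates one way a caller might drive its public API end to end. The `runAuditingWorkflow` helper, the way `provider` and `workspacePath` are obtained, the state-file name, and the import paths are all assumptions; a real integration would also need to parse the implementor/auditor task output back into `implementationResult`/`auditReport` between calls, which the current code stubs out.

```typescript
import * as fs from "fs/promises"
import * as path from "path"
import { AuditingWorkflowManager } from "./AuditingWorkflowManager"
import { ClineProvider } from "../webview/ClineProvider"

// Hypothetical driver: `provider` and `workspacePath` are assumed to come from the
// extension host (e.g. a registered VS Code command); neither is defined in this PR.
export async function runAuditingWorkflow(provider: ClineProvider, workspacePath: string): Promise<void> {
	const manager = new AuditingWorkflowManager(provider, workspacePath, {
		maxCorrectionIterations: 2,
		generatePlanFile: true, // writes auditing-workflow-plan.md into the workspace
	})

	// Plan the phases via architect mode.
	await manager.initialize("Implement user authentication with JWT")

	// Each call advances the current phase by one state transition:
	// pending -> implementing -> auditing -> corrections/completed.
	let hasMore = true
	while (hasMore) {
		hasMore = await manager.executeNextPhase()

		const { currentPhase, percentComplete } = manager.getProgress()
		console.log(`[auditing-workflow] ${currentPhase}: ${percentComplete}% complete`)

		// Persist the workflow state so it can later be restored with importState().
		// The file name below is illustrative only.
		await fs.writeFile(path.join(workspacePath, "auditing-workflow-state.json"), manager.exportState(), "utf-8")
	}
}
```

Because `autoSwitchModes` defaults to true, a loop like this would bounce the provider between the new implementor and auditor modes added in packages/types/src/mode.ts as each phase moves through its states.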