diff --git a/.github/workflows/agentics-maintenance.yml b/.github/workflows/agentics-maintenance.yml index 1f07e2e46..6e21e8620 100644 --- a/.github/workflows/agentics-maintenance.yml +++ b/.github/workflows/agentics-maintenance.yml @@ -1,12 +1,12 @@ # -# ___ _ _ -# / _ \ | | (_) -# | |_| | __ _ ___ _ __ | |_ _ ___ +# ___ _ _ +# / _ \ | | (_) +# | |_| | __ _ ___ _ __ | |_ _ ___ # | _ |/ _` |/ _ \ '_ \| __| |/ __| -# | | | | (_| | __/ | | | |_| | (__ +# | | | | (_| | __/ | | | |_| | (__ # \_| |_/\__, |\___|_| |_|\__|_|\___| # __/ | -# _ _ |___/ +# _ _ |___/ # | | | | / _| | # | | | | ___ _ __ _ __| |_| | _____ ____ # | |/\| |/ _ \ '__| |/ /| _| |/ _ \ \ /\ / / ___| @@ -21,10 +21,10 @@ # # Alternative regeneration methods: # make recompile -# +# # Or use the gh-aw CLI directly: # ./gh-aw compile --validate --verbose -# +# # The workflow is generated when any workflow uses the 'expires' field # in create-discussions or create-issues safe-outputs configuration. # Schedule frequency is automatically determined by the shortest expiration time. @@ -33,7 +33,7 @@ name: Agentics Maintenance on: schedule: - - cron: "37 0 * * *" # Daily (based on minimum expires: 7 days) + - cron: "37 0 * * *" # Daily (based on minimum expires: 7 days) workflow_dispatch: permissions: {} @@ -90,7 +90,6 @@ jobs: .github persist-credentials: false - - name: Setup Go uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0 with: diff --git a/.github/workflows/ci-doctor.lock.yml b/.github/workflows/ci-doctor.lock.yml index 1ebfd13ec..df5dab980 100644 --- a/.github/workflows/ci-doctor.lock.yml +++ b/.github/workflows/ci-doctor.lock.yml @@ -1,12 +1,12 @@ # -# ___ _ _ -# / _ \ | | (_) -# | |_| | __ _ ___ _ __ | |_ _ ___ +# ___ _ _ +# / _ \ | | (_) +# | |_| | __ _ ___ _ __ | |_ _ ___ # | _ |/ _` |/ _ \ '_ \| __| |/ __| -# | | | | (_| | __/ | | | |_| | (__ +# | | | | (_| | __/ | | | |_| | (__ # \_| |_/\__, |\___|_| |_|\__|_|\___| # __/ | -# _ _ |___/ +# _ _ |___/ # | | | | / _| | # | | | | ___ _ __ _ __| |_| | _____ ____ # | |/\| |/ _ \ '__| |/ /| _| |/ _ \ \ /\ / / ___| @@ -33,11 +33,11 @@ name: "CI Failure Doctor" workflow_run: # zizmor: ignore[dangerous-triggers] - workflow_run trigger is secured with role and fork validation branches: - - main + - main types: - - completed + - completed workflows: - - "CI - KSail" + - "CI - KSail" permissions: read-all @@ -146,13 +146,13 @@ jobs: run: | # Download official Copilot CLI installer script curl -fsSL https://raw.githubusercontent.com/github/copilot-cli/main/install.sh -o /tmp/copilot-install.sh - + # Execute the installer with the specified version export VERSION=0.0.375 && sudo bash /tmp/copilot-install.sh - + # Cleanup rm -f /tmp/copilot-install.sh - + # Verify installation copilot --version - name: Install awf binary @@ -443,7 +443,7 @@ jobs: with: script: | const fs = require('fs'); - + const awInfo = { engine_id: "copilot", engine_name: "GitHub Copilot CLI", @@ -473,13 +473,13 @@ jobs: }, created_at: new Date().toISOString() }; - + // Write to /tmp/gh-aw directory to avoid inclusion in PR const tmpPath = '/tmp/gh-aw/aw_info.json'; fs.writeFileSync(tmpPath, JSON.stringify(awInfo, null, 2)); console.log('Generated aw_info.json at:', tmpPath); console.log(JSON.stringify(awInfo, null, 2)); - + // Set model as output for reuse in other steps/jobs core.setOutput('model', awInfo.model); - name: Generate workflow overview @@ -503,30 +503,30 @@ jobs: bash /opt/gh-aw/actions/create_prompt_first.sh cat << 'PROMPT_EOF' > "$GH_AW_PROMPT" # CI Failure Doctor - + You are 
the CI Failure Doctor, an expert investigative agent that analyzes failed GitHub Actions workflows to identify root causes and patterns. Your mission is to conduct a deep investigation when the CI workflow fails. - + ## Current Context - + - **Repository**: __GH_AW_GITHUB_REPOSITORY__ - **Workflow Run**: __GH_AW_GITHUB_EVENT_WORKFLOW_RUN_ID__ - **Conclusion**: __GH_AW_GITHUB_EVENT_WORKFLOW_RUN_CONCLUSION__ - **Run URL**: __GH_AW_GITHUB_EVENT_WORKFLOW_RUN_HTML_URL__ - **Head SHA**: __GH_AW_GITHUB_EVENT_WORKFLOW_RUN_HEAD_SHA__ - + ## Investigation Protocol - + **ONLY proceed if the workflow conclusion is 'failure' or 'cancelled'**. Exit immediately if the workflow was successful. - + ### Phase 1: Initial Triage - + 1. **Verify Failure**: Check that `__GH_AW_GITHUB_EVENT_WORKFLOW_RUN_CONCLUSION__` is `failure` or `cancelled` 2. **Get Workflow Details**: Use `get_workflow_run` to get full details of the failed run 3. **List Jobs**: Use `list_workflow_jobs` to identify which specific jobs failed 4. **Quick Assessment**: Determine if this is a new type of failure or a recurring pattern - + ### Phase 2: Deep Log Analysis - + 1. **Retrieve Logs**: Use `get_job_logs` with `failed_only=true` to get logs from all failed jobs 2. **Pattern Recognition**: Analyze logs for: - Error messages and stack traces @@ -541,9 +541,9 @@ jobs: - Test names that failed - Dependency versions involved - Timing patterns - + ### Phase 3: Historical Context Analysis - + 1. **Search Investigation History**: Use file-based storage to search for similar failures: - Read from cached investigation files in `/tmp/memory/investigations/` - Parse previous failure patterns and solutions @@ -551,9 +551,9 @@ jobs: 2. **Issue History**: Search existing issues for related problems 3. **Commit Analysis**: Examine the commit that triggered the failure 4. **PR Context**: If triggered by a PR, analyze the changed files - + ### Phase 4: Root Cause Investigation - + 1. **Categorize Failure Type**: - **Code Issues**: Syntax errors, logic bugs, test failures - **Infrastructure**: Runner issues, network problems, resource constraints @@ -561,24 +561,24 @@ jobs: - **Configuration**: Workflow configuration, environment variables - **Flaky Tests**: Intermittent failures, timing issues - **External Services**: Third-party API failures, downstream dependencies - + 2. **Deep Dive Analysis**: - For test failures: Identify specific test methods and assertions - For build failures: Analyze compilation errors and missing dependencies - For infrastructure issues: Check runner logs and resource usage - For timeout issues: Identify slow operations and bottlenecks - + ### Phase 5: Pattern Storage and Knowledge Building - + 1. **Store Investigation**: Save structured investigation data to files: - Write investigation report to `/tmp/memory/investigations/-.json` - Store error patterns in `/tmp/memory/patterns/` - Maintain an index file of all investigations for fast searching 2. **Update Pattern Database**: Enhance knowledge with new findings by updating pattern files 3. **Save Artifacts**: Store detailed logs and analysis in the cached directories - + ### Phase 6: Looking for existing issues - + 1. **Convert the report to a search query** - Use any advanced search features in GitHub Issues to find related issues - Look for keywords, error messages, and patterns in existing issues @@ -587,9 +587,9 @@ jobs: 3. **Add issue comment to duplicate issue and finish** - If you find a duplicate issue, add a comment with your findings and close the investigation. 
- Do NOT open a new issue since you found a duplicate already (skip next phases).
-
+
### Phase 7: Reporting and Recommendations
-
+
1. **Create Investigation Report**: Generate a comprehensive analysis including:
- **Executive Summary**: Quick overview of the failure
- **Root Cause**: Detailed explanation of what went wrong
@@ -598,63 +598,63 @@
- **Prevention Strategies**: How to avoid similar failures
- **AI Team Self-Improvement**: Give a short set of additional prompting instructions to copy-and-paste into instructions.md for AI coding agents to help prevent this type of failure in the future
- **Historical Context**: Similar past failures and their resolutions
-
+
2. **Actionable Deliverables**:
- Create an issue with investigation results (if warranted)
- Comment on related PR with analysis (if PR-triggered)
- Provide specific file locations and line numbers for fixes
- Suggest code changes or configuration updates
-
+
## Output Requirements
-
+
### Investigation Issue Template
-
+
When creating an investigation issue, use this structure:
-
+
```markdown
# 🏥 CI Failure Investigation - Run #__GH_AW_GITHUB_EVENT_WORKFLOW_RUN_RUN_NUMBER__
-
+
## Summary
-
+
[Brief description of the failure]
-
+
## Failure Details
-
+
- **Run**: [__GH_AW_GITHUB_EVENT_WORKFLOW_RUN_ID__](__GH_AW_GITHUB_EVENT_WORKFLOW_RUN_HTML_URL__)
- **Commit**: __GH_AW_GITHUB_EVENT_WORKFLOW_RUN_HEAD_SHA__
- **Trigger**: __GH_AW_GITHUB_EVENT_WORKFLOW_RUN_EVENT__
-
+
## Root Cause Analysis
-
+
[Detailed analysis of what went wrong]
-
+
## Failed Jobs and Errors
-
+
[List of failed jobs with key error messages]
-
+
## Investigation Findings
-
+
[Deep analysis results]
-
+
## Recommended Actions
-
+
- [ ] [Specific actionable steps]
-
+
## Prevention Strategies
-
+
[How to prevent similar failures]
-
+
## AI Team Self-Improvement
-
+
[Short set of additional prompting instructions to copy-and-paste into instructions.md for AI coding agents to help prevent this type of failure in the future]
-
+
## Historical Context
-
+
[Similar past failures and patterns]
```
-
+
## Important Guidelines
-
+
- **Be Thorough**: Don't just report the error - investigate the underlying cause
- **Use Memory**: Always check for similar past failures and learn from them
- **Be Specific**: Provide exact file paths, line numbers, and error messages
@@ -662,15 +662,15 @@
- **Pattern Building**: Contribute to the knowledge base for future investigations
- **Resource Efficient**: Use caching to avoid re-downloading large logs
- **Security Conscious**: Never execute untrusted code from logs or external sources
-
+
## Cache Usage Strategy
-
+
- Store investigation database and knowledge patterns in `/tmp/memory/investigations/` and `/tmp/memory/patterns/`
- Cache detailed log analysis and artifacts in `/tmp/investigation/logs/` and `/tmp/investigation/reports/`
- Persist findings across workflow runs using GitHub Actions cache
- Build cumulative knowledge about failure patterns and solutions using structured JSON files
- Use file-based indexing for fast pattern matching and similarity detection
-
+
PROMPT_EOF
- name: Substitute placeholders
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
@@ -686,7 +686,7 @@
with:
script: |
const substitutePlaceholders = require('/opt/gh-aw/actions/substitute_placeholders.cjs');
-
+
// Call the substitution function
return await substitutePlaceholders({
file: process.env.GH_AW_PROMPT,
@@ -715,24 +715,24 @@
GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
run: |
cat << 
'PROMPT_EOF' >> "$GH_AW_PROMPT" - + --- - + ## Cache Folder Available - + You have access to a persistent cache folder at `/tmp/gh-aw/cache-memory/` where you can read and write files to create memories and store information. - + - **Read/Write Access**: You can freely read from and write to any files in this folder - **Persistence**: Files in this folder persist across workflow runs via GitHub Actions cache - **Last Write Wins**: If multiple processes write to the same file, the last write will be preserved - **File Share**: Use this as a simple file share - organize files as you see fit - + Examples of what you can store: - `/tmp/gh-aw/cache-memory/notes.txt` - general notes and observations - `/tmp/gh-aw/cache-memory/preferences.json` - user preferences and settings - `/tmp/gh-aw/cache-memory/history.log` - activity history and logs - `/tmp/gh-aw/cache-memory/state/` - organized state files in subdirectories - + Feel free to create, read, update, and organize files in this folder as needed for your tasks. PROMPT_EOF - name: Append safe outputs instructions to prompt @@ -747,9 +747,9 @@ jobs: To create or modify GitHub resources (issues, discussions, pull requests, etc.), you MUST call the appropriate safe output tool. Simply writing content will NOT work - the workflow requires actual tool calls. - + **Available tools**: add_comment, create_issue, missing_tool, noop - + **Critical**: Tool calls write structured data that downstream jobs process. Without tool calls, follow-up actions will be skipped. @@ -794,7 +794,7 @@ jobs: - **workflow-run-id**: __GH_AW_GITHUB_RUN_ID__ {{/if}} - + PROMPT_EOF - name: Substitute placeholders uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 @@ -811,7 +811,7 @@ jobs: with: script: | const substitutePlaceholders = require('/opt/gh-aw/actions/substitute_placeholders.cjs'); - + // Call the substitution function return await substitutePlaceholders({ file: process.env.GH_AW_PROMPT, @@ -877,7 +877,7 @@ jobs: # This ensures they are in /tmp/gh-aw/ where secret redaction can scan them SESSION_STATE_DIR="$HOME/.copilot/session-state" LOGS_DIR="/tmp/gh-aw/sandbox/agent/logs" - + if [ -d "$SESSION_STATE_DIR" ]; then echo "Copying Copilot session state files from $SESSION_STATE_DIR to $LOGS_DIR" mkdir -p "$LOGS_DIR" @@ -896,7 +896,7 @@ jobs: const { main } = require('/opt/gh-aw/actions/redact_secrets.cjs'); await main(); env: - GH_AW_SECRET_NAMES: 'COPILOT_GITHUB_TOKEN,GH_AW_GITHUB_MCP_SERVER_TOKEN,GH_AW_GITHUB_TOKEN,GITHUB_TOKEN' + GH_AW_SECRET_NAMES: "COPILOT_GITHUB_TOKEN,GH_AW_GITHUB_MCP_SERVER_TOKEN,GH_AW_GITHUB_TOKEN,GITHUB_TOKEN" SECRET_COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }} SECRET_GH_AW_GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN }} SECRET_GH_AW_GITHUB_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN }} @@ -1167,13 +1167,13 @@ jobs: run: | # Download official Copilot CLI installer script curl -fsSL https://raw.githubusercontent.com/github/copilot-cli/main/install.sh -o /tmp/copilot-install.sh - + # Execute the installer with the specified version export VERSION=0.0.375 && sudo bash /tmp/copilot-install.sh - + # Cleanup rm -f /tmp/copilot-install.sh - + # Verify installation copilot --version - name: Execute GitHub Copilot CLI @@ -1299,7 +1299,7 @@ jobs: uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 env: GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }} - GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG: "{\"add_comment\":{\"max\":1},\"create_issue\":{\"max\":1,\"title_prefix\":\"${{ 
github.workflow }}\"}}" + GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG: '{"add_comment":{"max":1},"create_issue":{"max":1,"title_prefix":"${{ github.workflow }}"}}' with: github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} script: | @@ -1331,4 +1331,3 @@ jobs: with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory - diff --git a/.github/workflows/daily-perf-improver.lock.yml b/.github/workflows/daily-perf-improver.lock.yml index 477567c1b..00c1432e6 100644 --- a/.github/workflows/daily-perf-improver.lock.yml +++ b/.github/workflows/daily-perf-improver.lock.yml @@ -1,12 +1,12 @@ # -# ___ _ _ -# / _ \ | | (_) -# | |_| | __ _ ___ _ __ | |_ _ ___ +# ___ _ _ +# / _ \ | | (_) +# | |_| | __ _ ___ _ __ | |_ _ ___ # | _ |/ _` |/ _ \ '_ \| __| |/ __| -# | | | | (_| | __/ | | | |_| | (__ +# | | | | (_| | __/ | | | |_| | (__ # \_| |_/\__, |\___|_| |_|\__|_|\___| # __/ | -# _ _ |___/ +# _ _ |___/ # | | | | / _| | # | | | | ___ _ __ _ __| |_| | _____ ____ # | |/\| |/ _ \ '__| |/ /| _| |/ _ \ \ /\ / / ___| @@ -32,7 +32,7 @@ name: "Daily Perf Improver" "on": schedule: - - cron: "39 23 * * *" + - cron: "39 23 * * *" workflow_dispatch: null permissions: @@ -170,13 +170,13 @@ jobs: run: | # Download official Copilot CLI installer script curl -fsSL https://raw.githubusercontent.com/github/copilot-cli/main/install.sh -o /tmp/copilot-install.sh - + # Execute the installer with the specified version export VERSION=0.0.375 && sudo bash /tmp/copilot-install.sh - + # Cleanup rm -f /tmp/copilot-install.sh - + # Verify installation copilot --version - name: Install awf binary @@ -508,7 +508,7 @@ jobs: with: script: | const fs = require('fs'); - + const awInfo = { engine_id: "copilot", engine_name: "GitHub Copilot CLI", @@ -538,13 +538,13 @@ jobs: }, created_at: new Date().toISOString() }; - + // Write to /tmp/gh-aw directory to avoid inclusion in PR const tmpPath = '/tmp/gh-aw/aw_info.json'; fs.writeFileSync(tmpPath, JSON.stringify(awInfo, null, 2)); console.log('Generated aw_info.json at:', tmpPath); console.log(JSON.stringify(awInfo, null, 2)); - + // Set model as output for reuse in other steps/jobs core.setOutput('model', awInfo.model); - name: Generate workflow overview @@ -563,161 +563,161 @@ jobs: bash /opt/gh-aw/actions/create_prompt_first.sh cat << 'PROMPT_EOF' > "$GH_AW_PROMPT" # Daily Perf Improver - + ## Job Description - + You are an AI performance engineer for `__GH_AW_GITHUB_REPOSITORY__`. Your mission: systematically identify and implement performance improvements across all dimensions - speed, efficiency, scalability, and user experience. - + You are doing your work in phases. Right now you will perform just one of the following three phases. Choose the phase depending on what has been done so far. - + ## Phase selection - + To decide which phase to perform: - + 1. First check for existing open discussion titled "__GH_AW_GITHUB_WORKFLOW__" using `list_discussions`. Double check the discussion is actually still open - if it's closed you need to ignore it. If found, and open, read it and maintainer comments. If not found, then perform Phase 1 and nothing else. - + 2. Next check if `.github/actions/daily-perf-improver/build-steps/action.yml` exists. If yes then read it. If not then perform Phase 2 and nothing else. - + 3. Finally, if both those exist, then perform Phase 3. - + ## Phase 1 - Performance research - + 1. 
Research performance landscape in this repo:
-
+
- Current performance testing practices and tooling
- User-facing performance concerns (load times, responsiveness, throughput)
- System performance bottlenecks (compute, memory, I/O, network)
- Maintainer performance priorities and success metrics
- Development/build performance issues affecting performance engineering
- Existing performance documentation and measurement approaches
-
+
**Identify optimization targets:**
-
+
- User experience bottlenecks (slow page loads, UI lag, high resource usage)
- System inefficiencies (algorithms, data structures, resource utilization)
- Development workflow pain points affecting performance engineering (build times, test execution, CI duration)
- Infrastructure concerns (scaling, deployment, monitoring)
- Performance engineering gaps (lack of guides, rapidity, measurement strategies)
-
+
**Goal:** Enable engineers to quickly measure performance impact across different dimensions using appropriate tools - from quick synthetic tests to realistic user scenarios.
-
+
2. Use this research to create a discussion with title "__GH_AW_GITHUB_WORKFLOW__ - Research and Plan"
-
+
**Include a "How to Control this Workflow" section at the end of the discussion that explains:**
- The user can add comments to the discussion to provide feedback or adjustments to the plan
- The user can use these commands:
-
+
gh aw disable daily-perf-improver --repo __GH_AW_GITHUB_REPOSITORY__
gh aw enable daily-perf-improver --repo __GH_AW_GITHUB_REPOSITORY__
gh aw run daily-perf-improver --repo __GH_AW_GITHUB_REPOSITORY__ --repeat
gh aw logs daily-perf-improver --repo __GH_AW_GITHUB_REPOSITORY__
-
+
**Include a "What Happens Next" section at the end of the discussion that explains:**
- The next time this workflow runs, Phase 2 will be performed, which will analyze the codebase to create build steps configuration and performance engineering guides
- After Phase 2 completes, Phase 3 will begin on subsequent runs to implement actual performance improvements
- If running in "repeat" mode, the workflow will automatically run again to proceed to the next phase
- Humans can review this research and add comments before the workflow continues
-
+
3. Exit this entire workflow, do not proceed to Phase 2 on this run. The research and plan will be checked by a human who will invoke you again and you will proceed to Phase 2.
-
+
## Phase 2 - Build steps inference and configuration and perf engineering guides
-
+
1. Check for open PR titled "__GH_AW_GITHUB_WORKFLOW__ - Updates to complete configuration". If exists then comment "configuration needs completion" and exit.
-
+
2. Analyze existing CI files, build scripts, and documentation to determine build commands needed for performance development, testing tools (if any used in repo), linting tools (if any used in repo), code formatting tools (if any used in repo) and other environment setup.
-
+
3. Create `.github/actions/daily-perf-improver/build-steps/action.yml` with validated build steps. Each step must log output to `build-steps.log` in repo root. Cross-check against existing CI/devcontainer configs.
-
+
4. Create 1-5 performance engineering guides in `.github/copilot/instructions/` covering relevant areas (e.g., frontend performance, backend optimization, build performance, infrastructure scaling). 
Each guide should document:
-
+
- Performance measurement strategies and tooling
- Common bottlenecks and optimization techniques
- Success metrics and testing approaches
- How to explore performance efficiently using focused, maximally-efficient measurements and rebuilds
-
+
5. Create PR with title "__GH_AW_GITHUB_WORKFLOW__ - Updates to complete configuration" containing the files from steps 3-4. Request maintainer review.
-
+
**Include a "What Happens Next" section in the PR description that explains:**
- Once this PR is merged, the next workflow run will proceed to Phase 3, where actual performance improvements will be implemented
- Phase 3 will use the build steps and performance guides to systematically make performance improvements
- If running in "repeat" mode, the workflow will automatically run again to proceed to Phase 3
- Humans can review and merge this configuration before continuing
-
+
Exit workflow.
-
+
6. Test build steps manually. If fixes needed then update the PR branch. If unable to resolve then create issue and exit.
-
+
7. Add brief comment (1 or 2 sentences) to the discussion identified at the start of the workflow stating progress made and giving links to the PR created.
-
+
8. Exit this entire workflow, do not proceed to Phase 3 on this run. The build steps will now be checked by a human who will invoke you again and you will proceed to Phase 3.
-
+
## Phase 3 - Goal selection, work and results
-
+
1. **Goal selection**. Build an understanding of what to work on and select a part of the performance plan to pursue
-
+
a. Repository is now performance-ready. Review `build-steps/action.yml` and `build-steps.log` to understand setup. If build failed then create fix PR and exit.
-
+
b. Read the plan in the discussion mentioned earlier, along with comments.
-
+
c. Check for existing performance PRs (especially yours with "__GH_AW_GITHUB_WORKFLOW__" prefix). Avoid duplicate work.
-
+
d. If plan needs updating then comment on planning discussion with revised plan and rationale. Consider maintainer feedback.
e. Select a performance improvement goal to pursue from the plan. Ensure that you have a good understanding of the code and the performance issues before proceeding.
-
+
f. Select and read the appropriate performance engineering guide(s) in `.github/copilot/instructions/` to help you with your work. If it doesn't exist, create it and later add it to your pull request.
-
+
2. **Work towards your selected goal**. For the performance improvement goal you selected, do the following:
-
+
a. Create a new branch starting with "perf/".
-
+
b. Work towards the performance improvement goal you selected. Consider approaches like:
- **Code optimization:** Algorithm improvements, data structure changes, caching
- **User experience:** Reducing load times, improving responsiveness, optimizing assets
- **System efficiency:** Resource utilization, concurrency, I/O optimization
- **Performance engineering workflow:** Build optimization, test performance, CI improvements for faster performance engineering
- **Infrastructure:** Scaling strategies, deployment efficiency, monitoring setup
-
+
**Measurement strategy:** Plan before/after measurements using appropriate methods for your performance target - synthetic benchmarks for algorithms, user journey tests for UX, load tests for scalability, or build time comparisons for developer experience. Choose reliable measurement approaches that clearly demonstrate impact.
-
+
c. 
Ensure the code still works as expected and that any existing relevant tests pass. Add new tests if appropriate and make sure they pass too.
-
+
d. Measure performance impact. Document measurement attempts even if unsuccessful. If no improvement then iterate, revert, or try different approach.
-
+
3. **Finalizing changes**
-
+
a. Apply any automatic code formatting used in the repo. If necessary check CI files to understand what code formatting is used.
-
+
b. Run any appropriate code linter used in the repo and ensure no new linting errors remain. If necessary check CI files to understand what code linting is used.
-
+
4. **Results and learnings**
-
+
a. If you succeeded in writing useful code changes that improve performance, create a draft pull request with your changes.
-
+
**Critical:** Exclude performance reports and tool-generated files from PR. Double-check added files and remove any that don't belong.
-
+
Include a description of the improvements with evidence of impact. In the description, explain:
-
+
- **Goal and rationale:** Performance target chosen and why it matters
- **Approach:** Strategy, methodology, and implementation steps
- **Impact measurement:** How performance was tested and results achieved
- **Trade-offs:** What changed (complexity, maintainability, resource usage)
- **Validation:** Testing approach and success criteria met
- **Future work:** Additional opportunities identified
-
+
**Performance evidence section:** Document performance impact with appropriate evidence - timing data, resource usage, user metrics, or other relevant measurements. Be transparent about measurement limitations and methodology. Mark estimates clearly.
-
+
**Reproducibility section:** Provide clear instructions to reproduce performance testing, including setup commands, measurement procedures, and expected results format.
-
+
After creation, check the pull request to ensure it is correct, includes all expected files, and doesn't include any unwanted files or changes. Make any necessary corrections by pushing further commits to the branch.
-
+
b. If failed or lessons learned then add more files to the PR branch to update relevant performance guide in `.github/copilot/instructions/` with insights. Create a new guide if needed, or split, merge or delete existing guides as appropriate. This is your chance to improve the performance engineering documentation for next time, so you and your team don't make the same mistakes again! Make the most of it!
-
+
5. **Final update**: Add brief comment (1 or 2 sentences) to the discussion identified at the start of the workflow stating goal worked on, PR links, and progress made.
-
+
PROMPT_EOF
- name: Substitute placeholders
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
@@ -728,7 +728,7 @@
with:
script: |
const substitutePlaceholders = require('/opt/gh-aw/actions/substitute_placeholders.cjs');
-
+
// Call the substitution function
return await substitutePlaceholders({
file: process.env.GH_AW_PROMPT,
@@ -759,9 +759,9 @@
To create or modify GitHub resources (issues, discussions, pull requests, etc.),
you MUST call the appropriate safe output tool. Simply writing content will NOT
work - the workflow requires actual tool calls.
-
+
**Available tools**: add_comment, create_discussion, create_pull_request, missing_tool, noop
-
+
**Critical**: Tool calls write structured data that downstream jobs process.
Without tool calls, follow-up actions will be skipped.
@@ -806,7 +806,7 @@ jobs: - **workflow-run-id**: __GH_AW_GITHUB_RUN_ID__ {{/if}} - + PROMPT_EOF - name: Substitute placeholders uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 @@ -823,7 +823,7 @@ jobs: with: script: | const substitutePlaceholders = require('/opt/gh-aw/actions/substitute_placeholders.cjs'); - + // Call the substitution function return await substitutePlaceholders({ file: process.env.GH_AW_PROMPT, @@ -889,7 +889,7 @@ jobs: # This ensures they are in /tmp/gh-aw/ where secret redaction can scan them SESSION_STATE_DIR="$HOME/.copilot/session-state" LOGS_DIR="/tmp/gh-aw/sandbox/agent/logs" - + if [ -d "$SESSION_STATE_DIR" ]; then echo "Copying Copilot session state files from $SESSION_STATE_DIR to $LOGS_DIR" mkdir -p "$LOGS_DIR" @@ -908,7 +908,7 @@ jobs: const { main } = require('/opt/gh-aw/actions/redact_secrets.cjs'); await main(); env: - GH_AW_SECRET_NAMES: 'COPILOT_GITHUB_TOKEN,GH_AW_GITHUB_MCP_SERVER_TOKEN,GH_AW_GITHUB_TOKEN,GITHUB_TOKEN' + GH_AW_SECRET_NAMES: "COPILOT_GITHUB_TOKEN,GH_AW_GITHUB_MCP_SERVER_TOKEN,GH_AW_GITHUB_TOKEN,GITHUB_TOKEN" SECRET_COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }} SECRET_GH_AW_GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN }} SECRET_GH_AW_GITHUB_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN }} @@ -1173,13 +1173,13 @@ jobs: run: | # Download official Copilot CLI installer script curl -fsSL https://raw.githubusercontent.com/github/copilot-cli/main/install.sh -o /tmp/copilot-install.sh - + # Execute the installer with the specified version export VERSION=0.0.375 && sudo bash /tmp/copilot-install.sh - + # Cleanup rm -f /tmp/copilot-install.sh - + # Verify installation copilot --version - name: Execute GitHub Copilot CLI @@ -1318,7 +1318,7 @@ jobs: uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 env: GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }} - GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG: "{\"add_comment\":{\"max\":1,\"target\":\"*\"},\"create_discussion\":{\"category\":\"ideas\",\"expires\":168,\"max\":5,\"title_prefix\":\"${{ github.workflow }}\"},\"create_pull_request\":{\"base_branch\":\"${{ github.ref_name }}\",\"draft\":true,\"max\":1,\"max_patch_size\":1024}}" + GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG: '{"add_comment":{"max":1,"target":"*"},"create_discussion":{"category":"ideas","expires":168,"max":5,"title_prefix":"${{ github.workflow }}"},"create_pull_request":{"base_branch":"${{ github.ref_name }}","draft":true,"max":1,"max_patch_size":1024}}' with: github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} script: | @@ -1326,4 +1326,3 @@ jobs: setupGlobals(core, github, context, exec, io); const { main } = require('/opt/gh-aw/actions/safe_output_handler_manager.cjs'); await main(); - diff --git a/.github/workflows/daily-qa.lock.yml b/.github/workflows/daily-qa.lock.yml index ab07a76c9..a2d95d8b3 100644 --- a/.github/workflows/daily-qa.lock.yml +++ b/.github/workflows/daily-qa.lock.yml @@ -1,12 +1,12 @@ # -# ___ _ _ -# / _ \ | | (_) -# | |_| | __ _ ___ _ __ | |_ _ ___ +# ___ _ _ +# / _ \ | | (_) +# | |_| | __ _ ___ _ __ | |_ _ ___ # | _ |/ _` |/ _ \ '_ \| __| |/ __| -# | | | | (_| | __/ | | | |_| | (__ +# | | | | (_| | __/ | | | |_| | (__ # \_| |_/\__, |\___|_| |_|\__|_|\___| # __/ | -# _ _ |___/ +# _ _ |___/ # | | | | / _| | # | | | | ___ _ __ _ __| |_| | _____ ____ # | |/\| |/ _ \ '__| |/ /| _| |/ _ \ \ /\ / / ___| @@ -31,7 +31,7 @@ name: "Daily QA" "on": schedule: - - cron: "5 19 * * *" + - cron: "5 19 * * *" workflow_dispatch: null permissions: 
read-all
@@ -126,13 +126,13 @@
run: |
# Download official Copilot CLI installer script
curl -fsSL https://raw.githubusercontent.com/github/copilot-cli/main/install.sh -o /tmp/copilot-install.sh
-
+
# Execute the installer with the specified version
export VERSION=0.0.375 && sudo bash /tmp/copilot-install.sh
-
+
# Cleanup
rm -f /tmp/copilot-install.sh
-
+
# Verify installation
copilot --version
- name: Install awf binary
@@ -464,7 +464,7 @@
with:
script: |
const fs = require('fs');
-
+
const awInfo = {
engine_id: "copilot",
engine_name: "GitHub Copilot CLI",
@@ -494,13 +494,13 @@
},
created_at: new Date().toISOString()
};
-
+
// Write to /tmp/gh-aw directory to avoid inclusion in PR
const tmpPath = '/tmp/gh-aw/aw_info.json';
fs.writeFileSync(tmpPath, JSON.stringify(awInfo, null, 2));
console.log('Generated aw_info.json at:', tmpPath);
console.log(JSON.stringify(awInfo, null, 2));
-
+
// Set model as output for reuse in other steps/jobs
core.setOutput('model', awInfo.model);
- name: Generate workflow overview
@@ -519,15 +519,15 @@
bash /opt/gh-aw/actions/create_prompt_first.sh
cat << 'PROMPT_EOF' > "$GH_AW_PROMPT"
# Daily QA
-
+
## Job Description
-
-
-
+
+
+
Your name is __GH_AW_GITHUB_WORKFLOW__. Your job is to act as an agentic QA engineer for the team working in the GitHub repository `__GH_AW_GITHUB_REPOSITORY__`.
-
+
1. Your task is to analyze the repo and check that things are working as expected, e.g.
-
+
- Check that the code builds and runs
- Check that the tests pass
- Check that instructions are clear and easy to follow
@@ -535,25 +535,25 @@
- Check that the code is well structured and easy to read
- Check that the code is well tested
- Check that the documentation is up to date
-
+
You can also choose to do nothing if you think everything is fine.
-
+
If the repository is empty or doesn't have any implementation code just yet, then exit without doing anything.
-
+
2. You have access to various tools. You can use these tools to perform your tasks. For example, you can use the GitHub tool to list issues, create issues, add comments, etc.
-
+
3. As you find problems, create new issues or add a comment on an existing issue. For each distinct problem:
-
+
- First, check if a duplicate already exists, and if so, consider adding a comment to the existing issue instead of creating a new one, if you have something new to add.
-
+
- Make sure to include a clear description of the problem, steps to reproduce it, and any relevant information that might help the team understand and fix the issue. If you create a pull request, make sure to include a clear description of the changes you made and why they are necessary.
-
+
4. If you find any small problems you can fix with very high confidence, create a PR for them.
-
+
5. Search for any previous "__GH_AW_GITHUB_WORKFLOW__" open discussions in the repository. Read the latest one. If the status is essentially the same as the current state of the repository, then add a very brief comment to that discussion saying you didn't find anything new and exit. Close all the previous open Daily QA Report discussions.
-
+
6. Create a new discussion with title starting with "__GH_AW_GITHUB_WORKFLOW__", very very briefly summarizing the problems you found and the actions you took. Use note form. Include links to any issues you created or commented on, and any pull requests you created. 
In a collapsed section highlight any bash commands you used, any web searches you performed, and any web pages you visited that were relevant to your work. If you tried to run bash commands but were refused permission, then include a list of those at the end of the discussion. - + PROMPT_EOF - name: Substitute placeholders uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 @@ -564,7 +564,7 @@ jobs: with: script: | const substitutePlaceholders = require('/opt/gh-aw/actions/substitute_placeholders.cjs'); - + // Call the substitution function return await substitutePlaceholders({ file: process.env.GH_AW_PROMPT, @@ -595,9 +595,9 @@ jobs: To create or modify GitHub resources (issues, discussions, pull requests, etc.), you MUST call the appropriate safe output tool. Simply writing content will NOT work - the workflow requires actual tool calls. - + **Available tools**: add_comment, create_discussion, create_pull_request, missing_tool, noop - + **Critical**: Tool calls write structured data that downstream jobs process. Without tool calls, follow-up actions will be skipped. @@ -642,7 +642,7 @@ jobs: - **workflow-run-id**: __GH_AW_GITHUB_RUN_ID__ {{/if}} - + PROMPT_EOF - name: Substitute placeholders uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 @@ -659,7 +659,7 @@ jobs: with: script: | const substitutePlaceholders = require('/opt/gh-aw/actions/substitute_placeholders.cjs'); - + // Call the substitution function return await substitutePlaceholders({ file: process.env.GH_AW_PROMPT, @@ -725,7 +725,7 @@ jobs: # This ensures they are in /tmp/gh-aw/ where secret redaction can scan them SESSION_STATE_DIR="$HOME/.copilot/session-state" LOGS_DIR="/tmp/gh-aw/sandbox/agent/logs" - + if [ -d "$SESSION_STATE_DIR" ]; then echo "Copying Copilot session state files from $SESSION_STATE_DIR to $LOGS_DIR" mkdir -p "$LOGS_DIR" @@ -744,7 +744,7 @@ jobs: const { main } = require('/opt/gh-aw/actions/redact_secrets.cjs'); await main(); env: - GH_AW_SECRET_NAMES: 'COPILOT_GITHUB_TOKEN,GH_AW_GITHUB_MCP_SERVER_TOKEN,GH_AW_GITHUB_TOKEN,GITHUB_TOKEN' + GH_AW_SECRET_NAMES: "COPILOT_GITHUB_TOKEN,GH_AW_GITHUB_MCP_SERVER_TOKEN,GH_AW_GITHUB_TOKEN,GITHUB_TOKEN" SECRET_COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }} SECRET_GH_AW_GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN }} SECRET_GH_AW_GITHUB_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN }} @@ -1009,13 +1009,13 @@ jobs: run: | # Download official Copilot CLI installer script curl -fsSL https://raw.githubusercontent.com/github/copilot-cli/main/install.sh -o /tmp/copilot-install.sh - + # Execute the installer with the specified version export VERSION=0.0.375 && sudo bash /tmp/copilot-install.sh - + # Cleanup rm -f /tmp/copilot-install.sh - + # Verify installation copilot --version - name: Execute GitHub Copilot CLI @@ -1154,7 +1154,7 @@ jobs: uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 env: GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }} - GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG: "{\"add_comment\":{\"max\":5,\"target\":\"*\"},\"create_discussion\":{\"category\":\"q-a\",\"expires\":168,\"max\":1,\"title_prefix\":\"${{ github.workflow }}\"},\"create_pull_request\":{\"base_branch\":\"${{ github.ref_name }}\",\"draft\":true,\"max\":1,\"max_patch_size\":1024}}" + GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG: '{"add_comment":{"max":5,"target":"*"},"create_discussion":{"category":"q-a","expires":168,"max":1,"title_prefix":"${{ github.workflow 
}}"},"create_pull_request":{"base_branch":"${{ github.ref_name }}","draft":true,"max":1,"max_patch_size":1024}}' with: github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} script: | @@ -1162,4 +1162,3 @@ jobs: setupGlobals(core, github, context, exec, io); const { main } = require('/opt/gh-aw/actions/safe_output_handler_manager.cjs'); await main(); - diff --git a/.github/workflows/daily-test-improver.lock.yml b/.github/workflows/daily-test-improver.lock.yml index 1fbca3332..883234591 100644 --- a/.github/workflows/daily-test-improver.lock.yml +++ b/.github/workflows/daily-test-improver.lock.yml @@ -1,12 +1,12 @@ # -# ___ _ _ -# / _ \ | | (_) -# | |_| | __ _ ___ _ __ | |_ _ ___ +# ___ _ _ +# / _ \ | | (_) +# | |_| | __ _ ___ _ __ | |_ _ ___ # | _ |/ _` |/ _ \ '_ \| __| |/ __| -# | | | | (_| | __/ | | | |_| | (__ +# | | | | (_| | __/ | | | |_| | (__ # \_| |_/\__, |\___|_| |_|\__|_|\___| # __/ | -# _ _ |___/ +# _ _ |___/ # | | | | / _| | # | | | | ___ _ __ _ __| |_| | _____ ____ # | |/\| |/ _ \ '__| |/ /| _| |/ _ \ \ /\ / / ___| @@ -31,7 +31,7 @@ name: "Daily Test Coverage Improver" "on": schedule: - - cron: "32 7 * * *" + - cron: "32 7 * * *" workflow_dispatch: null permissions: @@ -169,13 +169,13 @@ jobs: run: | # Download official Copilot CLI installer script curl -fsSL https://raw.githubusercontent.com/github/copilot-cli/main/install.sh -o /tmp/copilot-install.sh - + # Execute the installer with the specified version export VERSION=0.0.375 && sudo bash /tmp/copilot-install.sh - + # Cleanup rm -f /tmp/copilot-install.sh - + # Verify installation copilot --version - name: Install awf binary @@ -580,7 +580,7 @@ jobs: with: script: | const fs = require('fs'); - + const awInfo = { engine_id: "copilot", engine_name: "GitHub Copilot CLI", @@ -610,13 +610,13 @@ jobs: }, created_at: new Date().toISOString() }; - + // Write to /tmp/gh-aw directory to avoid inclusion in PR const tmpPath = '/tmp/gh-aw/aw_info.json'; fs.writeFileSync(tmpPath, JSON.stringify(awInfo, null, 2)); console.log('Generated aw_info.json at:', tmpPath); console.log(JSON.stringify(awInfo, null, 2)); - + // Set model as output for reuse in other steps/jobs core.setOutput('model', awInfo.model); - name: Generate workflow overview @@ -635,140 +635,140 @@ jobs: bash /opt/gh-aw/actions/create_prompt_first.sh cat << 'PROMPT_EOF' > "$GH_AW_PROMPT" # Daily Test Coverage Improver - + ## Job Description - + You are an AI test engineer for `__GH_AW_GITHUB_REPOSITORY__`. Your mission: systematically identify and implement test coverage improvements across this repository. - + You are doing your work in phases. Right now you will perform just one of the following three phases. Choose the phase depending on what has been done so far. - + ## Phase selection - + To decide which phase to perform: - + 1. First check for existing open discussion titled "__GH_AW_GITHUB_WORKFLOW__" using `list_discussions`. Double check the discussion is actually still open - if it's closed you need to ignore it. If found, and open, read it and maintainer comments. If not found, then perform Phase 1 and nothing else. - + 2. Next check if `.github/actions/daily-test-improver/coverage-steps/action.yml` exists. If yes then read it. If not then perform Phase 2 and nothing else. - + 3. Finally, if both those exist, then perform Phase 3. - + ## Phase 1 - Testing research - + 1. Research the current state of test coverage in the repository. Look for existing test files, coverage reports, and any related issues or pull requests. - + 2. 
Create a discussion with title "__GH_AW_GITHUB_WORKFLOW__ - Research and Plan" that includes:
-
+
- A summary of your findings about the repository, its testing strategies, its test coverage
- A plan for how you will approach improving test coverage, including specific areas to focus on and strategies to use
- Details of the commands needed to run to build the project, run tests, and generate coverage reports
- Details of how tests are organized in the repo, and how new tests should be organized
- Opportunities for new ways of greatly increasing test coverage
- Any questions or clarifications needed from maintainers
-
+
**Include a "How to Control this Workflow" section at the end of the discussion that explains:**
-
+
- The user can add comments to the discussion to provide feedback or adjustments to the plan
- The user can use these commands:
-
+
gh aw disable daily-test-improver --repo __GH_AW_GITHUB_REPOSITORY__
gh aw enable daily-test-improver --repo __GH_AW_GITHUB_REPOSITORY__
gh aw run daily-test-improver --repo __GH_AW_GITHUB_REPOSITORY__ --repeat
gh aw logs daily-test-improver --repo __GH_AW_GITHUB_REPOSITORY__
-
+
**Include a "What Happens Next" section at the end of the discussion that explains:**
-
+
- The next time this workflow runs, Phase 2 will be performed, which will analyze the codebase to create coverage steps configuration
- After Phase 2 completes, Phase 3 will begin on subsequent runs to implement actual test coverage improvements
- If running in "repeat" mode, the workflow will automatically run again to proceed to the next phase
- Humans can review this research and add comments before the workflow continues
-
+
3. Exit this entire workflow, do not proceed to Phase 2 on this run. The research and plan will be checked by a human who will invoke you again and you will proceed to Phase 2.
-
+
## Phase 2 - Coverage steps inference and configuration
-
+
1. Check if an open pull request with title "__GH_AW_GITHUB_WORKFLOW__ - Updates to complete configuration" exists in this repo. If it does, add a comment to the pull request saying configuration needs to be completed, then exit the workflow.
-
+
2. Have a careful think about the CI commands needed to build the repository, run tests, produce a combined coverage report and upload it as an artifact. Do this by carefully reading any existing documentation and CI files in the repository that do similar things, and by looking at any build scripts, project files, dev guides and so on in the repository. If multiple projects are present, perform build and coverage testing on as many as possible, and where possible merge the coverage reports into one combined report. Write out the steps you worked out, in order, as a series of YAML steps suitable for inclusion in a GitHub Action.
-
+
3. Create the file `.github/actions/daily-test-improver/coverage-steps/action.yml` containing these steps, ensuring that the action.yml file is valid. Leave comments in the file to explain what the steps are doing, where the coverage report will be generated, and any other relevant information. Ensure that the steps include uploading the coverage report(s) as an artifact called "coverage". Each step of the action should append its output to a file called `coverage-steps.log` in the root of the repository. Ensure that the action.yml file is valid and correctly formatted.
-
+
4. Before running any of the steps, make a pull request for the addition of the `action.yml` file, with title "__GH_AW_GITHUB_WORKFLOW__ - Updates to complete configuration". Encourage the maintainer to review the files carefully to ensure they are appropriate for the project.
-
+
**Include a "What Happens Next" section in the PR description that explains:**
- Once this PR is merged, the next workflow run will proceed to Phase 3, where actual test coverage improvements will be implemented
- Phase 3 will use the coverage steps to systematically improve test coverage
- If running in "repeat" mode, the workflow will automatically run again to proceed to Phase 3
- Humans can review and merge this configuration before continuing
-
+
5. Try to run through the steps you worked out manually one by one. If a step needs updating, then update the branch you created in step 4. Continue through all the steps. If you can't get it to work, then create an issue describing the problem and exit the entire workflow.
-
+
6. Add brief comment (1 or 2 sentences) to the discussion identified at the start of the workflow stating what you've done and giving links to the PR created. If you successfully captured initial coverage numbers for the repository, report them appropriately.
-
+
7. Exit this entire workflow, do not proceed to Phase 3 on this run. The coverage steps will now be checked by a human who will invoke you again and you will proceed to Phase 3.
-
+
## Phase 3 - Goal selection, work and results
-
+
1. **Goal selection**. Build an understanding of what to work on and select an area of the test coverage plan to pursue
-
+
a. Repository is now test-ready. Review `coverage-steps/action.yml` and `coverage-steps.log` to understand setup. If coverage steps failed then create fix PR and exit.
-
+
b. Locate and read the coverage report. Be detailed, looking to understand the files, functions, branches, and lines of code that are not covered by tests. Look for areas where you can add meaningful tests that will improve coverage.
-
+
c. Read the plan in the discussion mentioned earlier, along with comments.
-
+
d. Check the most recent pull request with title starting with "__GH_AW_GITHUB_WORKFLOW__" (it may have been closed) and see what the status of things was there. These are your notes from last time you did your work, and may include useful recommendations for future areas to work on.
-
+
e. Check for existing open pull requests (especially yours with "__GH_AW_GITHUB_WORKFLOW__" prefix). Avoid duplicate work.
-
+
f. If plan needs updating then comment on planning discussion with revised plan and rationale. Consider maintainer feedback.
g. Based on all of the above, select an area of relatively low coverage to work on that appears tractable for further test additions. Ensure that you have a good understanding of the code and the testing requirements before proceeding.
-
+
2. **Work towards your selected goal**. For the test coverage improvement goal you selected, do the following:
-
+
a. Create a new branch starting with "test/".
-
+
b. Write new tests to improve coverage. Ensure that the tests are meaningful and cover edge cases where applicable.
-
+
c. Build the tests if necessary and remove any build errors.
-
+
d. Run the new tests to ensure they pass.
-
+
e. Re-run the test suite collecting coverage information. Check that overall coverage has improved. Document measurement attempts even if unsuccessful. If no improvement then iterate, revert, or try different approach.
-
+
3. **Finalizing changes**
-
+
a. Apply any automatic code formatting used in the repo. If necessary check CI files to understand what code formatting is used.
-
+
b. 
Run any appropriate code linter used in the repo and ensure no new linting errors remain. If necessary check CI files to understand what code linting is used. - + 4. **Results and learnings** - + a. If you succeeded in writing useful code changes that improve test coverage, create a **draft** pull request with your changes. - + **Critical:** Exclude coverage reports and tool-generated files from PR. Double-check added files and remove any that don't belong. - + Include a description of the improvements with evidence of impact. In the description, explain: - + - **Goal and rationale:** Coverage area chosen and why it matters - **Approach:** Testing strategy, methodology, and implementation steps - **Impact measurement:** How coverage was tested and results achieved - **Trade-offs:** What changed (complexity, test maintenance) - **Validation:** Testing approach and success criteria met - **Future work:** Additional coverage opportunities identified - + **Test coverage results section:** Document coverage impact with exact coverage numbers before and after the changes, drawing from the coverage reports, in a table if possible. Include changes in numbers for overall coverage. Be transparent about measurement limitations and methodology. Mark estimates clearly. - + **Reproducibility section:** Provide clear instructions to reproduce coverage testing, including setup commands (install dependencies, build code, run tests, generate coverage reports), measurement procedures, and expected results format. - + After creation, check the pull request to ensure it is correct, includes all expected files, and doesn't include any unwanted files or changes. Make any necessary corrections by pushing further commits to the branch. - + b. If you think you found bugs in the code while adding tests, also create one single combined issue for all of them, starting the title of the issue with "__GH_AW_GITHUB_WORKFLOW__". Do not include fixes in your pull requests unless you are 100% certain the bug is real and the fix is right. - + 5. **Final update**: Add brief comment (1 or 2 sentences) to the discussion identified at the start of the workflow stating goal worked on, PR links, and progress made, reporting the coverage improvement numbers achieved and current overall coverage numbers. - + PROMPT_EOF - name: Substitute placeholders uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 @@ -779,7 +779,7 @@ jobs: with: script: | const substitutePlaceholders = require('/opt/gh-aw/actions/substitute_placeholders.cjs'); - + // Call the substitution function return await substitutePlaceholders({ file: process.env.GH_AW_PROMPT, @@ -810,9 +810,9 @@ jobs: To create or modify GitHub resources (issues, discussions, pull requests, etc.), you MUST call the appropriate safe output tool. Simply writing content will NOT work - the workflow requires actual tool calls. - + **Available tools**: add_comment, create_discussion, create_issue, create_pull_request, missing_tool, noop - + **Critical**: Tool calls write structured data that downstream jobs process. Without tool calls, follow-up actions will be skipped. 
@@ -857,7 +857,7 @@ jobs: - **workflow-run-id**: __GH_AW_GITHUB_RUN_ID__ {{/if}} - + PROMPT_EOF - name: Substitute placeholders uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 @@ -874,7 +874,7 @@ jobs: with: script: | const substitutePlaceholders = require('/opt/gh-aw/actions/substitute_placeholders.cjs'); - + // Call the substitution function return await substitutePlaceholders({ file: process.env.GH_AW_PROMPT, @@ -940,7 +940,7 @@ jobs: # This ensures they are in /tmp/gh-aw/ where secret redaction can scan them SESSION_STATE_DIR="$HOME/.copilot/session-state" LOGS_DIR="/tmp/gh-aw/sandbox/agent/logs" - + if [ -d "$SESSION_STATE_DIR" ]; then echo "Copying Copilot session state files from $SESSION_STATE_DIR to $LOGS_DIR" mkdir -p "$LOGS_DIR" @@ -959,7 +959,7 @@ jobs: const { main } = require('/opt/gh-aw/actions/redact_secrets.cjs'); await main(); env: - GH_AW_SECRET_NAMES: 'COPILOT_GITHUB_TOKEN,GH_AW_GITHUB_MCP_SERVER_TOKEN,GH_AW_GITHUB_TOKEN,GITHUB_TOKEN' + GH_AW_SECRET_NAMES: "COPILOT_GITHUB_TOKEN,GH_AW_GITHUB_MCP_SERVER_TOKEN,GH_AW_GITHUB_TOKEN,GITHUB_TOKEN" SECRET_COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }} SECRET_GH_AW_GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN }} SECRET_GH_AW_GITHUB_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN }} @@ -1224,13 +1224,13 @@ jobs: run: | # Download official Copilot CLI installer script curl -fsSL https://raw.githubusercontent.com/github/copilot-cli/main/install.sh -o /tmp/copilot-install.sh - + # Execute the installer with the specified version export VERSION=0.0.375 && sudo bash /tmp/copilot-install.sh - + # Cleanup rm -f /tmp/copilot-install.sh - + # Verify installation copilot --version - name: Execute GitHub Copilot CLI @@ -1369,7 +1369,7 @@ jobs: uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 env: GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }} - GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG: "{\"add_comment\":{\"max\":1,\"target\":\"*\"},\"create_discussion\":{\"category\":\"ideas\",\"expires\":168,\"max\":1,\"title_prefix\":\"${{ github.workflow }}\"},\"create_issue\":{\"max\":1},\"create_pull_request\":{\"base_branch\":\"${{ github.ref_name }}\",\"draft\":true,\"max\":1,\"max_patch_size\":1024}}" + GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG: '{"add_comment":{"max":1,"target":"*"},"create_discussion":{"category":"ideas","expires":168,"max":1,"title_prefix":"${{ github.workflow }}"},"create_issue":{"max":1},"create_pull_request":{"base_branch":"${{ github.ref_name }}","draft":true,"max":1,"max_patch_size":1024}}' with: github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} script: | @@ -1377,4 +1377,3 @@ jobs: setupGlobals(core, github, context, exec, io); const { main } = require('/opt/gh-aw/actions/safe_output_handler_manager.cjs'); await main(); - diff --git a/.github/workflows/issue-triage.lock.yml b/.github/workflows/issue-triage.lock.yml index c7e648819..d207bdf30 100644 --- a/.github/workflows/issue-triage.lock.yml +++ b/.github/workflows/issue-triage.lock.yml @@ -1,12 +1,12 @@ # -# ___ _ _ -# / _ \ | | (_) -# | |_| | __ _ ___ _ __ | |_ _ ___ +# ___ _ _ +# / _ \ | | (_) +# | |_| | __ _ ___ _ __ | |_ _ ___ # | _ |/ _` |/ _ \ '_ \| __| |/ __| -# | | | | (_| | __/ | | | |_| | (__ +# | | | | (_| | __/ | | | |_| | (__ # \_| |_/\__, |\___|_| |_|\__|_|\___| # __/ | -# _ _ |___/ +# _ _ |___/ # | | | | / _| | # | | | | ___ _ __ _ __| |_| | _____ ____ # | |/\| |/ _ \ '__| |/ /| _| |/ _ \ \ /\ / / ___| @@ -33,8 +33,8 @@ name: "Agentic Triage" "on": issues: types: - - 
opened - - reopened + - opened + - reopened permissions: read-all @@ -144,13 +144,13 @@ jobs: run: | # Download official Copilot CLI installer script curl -fsSL https://raw.githubusercontent.com/github/copilot-cli/main/install.sh -o /tmp/copilot-install.sh - + # Execute the installer with the specified version export VERSION=0.0.375 && sudo bash /tmp/copilot-install.sh - + # Cleanup rm -f /tmp/copilot-install.sh - + # Verify installation copilot --version - name: Install awf binary @@ -407,7 +407,7 @@ jobs: with: script: | const fs = require('fs'); - + const awInfo = { engine_id: "copilot", engine_name: "GitHub Copilot CLI", @@ -437,13 +437,13 @@ jobs: }, created_at: new Date().toISOString() }; - + // Write to /tmp/gh-aw directory to avoid inclusion in PR const tmpPath = '/tmp/gh-aw/aw_info.json'; fs.writeFileSync(tmpPath, JSON.stringify(awInfo, null, 2)); console.log('Generated aw_info.json at:', tmpPath); console.log(JSON.stringify(awInfo, null, 2)); - + // Set model as output for reuse in other steps/jobs core.setOutput('model', awInfo.model); - name: Generate workflow overview @@ -461,35 +461,35 @@ jobs: bash /opt/gh-aw/actions/create_prompt_first.sh cat << 'PROMPT_EOF' > "$GH_AW_PROMPT" # Agentic Triage - - - + + + You're a triage assistant for GitHub issues. Your task is to analyze issue #__GH_AW_GITHUB_EVENT_ISSUE_NUMBER__ and perform some initial triage tasks related to that issue. - + 1. Select appropriate labels for the issue from the provided list. - + 2. Retrieve the issue content using the `get_issue` tool. If the issue is obviously spam, or generated by bot, or something else that is not an actual issue to be worked on, then add an issue comment to the issue with a one sentence analysis and exit the workflow. - + 3. Next, use the GitHub tools to gather additional context about the issue: - + - Fetch the list of labels available in this repository. Use 'gh label list' bash command to fetch the labels. This will give you the labels you can use for triaging issues. - Fetch any comments on the issue using the `get_issue_comments` tool - Find similar issues if needed using the `search_issues` tool - List the issues to see other open issues in the repository using the `list_issues` tool - + 4. Analyze the issue content, considering: - + - The issue title and description - The type of issue (bug report, feature request, question, etc.) - Technical areas mentioned - Severity or priority indicators - User impact - Components affected - + 5. Write notes, ideas, nudges, resource links, debugging strategies and/or reproduction steps for the team to consider relevant to the issue. - + 6. Select appropriate labels from the available labels list provided above: - + - Choose labels that accurately reflect the issue's nature - Be specific but comprehensive - Select priority labels if you can determine urgency (high-priority, med-priority, or low-priority) @@ -497,13 +497,13 @@ jobs: - Search for similar issues, and if you find similar issues consider using a "duplicate" label if appropriate. Only do so if the issue is a duplicate of another OPEN issue. - Only select labels from the provided list above - It's okay to not add any labels if none are clearly applicable - + 7. Apply the selected labels: - + - Use the `update_issue` tool to apply the labels to the issue - DO NOT communicate directly with users - If no labels are clearly applicable, do not apply any labels - + 8. 
Add an issue comment to the issue with your analysis: - Start with "🎯 Agentic Issue Triage" - Provide a brief summary of the issue @@ -515,7 +515,7 @@ jobs: - If you have any debugging strategies, include them in the comment - If appropriate break the issue down to sub-tasks and write a checklist of things to do. - Use collapsed-by-default sections in the GitHub markdown to keep the comment tidy. Collapse all sections except the short main summary at the top. - + PROMPT_EOF - name: Substitute placeholders uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 @@ -525,7 +525,7 @@ jobs: with: script: | const substitutePlaceholders = require('/opt/gh-aw/actions/substitute_placeholders.cjs'); - + // Call the substitution function return await substitutePlaceholders({ file: process.env.GH_AW_PROMPT, @@ -555,9 +555,9 @@ jobs: To create or modify GitHub resources (issues, discussions, pull requests, etc.), you MUST call the appropriate safe output tool. Simply writing content will NOT work - the workflow requires actual tool calls. - + **Available tools**: add_comment, add_labels, missing_tool, noop - + **Critical**: Tool calls write structured data that downstream jobs process. Without tool calls, follow-up actions will be skipped. @@ -602,7 +602,7 @@ jobs: - **workflow-run-id**: __GH_AW_GITHUB_RUN_ID__ {{/if}} - + PROMPT_EOF - name: Substitute placeholders uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 @@ -619,7 +619,7 @@ jobs: with: script: | const substitutePlaceholders = require('/opt/gh-aw/actions/substitute_placeholders.cjs'); - + // Call the substitution function return await substitutePlaceholders({ file: process.env.GH_AW_PROMPT, @@ -679,7 +679,7 @@ jobs: # This ensures they are in /tmp/gh-aw/ where secret redaction can scan them SESSION_STATE_DIR="$HOME/.copilot/session-state" LOGS_DIR="/tmp/gh-aw/sandbox/agent/logs" - + if [ -d "$SESSION_STATE_DIR" ]; then echo "Copying Copilot session state files from $SESSION_STATE_DIR to $LOGS_DIR" mkdir -p "$LOGS_DIR" @@ -698,7 +698,7 @@ jobs: const { main } = require('/opt/gh-aw/actions/redact_secrets.cjs'); await main(); env: - GH_AW_SECRET_NAMES: 'COPILOT_GITHUB_TOKEN,GH_AW_GITHUB_MCP_SERVER_TOKEN,GH_AW_GITHUB_TOKEN,GITHUB_TOKEN' + GH_AW_SECRET_NAMES: "COPILOT_GITHUB_TOKEN,GH_AW_GITHUB_MCP_SERVER_TOKEN,GH_AW_GITHUB_TOKEN,GITHUB_TOKEN" SECRET_COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }} SECRET_GH_AW_GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN }} SECRET_GH_AW_GITHUB_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN }} @@ -960,13 +960,13 @@ jobs: run: | # Download official Copilot CLI installer script curl -fsSL https://raw.githubusercontent.com/github/copilot-cli/main/install.sh -o /tmp/copilot-install.sh - + # Execute the installer with the specified version export VERSION=0.0.375 && sudo bash /tmp/copilot-install.sh - + # Cleanup rm -f /tmp/copilot-install.sh - + # Verify installation copilot --version - name: Execute GitHub Copilot CLI @@ -1091,7 +1091,7 @@ jobs: uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 env: GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }} - GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG: "{\"add_comment\":{\"max\":1},\"add_labels\":{\"max\":5}}" + GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG: '{"add_comment":{"max":1},"add_labels":{"max":5}}' with: github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} script: | @@ -1099,4 +1099,3 @@ jobs: setupGlobals(core, github, context, exec, io); const { main } = 
require('/opt/gh-aw/actions/safe_output_handler_manager.cjs'); await main(); - diff --git a/.github/workflows/update-docs.lock.yml b/.github/workflows/update-docs.lock.yml index f81fdac34..46ccf65be 100644 --- a/.github/workflows/update-docs.lock.yml +++ b/.github/workflows/update-docs.lock.yml @@ -1,12 +1,12 @@ # -# ___ _ _ -# / _ \ | | (_) -# | |_| | __ _ ___ _ __ | |_ _ ___ +# ___ _ _ +# / _ \ | | (_) +# | |_| | __ _ ___ _ __ | |_ _ ___ # | _ |/ _` |/ _ \ '_ \| __| |/ __| -# | | | | (_| | __/ | | | |_| | (__ +# | | | | (_| | __/ | | | |_| | (__ # \_| |_/\__, |\___|_| |_|\__|_|\___| # __/ | -# _ _ |___/ +# _ _ |___/ # | | | | / _| | # | | | | ___ _ __ _ __| |_| | _____ ____ # | |/\| |/ _ \ '__| |/ /| _| |/ _ \ \ /\ / / ___| @@ -33,7 +33,7 @@ name: "Update Docs" "on": push: branches: - - main + - main workflow_dispatch: null permissions: read-all @@ -126,13 +126,13 @@ jobs: run: | # Download official Copilot CLI installer script curl -fsSL https://raw.githubusercontent.com/github/copilot-cli/main/install.sh -o /tmp/copilot-install.sh - + # Execute the installer with the specified version export VERSION=0.0.375 && sudo bash /tmp/copilot-install.sh - + # Cleanup rm -f /tmp/copilot-install.sh - + # Verify installation copilot --version - name: Install awf binary @@ -376,7 +376,7 @@ jobs: with: script: | const fs = require('fs'); - + const awInfo = { engine_id: "copilot", engine_name: "GitHub Copilot CLI", @@ -406,13 +406,13 @@ jobs: }, created_at: new Date().toISOString() }; - + // Write to /tmp/gh-aw directory to avoid inclusion in PR const tmpPath = '/tmp/gh-aw/aw_info.json'; fs.writeFileSync(tmpPath, JSON.stringify(awInfo, null, 2)); console.log('Generated aw_info.json at:', tmpPath); console.log(JSON.stringify(awInfo, null, 2)); - + // Set model as output for reuse in other steps/jobs core.setOutput('model', awInfo.model); - name: Generate workflow overview @@ -431,38 +431,38 @@ jobs: bash /opt/gh-aw/actions/create_prompt_first.sh cat << 'PROMPT_EOF' > "$GH_AW_PROMPT" # Update Docs - + ## Job Description - - - + + + Your name is __GH_AW_GITHUB_WORKFLOW__. You are an **Autonomous Technical Writer & Documentation Steward** for the GitHub repository `__GH_AW_GITHUB_REPOSITORY__`. - + ### Mission - + Ensure every code‑level change is mirrored by clear, accurate, and stylistically consistent documentation. - + ### Voice & Tone - + - Precise, concise, and developer‑friendly - Active voice, plain English, progressive disclosure (high‑level first, drill‑down examples next) - Empathetic toward both newcomers and power users - + ### Key Values - + Documentation‑as‑Code, transparency, single source of truth, continuous improvement, accessibility, internationalization‑readiness - + ### Your Workflow - + 1. **Analyze Repository Changes** - + - On every push to main branch, examine the diff to identify changed/added/removed entities - Look for new APIs, functions, classes, configuration files, or significant code changes - Check existing documentation for accuracy and completeness - Identify documentation gaps like failing tests: a "red build" until fixed - + 2. **Documentation Assessment** - + - Review existing documentation structure (look for docs/, documentation/, or similar directories) - Assess documentation quality against style guidelines: - Diátaxis framework (tutorials, how-to guides, technical reference, explanation) @@ -470,17 +470,17 @@ jobs: - Inclusive naming conventions - Microsoft Writing Style Guide standards - Identify missing or outdated documentation - + 3. 
**Create or Update Documentation** - + - Use Markdown (.md) format wherever possible - Fall back to MDX only when interactive components are indispensable - Follow progressive disclosure: high-level concepts first, detailed examples second - Ensure content is accessible and internationalization-ready - Create clear, actionable documentation that serves both newcomers and power users - + 4. **Documentation Structure & Organization** - + - Organize content following Diátaxis methodology: - **Tutorials**: Learning-oriented, hands-on lessons - **How-to guides**: Problem-oriented, practical steps @@ -488,43 +488,43 @@ jobs: - **Explanation**: Understanding-oriented, clarification and discussion - Maintain consistent navigation and cross-references - Ensure searchability and discoverability - + 5. **Quality Assurance** - + - Check for broken links, missing images, or formatting issues - Ensure code examples are accurate and functional - Verify accessibility standards are met - + 6. **Continuous Improvement** - + - Perform nightly sanity sweeps for documentation drift - Update documentation based on user feedback in issues and discussions - Maintain and improve documentation toolchain and automation - + ### Output Requirements - + - **Create Draft Pull Requests**: When documentation needs updates, create focused draft pull requests with clear descriptions - + ### Technical Implementation - + - **Hosting**: Prepare documentation for GitHub Pages deployment with branch-based workflows - **Automation**: Implement linting and style checking for documentation consistency - + ### Error Handling - + - If documentation directories don't exist, suggest appropriate structure - If build tools are missing, recommend necessary packages or configuration - + ### Exit Conditions - + - Exit if the repository has no implementation code yet (empty repository) - Exit if no code changes require documentation updates - Exit if all documentation is already up-to-date and comprehensive - + > NOTE: Never make direct pushes to the main branch. Always create a pull request for documentation changes. - + > NOTE: Treat documentation gaps like failing tests. - + PROMPT_EOF - name: Substitute placeholders uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 @@ -535,7 +535,7 @@ jobs: with: script: | const substitutePlaceholders = require('/opt/gh-aw/actions/substitute_placeholders.cjs'); - + // Call the substitution function return await substitutePlaceholders({ file: process.env.GH_AW_PROMPT, @@ -566,9 +566,9 @@ jobs: To create or modify GitHub resources (issues, discussions, pull requests, etc.), you MUST call the appropriate safe output tool. Simply writing content will NOT work - the workflow requires actual tool calls. - + **Available tools**: create_pull_request, missing_tool, noop - + **Critical**: Tool calls write structured data that downstream jobs process. Without tool calls, follow-up actions will be skipped. 
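The "Critical" note above is the crux of the safe-outputs design: the agent emits structured tool calls, and a separate handler job enforces the per-tool limits carried in the GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG JSON that this diff requotes. A minimal Go sketch of decoding that shape; the struct and field names are illustrative assumptions, not gh-aw's internal types:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// toolConfig and handlerConfig are illustrative guesses at the JSON shape;
// gh-aw's real types are internal to its compiler.
type toolConfig struct {
	BaseBranch   string `json:"base_branch,omitempty"`
	Draft        bool   `json:"draft,omitempty"`
	Max          int    `json:"max,omitempty"`
	MaxPatchSize int    `json:"max_patch_size,omitempty"`
}

type handlerConfig map[string]toolConfig

func main() {
	// Shape taken from the update-docs handler config in this diff, with a
	// literal branch name standing in for the ${{ github.ref_name }} expression.
	raw := `{"create_pull_request":{"base_branch":"main","draft":true,"max":1,"max_patch_size":1024}}`

	var cfg handlerConfig
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}

	pr := cfg["create_pull_request"]
	fmt.Printf("allow at most %d draft=%v PR(s) against %s, max_patch_size=%d\n",
		pr.Max, pr.Draft, pr.BaseBranch, pr.MaxPatchSize)
}
```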
@@ -613,7 +613,7 @@ jobs: - **workflow-run-id**: __GH_AW_GITHUB_RUN_ID__ {{/if}} - + PROMPT_EOF - name: Substitute placeholders uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 @@ -630,7 +630,7 @@ jobs: with: script: | const substitutePlaceholders = require('/opt/gh-aw/actions/substitute_placeholders.cjs'); - + // Call the substitution function return await substitutePlaceholders({ file: process.env.GH_AW_PROMPT, @@ -691,7 +691,7 @@ jobs: # This ensures they are in /tmp/gh-aw/ where secret redaction can scan them SESSION_STATE_DIR="$HOME/.copilot/session-state" LOGS_DIR="/tmp/gh-aw/sandbox/agent/logs" - + if [ -d "$SESSION_STATE_DIR" ]; then echo "Copying Copilot session state files from $SESSION_STATE_DIR to $LOGS_DIR" mkdir -p "$LOGS_DIR" @@ -710,7 +710,7 @@ jobs: const { main } = require('/opt/gh-aw/actions/redact_secrets.cjs'); await main(); env: - GH_AW_SECRET_NAMES: 'COPILOT_GITHUB_TOKEN,GH_AW_GITHUB_MCP_SERVER_TOKEN,GH_AW_GITHUB_TOKEN,GITHUB_TOKEN' + GH_AW_SECRET_NAMES: "COPILOT_GITHUB_TOKEN,GH_AW_GITHUB_MCP_SERVER_TOKEN,GH_AW_GITHUB_TOKEN,GITHUB_TOKEN" SECRET_COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }} SECRET_GH_AW_GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN }} SECRET_GH_AW_GITHUB_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN }} @@ -973,13 +973,13 @@ jobs: run: | # Download official Copilot CLI installer script curl -fsSL https://raw.githubusercontent.com/github/copilot-cli/main/install.sh -o /tmp/copilot-install.sh - + # Execute the installer with the specified version export VERSION=0.0.375 && sudo bash /tmp/copilot-install.sh - + # Cleanup rm -f /tmp/copilot-install.sh - + # Verify installation copilot --version - name: Execute GitHub Copilot CLI @@ -1129,7 +1129,7 @@ jobs: uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 env: GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }} - GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG: "{\"create_pull_request\":{\"base_branch\":\"${{ github.ref_name }}\",\"draft\":true,\"max\":1,\"max_patch_size\":1024}}" + GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG: '{"create_pull_request":{"base_branch":"${{ github.ref_name }}","draft":true,"max":1,"max_patch_size":1024}}' with: github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} script: | @@ -1137,4 +1137,3 @@ jobs: setupGlobals(core, github, context, exec, io); const { main } = require('/opt/gh-aw/actions/safe_output_handler_manager.cjs'); await main(); - diff --git a/.github/workflows/weekly-research.lock.yml b/.github/workflows/weekly-research.lock.yml index 7494b566f..7f6e6812e 100644 --- a/.github/workflows/weekly-research.lock.yml +++ b/.github/workflows/weekly-research.lock.yml @@ -1,12 +1,12 @@ # -# ___ _ _ -# / _ \ | | (_) -# | |_| | __ _ ___ _ __ | |_ _ ___ +# ___ _ _ +# / _ \ | | (_) +# | |_| | __ _ ___ _ __ | |_ _ ___ # | _ |/ _` |/ _ \ '_ \| __| |/ __| -# | | | | (_| | __/ | | | |_| | (__ +# | | | | (_| | __/ | | | |_| | (__ # \_| |_/\__, |\___|_| |_|\__|_|\___| # __/ | -# _ _ |___/ +# _ _ |___/ # | | | | / _| | # | | | | ___ _ __ _ __| |_| | _____ ____ # | |/\| |/ _ \ '__| |/ /| _| |/ _ \ \ /\ / / ___| @@ -32,7 +32,7 @@ name: "Weekly Research" "on": schedule: - - cron: "38 8 * * 1" + - cron: "38 8 * * 1" workflow_dispatch: null permissions: read-all @@ -127,13 +127,13 @@ jobs: run: | # Download official Copilot CLI installer script curl -fsSL https://raw.githubusercontent.com/github/copilot-cli/main/install.sh -o /tmp/copilot-install.sh - + # Execute the installer with the specified version export 
VERSION=0.0.375 && sudo bash /tmp/copilot-install.sh - + # Cleanup rm -f /tmp/copilot-install.sh - + # Verify installation copilot --version - name: Install awf binary @@ -367,7 +367,7 @@ jobs: with: script: | const fs = require('fs'); - + const awInfo = { engine_id: "copilot", engine_name: "GitHub Copilot CLI", @@ -397,13 +397,13 @@ jobs: }, created_at: new Date().toISOString() }; - + // Write to /tmp/gh-aw directory to avoid inclusion in PR const tmpPath = '/tmp/gh-aw/aw_info.json'; fs.writeFileSync(tmpPath, JSON.stringify(awInfo, null, 2)); console.log('Generated aw_info.json at:', tmpPath); console.log(JSON.stringify(awInfo, null, 2)); - + // Set model as output for reuse in other steps/jobs core.setOutput('model', awInfo.model); - name: Generate workflow overview @@ -422,16 +422,16 @@ jobs: bash /opt/gh-aw/actions/create_prompt_first.sh cat << 'PROMPT_EOF' > "$GH_AW_PROMPT" # Weekly Research - + ## Job Description - + Do a deep research investigation in the __GH_AW_GITHUB_REPOSITORY__ repository, and the related industry in general. - + - Read selections of the latest code, issues, and PRs for this repo. - Read the latest trends and news from software industry news sources on the web. - + Create a new GitHub discussion with a title starting with "__GH_AW_GITHUB_WORKFLOW__" containing a markdown report with - + - Interesting news about the area related to this software project. - Related products and competitive analysis - Related research papers @@ -439,15 +439,15 @@ jobs: - Market opportunities - Business analysis - Enjoyable anecdotes - + Only a new discussion should be created; no existing discussions should be adjusted. - + At the end of the report, write a collapsed section with the following: - + - All search queries (web, issues, pulls, content) you used - All bash commands you executed - All MCP tools you used - + PROMPT_EOF - name: Substitute placeholders uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 @@ -458,7 +458,7 @@ jobs: with: script: | const substitutePlaceholders = require('/opt/gh-aw/actions/substitute_placeholders.cjs'); - + // Call the substitution function return await substitutePlaceholders({ file: process.env.GH_AW_PROMPT, @@ -489,9 +489,9 @@ jobs: To create or modify GitHub resources (issues, discussions, pull requests, etc.), you MUST call the appropriate safe output tool. Simply writing content will NOT work - the workflow requires actual tool calls. - + **Available tools**: create_discussion, missing_tool, noop - + **Critical**: Tool calls write structured data that downstream jobs process. Without tool calls, follow-up actions will be skipped. 
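The same contract applies to this workflow: the agent must emit a create_discussion tool call rather than plain prose. As a hedged illustration of "structured data that downstream jobs process", the sketch below appends one JSON object per line to an output file. The entry schema and the GH_AW_SAFE_OUTPUTS path are assumptions; the real wire format is internal to gh-aw.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// discussionOutput is a guess at a create_discussion entry; the actual
// safe-output schema is defined by gh-aw and is not shown in this diff.
type discussionOutput struct {
	Type  string `json:"type"`
	Title string `json:"title"`
	Body  string `json:"body"`
}

func main() {
	entry := discussionOutput{
		Type:  "create_discussion",
		Title: "Weekly Research: example report",
		Body:  "## Findings\n\n- example item",
	}

	line, err := json.Marshal(entry)
	if err != nil {
		panic(err)
	}

	// GH_AW_SAFE_OUTPUTS as the file path is an assumption; fall back to a
	// local file so the sketch runs standalone.
	path := os.Getenv("GH_AW_SAFE_OUTPUTS")
	if path == "" {
		path = "safe_outputs.jsonl"
	}

	f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	fmt.Fprintln(f, string(line)) // one JSON object per line
}
```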
@@ -536,7 +536,7 @@ jobs: - **workflow-run-id**: __GH_AW_GITHUB_RUN_ID__ {{/if}} - + PROMPT_EOF - name: Substitute placeholders uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 @@ -553,7 +553,7 @@ jobs: with: script: | const substitutePlaceholders = require('/opt/gh-aw/actions/substitute_placeholders.cjs'); - + // Call the substitution function return await substitutePlaceholders({ file: process.env.GH_AW_PROMPT, @@ -614,7 +614,7 @@ jobs: # This ensures they are in /tmp/gh-aw/ where secret redaction can scan them SESSION_STATE_DIR="$HOME/.copilot/session-state" LOGS_DIR="/tmp/gh-aw/sandbox/agent/logs" - + if [ -d "$SESSION_STATE_DIR" ]; then echo "Copying Copilot session state files from $SESSION_STATE_DIR to $LOGS_DIR" mkdir -p "$LOGS_DIR" @@ -633,7 +633,7 @@ jobs: const { main } = require('/opt/gh-aw/actions/redact_secrets.cjs'); await main(); env: - GH_AW_SECRET_NAMES: 'COPILOT_GITHUB_TOKEN,GH_AW_GITHUB_MCP_SERVER_TOKEN,GH_AW_GITHUB_TOKEN,GITHUB_TOKEN' + GH_AW_SECRET_NAMES: "COPILOT_GITHUB_TOKEN,GH_AW_GITHUB_MCP_SERVER_TOKEN,GH_AW_GITHUB_TOKEN,GITHUB_TOKEN" SECRET_COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }} SECRET_GH_AW_GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN }} SECRET_GH_AW_GITHUB_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN }} @@ -897,13 +897,13 @@ jobs: run: | # Download official Copilot CLI installer script curl -fsSL https://raw.githubusercontent.com/github/copilot-cli/main/install.sh -o /tmp/copilot-install.sh - + # Execute the installer with the specified version export VERSION=0.0.375 && sudo bash /tmp/copilot-install.sh - + # Cleanup rm -f /tmp/copilot-install.sh - + # Verify installation copilot --version - name: Execute GitHub Copilot CLI @@ -1014,7 +1014,7 @@ jobs: uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 env: GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }} - GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG: "{\"create_discussion\":{\"category\":\"ideas\",\"expires\":168,\"max\":1,\"title_prefix\":\"${{ github.workflow }}\"}}" + GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG: '{"create_discussion":{"category":"ideas","expires":168,"max":1,"title_prefix":"${{ github.workflow }}"}}' with: github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} script: | @@ -1022,4 +1022,3 @@ jobs: setupGlobals(core, github, context, exec, io); const { main } = require('/opt/gh-aw/actions/safe_output_handler_manager.cjs'); await main(); - diff --git a/.golangci.yml b/.golangci.yml index b4203c8bc..3efed2632 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -76,6 +76,9 @@ linters: - linters: - ireturn path: pkg/client/flux/reconciler.go + - linters: + - ireturn + path: pkg/k8s/apiserver_test.go - linters: - ireturn path: pkg/svc/reconciler/base.go diff --git a/pkg/apis/cluster/v1alpha1/enums_test.go b/pkg/apis/cluster/v1alpha1/enums_test.go new file mode 100644 index 000000000..cc5e7d1e4 --- /dev/null +++ b/pkg/apis/cluster/v1alpha1/enums_test.go @@ -0,0 +1,366 @@ +package v1alpha1_test + +import ( + "testing" + + v1alpha1 "github.com/devantler-tech/ksail/v5/pkg/apis/cluster/v1alpha1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Test Default() and ValidValues() methods for all enum types. 
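The assertions that follow pin down a shared convention rather than one type's behavior: every enum exposes Default() (the value used when a field is unset) and ValidValues() (the accepted spellings). A self-contained sketch of a hypothetical enum following the same convention; Flavor is not part of the v1alpha1 API:

```go
package main

import "fmt"

// Flavor is a hypothetical enum following the convention the tests below
// pin down for Distribution, CNI, CSI, and the other v1alpha1 enums.
type Flavor string

const (
	FlavorPlain Flavor = "Plain"
	FlavorSpicy Flavor = "Spicy"
)

// Default returns the value used when the field is unset.
func (Flavor) Default() Flavor { return FlavorPlain }

// ValidValues lists the accepted spellings, e.g. for CLI flag help text.
func (Flavor) ValidValues() []string { return []string{"Plain", "Spicy"} }

func main() {
	var f Flavor
	fmt.Println(f.Default(), f.ValidValues()) // Plain [Plain Spicy]
}
```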
+ +func TestDistribution_Default(t *testing.T) { + t.Parallel() + + var dist v1alpha1.Distribution + assert.Equal(t, v1alpha1.DistributionVanilla, dist.Default()) +} + +func TestDistribution_ValidValues(t *testing.T) { + t.Parallel() + + var dist v1alpha1.Distribution + + values := dist.ValidValues() + assert.Contains(t, values, "Vanilla") + assert.Contains(t, values, "K3s") + assert.Contains(t, values, "Talos") + assert.Len(t, values, 3) +} + +func TestCNI_Default(t *testing.T) { + t.Parallel() + + var cni v1alpha1.CNI + assert.Equal(t, v1alpha1.CNIDefault, cni.Default()) +} + +func TestCNI_ValidValues(t *testing.T) { + t.Parallel() + + var cni v1alpha1.CNI + + values := cni.ValidValues() + assert.Contains(t, values, "Default") + assert.Contains(t, values, "Cilium") + assert.Contains(t, values, "Calico") + assert.Len(t, values, 3) +} + +func TestCSI_Default(t *testing.T) { + t.Parallel() + + var csi v1alpha1.CSI + assert.Equal(t, v1alpha1.CSIDefault, csi.Default()) +} + +func TestCSI_ValidValues(t *testing.T) { + t.Parallel() + + var csi v1alpha1.CSI + + values := csi.ValidValues() + assert.Contains(t, values, "Default") + assert.Contains(t, values, "LocalPathStorage") + assert.Len(t, values, 2) +} + +func TestMetricsServer_Default(t *testing.T) { + t.Parallel() + + var ms v1alpha1.MetricsServer + assert.Equal(t, v1alpha1.MetricsServerDefault, ms.Default()) +} + +func TestMetricsServer_ValidValues(t *testing.T) { + t.Parallel() + + var ms v1alpha1.MetricsServer + + values := ms.ValidValues() + assert.Contains(t, values, "Default") + assert.Contains(t, values, "Enabled") + assert.Contains(t, values, "Disabled") + assert.Len(t, values, 3) +} + +func TestCertManager_Default(t *testing.T) { + t.Parallel() + + var cm v1alpha1.CertManager + assert.Equal(t, v1alpha1.CertManagerDisabled, cm.Default()) +} + +func TestCertManager_ValidValues(t *testing.T) { + t.Parallel() + + var cm v1alpha1.CertManager + + values := cm.ValidValues() + assert.Contains(t, values, "Enabled") + assert.Contains(t, values, "Disabled") + assert.Len(t, values, 2) +} + +func TestPolicyEngine_Default(t *testing.T) { + t.Parallel() + + var pe v1alpha1.PolicyEngine + assert.Equal(t, v1alpha1.PolicyEngineNone, pe.Default()) +} + +func TestPolicyEngine_ValidValues(t *testing.T) { + t.Parallel() + + var pe v1alpha1.PolicyEngine + + values := pe.ValidValues() + assert.Contains(t, values, "None") + assert.Contains(t, values, "Kyverno") + assert.Contains(t, values, "Gatekeeper") + assert.Len(t, values, 3) +} + +func TestPolicyEngine_StringAndType(t *testing.T) { + t.Parallel() + + pe := v1alpha1.PolicyEngineKyverno + assert.Equal(t, "Kyverno", pe.String()) + assert.Equal(t, "PolicyEngine", pe.Type()) +} + +func TestGitOpsEngine_Default(t *testing.T) { + t.Parallel() + + var engine v1alpha1.GitOpsEngine + assert.Equal(t, v1alpha1.GitOpsEngineNone, engine.Default()) +} + +func TestGitOpsEngine_ValidValues(t *testing.T) { + t.Parallel() + + var engine v1alpha1.GitOpsEngine + + values := engine.ValidValues() + assert.Contains(t, values, "None") + assert.Contains(t, values, "Flux") + assert.Contains(t, values, "ArgoCD") + assert.Len(t, values, 3) +} + +// Provider tests. 
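The String/Set/Type trio exercised below matches the pflag.Value contract that Cobra uses for custom flag types. That reading is an inference from the test names, not something this diff states. A sketch of the pattern with a hypothetical Mode type whose Set matches case-insensitively, mirroring the docker/DOCKER cases in the table:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// Mode is a hypothetical enum implementing the pflag.Value-style contract
// (String, Set, Type) that the Provider tests appear to exercise.
type Mode string

const (
	ModeFast Mode = "Fast"
	ModeSafe Mode = "Safe"
)

var errInvalidMode = errors.New("invalid mode")

func (m Mode) String() string { return string(m) }
func (Mode) Type() string     { return "Mode" }

// Set matches case-insensitively, so "fast", "FAST", and "Fast" all
// normalize to the canonical spelling.
func (m *Mode) Set(value string) error {
	for _, candidate := range []Mode{ModeFast, ModeSafe} {
		if strings.EqualFold(value, string(candidate)) {
			*m = candidate
			return nil
		}
	}

	return fmt.Errorf("%w: %q", errInvalidMode, value)
}

func main() {
	var m Mode
	if err := m.Set("FAST"); err != nil {
		panic(err)
	}
	fmt.Println(m) // Fast
}
```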
+ +func TestProvider_Set(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input string + expected v1alpha1.Provider + wantError bool + }{ + { + name: "docker_lowercase", + input: "docker", + expected: v1alpha1.ProviderDocker, + wantError: false, + }, + { + name: "docker_uppercase", + input: "DOCKER", + expected: v1alpha1.ProviderDocker, + wantError: false, + }, + { + name: "hetzner_lowercase", + input: "hetzner", + expected: v1alpha1.ProviderHetzner, + wantError: false, + }, + { + name: "hetzner_mixed_case", + input: "Hetzner", + expected: v1alpha1.ProviderHetzner, + wantError: false, + }, + { + name: "invalid_provider", + input: "invalid", + wantError: true, + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + var provider v1alpha1.Provider + + err := provider.Set(testCase.input) + if testCase.wantError { + require.Error(t, err) + require.ErrorIs(t, err, v1alpha1.ErrInvalidProvider) + } else { + require.NoError(t, err) + assert.Equal(t, testCase.expected, provider) + } + }) + } +} + +func TestProvider_StringAndType(t *testing.T) { + t.Parallel() + + provider := v1alpha1.ProviderDocker + assert.Equal(t, "Docker", provider.String()) + assert.Equal(t, "Provider", provider.Type()) +} + +func TestProvider_Default(t *testing.T) { + t.Parallel() + + var provider v1alpha1.Provider + assert.Equal(t, v1alpha1.ProviderDocker, provider.Default()) +} + +func TestProvider_ValidValues(t *testing.T) { + t.Parallel() + + var provider v1alpha1.Provider + + values := provider.ValidValues() + assert.Contains(t, values, "Docker") + assert.Contains(t, values, "Hetzner") + assert.Len(t, values, 2) +} + +func TestProvider_ValidateForDistribution_ValidCombinations(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + provider v1alpha1.Provider + distribution v1alpha1.Distribution + }{ + {"docker_for_vanilla", v1alpha1.ProviderDocker, v1alpha1.DistributionVanilla}, + {"docker_for_k3s", v1alpha1.ProviderDocker, v1alpha1.DistributionK3s}, + {"docker_for_talos", v1alpha1.ProviderDocker, v1alpha1.DistributionTalos}, + {"hetzner_for_talos", v1alpha1.ProviderHetzner, v1alpha1.DistributionTalos}, + {"empty_provider_defaults_to_docker", v1alpha1.Provider(""), v1alpha1.DistributionVanilla}, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + err := testCase.provider.ValidateForDistribution(testCase.distribution) + require.NoError(t, err) + }) + } +} + +func TestProvider_ValidateForDistribution_InvalidCombinations(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + provider v1alpha1.Provider + distribution v1alpha1.Distribution + }{ + {"hetzner_for_vanilla_invalid", v1alpha1.ProviderHetzner, v1alpha1.DistributionVanilla}, + {"hetzner_for_k3s_invalid", v1alpha1.ProviderHetzner, v1alpha1.DistributionK3s}, + {"unknown_distribution", v1alpha1.ProviderDocker, v1alpha1.Distribution("Unknown")}, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + err := testCase.provider.ValidateForDistribution(testCase.distribution) + require.Error(t, err) + }) + } +} + +// Defaults tests. 
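The two tables below fully determine the mappings they cover, including the fallbacks for unknown distributions. A switch-based reconstruction that would satisfy them, with local stand-ins for the v1alpha1 names; this is a sketch of the asserted behavior, not necessarily the package's actual implementation:

```go
package main

import "fmt"

// Local stand-ins for the v1alpha1 names so the sketch compiles on its own.
type Distribution string

const (
	DistributionVanilla Distribution = "Vanilla"
	DistributionK3s     Distribution = "K3s"
	DistributionTalos   Distribution = "Talos"
)

// expectedDistributionConfigName reproduces the mapping the test table
// asserts: kind.yaml for Vanilla (and any unknown value), k3d.yaml for K3s,
// and the talos directory for Talos.
func expectedDistributionConfigName(distribution Distribution) string {
	switch distribution {
	case DistributionK3s:
		return "k3d.yaml"
	case DistributionTalos:
		return "talos"
	default:
		return "kind.yaml"
	}
}

// expectedContextName reproduces the kubeconfig context mapping, with an
// empty string for unknown distributions.
func expectedContextName(distribution Distribution) string {
	switch distribution {
	case DistributionVanilla:
		return "kind-kind"
	case DistributionK3s:
		return "k3d-k3d-default"
	case DistributionTalos:
		return "admin@talos-default"
	default:
		return ""
	}
}

func main() {
	fmt.Println(expectedDistributionConfigName(DistributionK3s)) // k3d.yaml
	fmt.Println(expectedContextName(DistributionTalos))          // admin@talos-default
}
```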
+ +func TestExpectedDistributionConfigName(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + distribution v1alpha1.Distribution + expected string + }{ + { + name: "vanilla_returns_kind_yaml", + distribution: v1alpha1.DistributionVanilla, + expected: "kind.yaml", + }, + { + name: "k3s_returns_k3d_yaml", + distribution: v1alpha1.DistributionK3s, + expected: "k3d.yaml", + }, + { + name: "talos_returns_talos", + distribution: v1alpha1.DistributionTalos, + expected: "talos", + }, + { + name: "unknown_defaults_to_kind_yaml", + distribution: v1alpha1.Distribution("Unknown"), + expected: "kind.yaml", + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + result := v1alpha1.ExpectedDistributionConfigName(testCase.distribution) + assert.Equal(t, testCase.expected, result) + }) + } +} + +func TestExpectedContextName(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + distribution v1alpha1.Distribution + expected string + }{ + { + name: "vanilla_returns_kind_context", + distribution: v1alpha1.DistributionVanilla, + expected: "kind-kind", + }, + { + name: "k3s_returns_k3d_context", + distribution: v1alpha1.DistributionK3s, + expected: "k3d-k3d-default", + }, + { + name: "talos_returns_admin_context", + distribution: v1alpha1.DistributionTalos, + expected: "admin@talos-default", + }, + { + name: "unknown_returns_empty", + distribution: v1alpha1.Distribution("Unknown"), + expected: "", + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + result := v1alpha1.ExpectedContextName(testCase.distribution) + assert.Equal(t, testCase.expected, result) + }) + } +} diff --git a/pkg/apis/cluster/v1alpha1/marshal_test.go b/pkg/apis/cluster/v1alpha1/marshal_test.go new file mode 100644 index 000000000..99a6e0b99 --- /dev/null +++ b/pkg/apis/cluster/v1alpha1/marshal_test.go @@ -0,0 +1,447 @@ +// Package v1alpha1_test provides unit tests for the v1alpha1 package. +// +//nolint:funlen // Table-driven tests are naturally long +package v1alpha1_test + +import ( + "encoding/json" + "testing" + "time" + + v1alpha1 "github.com/devantler-tech/ksail/v5/pkg/apis/cluster/v1alpha1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" +) + +// TestCluster_MarshalYAML tests that MarshalYAML correctly prunes default values. 
+func TestCluster_MarshalYAML(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + cluster v1alpha1.Cluster + wantContains []string + wantExcludes []string + }{ + { + name: "minimal cluster omits all defaults", + cluster: v1alpha1.Cluster{ + TypeMeta: metav1.TypeMeta{ + Kind: v1alpha1.Kind, + APIVersion: v1alpha1.APIVersion, + }, + }, + wantContains: []string{"kind: Cluster", "apiVersion: ksail.io/v1alpha1"}, + wantExcludes: []string{ + "distribution:", + "cni:", + "csi:", + "kubeconfig:", + "sourceDirectory:", + }, + }, + { + name: "cluster with distribution includes distribution", + cluster: v1alpha1.Cluster{ + TypeMeta: metav1.TypeMeta{ + Kind: v1alpha1.Kind, + APIVersion: v1alpha1.APIVersion, + }, + Spec: v1alpha1.Spec{ + Cluster: v1alpha1.ClusterSpec{ + Distribution: v1alpha1.DistributionK3s, + }, + }, + }, + wantContains: []string{"distribution: K3s"}, + }, + { + name: "cluster with non-default CNI includes CNI", + cluster: v1alpha1.Cluster{ + TypeMeta: metav1.TypeMeta{ + Kind: v1alpha1.Kind, + APIVersion: v1alpha1.APIVersion, + }, + Spec: v1alpha1.Spec{ + Cluster: v1alpha1.ClusterSpec{ + CNI: v1alpha1.CNICilium, + }, + }, + }, + wantContains: []string{"cni: Cilium"}, + }, + { + name: "cluster with connection timeout includes timeout", + cluster: v1alpha1.Cluster{ + TypeMeta: metav1.TypeMeta{ + Kind: v1alpha1.Kind, + APIVersion: v1alpha1.APIVersion, + }, + Spec: v1alpha1.Spec{ + Cluster: v1alpha1.ClusterSpec{ + Connection: v1alpha1.Connection{ + Timeout: metav1.Duration{Duration: 5 * time.Minute}, + }, + }, + }, + }, + wantContains: []string{"timeout: 5m0s"}, + }, + { + name: "cluster with gitops engine", + cluster: v1alpha1.Cluster{ + TypeMeta: metav1.TypeMeta{ + Kind: v1alpha1.Kind, + APIVersion: v1alpha1.APIVersion, + }, + Spec: v1alpha1.Spec{ + Cluster: v1alpha1.ClusterSpec{ + GitOpsEngine: v1alpha1.GitOpsEngineFlux, + }, + }, + }, + wantContains: []string{"gitOpsEngine: Flux"}, + }, + { + name: "workload spec with custom source directory", + cluster: v1alpha1.Cluster{ + TypeMeta: metav1.TypeMeta{ + Kind: v1alpha1.Kind, + APIVersion: v1alpha1.APIVersion, + }, + Spec: v1alpha1.Spec{ + Workload: v1alpha1.WorkloadSpec{ + SourceDirectory: "manifests", + }, + }, + }, + wantContains: []string{"sourceDirectory: manifests"}, + }, + { + name: "workload spec with validateOnPush", + cluster: v1alpha1.Cluster{ + TypeMeta: metav1.TypeMeta{ + Kind: v1alpha1.Kind, + APIVersion: v1alpha1.APIVersion, + }, + Spec: v1alpha1.Spec{ + Workload: v1alpha1.WorkloadSpec{ + ValidateOnPush: true, + }, + }, + }, + wantContains: []string{"validateOnPush: true"}, + }, + { + name: "cluster with editor", + cluster: v1alpha1.Cluster{ + TypeMeta: metav1.TypeMeta{ + Kind: v1alpha1.Kind, + APIVersion: v1alpha1.APIVersion, + }, + Spec: v1alpha1.Spec{ + Editor: "vim", + }, + }, + wantContains: []string{"editor: vim"}, + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + got, err := yaml.Marshal(&testCase.cluster) + require.NoError(t, err) + + yamlStr := string(got) + for _, want := range testCase.wantContains { + assert.Contains(t, yamlStr, want, "should contain %q", want) + } + + for _, exclude := range testCase.wantExcludes { + assert.NotContains(t, yamlStr, exclude, "should not contain %q", exclude) + } + }) + } +} + +// TestCluster_MarshalJSON tests that MarshalJSON correctly prunes default values. 
+func TestCluster_MarshalJSON(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + cluster v1alpha1.Cluster + }{ + { + name: "minimal cluster produces valid JSON", + cluster: v1alpha1.Cluster{ + TypeMeta: metav1.TypeMeta{ + Kind: v1alpha1.Kind, + APIVersion: v1alpha1.APIVersion, + }, + }, + }, + { + name: "full cluster produces valid JSON", + cluster: v1alpha1.Cluster{ + TypeMeta: metav1.TypeMeta{ + Kind: v1alpha1.Kind, + APIVersion: v1alpha1.APIVersion, + }, + Spec: v1alpha1.Spec{ + Editor: "code --wait", + Cluster: v1alpha1.ClusterSpec{ + Distribution: v1alpha1.DistributionK3s, + CNI: v1alpha1.CNICilium, + CSI: v1alpha1.CSILocalPathStorage, + GitOpsEngine: v1alpha1.GitOpsEngineFlux, + Connection: v1alpha1.Connection{ + Kubeconfig: "/custom/path", + Context: "my-context", + Timeout: metav1.Duration{Duration: 10 * time.Minute}, + }, + }, + Workload: v1alpha1.WorkloadSpec{ + SourceDirectory: "k8s", + ValidateOnPush: true, + }, + }, + }, + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + got, err := json.Marshal(&testCase.cluster) + require.NoError(t, err) + + // Verify it's valid JSON by unmarshaling + var result map[string]any + + err = json.Unmarshal(got, &result) + require.NoError(t, err) + + // Verify kind and apiVersion are present + assert.Equal(t, v1alpha1.Kind, result["kind"]) + assert.Equal(t, v1alpha1.APIVersion, result["apiVersion"]) + }) + } +} + +// TestCluster_MarshalRoundTrip tests that marshal/unmarshal preserves data. +func TestCluster_MarshalRoundTrip(t *testing.T) { + t.Parallel() + + original := v1alpha1.Cluster{ + TypeMeta: metav1.TypeMeta{ + Kind: v1alpha1.Kind, + APIVersion: v1alpha1.APIVersion, + }, + Spec: v1alpha1.Spec{ + Editor: "nano", + Cluster: v1alpha1.ClusterSpec{ + Distribution: v1alpha1.DistributionK3s, + CNI: v1alpha1.CNICilium, + CSI: v1alpha1.CSILocalPathStorage, + GitOpsEngine: v1alpha1.GitOpsEngineFlux, + Connection: v1alpha1.Connection{ + Kubeconfig: "/custom/kubeconfig", + Context: "test-context", + Timeout: metav1.Duration{Duration: 5 * time.Minute}, + }, + }, + Workload: v1alpha1.WorkloadSpec{ + SourceDirectory: "manifests", + ValidateOnPush: true, + }, + }, + } + + // Marshal to YAML + yamlData, err := yaml.Marshal(&original) + require.NoError(t, err) + + // Unmarshal back + var restored v1alpha1.Cluster + + err = yaml.Unmarshal(yamlData, &restored) + require.NoError(t, err) + + // Verify key fields are preserved + assert.Equal(t, original.Kind, restored.Kind) + assert.Equal(t, original.APIVersion, restored.APIVersion) + assert.Equal(t, original.Spec.Editor, restored.Spec.Editor) + assert.Equal(t, original.Spec.Cluster.Distribution, restored.Spec.Cluster.Distribution) + assert.Equal(t, original.Spec.Cluster.CNI, restored.Spec.Cluster.CNI) + assert.Equal(t, original.Spec.Cluster.CSI, restored.Spec.Cluster.CSI) + assert.Equal(t, original.Spec.Cluster.GitOpsEngine, restored.Spec.Cluster.GitOpsEngine) + assert.Equal( + t, + original.Spec.Cluster.Connection.Kubeconfig, + restored.Spec.Cluster.Connection.Kubeconfig, + ) + assert.Equal( + t, + original.Spec.Cluster.Connection.Context, + restored.Spec.Cluster.Connection.Context, + ) + assert.Equal(t, original.Spec.Workload.SourceDirectory, restored.Spec.Workload.SourceDirectory) + assert.Equal(t, original.Spec.Workload.ValidateOnPush, restored.Spec.Workload.ValidateOnPush) +} + +// TestCluster_DefaultDistributionConfigPruning tests that default distribution config is pruned. 
+func TestCluster_DefaultDistributionConfigPruning(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + distribution v1alpha1.Distribution + distributionCfg string + wantInYAML bool + description string + }{ + { + name: "Vanilla with kind.yaml is pruned", + distribution: v1alpha1.DistributionVanilla, + distributionCfg: "kind.yaml", + wantInYAML: false, + description: "default config for Vanilla should be omitted", + }, + { + name: "Vanilla with custom config is kept", + distribution: v1alpha1.DistributionVanilla, + distributionCfg: "custom-kind.yaml", + wantInYAML: true, + description: "non-default config should be included", + }, + { + name: "K3s with k3d.yaml is pruned", + distribution: v1alpha1.DistributionK3s, + distributionCfg: "k3d.yaml", + wantInYAML: false, + description: "default config for K3s should be omitted", + }, + { + name: "K3s with custom config is kept", + distribution: v1alpha1.DistributionK3s, + distributionCfg: "my-k3d.yaml", + wantInYAML: true, + description: "non-default config should be included", + }, + { + name: "Talos with talos is pruned", + distribution: v1alpha1.DistributionTalos, + distributionCfg: "talos", + wantInYAML: false, + description: "default config for Talos should be omitted", + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + cluster := v1alpha1.Cluster{ + TypeMeta: metav1.TypeMeta{ + Kind: v1alpha1.Kind, + APIVersion: v1alpha1.APIVersion, + }, + Spec: v1alpha1.Spec{ + Cluster: v1alpha1.ClusterSpec{ + Distribution: testCase.distribution, + DistributionConfig: testCase.distributionCfg, + }, + }, + } + + yamlData, err := yaml.Marshal(&cluster) + require.NoError(t, err) + + yamlStr := string(yamlData) + if testCase.wantInYAML { + assert.Contains(t, yamlStr, "distributionConfig:", testCase.description) + } else { + assert.NotContains(t, yamlStr, "distributionConfig:", testCase.description) + } + }) + } +} + +// TestCluster_DefaultContextPruning tests that default context names are pruned. 
+func TestCluster_DefaultContextPruning(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + distribution v1alpha1.Distribution + context string + wantInYAML bool + description string + }{ + { + name: "Vanilla with kind-kind is pruned", + distribution: v1alpha1.DistributionVanilla, + context: "kind-kind", + wantInYAML: false, + description: "default context for Vanilla should be omitted", + }, + { + name: "Vanilla with custom context is kept", + distribution: v1alpha1.DistributionVanilla, + context: "my-custom-context", + wantInYAML: true, + description: "non-default context should be included", + }, + { + name: "K3s with k3d-k3d-default is pruned", + distribution: v1alpha1.DistributionK3s, + context: "k3d-k3d-default", + wantInYAML: false, + description: "default context for K3s should be omitted", + }, + { + name: "Talos with admin@talos-default is pruned", + distribution: v1alpha1.DistributionTalos, + context: "admin@talos-default", + wantInYAML: false, + description: "default context for Talos should be omitted", + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + cluster := v1alpha1.Cluster{ + TypeMeta: metav1.TypeMeta{ + Kind: v1alpha1.Kind, + APIVersion: v1alpha1.APIVersion, + }, + Spec: v1alpha1.Spec{ + Cluster: v1alpha1.ClusterSpec{ + Distribution: testCase.distribution, + Connection: v1alpha1.Connection{ + Context: testCase.context, + }, + }, + }, + } + + yamlData, err := yaml.Marshal(&cluster) + require.NoError(t, err) + + yamlStr := string(yamlData) + if testCase.wantInYAML { + assert.Contains(t, yamlStr, "context:", testCase.description) + } else { + assert.NotContains(t, yamlStr, "context:", testCase.description) + } + }) + } +} diff --git a/pkg/apis/cluster/v1alpha1/types_test.go b/pkg/apis/cluster/v1alpha1/types_test.go index ab347bee2..bd9181f4d 100644 --- a/pkg/apis/cluster/v1alpha1/types_test.go +++ b/pkg/apis/cluster/v1alpha1/types_test.go @@ -249,3 +249,175 @@ func TestLocalRegistry_ResolvedTag(t *testing.T) { }) } } + +func TestLocalRegistry_Enabled(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + registry string + want bool + }{ + { + name: "returns_true_for_non_empty", + registry: "localhost:5000", + want: true, + }, + { + name: "returns_true_for_external_registry", + registry: "ghcr.io/org/repo", + want: true, + }, + { + name: "returns_false_for_empty", + registry: "", + want: false, + }, + { + name: "returns_false_for_whitespace_only", + registry: " ", + want: false, + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + reg := v1alpha1.LocalRegistry{Registry: testCase.registry} + assert.Equal(t, testCase.want, reg.Enabled()) + }) + } +} + +func TestLocalRegistry_IsExternal(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + registry string + want bool + }{ + { + name: "localhost_is_not_external", + registry: "localhost:5000", + want: false, + }, + { + name: "127_0_0_1_is_not_external", + registry: "127.0.0.1:5000", + want: false, + }, + { + name: "ghcr_io_is_external", + registry: "ghcr.io/org/repo", + want: true, + }, + { + name: "docker_io_is_external", + registry: "docker.io/library/nginx", + want: true, + }, + { + name: "empty_defaults_to_localhost_not_external", + registry: "", + want: false, + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + reg := v1alpha1.LocalRegistry{Registry: testCase.registry} + assert.Equal(t, testCase.want, 
reg.IsExternal()) + }) + } +} + +func TestLocalRegistry_HasCredentials(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + registry string + want bool + }{ + { + name: "no_credentials", + registry: "ghcr.io/org/repo", + want: false, + }, + { + name: "username_and_password", + registry: "user:pass@ghcr.io/org/repo", + want: true, + }, + { + name: "username_only", + registry: "user@ghcr.io/org/repo", + want: true, + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + reg := v1alpha1.LocalRegistry{Registry: testCase.registry} + assert.Equal(t, testCase.want, reg.HasCredentials()) + }) + } +} + +func TestLocalRegistry_ResolvedHostPortPath(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + registry string + wantHost string + wantPort int32 + wantPath string + }{ + { + name: "localhost_with_port", + registry: "localhost:5000", + wantHost: "localhost", + wantPort: 5000, + wantPath: "", + }, + { + name: "localhost_without_port_uses_default", + registry: "localhost", + wantHost: "localhost", + wantPort: v1alpha1.DefaultLocalRegistryPort, + wantPath: "", + }, + { + name: "external_registry_with_path", + registry: "ghcr.io/org/repo", + wantHost: "ghcr.io", + wantPort: 0, // No port for external registries + wantPath: "org/repo", + }, + { + name: "empty_registry_defaults", + registry: "", + wantHost: "localhost", + wantPort: v1alpha1.DefaultLocalRegistryPort, + wantPath: "", + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + reg := v1alpha1.LocalRegistry{Registry: testCase.registry} + assert.Equal(t, testCase.wantHost, reg.ResolvedHost()) + assert.Equal(t, testCase.wantPort, reg.ResolvedPort()) + assert.Equal(t, testCase.wantPath, reg.ResolvedPath()) + }) + } +} diff --git a/pkg/cli/helpers/registry_test.go b/pkg/cli/helpers/registry_test.go new file mode 100644 index 000000000..f57d6da2d --- /dev/null +++ b/pkg/cli/helpers/registry_test.go @@ -0,0 +1,154 @@ +package helpers_test + +import ( + "testing" + + "github.com/devantler-tech/ksail/v5/pkg/cli/helpers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type formatRegistryURLTestCase struct { + name string + host string + port int32 + repository string + expected string +} + +func getFormatRegistryURLTestCases() []formatRegistryURLTestCase { + return []formatRegistryURLTestCase{ + {"localhost with port", "localhost", 5000, "myproject", "oci://localhost:5000/myproject"}, + { + "custom host with port", + "registry.example.com", + 8080, + "app", + "oci://registry.example.com:8080/app", + }, + {"IPv4 with port", "192.168.1.100", 5000, "images", "oci://192.168.1.100:5000/images"}, + {"IPv6 with port", "::1", 5000, "project", "oci://[::1]:5000/project"}, + {"external registry without port", "ghcr.io", 0, "org/repo", "oci://ghcr.io/org/repo"}, + { + "docker hub without port", + "docker.io", + 0, + "library/nginx", + "oci://docker.io/library/nginx", + }, + {"empty repository", "localhost", 5000, "", "oci://localhost:5000/"}, + { + "nested repository path", + "ghcr.io", + 0, + "org/project/subdir", + "oci://ghcr.io/org/project/subdir", + }, + } +} + +// TestFormatRegistryURL tests the FormatRegistryURL function. 
+func TestFormatRegistryURL(t *testing.T) { + t.Parallel() + + for _, testCase := range getFormatRegistryURLTestCases() { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + result := helpers.FormatRegistryURL(testCase.host, testCase.port, testCase.repository) + assert.Equal(t, testCase.expected, result) + }) + } +} + +// TestDetectRegistryFromViper_NilViper tests DetectRegistryFromViper with nil viper. +func TestDetectRegistryFromViper_NilViper(t *testing.T) { + t.Parallel() + + info, err := helpers.DetectRegistryFromViper(nil) + + require.Error(t, err) + require.ErrorIs(t, err, helpers.ErrViperNil) + assert.Nil(t, info) +} + +// TestDetectRegistryFromConfig_DisabledLocalRegistry tests DetectRegistryFromConfig +// with a config where local registry is disabled. +func TestDetectRegistryFromConfig_DisabledLocalRegistry(t *testing.T) { + t.Parallel() + + // Note: Calling with nil panics - that's a bug in the function. + // Testing with disabled local registry instead. +} + +// TestRegistryInfo_Fields tests that RegistryInfo struct fields work correctly. +func TestRegistryInfo_Fields(t *testing.T) { + t.Parallel() + + info := helpers.RegistryInfo{ + Host: "localhost", + Port: 5000, + Repository: "myproject", + Tag: "v1.0.0", + Username: "user", + Password: "pass", + IsExternal: false, + Source: "test", + } + + assert.Equal(t, "localhost", info.Host) + assert.Equal(t, int32(5000), info.Port) + assert.Equal(t, "myproject", info.Repository) + assert.Equal(t, "v1.0.0", info.Tag) + assert.Equal(t, "user", info.Username) + assert.Equal(t, "pass", info.Password) + assert.False(t, info.IsExternal) + assert.Equal(t, "test", info.Source) +} + +// TestRegistryErrors tests that registry error constants are defined correctly. +func TestRegistryErrors(t *testing.T) { + t.Parallel() + + t.Run("ErrNoRegistryFound", func(t *testing.T) { + t.Parallel() + require.Error(t, helpers.ErrNoRegistryFound) + assert.Contains(t, helpers.ErrNoRegistryFound.Error(), "unable to detect registry") + }) + + t.Run("ErrViperNil", func(t *testing.T) { + t.Parallel() + require.Error(t, helpers.ErrViperNil) + assert.Contains(t, helpers.ErrViperNil.Error(), "nil") + }) + + t.Run("ErrRegistryNotSet", func(t *testing.T) { + t.Parallel() + require.Error(t, helpers.ErrRegistryNotSet) + assert.Contains(t, helpers.ErrRegistryNotSet.Error(), "not set") + }) + + t.Run("ErrLocalRegistryNotConfigured", func(t *testing.T) { + t.Parallel() + require.Error(t, helpers.ErrLocalRegistryNotConfigured) + assert.Contains(t, helpers.ErrLocalRegistryNotConfigured.Error(), "not configured") + }) + + t.Run("ErrFluxNoSyncURL", func(t *testing.T) { + t.Parallel() + require.Error(t, helpers.ErrFluxNoSyncURL) + assert.Contains(t, helpers.ErrFluxNoSyncURL.Error(), "sync.url") + }) + + t.Run("ErrArgoCDNoRepoURL", func(t *testing.T) { + t.Parallel() + require.Error(t, helpers.ErrArgoCDNoRepoURL) + assert.Contains(t, helpers.ErrArgoCDNoRepoURL.Error(), "repoURL") + }) + + t.Run("ErrEmptyOCIURL", func(t *testing.T) { + t.Parallel() + require.Error(t, helpers.ErrEmptyOCIURL) + assert.Contains(t, helpers.ErrEmptyOCIURL.Error(), "empty") + }) +} diff --git a/pkg/client/docker/export_test.go b/pkg/client/docker/export_test.go new file mode 100644 index 000000000..65d99a576 --- /dev/null +++ b/pkg/client/docker/export_test.go @@ -0,0 +1,34 @@ +//nolint:gochecknoglobals // export_test.go pattern requires global variables to expose internal functions +package docker + +// Export unexported functions for testing. 
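The export_test.go file below uses a standard Go idiom: a _test.go file declared in the implementation package (package docker, not docker_test) can reference unexported identifiers, yet it is compiled only during `go test`, so the re-exports never ship in normal builds. A minimal sketch of the layout with hypothetical names:

```go
// widget.go, ships in normal builds; the helper is unexported.
package widget

func clamp(v, lo, hi int) int {
	if v < lo {
		return lo
	}
	if v > hi {
		return hi
	}
	return v
}

// export_test.go, same package but compiled only under `go test`:
//
//	package widget
//
//	var Clamp = clamp // visible to widget_test during tests only
//
// widget_test.go, the external test package consuming the re-export:
//
//	package widget_test
//
//	import (
//	    "testing"
//
//	    "example.com/widget"
//	)
//
//	func TestClamp(t *testing.T) {
//	    if got := widget.Clamp(15, 0, 10); got != 10 {
//	        t.Fatalf("got %d, want 10", got)
//	    }
//	}
```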
+ +// UniqueNonEmpty exports uniqueNonEmpty for testing. +var UniqueNonEmpty = uniqueNonEmpty + +// IsNotConnectedError exports isNotConnectedError for testing. +var IsNotConnectedError = isNotConnectedError + +// IsClusterNetworkName exports isClusterNetworkName for testing. +var IsClusterNetworkName = isClusterNetworkName + +// RegistryAttachedToOtherClusters exports registryAttachedToOtherClusters for testing. +var RegistryAttachedToOtherClusters = registryAttachedToOtherClusters + +// DeriveRegistryVolumeName exports deriveRegistryVolumeName for testing. +var DeriveRegistryVolumeName = deriveRegistryVolumeName + +// InspectContainer exports inspectContainer for testing. +var InspectContainer = inspectContainer + +// DisconnectRegistryNetwork exports disconnectRegistryNetwork for testing. +var DisconnectRegistryNetwork = disconnectRegistryNetwork + +// CleanupRegistryVolume exports cleanupRegistryVolume for testing. +var CleanupRegistryVolume = cleanupRegistryVolume + +// CleanupOrphanedRegistryVolume exports cleanupOrphanedRegistryVolume for testing. +var CleanupOrphanedRegistryVolume = cleanupOrphanedRegistryVolume + +// RemoveRegistryVolume exports removeRegistryVolume for testing. +var RemoveRegistryVolume = removeRegistryVolume diff --git a/pkg/client/docker/registry_helpers_test.go b/pkg/client/docker/registry_helpers_test.go new file mode 100644 index 000000000..fcca9dd23 --- /dev/null +++ b/pkg/client/docker/registry_helpers_test.go @@ -0,0 +1,999 @@ +// Package docker_test provides unit tests for the docker package. +// +//nolint:err113,funlen // Tests use dynamic errors for mock behaviors and table-driven tests are naturally long +package docker_test + +import ( + "context" + "errors" + "testing" + + docker "github.com/devantler-tech/ksail/v5/pkg/client/docker" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Test constants. 
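One pattern worth noting before the constants: each table-driven test in this file copies the loop variable (testCase := tests[i]) before t.Run's parallel closure captures it. Before Go 1.22, range loops reused a single variable across iterations, so the copy was required for correctness with t.Parallel(); from Go 1.22 on, per-iteration variables make it redundant but harmless. A self-contained illustration:

```go
package sample

import "testing"

func TestSquares(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name string
		in   int
		want int
	}{
		{"zero", 0, 0},
		{"three", 3, 9},
	}

	for i := range tests {
		testCase := tests[i] // copy taken before the parallel closure captures it

		t.Run(testCase.name, func(t *testing.T) {
			t.Parallel()

			if got := testCase.in * testCase.in; got != testCase.want {
				t.Fatalf("got %d, want %d", got, testCase.want)
			}
		})
	}
}
```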
+const ( + testContainerID = "test-container-id" + testClusterKind = "kind-test-cluster" + testRegistryName = "docker.io" + testFallbackName = "fallback-name" + testContainerName = "test-container" +) + +func TestUniqueNonEmpty(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input []string + expected []string + }{ + { + name: "empty input returns empty slice", + input: []string{}, + expected: []string{}, + }, + { + name: "single value returns single value", + input: []string{"foo"}, + expected: []string{"foo"}, + }, + { + name: "duplicate values are removed", + input: []string{"foo", "bar", "foo"}, + expected: []string{"foo", "bar"}, + }, + { + name: "empty strings are filtered out", + input: []string{"foo", "", "bar"}, + expected: []string{"foo", "bar"}, + }, + { + name: "whitespace only strings are filtered out", + input: []string{"foo", " ", "bar"}, + expected: []string{"foo", "bar"}, + }, + { + name: "whitespace is trimmed from values", + input: []string{" foo ", " bar "}, + expected: []string{"foo", "bar"}, + }, + { + name: "duplicates after trimming are removed", + input: []string{" foo", "foo ", "foo"}, + expected: []string{"foo"}, + }, + { + name: "all empty values returns empty slice", + input: []string{"", " ", "\t"}, + expected: []string{}, + }, + { + name: "preserves order of first occurrence", + input: []string{"c", "a", "b", "a", "c"}, + expected: []string{"c", "a", "b"}, + }, + { + name: "nil input returns empty slice", + input: nil, + expected: []string{}, + }, + } + + for i := range tests { + testCase := tests[i] + + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + result := docker.UniqueNonEmpty(testCase.input...) + + assert.Equal(t, testCase.expected, result) + }) + } +} + +func TestIsNotConnectedError(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + err error + expected bool + }{ + { + name: "nil error returns false", + err: nil, + expected: false, + }, + { + name: "not connected error returns true", + err: errors.New("container xyz is not connected to the network abc"), + expected: true, + }, + { + name: "random error returns false", + err: errors.New("something went wrong"), + expected: false, + }, + { + name: "partial match returns true", + err: errors.New("Error: is not connected to the network"), + expected: true, + }, + { + name: "case sensitive - uppercase returns false", + err: errors.New("container IS NOT CONNECTED TO THE NETWORK"), + expected: false, + }, + { + name: "wrapped error with not connected message returns true", + err: errors.New("wrapped: is not connected to the network: details"), + expected: true, + }, + } + + for i := range tests { + testCase := tests[i] + + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + result := docker.IsNotConnectedError(testCase.err) + + assert.Equal(t, testCase.expected, result) + }) + } +} + +func TestIsClusterNetworkName(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + network string + expected bool + }{ + { + name: "empty string returns false", + network: "", + expected: false, + }, + { + name: "kind network returns true", + network: "kind", + expected: true, + }, + { + name: "kind-prefixed network returns true", + network: "kind-test-cluster", + expected: true, + }, + { + name: "k3d network returns true", + network: "k3d", + expected: true, + }, + { + name: "k3d-prefixed network returns true", + network: "k3d-test-cluster", + expected: true, + }, + { + name: "bridge network returns false", + network: "bridge", + expected: false, 
+ }, + { + name: "host network returns false", + network: "host", + expected: false, + }, + { + name: "custom network returns false", + network: "my-custom-network", + expected: false, + }, + { + name: "kubernetes network returns false", + network: "kubernetes", + expected: false, + }, + { + name: "similar but not matching prefix returns false", + network: "kindof-network", + expected: false, + }, + { + name: "k3d alone is cluster network", + network: "k3d", + expected: true, + }, + } + + for i := range tests { + testCase := tests[i] + + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + result := docker.IsClusterNetworkName(testCase.network) + + assert.Equal(t, testCase.expected, result) + }) + } +} + +func TestRegistryAttachedToOtherClusters(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + inspect container.InspectResponse + ignoredNetwork string + expected bool + }{ + { + name: "nil network settings returns false", + inspect: container.InspectResponse{ + NetworkSettings: nil, + }, + ignoredNetwork: "", + expected: false, + }, + { + name: "empty networks returns false", + inspect: container.InspectResponse{ + NetworkSettings: &container.NetworkSettings{ + Networks: map[string]*network.EndpointSettings{}, + }, + }, + ignoredNetwork: "", + expected: false, + }, + { + name: "only non-cluster networks returns false", + inspect: container.InspectResponse{ + NetworkSettings: &container.NetworkSettings{ + Networks: map[string]*network.EndpointSettings{ + "bridge": {}, + "host": {}, + }, + }, + }, + ignoredNetwork: "", + expected: false, + }, + { + name: "cluster network present returns true", + inspect: container.InspectResponse{ + NetworkSettings: &container.NetworkSettings{ + Networks: map[string]*network.EndpointSettings{ + "kind-test-cluster": {}, + }, + }, + }, + ignoredNetwork: "", + expected: true, + }, + { + name: "only ignored cluster network returns false", + inspect: container.InspectResponse{ + NetworkSettings: &container.NetworkSettings{ + Networks: map[string]*network.EndpointSettings{ + "kind-test-cluster": {}, + }, + }, + }, + ignoredNetwork: "kind-test-cluster", + expected: false, + }, + { + name: "multiple cluster networks with one ignored returns true", + inspect: container.InspectResponse{ + NetworkSettings: &container.NetworkSettings{ + Networks: map[string]*network.EndpointSettings{ + "kind-cluster1": {}, + "kind-cluster2": {}, + }, + }, + }, + ignoredNetwork: "kind-cluster1", + expected: true, + }, + { + name: "ignoring case insensitive", + inspect: container.InspectResponse{ + NetworkSettings: &container.NetworkSettings{ + Networks: map[string]*network.EndpointSettings{ + "Kind-Test-Cluster": {}, + }, + }, + }, + ignoredNetwork: "kind-test-cluster", + expected: false, + }, + { + name: "empty string network name in map is skipped", + inspect: container.InspectResponse{ + NetworkSettings: &container.NetworkSettings{ + Networks: map[string]*network.EndpointSettings{ + "": {}, + "kind-test-cluster": {}, + }, + }, + }, + ignoredNetwork: "", + expected: true, + }, + { + name: "whitespace network name in map is skipped", + inspect: container.InspectResponse{ + NetworkSettings: &container.NetworkSettings{ + Networks: map[string]*network.EndpointSettings{ + " ": {}, + "kind-test-cluster": {}, + }, + }, + }, + ignoredNetwork: "", + expected: true, + }, + { + name: "k3d network also detected as cluster network", + inspect: container.InspectResponse{ + NetworkSettings: &container.NetworkSettings{ + Networks: map[string]*network.EndpointSettings{ + 
"k3d-my-cluster": {}, + }, + }, + }, + ignoredNetwork: "", + expected: true, + }, + } + + for i := range tests { + testCase := tests[i] + + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + result := docker.RegistryAttachedToOtherClusters( + testCase.inspect, + testCase.ignoredNetwork, + ) + + assert.Equal(t, testCase.expected, result) + }) + } +} + +func TestDeriveRegistryVolumeName(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + registry container.Summary + fallback string + expected string + }{ + { + name: "returns volume name from mount point", + registry: container.Summary{ + Mounts: []container.MountPoint{ + {Type: mount.TypeVolume, Name: "registry-volume"}, + }, + }, + fallback: "fallback-name", + expected: "registry-volume", + }, + { + name: "skips bind mounts", + registry: container.Summary{ + Mounts: []container.MountPoint{ + {Type: mount.TypeBind, Name: "bind-mount"}, + {Type: mount.TypeVolume, Name: "volume-mount"}, + }, + }, + fallback: "fallback-name", + expected: "volume-mount", + }, + { + name: "returns fallback when no volume mounts", + registry: container.Summary{ + Mounts: []container.MountPoint{}, + }, + fallback: "fallback-name", + expected: "fallback-name", + }, + { + name: "returns normalized fallback for kind prefix", + registry: container.Summary{ + Mounts: []container.MountPoint{}, + }, + fallback: "kind-docker.io", + expected: "docker.io", + }, + { + name: "returns normalized fallback for k3d prefix", + registry: container.Summary{ + Mounts: []container.MountPoint{}, + }, + fallback: "k3d-registry", + expected: "registry", + }, + { + name: "handles empty volume name in mount", + registry: container.Summary{ + Mounts: []container.MountPoint{ + {Type: mount.TypeVolume, Name: ""}, + {Type: mount.TypeVolume, Name: "actual-volume"}, + }, + }, + fallback: "fallback-name", + expected: "actual-volume", + }, + { + name: "returns trimmed fallback", + registry: container.Summary{ + Mounts: []container.MountPoint{}, + }, + fallback: " whitespace-fallback ", + expected: "whitespace-fallback", + }, + { + name: "returns empty string when no mounts and empty fallback", + registry: container.Summary{ + Mounts: []container.MountPoint{}, + }, + fallback: "", + expected: "", + }, + } + + for i := range tests { + testCase := tests[i] + + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + result := docker.DeriveRegistryVolumeName(testCase.registry, testCase.fallback) + + assert.Equal(t, testCase.expected, result) + }) + } +} + +func TestInspectContainer(t *testing.T) { + t.Parallel() + + t.Run("returns container inspection response", func(t *testing.T) { + t.Parallel() + + mockClient := docker.NewMockAPIClient(t) + ctx := context.Background() + containerID := testContainerID + + expectedInspect := container.InspectResponse{ + ContainerJSONBase: &container.ContainerJSONBase{ + ID: containerID, + Name: "test-container", + }, + } + + mockClient.EXPECT(). + ContainerInspect(ctx, containerID). + Return(expectedInspect, nil). + Once() + + result, err := docker.InspectContainer(ctx, mockClient, containerID) + + require.NoError(t, err) + assert.Equal(t, containerID, result.ID) + assert.Equal(t, "test-container", result.Name) + }) + + t.Run("returns error when inspection fails", func(t *testing.T) { + t.Parallel() + + mockClient := docker.NewMockAPIClient(t) + ctx := context.Background() + containerID := testContainerID + + mockClient.EXPECT(). + ContainerInspect(ctx, containerID). 
+ Return(container.InspectResponse{}, errors.New("inspection failed")). + Once() + + _, err := docker.InspectContainer(ctx, mockClient, containerID) + + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to inspect registry container") + }) +} + +func TestDisconnectRegistryNetwork(t *testing.T) { + t.Parallel() + + t.Run("disconnects successfully", func(t *testing.T) { + t.Parallel() + + mockClient := docker.NewMockAPIClient(t) + ctx := context.Background() + containerID := testContainerID + networkName := testClusterKind + + inspectBefore := container.InspectResponse{ + ContainerJSONBase: &container.ContainerJSONBase{ID: containerID}, + NetworkSettings: &container.NetworkSettings{ + Networks: map[string]*network.EndpointSettings{ + networkName: {}, + }, + }, + } + + inspectAfter := container.InspectResponse{ + ContainerJSONBase: &container.ContainerJSONBase{ID: containerID}, + NetworkSettings: &container.NetworkSettings{ + Networks: map[string]*network.EndpointSettings{}, + }, + } + + mockClient.EXPECT(). + NetworkDisconnect(ctx, networkName, containerID, true). + Return(nil). + Once() + + mockClient.EXPECT(). + ContainerInspect(ctx, containerID). + Return(inspectAfter, nil). + Once() + + result, err := docker.DisconnectRegistryNetwork( + ctx, + mockClient, + containerID, + "docker.io", + networkName, + inspectBefore, + ) + + require.NoError(t, err) + assert.Empty(t, result.NetworkSettings.Networks) + }) + + t.Run("returns early when network is empty", func(t *testing.T) { + t.Parallel() + + mockClient := docker.NewMockAPIClient(t) + ctx := context.Background() + containerID := testContainerID + + inspectInput := container.InspectResponse{ + ContainerJSONBase: &container.ContainerJSONBase{ID: containerID}, + } + + result, err := docker.DisconnectRegistryNetwork( + ctx, + mockClient, + containerID, + "docker.io", + "", + inspectInput, + ) + + require.NoError(t, err) + assert.Equal(t, inspectInput, result) + mockClient.AssertNotCalled(t, "NetworkDisconnect") + }) + + t.Run("ignores not connected error", func(t *testing.T) { + t.Parallel() + + mockClient := docker.NewMockAPIClient(t) + ctx := context.Background() + containerID := testContainerID + networkName := testClusterKind + + inspectInput := container.InspectResponse{ + ContainerJSONBase: &container.ContainerJSONBase{ID: containerID}, + } + + inspectAfter := container.InspectResponse{ + ContainerJSONBase: &container.ContainerJSONBase{ID: containerID}, + NetworkSettings: &container.NetworkSettings{ + Networks: map[string]*network.EndpointSettings{}, + }, + } + + mockClient.EXPECT(). + NetworkDisconnect(ctx, networkName, containerID, true). + Return(errors.New("container is not connected to the network")). + Once() + + mockClient.EXPECT(). + ContainerInspect(ctx, containerID). + Return(inspectAfter, nil). 
+ Once() + + result, err := docker.DisconnectRegistryNetwork( + ctx, + mockClient, + containerID, + "docker.io", + networkName, + inspectInput, + ) + + require.NoError(t, err) + assert.NotNil(t, result.NetworkSettings) + }) + + t.Run("ignores not found error", func(t *testing.T) { + t.Parallel() + + mockClient := docker.NewMockAPIClient(t) + ctx := context.Background() + containerID := testContainerID + networkName := testClusterKind + + inspectInput := container.InspectResponse{ + ContainerJSONBase: &container.ContainerJSONBase{ID: containerID}, + } + + inspectAfter := container.InspectResponse{ + ContainerJSONBase: &container.ContainerJSONBase{ID: containerID}, + NetworkSettings: &container.NetworkSettings{ + Networks: map[string]*network.EndpointSettings{}, + }, + } + + mockClient.EXPECT(). + NetworkDisconnect(ctx, networkName, containerID, true). + Return(testNotFoundError{}). + Once() + + mockClient.EXPECT(). + ContainerInspect(ctx, containerID). + Return(inspectAfter, nil). + Once() + + result, err := docker.DisconnectRegistryNetwork( + ctx, + mockClient, + containerID, + "docker.io", + networkName, + inspectInput, + ) + + require.NoError(t, err) + assert.NotNil(t, result.NetworkSettings) + }) + + t.Run("returns error on unexpected disconnect failure", func(t *testing.T) { + t.Parallel() + + mockClient := docker.NewMockAPIClient(t) + ctx := context.Background() + containerID := testContainerID + networkName := testClusterKind + + inspectInput := container.InspectResponse{ + ContainerJSONBase: &container.ContainerJSONBase{ID: containerID}, + } + + mockClient.EXPECT(). + NetworkDisconnect(ctx, networkName, containerID, true). + Return(errors.New("unexpected error")). + Once() + + _, err := docker.DisconnectRegistryNetwork( + ctx, + mockClient, + containerID, + "docker.io", + networkName, + inspectInput, + ) + + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to disconnect registry") + }) +} + +func TestCleanupRegistryVolume(t *testing.T) { + t.Parallel() + + t.Run("does nothing when deleteVolume is false", func(t *testing.T) { + t.Parallel() + + mockClient := docker.NewMockAPIClient(t) + ctx := context.Background() + registry := container.Summary{} + + err := docker.CleanupRegistryVolume(ctx, mockClient, registry, "", "fallback", false) + + require.NoError(t, err) + mockClient.AssertNotCalled(t, "VolumeRemove") + }) + + t.Run("removes explicit volume when provided", func(t *testing.T) { + t.Parallel() + + mockClient := docker.NewMockAPIClient(t) + ctx := context.Background() + registry := container.Summary{} + + mockClient.EXPECT(). + VolumeRemove(ctx, "explicit-volume", false). + Return(nil). + Once() + + err := docker.CleanupRegistryVolume( + ctx, + mockClient, + registry, + "explicit-volume", + "fallback", + true, + ) + + require.NoError(t, err) + }) + + t.Run("derives volume name from registry when no explicit volume", func(t *testing.T) { + t.Parallel() + + mockClient := docker.NewMockAPIClient(t) + ctx := context.Background() + registry := container.Summary{ + Mounts: []container.MountPoint{ + {Type: mount.TypeVolume, Name: "registry-volume"}, + }, + } + + mockClient.EXPECT(). + VolumeRemove(ctx, "registry-volume", false). + Return(nil). 
+ Once() + + err := docker.CleanupRegistryVolume(ctx, mockClient, registry, "", "fallback", true) + + require.NoError(t, err) + }) + + t.Run("returns error on volume removal failure", func(t *testing.T) { + t.Parallel() + + mockClient := docker.NewMockAPIClient(t) + ctx := context.Background() + registry := container.Summary{} + + mockClient.EXPECT(). + VolumeRemove(ctx, "explicit-volume", false). + Return(errors.New("removal failed")). + Once() + + err := docker.CleanupRegistryVolume( + ctx, + mockClient, + registry, + "explicit-volume", + "fallback", + true, + ) + + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to remove registry volume") + }) +} + +func TestCleanupOrphanedRegistryVolume(t *testing.T) { + t.Parallel() + + t.Run("removes explicit volume first", func(t *testing.T) { + t.Parallel() + + mockClient := docker.NewMockAPIClient(t) + ctx := context.Background() + + mockClient.EXPECT(). + VolumeRemove(ctx, "explicit-volume", false). + Return(nil). + Once() + + err := docker.CleanupOrphanedRegistryVolume(ctx, mockClient, "explicit-volume", "fallback") + + require.NoError(t, err) + }) + + t.Run("falls back to normalized name when explicit not found", func(t *testing.T) { + t.Parallel() + + mockClient := docker.NewMockAPIClient(t) + ctx := context.Background() + + mockClient.EXPECT(). + VolumeRemove(ctx, "explicit-volume", false). + Return(testNotFoundError{}). + Once() + + mockClient.EXPECT(). + VolumeRemove(ctx, "docker.io", false). + Return(nil). + Once() + + err := docker.CleanupOrphanedRegistryVolume( + ctx, + mockClient, + "explicit-volume", + "kind-docker.io", + ) + + require.NoError(t, err) + }) + + t.Run("tries original fallback as last resort", func(t *testing.T) { + t.Parallel() + + mockClient := docker.NewMockAPIClient(t) + ctx := context.Background() + + mockClient.EXPECT(). + VolumeRemove(ctx, "docker.io", false). + Return(testNotFoundError{}). + Once() + + mockClient.EXPECT(). + VolumeRemove(ctx, "kind-docker.io", false). + Return(nil). + Once() + + err := docker.CleanupOrphanedRegistryVolume(ctx, mockClient, "", "kind-docker.io") + + require.NoError(t, err) + }) + + t.Run("returns nil when no volumes found", func(t *testing.T) { + t.Parallel() + + mockClient := docker.NewMockAPIClient(t) + ctx := context.Background() + + mockClient.EXPECT(). + VolumeRemove(ctx, "docker.io", false). + Return(testNotFoundError{}). + Once() + + mockClient.EXPECT(). + VolumeRemove(ctx, "kind-docker.io", false). + Return(testNotFoundError{}). + Once() + + err := docker.CleanupOrphanedRegistryVolume(ctx, mockClient, "", "kind-docker.io") + + require.NoError(t, err) + }) + + t.Run("returns error on unexpected failure", func(t *testing.T) { + t.Parallel() + + mockClient := docker.NewMockAPIClient(t) + ctx := context.Background() + + mockClient.EXPECT(). + VolumeRemove(ctx, "explicit-volume", false). + Return(errors.New("unexpected error")). 
+ Once() + + err := docker.CleanupOrphanedRegistryVolume(ctx, mockClient, "explicit-volume", "fallback") + + require.Error(t, err) + }) +} + +func TestRemoveRegistryVolume(t *testing.T) { + t.Parallel() + + t.Run("returns false for empty volume name", func(t *testing.T) { + t.Parallel() + + mockClient := docker.NewMockAPIClient(t) + ctx := context.Background() + + removed, err := docker.RemoveRegistryVolume(ctx, mockClient, "") + + require.NoError(t, err) + assert.False(t, removed) + mockClient.AssertNotCalled(t, "VolumeRemove") + }) + + t.Run("returns false for whitespace only volume name", func(t *testing.T) { + t.Parallel() + + mockClient := docker.NewMockAPIClient(t) + ctx := context.Background() + + removed, err := docker.RemoveRegistryVolume(ctx, mockClient, " ") + + require.NoError(t, err) + assert.False(t, removed) + mockClient.AssertNotCalled(t, "VolumeRemove") + }) + + t.Run("returns true when volume removed successfully", func(t *testing.T) { + t.Parallel() + + mockClient := docker.NewMockAPIClient(t) + ctx := context.Background() + + mockClient.EXPECT(). + VolumeRemove(ctx, "test-volume", false). + Return(nil). + Once() + + removed, err := docker.RemoveRegistryVolume(ctx, mockClient, "test-volume") + + require.NoError(t, err) + assert.True(t, removed) + }) + + t.Run("trims whitespace from volume name", func(t *testing.T) { + t.Parallel() + + mockClient := docker.NewMockAPIClient(t) + ctx := context.Background() + + mockClient.EXPECT(). + VolumeRemove(ctx, "test-volume", false). + Return(nil). + Once() + + removed, err := docker.RemoveRegistryVolume(ctx, mockClient, " test-volume ") + + require.NoError(t, err) + assert.True(t, removed) + }) + + t.Run("returns false when volume not found", func(t *testing.T) { + t.Parallel() + + mockClient := docker.NewMockAPIClient(t) + ctx := context.Background() + + mockClient.EXPECT(). + VolumeRemove(ctx, "missing-volume", false). + Return(testNotFoundError{}). + Once() + + removed, err := docker.RemoveRegistryVolume(ctx, mockClient, "missing-volume") + + require.NoError(t, err) + assert.False(t, removed) + }) + + t.Run("returns error on unexpected failure", func(t *testing.T) { + t.Parallel() + + mockClient := docker.NewMockAPIClient(t) + ctx := context.Background() + + mockClient.EXPECT(). + VolumeRemove(ctx, "test-volume", false). + Return(errors.New("permission denied")). + Once() + + removed, err := docker.RemoveRegistryVolume(ctx, mockClient, "test-volume") + + require.Error(t, err) + assert.False(t, removed) + assert.Contains(t, err.Error(), "failed to remove registry volume") + }) +} diff --git a/pkg/client/oci/verifier.go b/pkg/client/oci/verifier.go index 765fcc417..c72a744ef 100644 --- a/pkg/client/oci/verifier.go +++ b/pkg/client/oci/verifier.go @@ -61,7 +61,7 @@ type verifier struct{} // NewRegistryVerifier creates a new registry verifier. 
// -//nolint:ireturn // interface return is intentional for testability +//nolint:ireturn // Interface return is intentional for dependency injection func NewRegistryVerifier() RegistryVerifier { return &verifier{} } diff --git a/pkg/client/oci/verifier_test.go b/pkg/client/oci/verifier_test.go new file mode 100644 index 000000000..f3644af75 --- /dev/null +++ b/pkg/client/oci/verifier_test.go @@ -0,0 +1,140 @@ +package oci_test + +import ( + "context" + "testing" + + "github.com/devantler-tech/ksail/v5/pkg/client/oci" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestVerifyOptions_EmptyEndpoint(t *testing.T) { + t.Parallel() + + verifier := oci.NewRegistryVerifier() + + err := verifier.VerifyAccess(context.Background(), oci.VerifyOptions{ + RegistryEndpoint: "", + Repository: "test", + }) + + require.Error(t, err) + assert.Equal(t, oci.ErrRegistryEndpointRequired, err) +} + +func TestVerifyRegistryAccessWithTimeout_EmptyEndpoint(t *testing.T) { + t.Parallel() + + err := oci.VerifyRegistryAccessWithTimeout( + context.Background(), + oci.VerifyOptions{ + RegistryEndpoint: "", + Repository: "test", + }, + 100, // timeout + ) + + require.Error(t, err) + assert.Contains(t, err.Error(), "registry endpoint is required") +} + +//nolint:funlen // Table-driven test with many cases naturally exceeds limit +func TestErrorVariables(t *testing.T) { + t.Parallel() + + type testCase struct { + name string + err error + contains string + } + + tests := []testCase{ + { + name: "ErrRegistryUnreachable", + err: oci.ErrRegistryUnreachable, + contains: "registry is unreachable", + }, + { + name: "ErrRegistryAuthRequired", + err: oci.ErrRegistryAuthRequired, + contains: "registry requires authentication", + }, + { + name: "ErrRegistryPermissionDenied", + err: oci.ErrRegistryPermissionDenied, + contains: "registry access denied", + }, + { + name: "ErrRegistryNotFound", + err: oci.ErrRegistryNotFound, + contains: "registry or repository not found", + }, + { + name: "ErrSourcePathRequired", + err: oci.ErrSourcePathRequired, + contains: "source path is required", + }, + { + name: "ErrSourcePathNotFound", + err: oci.ErrSourcePathNotFound, + contains: "source path does not exist", + }, + { + name: "ErrSourcePathNotDirectory", + err: oci.ErrSourcePathNotDirectory, + contains: "source path must be a directory", + }, + { + name: "ErrRegistryEndpointRequired", + err: oci.ErrRegistryEndpointRequired, + contains: "registry endpoint is required", + }, + { + name: "ErrVersionRequired", + err: oci.ErrVersionRequired, + contains: "version is required", + }, + { + name: "ErrNoManifestFiles", + err: oci.ErrNoManifestFiles, + contains: "no manifest files found", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + require.Error(t, tc.err) + assert.Contains(t, tc.err.Error(), tc.contains) + }) + } +} + +func TestErrorsAreDistinct(t *testing.T) { + t.Parallel() + + errs := []error{ + oci.ErrRegistryUnreachable, + oci.ErrRegistryAuthRequired, + oci.ErrRegistryPermissionDenied, + oci.ErrRegistryNotFound, + oci.ErrSourcePathRequired, + oci.ErrSourcePathNotFound, + oci.ErrSourcePathNotDirectory, + oci.ErrRegistryEndpointRequired, + oci.ErrVersionRequired, + oci.ErrNoManifestFiles, + } + + // Verify all errors are distinct + for i, err1 := range errs { + for j, err2 := range errs { + if i != j { + assert.NotErrorIs(t, err1, err2, + "error %q should not match %q", err1, err2) + } + } + } +} diff --git a/pkg/io/config-manager/helpers_test.go 
b/pkg/io/config-manager/helpers_test.go index 3cad54e17..b2fbbb62e 100644 --- a/pkg/io/config-manager/helpers_test.go +++ b/pkg/io/config-manager/helpers_test.go @@ -10,6 +10,15 @@ import ( v1alpha4 "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" ) +// mockClusterNameProvider is a mock implementation of ClusterNameProvider for testing. +type mockClusterNameProvider struct { + name string +} + +func (m *mockClusterNameProvider) GetClusterName() string { + return m.name +} + func TestGetClusterName(t *testing.T) { t.Parallel() @@ -30,10 +39,22 @@ func TestGetClusterName(t *testing.T) { }, wantName: "k3d-custom", }, + "cluster name provider interface": { + config: &mockClusterNameProvider{name: "talos-custom"}, + wantName: "talos-custom", + }, + "cluster name provider with empty name": { + config: &mockClusterNameProvider{name: ""}, + wantName: "", + }, "unsupported type": { config: 123, wantErr: true, }, + "nil config": { + config: nil, + wantErr: true, + }, } for name, testCase := range tests { diff --git a/pkg/io/errors_test.go b/pkg/io/errors_test.go new file mode 100644 index 000000000..dfbe3339d --- /dev/null +++ b/pkg/io/errors_test.go @@ -0,0 +1,93 @@ +package io_test + +import ( + "fmt" + "testing" + + io "github.com/devantler-tech/ksail/v5/pkg/io" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestErrorVariables(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + err error + expectedMsg string + }{ + { + name: "ErrPathOutsideBase is defined", + err: io.ErrPathOutsideBase, + expectedMsg: "invalid path: file is outside base directory", + }, + { + name: "ErrEmptyOutputPath is defined", + err: io.ErrEmptyOutputPath, + expectedMsg: "output path cannot be empty", + }, + { + name: "ErrBasePath is defined", + err: io.ErrBasePath, + expectedMsg: "base path cannot be empty", + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + require.Error(t, testCase.err) + assert.Equal(t, testCase.expectedMsg, testCase.err.Error()) + }) + } +} + +func TestErrorsAreDistinct(t *testing.T) { + t.Parallel() + + allErrors := []error{ + io.ErrPathOutsideBase, + io.ErrEmptyOutputPath, + io.ErrBasePath, + } + + // Verify all errors are distinct from each other + for index := range allErrors { + for innerIndex := index + 1; innerIndex < len(allErrors); innerIndex++ { + assert.NotErrorIs( + t, + allErrors[index], allErrors[innerIndex], + "errors at index %d and %d should be distinct", + index, + innerIndex, + ) + } + } +} + +func TestErrorsCanBeWrapped(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + sentinel error + }{ + {name: "ErrPathOutsideBase can be wrapped", sentinel: io.ErrPathOutsideBase}, + {name: "ErrEmptyOutputPath can be wrapped", sentinel: io.ErrEmptyOutputPath}, + {name: "ErrBasePath can be wrapped", sentinel: io.ErrBasePath}, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + // Wrap the error using fmt.Errorf with %w + wrapped := fmt.Errorf("context: %w", testCase.sentinel) + + // Verify error wrapping works correctly with errors.Is + assert.ErrorIs(t, wrapped, testCase.sentinel) + }) + } +} diff --git a/pkg/io/generator/generator_test.go b/pkg/io/generator/generator_test.go index cbb718f02..b6a3015c7 100644 --- a/pkg/io/generator/generator_test.go +++ b/pkg/io/generator/generator_test.go @@ -15,7 +15,7 @@ type buildOCIURLTestCase struct { expected string } -func getBuildOCIURLTestCases() []buildOCIURLTestCase { 
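+// getDefaultAndCustomValueTestCases covers the default and custom host/port
+// combinations; the original helper is split in two, presumably to keep each
+// function within lint length limits.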
+func getDefaultAndCustomValueTestCases() []buildOCIURLTestCase { return []buildOCIURLTestCase{ { name: "with all default values", @@ -52,6 +52,11 @@ func getBuildOCIURLTestCases() []buildOCIURLTestCase { projectName: "test-app", expected: "oci://registry.example.com:9000/test-app", }, + } +} + +func getIPAndEdgeCaseTestCases() []buildOCIURLTestCase { + return []buildOCIURLTestCase{ { name: "with IPv4 host", host: "192.168.1.100", @@ -73,9 +78,30 @@ func getBuildOCIURLTestCases() []buildOCIURLTestCase { projectName: "my-awesome-project", expected: "oci://registry.localhost:5000/my-awesome-project", }, + { + name: "with negative port for external HTTPS registry", + host: "ghcr.io", + port: -1, + projectName: "org/repo", + expected: "oci://ghcr.io/org/repo", + }, + { + name: "with negative port and default host", + host: "", + port: -1, + projectName: "my-project", + expected: "oci://ksail-registry.localhost/my-project", + }, } } +func getBuildOCIURLTestCases() []buildOCIURLTestCase { + tests := getDefaultAndCustomValueTestCases() + tests = append(tests, getIPAndEdgeCaseTestCases()...) + + return tests +} + func TestBuildOCIURL(t *testing.T) { t.Parallel() diff --git a/pkg/io/generator/talos/generator_test.go b/pkg/io/generator/talos/generator_test.go index d32e97747..0bb39017d 100644 --- a/pkg/io/generator/talos/generator_test.go +++ b/pkg/io/generator/talos/generator_test.go @@ -272,3 +272,350 @@ func TestTalosGenerator_Generate_NoDisableCNIPatchWhenFalse(t *testing.T) { _, err = os.Stat(gitkeepPath) require.NoError(t, err, "expected .gitkeep in cluster/ when no patches generated") } + +func TestTalosGenerator_Generate_AllowSchedulingOnControlPlanes(t *testing.T) { + t.Parallel() + + tempDir := t.TempDir() + gen := talosgenerator.NewTalosGenerator() + + config := &talosgenerator.TalosConfig{ + PatchesDir: "talos", + WorkerNodes: 0, // Zero workers triggers allow-scheduling patch + } + opts := yamlgenerator.Options{ + Output: tempDir, + } + + result, err := gen.Generate(config, opts) + require.NoError(t, err) + assert.Equal(t, filepath.Join(tempDir, "talos"), result) + + // Verify allow-scheduling-on-control-planes.yaml was created + clusterDir := filepath.Join(tempDir, "talos", "cluster") + patchPath := filepath.Join(clusterDir, "allow-scheduling-on-control-planes.yaml") + content, err := os.ReadFile(patchPath) //nolint:gosec // Test file path is safe + require.NoError(t, err) + assert.Contains(t, string(content), "cluster:") + assert.Contains(t, string(content), "allowSchedulingOnControlPlanes: true") + + // Verify .gitkeep was NOT created in cluster/ since we have a patch there + gitkeepPath := filepath.Join(clusterDir, ".gitkeep") + _, err = os.Stat(gitkeepPath) + assert.True(t, os.IsNotExist(err), "expected .gitkeep to not exist when patches are generated") +} + +func TestTalosGenerator_Generate_MirrorRegistries(t *testing.T) { + t.Parallel() + + tempDir := t.TempDir() + gen := talosgenerator.NewTalosGenerator() + + config := &talosgenerator.TalosConfig{ + PatchesDir: "talos", + WorkerNodes: 1, + MirrorRegistries: []string{ + "docker.io=https://registry-1.docker.io", + "gcr.io=https://gcr.io", + }, + } + opts := yamlgenerator.Options{ + Output: tempDir, + } + + result, err := gen.Generate(config, opts) + require.NoError(t, err) + assert.Equal(t, filepath.Join(tempDir, "talos"), result) + + // Verify mirror-registries.yaml was created + patchPath := filepath.Join(tempDir, "talos", "cluster", "mirror-registries.yaml") + content, err := os.ReadFile(patchPath) //nolint:gosec // Test file 
path is safe + require.NoError(t, err) + assert.Contains(t, string(content), "machine:") + assert.Contains(t, string(content), "registries:") + assert.Contains(t, string(content), "mirrors:") + assert.Contains(t, string(content), "docker.io:") + assert.Contains(t, string(content), "gcr.io:") + assert.Contains(t, string(content), "endpoints:") + assert.Contains(t, string(content), "http://") + + // Verify .gitkeep was NOT created in cluster/ since we have a patch there + gitkeepPath := filepath.Join(tempDir, "talos", "cluster", ".gitkeep") + _, err = os.Stat(gitkeepPath) + assert.True(t, os.IsNotExist(err), "expected .gitkeep to not exist when patches are generated") +} + +func TestTalosGenerator_Generate_EmptyMirrorRegistries(t *testing.T) { + t.Parallel() + + tempDir := t.TempDir() + gen := talosgenerator.NewTalosGenerator() + + config := &talosgenerator.TalosConfig{ + PatchesDir: "talos", + WorkerNodes: 1, + MirrorRegistries: []string{}, // Empty array should not create patch + } + opts := yamlgenerator.Options{ + Output: tempDir, + } + + _, err := gen.Generate(config, opts) + require.NoError(t, err) + + // Verify mirror-registries.yaml was NOT created + patchPath := filepath.Join(tempDir, "talos", "cluster", "mirror-registries.yaml") + _, err = os.Stat(patchPath) + assert.True(t, os.IsNotExist(err), "expected mirror-registries.yaml to not exist") +} + +func TestTalosGenerator_Generate_KubeletCertRotation(t *testing.T) { + t.Parallel() + + tempDir := t.TempDir() + gen := talosgenerator.NewTalosGenerator() + + config := &talosgenerator.TalosConfig{ + PatchesDir: "talos", + WorkerNodes: 1, + EnableKubeletCertRotation: true, + } + opts := yamlgenerator.Options{ + Output: tempDir, + } + + result, err := gen.Generate(config, opts) + require.NoError(t, err) + assert.Equal(t, filepath.Join(tempDir, "talos"), result) + + // Verify kubelet-cert-rotation.yaml was created + certRotationPath := filepath.Join(tempDir, "talos", "cluster", "kubelet-cert-rotation.yaml") + content, err := os.ReadFile(certRotationPath) //nolint:gosec // Test file path is safe + require.NoError(t, err) + assert.Contains(t, string(content), "machine:") + assert.Contains(t, string(content), "kubelet:") + assert.Contains(t, string(content), "extraArgs:") + assert.Contains(t, string(content), "rotate-server-certificates") + assert.Contains(t, string(content), `"true"`) + + // Verify kubelet-csr-approver.yaml was also created + csrApproverPath := filepath.Join(tempDir, "talos", "cluster", "kubelet-csr-approver.yaml") + csrContent, err := os.ReadFile(csrApproverPath) //nolint:gosec // Test file path is safe + require.NoError(t, err) + assert.Contains(t, string(csrContent), "cluster:") + assert.Contains(t, string(csrContent), "extraManifests:") + assert.Contains(t, string(csrContent), talosgenerator.KubeletServingCertApproverManifestURL) +} + +func TestTalosGenerator_Generate_NoKubeletCertRotationPatchWhenFalse(t *testing.T) { + t.Parallel() + + tempDir := t.TempDir() + gen := talosgenerator.NewTalosGenerator() + + config := &talosgenerator.TalosConfig{ + PatchesDir: "talos", + WorkerNodes: 1, + EnableKubeletCertRotation: false, + } + opts := yamlgenerator.Options{ + Output: tempDir, + } + + _, err := gen.Generate(config, opts) + require.NoError(t, err) + + // Verify kubelet-cert-rotation.yaml was NOT created + certRotationPath := filepath.Join(tempDir, "talos", "cluster", "kubelet-cert-rotation.yaml") + _, err = os.Stat(certRotationPath) + assert.True(t, os.IsNotExist(err), "expected kubelet-cert-rotation.yaml to not exist") + + 
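+	// The CSR approver manifest only exists to approve rotated serving
+	// certificates, so it is expected to be omitted alongside the rotation patch.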
// Verify kubelet-csr-approver.yaml was also NOT created + csrApproverPath := filepath.Join(tempDir, "talos", "cluster", "kubelet-csr-approver.yaml") + _, err = os.Stat(csrApproverPath) + assert.True(t, os.IsNotExist(err), "expected kubelet-csr-approver.yaml to not exist") +} + +func TestTalosGenerator_Generate_ClusterName(t *testing.T) { + t.Parallel() + + tempDir := t.TempDir() + gen := talosgenerator.NewTalosGenerator() + + config := &talosgenerator.TalosConfig{ + PatchesDir: "talos", + WorkerNodes: 1, + ClusterName: "my-custom-cluster", + } + opts := yamlgenerator.Options{ + Output: tempDir, + } + + result, err := gen.Generate(config, opts) + require.NoError(t, err) + assert.Equal(t, filepath.Join(tempDir, "talos"), result) + + // Verify cluster-name.yaml was created + patchPath := filepath.Join(tempDir, "talos", "cluster", "cluster-name.yaml") + content, err := os.ReadFile(patchPath) //nolint:gosec // Test file path is safe + require.NoError(t, err) + assert.Contains(t, string(content), "cluster:") + assert.Contains(t, string(content), "clusterName: my-custom-cluster") +} + +func TestTalosGenerator_Generate_NoClusterNamePatchWhenEmpty(t *testing.T) { + t.Parallel() + + tempDir := t.TempDir() + gen := talosgenerator.NewTalosGenerator() + + config := &talosgenerator.TalosConfig{ + PatchesDir: "talos", + WorkerNodes: 1, + ClusterName: "", // Empty should not create patch + } + opts := yamlgenerator.Options{ + Output: tempDir, + } + + _, err := gen.Generate(config, opts) + require.NoError(t, err) + + // Verify cluster-name.yaml was NOT created + patchPath := filepath.Join(tempDir, "talos", "cluster", "cluster-name.yaml") + _, err = os.Stat(patchPath) + assert.True(t, os.IsNotExist(err), "expected cluster-name.yaml to not exist") +} + +func TestTalosGenerator_Generate_AllPatchesCombined(t *testing.T) { + t.Parallel() + + tempDir := t.TempDir() + gen := talosgenerator.NewTalosGenerator() + + config := &talosgenerator.TalosConfig{ + PatchesDir: "talos", + WorkerNodes: 0, // Triggers allow-scheduling patch + MirrorRegistries: []string{"docker.io=https://registry-1.docker.io"}, + DisableDefaultCNI: true, + EnableKubeletCertRotation: true, + ClusterName: "test-cluster", + } + opts := yamlgenerator.Options{ + Output: tempDir, + } + + result, err := gen.Generate(config, opts) + require.NoError(t, err) + assert.Equal(t, filepath.Join(tempDir, "talos"), result) + + // Verify all patches were created + clusterDir := filepath.Join(tempDir, "talos", "cluster") + + // Check allow-scheduling patch + _, err = os.Stat(filepath.Join(clusterDir, "allow-scheduling-on-control-planes.yaml")) + require.NoError(t, err, "expected allow-scheduling-on-control-planes.yaml") + + // Check mirror registries patch + _, err = os.Stat(filepath.Join(clusterDir, "mirror-registries.yaml")) + require.NoError(t, err, "expected mirror-registries.yaml") + + // Check disable CNI patch + _, err = os.Stat(filepath.Join(clusterDir, "disable-default-cni.yaml")) + require.NoError(t, err, "expected disable-default-cni.yaml") + + // Check kubelet cert rotation patch + _, err = os.Stat(filepath.Join(clusterDir, "kubelet-cert-rotation.yaml")) + require.NoError(t, err, "expected kubelet-cert-rotation.yaml") + + // Check kubelet CSR approver patch + _, err = os.Stat(filepath.Join(clusterDir, "kubelet-csr-approver.yaml")) + require.NoError(t, err, "expected kubelet-csr-approver.yaml") + + // Check cluster name patch + _, err = os.Stat(filepath.Join(clusterDir, "cluster-name.yaml")) + require.NoError(t, err, "expected cluster-name.yaml") + + 
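+	// Placeholder handling: cluster/ holds patches, the node directories do not.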
// Verify .gitkeep was NOT created in cluster/ since we have patches there + gitkeepPath := filepath.Join(clusterDir, ".gitkeep") + _, err = os.Stat(gitkeepPath) + assert.True(t, os.IsNotExist(err), "expected .gitkeep to not exist when patches are generated") + + // Verify .gitkeep WAS created in other directories + _, err = os.Stat(filepath.Join(tempDir, "talos", "control-planes", ".gitkeep")) + require.NoError(t, err, "expected .gitkeep in control-planes/") + _, err = os.Stat(filepath.Join(tempDir, "talos", "workers", ".gitkeep")) + require.NoError(t, err, "expected .gitkeep in workers/") +} + +func TestTalosGenerator_Generate_SkipsExistingPatchesWithoutForce(t *testing.T) { + t.Parallel() + + tempDir := t.TempDir() + gen := talosgenerator.NewTalosGenerator() + + // Create an existing patch with custom content + clusterDir := filepath.Join(tempDir, "talos", "cluster") + err := os.MkdirAll(clusterDir, 0o750) + require.NoError(t, err) + + patchPath := filepath.Join(clusterDir, "disable-default-cni.yaml") + err = os.WriteFile(patchPath, []byte("existing content"), 0o600) + require.NoError(t, err) + + config := &talosgenerator.TalosConfig{ + PatchesDir: "talos", + WorkerNodes: 1, + DisableDefaultCNI: true, + } + opts := yamlgenerator.Options{ + Output: tempDir, + Force: false, + } + + result, err := gen.Generate(config, opts) + require.NoError(t, err) + assert.Equal(t, filepath.Join(tempDir, "talos"), result) + + // Verify existing file content was preserved + content, err := os.ReadFile(patchPath) //nolint:gosec // Test file path is safe + require.NoError(t, err) + assert.Equal(t, "existing content", string(content)) +} + +func TestTalosGenerator_Generate_OverwritesExistingPatchesWithForce(t *testing.T) { + t.Parallel() + + tempDir := t.TempDir() + gen := talosgenerator.NewTalosGenerator() + + // Create an existing patch with custom content + clusterDir := filepath.Join(tempDir, "talos", "cluster") + err := os.MkdirAll(clusterDir, 0o750) + require.NoError(t, err) + + patchPath := filepath.Join(clusterDir, "disable-default-cni.yaml") + err = os.WriteFile(patchPath, []byte("existing content"), 0o600) + require.NoError(t, err) + + config := &talosgenerator.TalosConfig{ + PatchesDir: "talos", + WorkerNodes: 1, + DisableDefaultCNI: true, + } + opts := yamlgenerator.Options{ + Output: tempDir, + Force: true, + } + + result, err := gen.Generate(config, opts) + require.NoError(t, err) + assert.Equal(t, filepath.Join(tempDir, "talos"), result) + + // Verify file was overwritten with new content + content, err := os.ReadFile(patchPath) //nolint:gosec // Test file path is safe + require.NoError(t, err) + assert.Contains(t, string(content), "cluster:") + assert.Contains(t, string(content), "cni:") + assert.Contains(t, string(content), "name: none") +} diff --git a/pkg/io/marshaller/yaml_marshaller_test.go b/pkg/io/marshaller/yaml_marshaller_test.go index ed3663109..3e5f572d0 100644 --- a/pkg/io/marshaller/yaml_marshaller_test.go +++ b/pkg/io/marshaller/yaml_marshaller_test.go @@ -1,3 +1,6 @@ +// Package marshaller_test provides unit tests for the marshaller package. +// +//nolint:funlen // Table-driven tests are naturally long package marshaller_test import ( @@ -13,6 +16,14 @@ type TestModel struct { Value int `yaml:"value"` } +// NestedModel tests marshalling nested structures. 
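+// It combines a pointer, a slice, and a map so the tests exercise the YAML
+// shapes most likely to break round-tripping. A populated value marshals
+// roughly as (exact indentation depends on the marshaller):
+//
+//	ID: parent
+//	Inner:
+//	  Name: child
+//	  Value: 10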
+type NestedModel struct { + ID string `yaml:"id"` + Inner *TestModel `yaml:"inner,omitempty"` + Items []TestModel `yaml:"items,omitempty"` + Metadata map[string]string `yaml:"metadata,omitempty"` +} + func TestYAMLMarshaller_Marshal(t *testing.T) { t.Parallel() @@ -34,6 +45,24 @@ func TestYAMLMarshaller_Marshal(t *testing.T) { expected: "Name: \"\"\nValue: 0\n", wantErr: false, }, + { + name: "marshal model with special characters", + model: TestModel{Name: "test: value", Value: 0}, + expected: "Name: 'test: value'\nValue: 0\n", + wantErr: false, + }, + { + name: "marshal model with unicode", + model: TestModel{Name: "tëst™", Value: 100}, + expected: "Name: tëst™\nValue: 100\n", + wantErr: false, + }, + { + name: "marshal model with negative value", + model: TestModel{Name: "negative", Value: -42}, + expected: "Name: negative\nValue: -42\n", + wantErr: false, + }, } for _, testCase := range tests { @@ -55,6 +84,87 @@ func TestYAMLMarshaller_Marshal(t *testing.T) { } } +func TestYAMLMarshaller_Marshal_Nested(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + model NestedModel + contains []string + wantErr bool + }{ + { + name: "marshal nested model with pointer", + model: NestedModel{ + ID: "parent", + Inner: &TestModel{Name: "child", Value: 10}, + }, + contains: []string{"ID: parent", "Inner:", "Name: child", "Value: 10"}, + wantErr: false, + }, + { + name: "marshal nested model with nil pointer", + model: NestedModel{ + ID: "alone", + Inner: nil, + }, + contains: []string{"ID: alone"}, + wantErr: false, + }, + { + name: "marshal model with slice", + model: NestedModel{ + ID: "list", + Items: []TestModel{ + {Name: "first", Value: 1}, + {Name: "second", Value: 2}, + }, + }, + contains: []string{"ID: list", "Items:", "Name: first", "Name: second"}, + wantErr: false, + }, + { + name: "marshal model with empty slice", + model: NestedModel{ + ID: "empty-list", + Items: []TestModel{}, + }, + contains: []string{"ID: empty-list"}, + wantErr: false, + }, + { + name: "marshal model with map", + model: NestedModel{ + ID: "with-map", + Metadata: map[string]string{"key1": "val1", "key2": "val2"}, + }, + contains: []string{"ID: with-map", "Metadata:", "key1: val1", "key2: val2"}, + wantErr: false, + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + m := marshaller.NewYAMLMarshaller[NestedModel]() + got, err := m.Marshal(testCase.model) + + if testCase.wantErr { + require.Error(t, err) + + return + } + + require.NoError(t, err) + + for _, substr := range testCase.contains { + assert.Contains(t, got, substr) + } + }) + } +} + func TestYAMLMarshaller_Unmarshal(t *testing.T) { t.Parallel() @@ -82,6 +192,30 @@ func TestYAMLMarshaller_Unmarshal(t *testing.T) { expected: TestModel{}, wantErr: true, }, + { + name: "unmarshal YAML with extra fields ignored", + data: []byte("Name: test\nValue: 42\nextra: ignored\n"), + expected: TestModel{Name: "test", Value: 42}, + wantErr: false, + }, + { + name: "unmarshal YAML with whitespace", + data: []byte("\n\nName: test\n\nValue: 42\n\n"), + expected: TestModel{Name: "test", Value: 42}, + wantErr: false, + }, + { + name: "unmarshal quoted values", + data: []byte("Name: \"test: value\"\nValue: 0\n"), + expected: TestModel{Name: "test: value", Value: 0}, + wantErr: false, + }, + { + name: "unmarshal nil data", + data: nil, + expected: TestModel{}, + wantErr: false, + }, } for _, testCase := range tests { @@ -106,6 +240,71 @@ func TestYAMLMarshaller_Unmarshal(t *testing.T) { } } +func 
TestYAMLMarshaller_Unmarshal_Nested(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + data []byte + expected NestedModel + wantErr bool + }{ + { + name: "unmarshal nested structure", + data: []byte("ID: parent\nInner:\n Name: child\n Value: 10\n"), + expected: NestedModel{ + ID: "parent", + Inner: &TestModel{Name: "child", Value: 10}, + }, + wantErr: false, + }, + { + name: "unmarshal with slice", + data: []byte( + "ID: list\nItems:\n - Name: first\n Value: 1\n - Name: second\n Value: 2\n", + ), + expected: NestedModel{ + ID: "list", + Items: []TestModel{ + {Name: "first", Value: 1}, + {Name: "second", Value: 2}, + }, + }, + wantErr: false, + }, + { + name: "unmarshal with map", + data: []byte("ID: with-map\nMetadata:\n key1: val1\n key2: val2\n"), + expected: NestedModel{ + ID: "with-map", + Metadata: map[string]string{"key1": "val1", "key2": "val2"}, + }, + wantErr: false, + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + m := marshaller.NewYAMLMarshaller[NestedModel]() + + var got NestedModel + + err := m.Unmarshal(testCase.data, &got) + + if testCase.wantErr { + require.Error(t, err) + + return + } + + require.NoError(t, err) + assert.Equal(t, testCase.expected, got) + }) + } +} + func TestYAMLMarshaller_UnmarshalString(t *testing.T) { t.Parallel() @@ -133,6 +332,12 @@ func TestYAMLMarshaller_UnmarshalString(t *testing.T) { expected: TestModel{}, wantErr: true, }, + { + name: "unmarshal multiline string values", + data: "Name: |\n multiline\n value\nValue: 0\n", + expected: TestModel{Name: "multiline\nvalue\n", Value: 0}, + wantErr: false, + }, } for _, testCase := range tests { @@ -156,3 +361,113 @@ func TestYAMLMarshaller_UnmarshalString(t *testing.T) { }) } } + +func TestYAMLMarshaller_RoundTrip(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + model TestModel + }{ + { + name: "simple model round trip", + model: TestModel{Name: "test", Value: 42}, + }, + { + name: "empty model round trip", + model: TestModel{}, + }, + { + name: "model with max int value", + model: TestModel{Name: "max", Value: 2147483647}, + }, + { + name: "model with min int value", + model: TestModel{Name: "min", Value: -2147483648}, + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + yamlMarshaller := marshaller.NewYAMLMarshaller[TestModel]() + + // Marshal to string + yamlStr, err := yamlMarshaller.Marshal(testCase.model) + require.NoError(t, err) + + // Unmarshal back + var result TestModel + + err = yamlMarshaller.UnmarshalString(yamlStr, &result) + require.NoError(t, err) + + // Verify round-trip + assert.Equal(t, testCase.model, result) + }) + } +} + +func TestYAMLMarshaller_RoundTrip_Nested(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + model NestedModel + }{ + { + name: "nested model with all fields", + model: NestedModel{ + ID: "full", + Inner: &TestModel{Name: "inner", Value: 100}, + Items: []TestModel{ + {Name: "item1", Value: 1}, + {Name: "item2", Value: 2}, + }, + Metadata: map[string]string{"a": "b", "c": "d"}, + }, + }, + { + name: "nested model with nil pointer", + model: NestedModel{ + ID: "partial", + Inner: nil, + }, + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + yamlMarshaller := marshaller.NewYAMLMarshaller[NestedModel]() + + // Marshal to string + yamlStr, err := yamlMarshaller.Marshal(testCase.model) + require.NoError(t, err) + + // Unmarshal back + 
var result NestedModel + + err = yamlMarshaller.UnmarshalString(yamlStr, &result) + require.NoError(t, err) + + // Verify round-trip + assert.Equal(t, testCase.model, result) + }) + } +} + +func TestNewYAMLMarshaller_Interface(t *testing.T) { + t.Parallel() + + // Verify NewYAMLMarshaller returns the Marshaller interface + m := marshaller.NewYAMLMarshaller[TestModel]() + require.NotNil(t, m) + + // Verify it can marshal + output, err := m.Marshal(TestModel{Name: "interface-test", Value: 123}) + require.NoError(t, err) + assert.Contains(t, output, "interface-test") +} diff --git a/pkg/io/validator/metadata_test.go b/pkg/io/validator/metadata_test.go new file mode 100644 index 000000000..66833beb5 --- /dev/null +++ b/pkg/io/validator/metadata_test.go @@ -0,0 +1,253 @@ +package validator_test + +import ( + "testing" + + "github.com/devantler-tech/ksail/v5/pkg/io/validator" + "github.com/stretchr/testify/assert" +) + +type validateMetadataTestCase struct { + name string + kind string + apiVersion string + expectedKind string + expectedAPIVersion string + expectedValid bool + expectedErrorCount int + expectedErrorFields []string +} + +func getValidMetadataTestCases() []validateMetadataTestCase { + return []validateMetadataTestCase{ + { + name: "valid metadata", + kind: "Cluster", + apiVersion: "ksail.io/v1alpha1", + expectedKind: "Cluster", + expectedAPIVersion: "ksail.io/v1alpha1", + expectedValid: true, + expectedErrorCount: 0, + }, + } +} + +func getMissingFieldsTestCases() []validateMetadataTestCase { + return []validateMetadataTestCase{ + { + name: "missing kind", + kind: "", + apiVersion: "ksail.io/v1alpha1", + expectedKind: "Cluster", + expectedAPIVersion: "ksail.io/v1alpha1", + expectedValid: false, + expectedErrorCount: 1, + expectedErrorFields: []string{"kind"}, + }, + { + name: "missing apiVersion", + kind: "Cluster", + apiVersion: "", + expectedKind: "Cluster", + expectedAPIVersion: "ksail.io/v1alpha1", + expectedValid: false, + expectedErrorCount: 1, + expectedErrorFields: []string{"apiVersion"}, + }, + { + name: "missing both kind and apiVersion", + kind: "", + apiVersion: "", + expectedKind: "Cluster", + expectedAPIVersion: "ksail.io/v1alpha1", + expectedValid: false, + expectedErrorCount: 2, + expectedErrorFields: []string{"kind", "apiVersion"}, + }, + } +} + +func getWrongValueTestCases() []validateMetadataTestCase { + return []validateMetadataTestCase{ + { + name: "wrong kind", + kind: "WrongKind", + apiVersion: "ksail.io/v1alpha1", + expectedKind: "Cluster", + expectedAPIVersion: "ksail.io/v1alpha1", + expectedValid: false, + expectedErrorCount: 1, + expectedErrorFields: []string{"kind"}, + }, + { + name: "wrong apiVersion", + kind: "Cluster", + apiVersion: "ksail.io/v1beta1", + expectedKind: "Cluster", + expectedAPIVersion: "ksail.io/v1alpha1", + expectedValid: false, + expectedErrorCount: 1, + expectedErrorFields: []string{"apiVersion"}, + }, + { + name: "both kind and apiVersion wrong", + kind: "Pod", + apiVersion: "v1", + expectedKind: "Cluster", + expectedAPIVersion: "ksail.io/v1alpha1", + expectedValid: false, + expectedErrorCount: 2, + expectedErrorFields: []string{"kind", "apiVersion"}, + }, + } +} + +func getCaseSensitivityTestCases() []validateMetadataTestCase { + return []validateMetadataTestCase{ + { + name: "case sensitive kind match", + kind: "cluster", + apiVersion: "ksail.io/v1alpha1", + expectedKind: "Cluster", + expectedAPIVersion: "ksail.io/v1alpha1", + expectedValid: false, + expectedErrorCount: 1, + }, + { + name: "case sensitive apiVersion match", + 
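+			// Matching is byte-exact: "KSAIL.IO/V1ALPHA1" must not satisfy
+			// the lowercase expectation below.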
kind: "Cluster", + apiVersion: "KSAIL.IO/V1ALPHA1", + expectedKind: "Cluster", + expectedAPIVersion: "ksail.io/v1alpha1", + expectedValid: false, + expectedErrorCount: 1, + }, + } +} + +func getValidateMetadataTestCases() []validateMetadataTestCase { + testCases := getValidMetadataTestCases() + testCases = append(testCases, getMissingFieldsTestCases()...) + testCases = append(testCases, getWrongValueTestCases()...) + testCases = append(testCases, getCaseSensitivityTestCases()...) + + return testCases +} + +// TestValidateMetadata tests the ValidateMetadata function for various scenarios. +func TestValidateMetadata(t *testing.T) { + t.Parallel() + + tests := getValidateMetadataTestCases() + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + result := validator.NewValidationResult("test-config.yaml") + + validator.ValidateMetadata( + testCase.kind, + testCase.apiVersion, + testCase.expectedKind, + testCase.expectedAPIVersion, + result, + ) + + assert.Equal(t, testCase.expectedValid, result.Valid) + assert.Len(t, result.Errors, testCase.expectedErrorCount) + + // Verify expected error fields + errorFields := make([]string, 0, len(result.Errors)) + for _, err := range result.Errors { + errorFields = append(errorFields, err.Field) + } + + for _, expectedField := range testCase.expectedErrorFields { + assert.Contains(t, errorFields, expectedField) + } + }) + } +} + +// TestValidateMetadata_MissingKindErrorMessage tests the error message for missing kind. +func TestValidateMetadata_MissingKindErrorMessage(t *testing.T) { + t.Parallel() + + result := validator.NewValidationResult("test.yaml") + validator.ValidateMetadata("", "v1alpha1", "Cluster", "v1alpha1", result) + + assert.Len(t, result.Errors, 1) + err := result.Errors[0] + assert.Equal(t, "kind", err.Field) + assert.Equal(t, "kind is required", err.Message) + assert.Equal(t, "Cluster", err.ExpectedValue) + assert.Contains(t, err.FixSuggestion, "Cluster") +} + +// TestValidateMetadata_WrongKindErrorMessage tests the error message for wrong kind. +func TestValidateMetadata_WrongKindErrorMessage(t *testing.T) { + t.Parallel() + + result := validator.NewValidationResult("test.yaml") + validator.ValidateMetadata("WrongKind", "v1alpha1", "Cluster", "v1alpha1", result) + + assert.Len(t, result.Errors, 1) + err := result.Errors[0] + assert.Equal(t, "kind", err.Field) + assert.Equal(t, "kind does not match expected value", err.Message) + assert.Equal(t, "WrongKind", err.CurrentValue) + assert.Equal(t, "Cluster", err.ExpectedValue) + assert.Contains(t, err.FixSuggestion, "Cluster") +} + +// TestValidateMetadata_MissingAPIVersionErrorMessage tests the error message for missing apiVersion. +func TestValidateMetadata_MissingAPIVersionErrorMessage(t *testing.T) { + t.Parallel() + + result := validator.NewValidationResult("test.yaml") + validator.ValidateMetadata("Cluster", "", "Cluster", "ksail.io/v1alpha1", result) + + assert.Len(t, result.Errors, 1) + err := result.Errors[0] + assert.Equal(t, "apiVersion", err.Field) + assert.Equal(t, "apiVersion is required", err.Message) + assert.Equal(t, "ksail.io/v1alpha1", err.ExpectedValue) + assert.Contains(t, err.FixSuggestion, "ksail.io/v1alpha1") +} + +// TestValidateMetadata_WrongAPIVersionErrorMessage tests the error message for wrong apiVersion. 
+func TestValidateMetadata_WrongAPIVersionErrorMessage(t *testing.T) { + t.Parallel() + + result := validator.NewValidationResult("test.yaml") + validator.ValidateMetadata("Cluster", "v1beta1", "Cluster", "ksail.io/v1alpha1", result) + + assert.Len(t, result.Errors, 1) + err := result.Errors[0] + assert.Equal(t, "apiVersion", err.Field) + assert.Equal(t, "apiVersion does not match expected value", err.Message) + assert.Equal(t, "v1beta1", err.CurrentValue) + assert.Equal(t, "ksail.io/v1alpha1", err.ExpectedValue) + assert.Contains(t, err.FixSuggestion, "ksail.io/v1alpha1") +} + +// TestValidateMetadata_PreservesExistingErrors tests that ValidateMetadata preserves +// existing errors in the result. +func TestValidateMetadata_PreservesExistingErrors(t *testing.T) { + t.Parallel() + + result := validator.NewValidationResult("test.yaml") + // Add an existing error + result.AddError(validator.ValidationError{ + Field: "existing", + Message: "existing error", + }) + + // Now validate metadata with errors + validator.ValidateMetadata("", "", "Cluster", "v1alpha1", result) + + // Should have 3 errors total: 1 existing + 2 from metadata validation + assert.Len(t, result.Errors, 3) + assert.Equal(t, "existing", result.Errors[0].Field) +} diff --git a/pkg/k8s/apiserver_test.go b/pkg/k8s/apiserver_test.go new file mode 100644 index 000000000..b2eca886c --- /dev/null +++ b/pkg/k8s/apiserver_test.go @@ -0,0 +1,255 @@ +package k8s_test + +import ( + "context" + "errors" + "sync/atomic" + "testing" + "time" + + "github.com/devantler-tech/ksail/v5/pkg/k8s" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" +) + +// errAPIServerUnavailable simulates an API server connection error. +var errAPIServerUnavailable = errors.New("connection refused") + +// controllableDiscoveryClient allows tests to control when API calls succeed or fail. +type controllableDiscoveryClient struct { + *fakediscovery.FakeDiscovery + + shouldSucceed atomic.Bool + callCount atomic.Int32 +} + +func newControllableClient() (*fake.Clientset, *controllableDiscoveryClient) { + clientset := fake.NewClientset() + + fakeDiscovery, ok := clientset.Discovery().(*fakediscovery.FakeDiscovery) + if !ok { + panic("expected Discovery() to return *fakediscovery.FakeDiscovery") + } + + controllable := &controllableDiscoveryClient{ + FakeDiscovery: fakeDiscovery, + } + + return clientset, controllable +} + +func (c *controllableDiscoveryClient) ServerVersion() (*version.Info, error) { + c.callCount.Add(1) + + if c.shouldSucceed.Load() { + return &version.Info{Major: "1", Minor: "28"}, nil + } + + return nil, errAPIServerUnavailable +} + +// stubClientset wraps a fake clientset but returns our controllable discovery client. 
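+// Embedding kubernetes.Interface keeps the full client surface available while
+// the Discovery override redirects ServerVersion probes to the controllable fake.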
+type stubClientset struct { + kubernetes.Interface + + discovery *controllableDiscoveryClient +} + +func (s *stubClientset) Discovery() discovery.DiscoveryInterface { + return s.discovery +} + +func TestWaitForAPIServerReady(t *testing.T) { + t.Parallel() + + type testCase struct { + name string + setupClient func() kubernetes.Interface + timeout time.Duration + wantErr bool + errContains string + } + + tests := []testCase{ + { + name: "returns nil when API server responds immediately", + setupClient: func() kubernetes.Interface { + clientset, controllable := newControllableClient() + controllable.shouldSucceed.Store(true) + + return &stubClientset{Interface: clientset, discovery: controllable} + }, + timeout: 200 * time.Millisecond, + wantErr: false, + }, + { + name: "returns error when timeout exceeded", + setupClient: func() kubernetes.Interface { + clientset, controllable := newControllableClient() + controllable.shouldSucceed.Store(false) // never succeeds + + return &stubClientset{Interface: clientset, discovery: controllable} + }, + timeout: 100 * time.Millisecond, + wantErr: true, + errContains: "failed to poll for readiness", + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + client := testCase.setupClient() + + ctx, cancel := context.WithTimeout( + context.Background(), + testCase.timeout+100*time.Millisecond, + ) + defer cancel() + + err := k8s.WaitForAPIServerReady(ctx, client, testCase.timeout) + + if testCase.wantErr { + require.Error(t, err) + require.Contains(t, err.Error(), testCase.errContains) + } else { + require.NoError(t, err) + } + }) + } +} + +//nolint:funlen // Table-driven test with multiple cases naturally exceeds limit +func TestWaitForAPIServerStable(t *testing.T) { + t.Parallel() + + type testCase struct { + name string + requiredSuccesses int + setupClient func() kubernetes.Interface + timeout time.Duration + wantErr bool + errContains string + } + + tests := []testCase{ + { + name: "returns nil after required consecutive successes", + requiredSuccesses: 2, + setupClient: func() kubernetes.Interface { + clientset, controllable := newControllableClient() + controllable.shouldSucceed.Store(true) // always succeeds + + return &stubClientset{Interface: clientset, discovery: controllable} + }, + timeout: 10 * time.Second, // needs to be long enough for 2 poll cycles at 2s intervals + wantErr: false, + }, + { + name: "defaults to 1 when requiredSuccesses is less than 1", + requiredSuccesses: 0, + setupClient: func() kubernetes.Interface { + clientset, controllable := newControllableClient() + controllable.shouldSucceed.Store(true) + + return &stubClientset{Interface: clientset, discovery: controllable} + }, + timeout: 5 * time.Second, + wantErr: false, + }, + { + name: "returns error when timeout exceeded", + requiredSuccesses: 100, // require many successes, impossible within timeout + setupClient: func() kubernetes.Interface { + clientset, controllable := newControllableClient() + controllable.shouldSucceed.Store(true) + + return &stubClientset{Interface: clientset, discovery: controllable} + }, + timeout: 100 * time.Millisecond, // short timeout + wantErr: true, + errContains: "failed to poll for readiness", + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + client := testCase.setupClient() + + ctx, cancel := context.WithTimeout( + context.Background(), + testCase.timeout+100*time.Millisecond, + ) + defer cancel() + + err := 
k8s.WaitForAPIServerStable( + ctx, + client, + testCase.timeout, + testCase.requiredSuccesses, + ) + + if testCase.wantErr { + require.Error(t, err) + require.Contains(t, err.Error(), testCase.errContains) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestCheckAPIServerConnectivity(t *testing.T) { + t.Parallel() + + type testCase struct { + name string + setupClient func() kubernetes.Interface + wantErr bool + errContains string + } + + tests := []testCase{ + { + name: "returns nil when API server responds", + setupClient: func() kubernetes.Interface { + return fake.NewClientset() + }, + wantErr: false, + }, + { + name: "returns error when API server is unavailable", + setupClient: func() kubernetes.Interface { + clientset, controllable := newControllableClient() + controllable.shouldSucceed.Store(false) + + return &stubClientset{Interface: clientset, discovery: controllable} + }, + wantErr: true, + errContains: "API server connectivity check failed", + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + client := testCase.setupClient() + err := k8s.CheckAPIServerConnectivity(client) + + if testCase.wantErr { + require.Error(t, err) + require.Contains(t, err.Error(), testCase.errContains) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/pkg/k8s/errors_test.go b/pkg/k8s/errors_test.go new file mode 100644 index 000000000..4a15c6729 --- /dev/null +++ b/pkg/k8s/errors_test.go @@ -0,0 +1,61 @@ +package k8s_test + +import ( + "testing" + + "github.com/devantler-tech/ksail/v5/pkg/k8s" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestErrorVariables(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + err error + expectedMsg string + }{ + { + name: "ErrKubeconfigPathEmpty is defined", + err: k8s.ErrKubeconfigPathEmpty, + expectedMsg: "kubeconfig path is empty", + }, + { + name: "ErrTimeoutExceeded is defined", + err: k8s.ErrTimeoutExceeded, + expectedMsg: "timeout exceeded", + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + require.Error(t, testCase.err) + assert.Equal(t, testCase.expectedMsg, testCase.err.Error()) + }) + } +} + +func TestErrorsAreDistinct(t *testing.T) { + t.Parallel() + + allErrors := []error{ + k8s.ErrKubeconfigPathEmpty, + k8s.ErrTimeoutExceeded, + } + + // Verify all errors are distinct from each other + for index := range allErrors { + for innerIndex := index + 1; innerIndex < len(allErrors); innerIndex++ { + assert.NotErrorIs( + t, + allErrors[index], allErrors[innerIndex], + "errors at index %d and %d should be distinct", + index, + innerIndex, + ) + } + } +} diff --git a/pkg/k8s/kubeconfig_test.go b/pkg/k8s/kubeconfig_test.go new file mode 100644 index 000000000..a002c4e91 --- /dev/null +++ b/pkg/k8s/kubeconfig_test.go @@ -0,0 +1,314 @@ +package k8s_test + +import ( + "bytes" + "io" + "os" + "path/filepath" + "testing" + + "github.com/devantler-tech/ksail/v5/pkg/k8s" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/client-go/tools/clientcmd" +) + +// kubeconfigWithTargetAndOther is a kubeconfig with both target and other entries for cleanup tests. 
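+// The "other-*" entries act as canaries: removing the target entries must leave
+// them, and the current-context that points at them, untouched.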
+const kubeconfigWithTargetAndOther = `apiVersion: v1 +kind: Config +clusters: +- cluster: + server: https://target.server:6443 + name: target-cluster +- cluster: + server: https://other.server:6443 + name: other-cluster +contexts: +- context: + cluster: target-cluster + user: target-user + name: target-context +- context: + cluster: other-cluster + user: other-user + name: other-context +current-context: other-context +users: +- name: target-user + user: + token: target-token +- name: other-user + user: + token: other-token +` + +// TestCleanupKubeconfig_NonExistentFile tests cleanup when kubeconfig doesn't exist. +func TestCleanupKubeconfig_NonExistentFile(t *testing.T) { + t.Parallel() + + err := k8s.CleanupKubeconfig( + "/nonexistent/path/kubeconfig", + "cluster", + "context", + "user", + io.Discard, + ) + + require.NoError(t, err, "should succeed silently when file doesn't exist") +} + +// TestCleanupKubeconfig_NoMatchingEntries tests cleanup when no entries match. +func TestCleanupKubeconfig_NoMatchingEntries(t *testing.T) { + t.Parallel() + + tmpDir := t.TempDir() + kubeconfigPath := filepath.Join(tmpDir, "kubeconfig") + + validKubeconfig := `apiVersion: v1 +kind: Config +clusters: +- cluster: + server: https://other.server:6443 + name: other-cluster +contexts: +- context: + cluster: other-cluster + user: other-user + name: other-context +current-context: other-context +users: +- name: other-user + user: + token: fake-token +` + + err := os.WriteFile(kubeconfigPath, []byte(validKubeconfig), 0o600) + require.NoError(t, err) + + // Try to cleanup non-existent entries + err = k8s.CleanupKubeconfig( + kubeconfigPath, + "nonexistent-cluster", + "nonexistent-context", + "nonexistent-user", + io.Discard, + ) + + require.NoError(t, err) + + // Verify original content is unchanged + //nolint:gosec // G304: Safe in test context with controlled paths + content, err := os.ReadFile(kubeconfigPath) + require.NoError(t, err) + assert.Contains(t, string(content), "other-cluster") + assert.Contains(t, string(content), "other-context") + assert.Contains(t, string(content), "other-user") +} + +// TestCleanupKubeconfig_RemovesMatchingEntries tests that matching entries are removed. 
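+// It also asserts that unrelated entries in the same file survive the cleanup.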
+func TestCleanupKubeconfig_RemovesMatchingEntries(t *testing.T) { + t.Parallel() + + tmpDir := t.TempDir() + kubeconfigPath := filepath.Join(tmpDir, "kubeconfig") + + err := os.WriteFile(kubeconfigPath, []byte(kubeconfigWithTargetAndOther), 0o600) + require.NoError(t, err) + + // Cleanup target entries + err = k8s.CleanupKubeconfig( + kubeconfigPath, + "target-cluster", + "target-context", + "target-user", + io.Discard, + ) + require.NoError(t, err) + + // Verify target entries are removed and other entries remain + config, err := clientcmd.LoadFromFile(kubeconfigPath) + require.NoError(t, err) + + _, hasTargetCluster := config.Clusters["target-cluster"] + _, hasTargetContext := config.Contexts["target-context"] + _, hasTargetUser := config.AuthInfos["target-user"] + + assert.False(t, hasTargetCluster, "target cluster should be removed") + assert.False(t, hasTargetContext, "target context should be removed") + assert.False(t, hasTargetUser, "target user should be removed") + + _, hasOtherCluster := config.Clusters["other-cluster"] + _, hasOtherContext := config.Contexts["other-context"] + _, hasOtherUser := config.AuthInfos["other-user"] + + assert.True(t, hasOtherCluster, "other cluster should remain") + assert.True(t, hasOtherContext, "other context should remain") + assert.True(t, hasOtherUser, "other user should remain") +} + +// TestCleanupKubeconfig_ClearsCurrentContext tests that current-context is cleared when matching. +func TestCleanupKubeconfig_ClearsCurrentContext(t *testing.T) { + t.Parallel() + + tmpDir := t.TempDir() + kubeconfigPath := filepath.Join(tmpDir, "kubeconfig") + + validKubeconfig := `apiVersion: v1 +kind: Config +clusters: +- cluster: + server: https://target.server:6443 + name: target-cluster +contexts: +- context: + cluster: target-cluster + user: target-user + name: target-context +current-context: target-context +users: +- name: target-user + user: + token: target-token +` + + err := os.WriteFile(kubeconfigPath, []byte(validKubeconfig), 0o600) + require.NoError(t, err) + + // Cleanup entries including current context + err = k8s.CleanupKubeconfig( + kubeconfigPath, + "target-cluster", + "target-context", + "target-user", + io.Discard, + ) + + require.NoError(t, err) + + // Verify current-context is cleared + config, err := clientcmd.LoadFromFile(kubeconfigPath) + require.NoError(t, err) + + assert.Empty(t, config.CurrentContext, "current-context should be cleared") +} + +// TestCleanupKubeconfig_WritesLogMessage tests that log message is written. +func TestCleanupKubeconfig_WritesLogMessage(t *testing.T) { + t.Parallel() + + tmpDir := t.TempDir() + kubeconfigPath := filepath.Join(tmpDir, "kubeconfig") + + validKubeconfig := `apiVersion: v1 +kind: Config +clusters: +- cluster: + server: https://target.server:6443 + name: target-cluster +contexts: +- context: + cluster: target-cluster + user: target-user + name: target-context +current-context: target-context +users: +- name: target-user + user: + token: target-token +` + + err := os.WriteFile(kubeconfigPath, []byte(validKubeconfig), 0o600) + require.NoError(t, err) + + // Capture log output + var logBuffer bytes.Buffer + + err = k8s.CleanupKubeconfig( + kubeconfigPath, + "target-cluster", + "target-context", + "target-user", + &logBuffer, + ) + + require.NoError(t, err) + assert.Contains(t, logBuffer.String(), "Cleaned up kubeconfig entries") + assert.Contains(t, logBuffer.String(), "target-cluster") +} + +// TestCleanupKubeconfig_InvalidYAML tests handling of invalid YAML content. 
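Taken together, the cleanup tests in this file pin a fairly complete contract: a missing file is a silent no-op, entries are deleted by name, a matching current-context is cleared, invalid YAML fails with a "failed to parse kubeconfig" wrap (tested just below), and a log line naming the cluster is written. A minimal implementation honoring that contract might look like the following; this is a sketch inferred from the assertions, not the repository's pkg/k8s source:

```go
package k8s

import (
	"fmt"
	"io"
	"os"

	"k8s.io/client-go/tools/clientcmd"
)

// CleanupKubeconfig removes the named cluster, context, and user entries
// from the kubeconfig at path, clearing current-context when it pointed
// at the removed context. A missing file counts as already clean.
func CleanupKubeconfig(path, cluster, kubeContext, user string, out io.Writer) error {
	config, err := clientcmd.LoadFromFile(path)
	if os.IsNotExist(err) {
		return nil
	}

	if err != nil {
		return fmt.Errorf("failed to parse kubeconfig: %w", err)
	}

	delete(config.Clusters, cluster)
	delete(config.Contexts, kubeContext)
	delete(config.AuthInfos, user)

	if config.CurrentContext == kubeContext {
		config.CurrentContext = ""
	}

	if err := clientcmd.WriteToFile(*config, path); err != nil {
		return fmt.Errorf("failed to write kubeconfig: %w", err)
	}

	fmt.Fprintf(out, "Cleaned up kubeconfig entries for cluster %q\n", cluster)

	return nil
}
```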
+func TestCleanupKubeconfig_InvalidYAML(t *testing.T) { + t.Parallel() + + tmpDir := t.TempDir() + kubeconfigPath := filepath.Join(tmpDir, "kubeconfig") + + invalidYAML := `this is not valid yaml {{{` + + err := os.WriteFile(kubeconfigPath, []byte(invalidYAML), 0o600) + require.NoError(t, err) + + err = k8s.CleanupKubeconfig( + kubeconfigPath, + "cluster", + "context", + "user", + io.Discard, + ) + + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to parse kubeconfig") +} + +// TestCleanupKubeconfig_PartialMatch tests cleanup when only some entries match. +func TestCleanupKubeconfig_PartialMatch(t *testing.T) { + t.Parallel() + + tmpDir := t.TempDir() + kubeconfigPath := filepath.Join(tmpDir, "kubeconfig") + + // Only has cluster and context, not the user + validKubeconfig := `apiVersion: v1 +kind: Config +clusters: +- cluster: + server: https://target.server:6443 + name: target-cluster +contexts: +- context: + cluster: target-cluster + user: different-user + name: target-context +current-context: target-context +users: +- name: different-user + user: + token: token +` + + err := os.WriteFile(kubeconfigPath, []byte(validKubeconfig), 0o600) + require.NoError(t, err) + + // Try to cleanup - user doesn't match, but cluster and context do + err = k8s.CleanupKubeconfig( + kubeconfigPath, + "target-cluster", + "target-context", + "nonexistent-user", + io.Discard, + ) + + require.NoError(t, err) + + // Verify matching entries are removed + config, err := clientcmd.LoadFromFile(kubeconfigPath) + require.NoError(t, err) + + _, hasTargetCluster := config.Clusters["target-cluster"] + _, hasTargetContext := config.Contexts["target-context"] + _, hasDifferentUser := config.AuthInfos["different-user"] + + assert.False(t, hasTargetCluster, "target cluster should be removed") + assert.False(t, hasTargetContext, "target context should be removed") + assert.True(t, hasDifferentUser, "different user should remain") +} diff --git a/pkg/k8s/multi_resource_test.go b/pkg/k8s/multi_resource_test.go new file mode 100644 index 000000000..e61230059 --- /dev/null +++ b/pkg/k8s/multi_resource_test.go @@ -0,0 +1,281 @@ +package k8s_test + +import ( + "context" + "testing" + "time" + + "github.com/devantler-tech/ksail/v5/pkg/k8s" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" +) + +// TestWaitForMultipleResources_EmptyChecks tests handling of empty checks slice. +func TestWaitForMultipleResources_EmptyChecks(t *testing.T) { + t.Parallel() + + client := fake.NewClientset() + ctx := context.Background() + + err := k8s.WaitForMultipleResources(ctx, client, []k8s.ReadinessCheck{}, 100*time.Millisecond) + + require.NoError(t, err, "should succeed with empty checks") +} + +// TestWaitForMultipleResources_SingleDeploymentReady tests single ready deployment. 
+func TestWaitForMultipleResources_SingleDeploymentReady(t *testing.T) { + t.Parallel() + + const ( + namespace = "test-system" + name = "test-deployment" + ) + + client := fake.NewClientset(&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, + Status: appsv1.DeploymentStatus{ + Replicas: 1, + UpdatedReplicas: 1, + AvailableReplicas: 1, + }, + }) + + ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) + defer cancel() + + checks := []k8s.ReadinessCheck{ + {Type: "deployment", Namespace: namespace, Name: name}, + } + + err := k8s.WaitForMultipleResources(ctx, client, checks, 500*time.Millisecond) + + require.NoError(t, err) +} + +// TestWaitForMultipleResources_SingleDaemonSetReady tests single ready daemonset. +func TestWaitForMultipleResources_SingleDaemonSetReady(t *testing.T) { + t.Parallel() + + const ( + namespace = "kube-system" + name = "test-daemon" + ) + + client := fake.NewClientset(&appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: 1, + NumberUnavailable: 0, + UpdatedNumberScheduled: 1, + }, + }) + + ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) + defer cancel() + + checks := []k8s.ReadinessCheck{ + {Type: "daemonset", Namespace: namespace, Name: name}, + } + + err := k8s.WaitForMultipleResources(ctx, client, checks, 500*time.Millisecond) + + require.NoError(t, err) +} + +// TestWaitForMultipleResources_MultipleResources tests multiple resources becoming ready. +func TestWaitForMultipleResources_MultipleResources(t *testing.T) { + t.Parallel() + + client := fake.NewClientset( + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deploy1", Namespace: "ns1"}, + Status: appsv1.DeploymentStatus{ + Replicas: 1, + UpdatedReplicas: 1, + AvailableReplicas: 1, + }, + }, + &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: "ds1", Namespace: "ns2"}, + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: 1, + NumberUnavailable: 0, + UpdatedNumberScheduled: 1, + }, + }, + ) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + checks := []k8s.ReadinessCheck{ + {Type: "deployment", Namespace: "ns1", Name: "deploy1"}, + {Type: "daemonset", Namespace: "ns2", Name: "ds1"}, + } + + err := k8s.WaitForMultipleResources(ctx, client, checks, 1*time.Second) + + require.NoError(t, err) +} + +// TestWaitForMultipleResources_UnknownResourceType tests handling of unknown resource types. +func TestWaitForMultipleResources_UnknownResourceType(t *testing.T) { + t.Parallel() + + client := fake.NewClientset() + ctx := context.Background() + + checks := []k8s.ReadinessCheck{ + {Type: "unknown", Namespace: "ns", Name: "resource"}, + } + + err := k8s.WaitForMultipleResources(ctx, client, checks, 100*time.Millisecond) + + require.Error(t, err) + assert.Contains(t, err.Error(), "unknown resource type") + assert.Contains(t, err.Error(), "unknown") +} + +// TestWaitForMultipleResources_ResourceNotReady tests timeout when resource is not ready. 
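The fake-clientset statuses used in these cases imply the readiness predicates the waiter evaluates per check type. The real pkg/k8s logic is not in this diff; a sketch consistent with the fixtures above and below:

```go
package k8s

import appsv1 "k8s.io/api/apps/v1"

// A deployment counts as ready once every replica is both updated and
// available; a daemonset once all desired pods are updated and none are
// unavailable. Unknown check types fail fast with an error instead.
func deploymentReady(deploy *appsv1.Deployment) bool {
	return deploy.Status.UpdatedReplicas == deploy.Status.Replicas &&
		deploy.Status.AvailableReplicas == deploy.Status.Replicas
}

func daemonSetReady(daemonSet *appsv1.DaemonSet) bool {
	return daemonSet.Status.NumberUnavailable == 0 &&
		daemonSet.Status.UpdatedNumberScheduled == daemonSet.Status.DesiredNumberScheduled
}
```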
+func TestWaitForMultipleResources_ResourceNotReady(t *testing.T) { + t.Parallel() + + const ( + namespace = "test-ns" + name = "not-ready-deploy" + ) + + // Deployment with mismatched replicas (not ready) + client := fake.NewClientset(&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, + Status: appsv1.DeploymentStatus{ + Replicas: 2, + UpdatedReplicas: 1, // Only 1 of 2 updated + AvailableReplicas: 0, + }, + }) + + ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) + defer cancel() + + checks := []k8s.ReadinessCheck{ + {Type: "deployment", Namespace: namespace, Name: name}, + } + + err := k8s.WaitForMultipleResources(ctx, client, checks, 200*time.Millisecond) + + require.Error(t, err) + assert.Contains(t, err.Error(), "not ready") +} + +// TestWaitForMultipleResources_FirstResourceFails tests failure on first resource. +func TestWaitForMultipleResources_FirstResourceFails(t *testing.T) { + t.Parallel() + + // Second resource is ready, but first will fail + client := fake.NewClientset(&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "second-deploy", Namespace: "ns"}, + Status: appsv1.DeploymentStatus{ + Replicas: 1, + UpdatedReplicas: 1, + AvailableReplicas: 1, + }, + }) + + ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) + defer cancel() + + checks := []k8s.ReadinessCheck{ + {Type: "deployment", Namespace: "ns", Name: "missing-deploy"}, // Doesn't exist + {Type: "deployment", Namespace: "ns", Name: "second-deploy"}, // Exists + } + + err := k8s.WaitForMultipleResources(ctx, client, checks, 200*time.Millisecond) + + require.Error(t, err) + assert.Contains(t, err.Error(), "missing-deploy") +} + +// TestReadinessCheck_Fields tests the ReadinessCheck struct fields. +func TestReadinessCheck_Fields(t *testing.T) { + t.Parallel() + + check := k8s.ReadinessCheck{ + Type: "deployment", + Namespace: "my-namespace", + Name: "my-deployment", + } + + assert.Equal(t, "deployment", check.Type) + assert.Equal(t, "my-namespace", check.Namespace) + assert.Equal(t, "my-deployment", check.Name) +} + +// TestWaitForMultipleResources_TimeoutExceeded tests the timeout exceeded error. +func TestWaitForMultipleResources_TimeoutExceeded(t *testing.T) { + t.Parallel() + + client := fake.NewClientset() + + // Use a zero-timeout context + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Cancel immediately + + checks := []k8s.ReadinessCheck{ + {Type: "deployment", Namespace: "ns", Name: "deploy"}, + } + + // With 0 timeout, the error message should be about timeout + err := k8s.WaitForMultipleResources(ctx, client, checks, 0) + + require.Error(t, err) + assert.ErrorIs(t, err, k8s.ErrTimeoutExceeded) +} + +// TestWaitForMultipleResources_MixedTypes tests mixed deployment and daemonset. 
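TestWaitForMultipleResources_TimeoutExceeded above only passes if the waiter wraps the sentinel with %w rather than copying its text, because assert.ErrorIs walks the unwrap chain. A hedged sketch of that idiom; the helper name is illustrative, not taken from the repository:

```go
package k8s

import (
	"context"
	"fmt"
)

// wrapTimeout (hypothetical helper) converts context expiry into the
// package sentinel while keeping errors.Is(err, ErrTimeoutExceeded) true.
func wrapTimeout(ctx context.Context, resource string) error {
	if ctxErr := ctx.Err(); ctxErr != nil {
		return fmt.Errorf("%w waiting for %s: %v", ErrTimeoutExceeded, resource, ctxErr)
	}

	return nil
}
```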
+func TestWaitForMultipleResources_MixedTypes(t *testing.T) { + t.Parallel() + + client := fake.NewClientset( + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "coredns", Namespace: "kube-system"}, + Status: appsv1.DeploymentStatus{ + Replicas: 2, + UpdatedReplicas: 2, + AvailableReplicas: 2, + }, + }, + &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: "cilium", Namespace: "kube-system"}, + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: 3, + NumberUnavailable: 0, + UpdatedNumberScheduled: 3, + }, + }, + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "traefik", Namespace: "traefik"}, + Status: appsv1.DeploymentStatus{ + Replicas: 1, + UpdatedReplicas: 1, + AvailableReplicas: 1, + }, + }, + ) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + checks := []k8s.ReadinessCheck{ + {Type: "deployment", Namespace: "kube-system", Name: "coredns"}, + {Type: "daemonset", Namespace: "kube-system", Name: "cilium"}, + {Type: "deployment", Namespace: "traefik", Name: "traefik"}, + } + + err := k8s.WaitForMultipleResources(ctx, client, checks, 1*time.Second) + + require.NoError(t, err) +} diff --git a/pkg/k8s/rest_config_test.go b/pkg/k8s/rest_config_test.go new file mode 100644 index 000000000..ddfc300f0 --- /dev/null +++ b/pkg/k8s/rest_config_test.go @@ -0,0 +1,193 @@ +package k8s_test + +import ( + "os" + "path/filepath" + "testing" + + "github.com/devantler-tech/ksail/v5/pkg/k8s" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const testKubeconfigYAML = `apiVersion: v1 +kind: Config +clusters: +- cluster: + server: https://127.0.0.1:6443 + name: test-cluster +contexts: +- context: + cluster: test-cluster + user: test-user + name: test-context +current-context: test-context +users: +- name: test-user + user: + token: test-token +` + +// TestBuildRESTConfig_EmptyKubeconfig tests that empty kubeconfig path returns ErrKubeconfigPathEmpty. +func TestBuildRESTConfig_EmptyKubeconfig(t *testing.T) { + t.Parallel() + + config, err := k8s.BuildRESTConfig("", "") + + require.Error(t, err) + assert.Nil(t, config) + assert.ErrorIs(t, err, k8s.ErrKubeconfigPathEmpty) +} + +// TestBuildRESTConfig_NonExistentPath tests handling of non-existent kubeconfig path. +func TestBuildRESTConfig_NonExistentPath(t *testing.T) { + t.Parallel() + + config, err := k8s.BuildRESTConfig("/nonexistent/path/to/kubeconfig", "") + + require.Error(t, err) + assert.Nil(t, config) + assert.Contains(t, err.Error(), "failed to load kubeconfig") +} + +// TestBuildRESTConfig_InvalidContent tests handling of invalid kubeconfig content. +func TestBuildRESTConfig_InvalidContent(t *testing.T) { + t.Parallel() + + // Create a temporary file with invalid kubeconfig content + tmpDir := t.TempDir() + kubeconfigPath := filepath.Join(tmpDir, "invalid-kubeconfig") + + err := os.WriteFile(kubeconfigPath, []byte("this is not valid yaml {{{"), 0o600) + require.NoError(t, err) + + config, err := k8s.BuildRESTConfig(kubeconfigPath, "") + + require.Error(t, err) + assert.Nil(t, config) + assert.Contains(t, err.Error(), "failed to load kubeconfig") +} + +// TestBuildRESTConfig_ValidKubeconfig tests successful parsing of valid kubeconfig. 
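The error-path tests above (empty path, missing file, invalid YAML) and the context-override cases that follow all fit the standard clientcmd explicit-path loading pattern. One plausible shape, offered as a sketch rather than the repository's actual code:

```go
package k8s

import (
	"fmt"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// BuildRESTConfig loads a rest.Config from an explicit kubeconfig path,
// optionally overriding the context to use.
func BuildRESTConfig(kubeconfigPath, contextName string) (*rest.Config, error) {
	if kubeconfigPath == "" {
		return nil, ErrKubeconfigPathEmpty
	}

	loader := &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigPath}
	overrides := &clientcmd.ConfigOverrides{CurrentContext: contextName}

	config, err := clientcmd.
		NewNonInteractiveDeferredLoadingClientConfig(loader, overrides).
		ClientConfig()
	if err != nil {
		return nil, fmt.Errorf("failed to load kubeconfig: %w", err)
	}

	return config, nil
}
```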
+func TestBuildRESTConfig_ValidKubeconfig(t *testing.T) { + t.Parallel() + + // Create a temporary file with valid kubeconfig content + tmpDir := t.TempDir() + kubeconfigPath := filepath.Join(tmpDir, "kubeconfig") + + err := os.WriteFile(kubeconfigPath, []byte(testKubeconfigYAML), 0o600) + require.NoError(t, err) + + config, err := k8s.BuildRESTConfig(kubeconfigPath, "") + + require.NoError(t, err) + require.NotNil(t, config) + assert.Equal(t, "https://127.0.0.1:6443", config.Host) +} + +// TestBuildRESTConfig_WithContext tests using a specific context from kubeconfig. +func TestBuildRESTConfig_WithContext(t *testing.T) { + t.Parallel() + + tmpDir := t.TempDir() + kubeconfigPath := filepath.Join(tmpDir, "kubeconfig") + + // Kubeconfig with multiple contexts + validKubeconfig := `apiVersion: v1 +kind: Config +clusters: +- cluster: + server: https://default.server:6443 + name: default-cluster +- cluster: + server: https://custom.server:6443 + name: custom-cluster +contexts: +- context: + cluster: default-cluster + user: default-user + name: default-context +- context: + cluster: custom-cluster + user: custom-user + name: custom-context +current-context: default-context +users: +- name: default-user + user: + token: default-token +- name: custom-user + user: + token: custom-token +` + + err := os.WriteFile(kubeconfigPath, []byte(validKubeconfig), 0o600) + require.NoError(t, err) + + // Test with explicit context override + config, err := k8s.BuildRESTConfig(kubeconfigPath, "custom-context") + + require.NoError(t, err) + require.NotNil(t, config) + assert.Equal(t, "https://custom.server:6443", config.Host) +} + +// TestBuildRESTConfig_NonExistentContext tests handling of non-existent context. +func TestBuildRESTConfig_NonExistentContext(t *testing.T) { + t.Parallel() + + tmpDir := t.TempDir() + kubeconfigPath := filepath.Join(tmpDir, "kubeconfig") + + err := os.WriteFile(kubeconfigPath, []byte(testKubeconfigYAML), 0o600) + require.NoError(t, err) + + config, err := k8s.BuildRESTConfig(kubeconfigPath, "nonexistent-context") + + require.Error(t, err) + assert.Nil(t, config) + assert.Contains(t, err.Error(), "failed to load kubeconfig") +} + +// TestNewClientset_EmptyKubeconfig tests that empty kubeconfig path returns error. +func TestNewClientset_EmptyKubeconfig(t *testing.T) { + t.Parallel() + + clientset, err := k8s.NewClientset("", "") + + require.Error(t, err) + assert.Nil(t, clientset) + assert.Contains(t, err.Error(), "failed to build rest config") + assert.ErrorIs(t, err, k8s.ErrKubeconfigPathEmpty) +} + +// TestNewClientset_ValidKubeconfig tests successful creation of clientset. +func TestNewClientset_ValidKubeconfig(t *testing.T) { + t.Parallel() + + tmpDir := t.TempDir() + kubeconfigPath := filepath.Join(tmpDir, "kubeconfig") + + err := os.WriteFile(kubeconfigPath, []byte(testKubeconfigYAML), 0o600) + require.NoError(t, err) + + clientset, err := k8s.NewClientset(kubeconfigPath, "") + + require.NoError(t, err) + require.NotNil(t, clientset) +} + +// TestErrKubeconfigPathEmpty_ErrorMessage tests the error message content. +func TestErrKubeconfigPathEmpty_ErrorMessage(t *testing.T) { + t.Parallel() + + assert.Contains(t, k8s.ErrKubeconfigPathEmpty.Error(), "kubeconfig path is empty") +} + +// TestErrTimeoutExceeded_ErrorMessage tests the timeout error message content. 
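The NewClientset tests above assert both the "failed to build rest config" prefix and that ErrKubeconfigPathEmpty still matches through it, which again requires %w wrapping. A sketch of the implied composition; the second error message is a placeholder, since no test pins it:

```go
package k8s

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
)

// NewClientset builds a typed clientset on top of BuildRESTConfig,
// wrapping with %w so sentinel errors stay matchable via errors.Is.
func NewClientset(kubeconfigPath, contextName string) (kubernetes.Interface, error) {
	config, err := BuildRESTConfig(kubeconfigPath, contextName)
	if err != nil {
		return nil, fmt.Errorf("failed to build rest config: %w", err)
	}

	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, fmt.Errorf("failed to create clientset: %w", err) // message assumed
	}

	return clientset, nil
}
```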
+func TestErrTimeoutExceeded_ErrorMessage(t *testing.T) { + t.Parallel() + + assert.Contains(t, k8s.ErrTimeoutExceeded.Error(), "timeout exceeded") +} diff --git a/pkg/svc/installer/cni/calico/__snapshots__/installer_test.snap b/pkg/svc/installer/cni/calico/__snapshots__/installer_test.snap new file mode 100755 index 000000000..0989e359c --- /dev/null +++ b/pkg/svc/installer/cni/calico/__snapshots__/installer_test.snap @@ -0,0 +1,12 @@ + +[TestCalicoInstaller_Uninstall_Error - 1] +failed to uninstall calico release: assert.AnError general error for testing +--- + +[TestCalicoInstaller_Install_RepoError - 1] +failed to install Calico: install or upgrade calico: failed to add calico repository: assert.AnError general error for testing +--- + +[TestCalicoInstaller_Install_ChartError - 1] +failed to install Calico: install or upgrade calico: failed to install calico chart: assert.AnError general error for testing +--- diff --git a/pkg/svc/installer/cni/calico/installer_test.go b/pkg/svc/installer/cni/calico/installer_test.go index 269dd500c..ed2391845 100644 --- a/pkg/svc/installer/cni/calico/installer_test.go +++ b/pkg/svc/installer/cni/calico/installer_test.go @@ -1,13 +1,33 @@ package calicoinstaller_test import ( + "context" + "os" "testing" "time" + v1alpha1 "github.com/devantler-tech/ksail/v5/pkg/apis/cluster/v1alpha1" + "github.com/devantler-tech/ksail/v5/pkg/client/helm" calicoinstaller "github.com/devantler-tech/ksail/v5/pkg/svc/installer/cni/calico" + "github.com/gkampitakis/go-snaps/snaps" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) +func TestMain(m *testing.M) { + exitCode := m.Run() + + _, err := snaps.Clean(m, snaps.CleanOpts{Sort: true}) + if err != nil { + _, _ = os.Stderr.WriteString("failed to clean snapshots: " + err.Error() + "\n") + + os.Exit(1) + } + + os.Exit(exitCode) +} + func TestNewCalicoInstaller(t *testing.T) { t.Parallel() @@ -21,6 +41,49 @@ func TestNewCalicoInstaller(t *testing.T) { require.NotNil(t, installer, "expected installer to be created") } +func TestNewCalicoInstallerWithDistribution(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + distribution v1alpha1.Distribution + }{ + { + name: "vanilla distribution", + distribution: v1alpha1.DistributionVanilla, + }, + { + name: "k3s distribution", + distribution: v1alpha1.DistributionK3s, + }, + { + name: "talos distribution", + distribution: v1alpha1.DistributionTalos, + }, + { + name: "empty distribution", + distribution: "", + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + client := helm.NewMockInterface(t) + installer := calicoinstaller.NewCalicoInstallerWithDistribution( + client, + "/path/to/kubeconfig", + "test-context", + 5*time.Minute, + testCase.distribution, + ) + + require.NotNil(t, installer, "expected installer to be created") + }) + } +} + func TestNewCalicoInstaller_WithDifferentTimeout(t *testing.T) { t.Parallel() @@ -70,3 +133,172 @@ func TestNewCalicoInstaller_WithEmptyParams(t *testing.T) { require.NotNil(t, installer, "expected installer to be created even with empty params") } + +func TestCalicoInstaller_Install_VanillaDistribution(t *testing.T) { + t.Parallel() + + installer, client := newCalicoInstallerWithDistribution(t, v1alpha1.DistributionVanilla) + expectCalicoInstall(t, client, nil) + + err := installer.Install(context.Background()) + + require.NoError(t, err) +} + +func TestCalicoInstaller_Install_K3sDistribution(t 
*testing.T) { + t.Parallel() + + installer, client := newCalicoInstallerWithDistribution(t, v1alpha1.DistributionK3s) + expectCalicoInstall(t, client, nil) + + err := installer.Install(context.Background()) + + require.NoError(t, err) +} + +func TestCalicoInstaller_Install_RepoError(t *testing.T) { + t.Parallel() + + installer, client := newCalicoInstallerWithDistribution(t, v1alpha1.DistributionVanilla) + client.EXPECT(). + AddRepository(mock.Anything, mock.Anything, mock.Anything). + Return(assert.AnError) + + err := installer.Install(context.Background()) + + require.Error(t, err) + snaps.MatchSnapshot(t, err.Error()) +} + +func TestCalicoInstaller_Install_ChartError(t *testing.T) { + t.Parallel() + + installer, client := newCalicoInstallerWithDistribution(t, v1alpha1.DistributionVanilla) + expectCalicoInstall(t, client, assert.AnError) + + err := installer.Install(context.Background()) + + require.Error(t, err) + snaps.MatchSnapshot(t, err.Error()) +} + +func TestCalicoInstaller_Install_NilClient(t *testing.T) { + t.Parallel() + + installer := calicoinstaller.NewCalicoInstallerWithDistribution( + nil, // nil client + "/path/to/kubeconfig", + "test-context", + 5*time.Minute, + v1alpha1.DistributionVanilla, + ) + + err := installer.Install(context.Background()) + + require.Error(t, err) + assert.Contains(t, err.Error(), "helm client is nil") +} + +func TestCalicoInstaller_Uninstall_Success(t *testing.T) { + t.Parallel() + + installer, client := newCalicoInstallerWithDistribution(t, v1alpha1.DistributionVanilla) + client.EXPECT(). + UninstallRelease(mock.Anything, "calico", "tigera-operator"). + Return(nil) + + err := installer.Uninstall(context.Background()) + + require.NoError(t, err) +} + +func TestCalicoInstaller_Uninstall_Error(t *testing.T) { + t.Parallel() + + installer, client := newCalicoInstallerWithDistribution(t, v1alpha1.DistributionVanilla) + client.EXPECT(). + UninstallRelease(mock.Anything, "calico", "tigera-operator"). + Return(assert.AnError) + + err := installer.Uninstall(context.Background()) + + require.Error(t, err) + snaps.MatchSnapshot(t, err.Error()) +} + +func TestCalicoInstaller_Uninstall_NilClient(t *testing.T) { + t.Parallel() + + installer := calicoinstaller.NewCalicoInstallerWithDistribution( + nil, // nil client + "/path/to/kubeconfig", + "test-context", + 5*time.Minute, + v1alpha1.DistributionVanilla, + ) + + err := installer.Uninstall(context.Background()) + + require.Error(t, err) + assert.Contains(t, err.Error(), "helm client is nil") +} + +// --- test helpers --- + +func newCalicoInstallerWithDistribution( + t *testing.T, + distribution v1alpha1.Distribution, +) (*calicoinstaller.CalicoInstaller, *helm.MockInterface) { + t.Helper() + + client := helm.NewMockInterface(t) + installer := calicoinstaller.NewCalicoInstallerWithDistribution( + client, + "/path/to/kubeconfig", + "test-context", + 2*time.Minute, + distribution, + ) + + return installer, client +} + +func expectCalicoInstall(t *testing.T, client *helm.MockInterface, installErr error) { + t.Helper() + + client.EXPECT(). + AddRepository( + mock.Anything, + mock.MatchedBy(func(entry *helm.RepositoryEntry) bool { + return entry != nil && entry.Name == "projectcalico" && + entry.URL == "https://docs.tigera.io/calico/charts" + }), + mock.Anything, + ). + Return(nil) + + client.EXPECT(). 
+ InstallOrUpgradeChart( + mock.Anything, + mock.MatchedBy(func(spec *helm.ChartSpec) bool { + if spec == nil { + return false + } + + assert.Equal(t, "calico", spec.ReleaseName) + assert.Equal(t, "projectcalico/tigera-operator", spec.ChartName) + assert.Equal(t, "tigera-operator", spec.Namespace) + assert.Equal(t, "https://docs.tigera.io/calico/charts", spec.RepoURL) + assert.True(t, spec.CreateNamespace) + assert.True(t, spec.Atomic) + assert.True(t, spec.Silent) + assert.True(t, spec.UpgradeCRDs) + assert.False(t, spec.Wait, "SkipWait should be true") + assert.False(t, spec.WaitForJobs, "SkipWait should be true") + assert.Equal(t, 2*time.Minute, spec.Timeout) + + return true + }), + ). + Return(nil, installErr) +} diff --git a/pkg/svc/installer/cni/cilium/__snapshots__/installer_test.snap b/pkg/svc/installer/cni/cilium/__snapshots__/installer_test.snap new file mode 100755 index 000000000..fbb26cca0 --- /dev/null +++ b/pkg/svc/installer/cni/cilium/__snapshots__/installer_test.snap @@ -0,0 +1,12 @@ + +[TestCiliumInstaller_Uninstall_Error - 1] +failed to uninstall cilium release: assert.AnError general error for testing +--- + +[TestCiliumInstaller_Install_RepoError - 1] +failed to install Cilium: install or upgrade cilium: failed to add cilium repository: assert.AnError general error for testing +--- + +[TestCiliumInstaller_Install_ChartError - 1] +failed to install Cilium: install or upgrade cilium: failed to install cilium chart: assert.AnError general error for testing +--- diff --git a/pkg/svc/installer/cni/cilium/installer_test.go b/pkg/svc/installer/cni/cilium/installer_test.go index 7c3688743..89e9b54c8 100644 --- a/pkg/svc/installer/cni/cilium/installer_test.go +++ b/pkg/svc/installer/cni/cilium/installer_test.go @@ -1,13 +1,33 @@ package ciliuminstaller_test import ( + "context" + "os" "testing" "time" + v1alpha1 "github.com/devantler-tech/ksail/v5/pkg/apis/cluster/v1alpha1" + "github.com/devantler-tech/ksail/v5/pkg/client/helm" ciliuminstaller "github.com/devantler-tech/ksail/v5/pkg/svc/installer/cni/cilium" + "github.com/gkampitakis/go-snaps/snaps" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) +func TestMain(m *testing.M) { + exitCode := m.Run() + + _, err := snaps.Clean(m, snaps.CleanOpts{Sort: true}) + if err != nil { + _, _ = os.Stderr.WriteString("failed to clean snapshots: " + err.Error() + "\n") + + os.Exit(1) + } + + os.Exit(exitCode) +} + func TestNewCiliumInstaller(t *testing.T) { t.Parallel() @@ -21,6 +41,49 @@ func TestNewCiliumInstaller(t *testing.T) { require.NotNil(t, installer, "expected installer to be created") } +func TestNewCiliumInstallerWithDistribution(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + distribution v1alpha1.Distribution + }{ + { + name: "vanilla distribution", + distribution: v1alpha1.DistributionVanilla, + }, + { + name: "k3s distribution", + distribution: v1alpha1.DistributionK3s, + }, + { + name: "talos distribution", + distribution: v1alpha1.DistributionTalos, + }, + { + name: "empty distribution", + distribution: "", + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + client := helm.NewMockInterface(t) + installer := ciliuminstaller.NewCiliumInstallerWithDistribution( + client, + "/path/to/kubeconfig", + "test-context", + 5*time.Minute, + testCase.distribution, + ) + + require.NotNil(t, installer, "expected installer to be created") + }) + } +} + func 
TestNewCiliumInstaller_WithDifferentTimeout(t *testing.T) { t.Parallel() @@ -70,3 +133,172 @@ func TestNewCiliumInstaller_WithEmptyParams(t *testing.T) { require.NotNil(t, installer, "expected installer to be created even with empty params") } + +func TestCiliumInstaller_Install_VanillaDistribution(t *testing.T) { + t.Parallel() + + installer, client := newCiliumInstallerWithDistribution(t, v1alpha1.DistributionVanilla) + expectCiliumInstall(t, client, nil) + + err := installer.Install(context.Background()) + + require.NoError(t, err) +} + +func TestCiliumInstaller_Install_K3sDistribution(t *testing.T) { + t.Parallel() + + installer, client := newCiliumInstallerWithDistribution(t, v1alpha1.DistributionK3s) + expectCiliumInstall(t, client, nil) + + err := installer.Install(context.Background()) + + require.NoError(t, err) +} + +func TestCiliumInstaller_Install_RepoError(t *testing.T) { + t.Parallel() + + installer, client := newCiliumInstallerWithDistribution(t, v1alpha1.DistributionVanilla) + client.EXPECT(). + AddRepository(mock.Anything, mock.Anything, mock.Anything). + Return(assert.AnError) + + err := installer.Install(context.Background()) + + require.Error(t, err) + snaps.MatchSnapshot(t, err.Error()) +} + +func TestCiliumInstaller_Install_ChartError(t *testing.T) { + t.Parallel() + + installer, client := newCiliumInstallerWithDistribution(t, v1alpha1.DistributionVanilla) + expectCiliumInstall(t, client, assert.AnError) + + err := installer.Install(context.Background()) + + require.Error(t, err) + snaps.MatchSnapshot(t, err.Error()) +} + +func TestCiliumInstaller_Install_NilClient(t *testing.T) { + t.Parallel() + + installer := ciliuminstaller.NewCiliumInstallerWithDistribution( + nil, // nil client + "/path/to/kubeconfig", + "test-context", + 5*time.Minute, + v1alpha1.DistributionVanilla, + ) + + err := installer.Install(context.Background()) + + require.Error(t, err) + assert.Contains(t, err.Error(), "helm client is nil") +} + +func TestCiliumInstaller_Uninstall_Success(t *testing.T) { + t.Parallel() + + installer, client := newCiliumInstallerWithDistribution(t, v1alpha1.DistributionVanilla) + client.EXPECT(). + UninstallRelease(mock.Anything, "cilium", "kube-system"). + Return(nil) + + err := installer.Uninstall(context.Background()) + + require.NoError(t, err) +} + +func TestCiliumInstaller_Uninstall_Error(t *testing.T) { + t.Parallel() + + installer, client := newCiliumInstallerWithDistribution(t, v1alpha1.DistributionVanilla) + client.EXPECT(). + UninstallRelease(mock.Anything, "cilium", "kube-system"). 
+ Return(assert.AnError) + + err := installer.Uninstall(context.Background()) + + require.Error(t, err) + snaps.MatchSnapshot(t, err.Error()) +} + +func TestCiliumInstaller_Uninstall_NilClient(t *testing.T) { + t.Parallel() + + installer := ciliuminstaller.NewCiliumInstallerWithDistribution( + nil, // nil client + "/path/to/kubeconfig", + "test-context", + 5*time.Minute, + v1alpha1.DistributionVanilla, + ) + + err := installer.Uninstall(context.Background()) + + require.Error(t, err) + assert.Contains(t, err.Error(), "helm client is nil") +} + +// --- test helpers --- + +func newCiliumInstallerWithDistribution( + t *testing.T, + distribution v1alpha1.Distribution, +) (*ciliuminstaller.CiliumInstaller, *helm.MockInterface) { + t.Helper() + + client := helm.NewMockInterface(t) + installer := ciliuminstaller.NewCiliumInstallerWithDistribution( + client, + "/path/to/kubeconfig", + "test-context", + 2*time.Minute, + distribution, + ) + + return installer, client +} + +func expectCiliumInstall(t *testing.T, client *helm.MockInterface, installErr error) { + t.Helper() + + client.EXPECT(). + AddRepository( + mock.Anything, + mock.MatchedBy(func(entry *helm.RepositoryEntry) bool { + return entry != nil && entry.Name == "cilium" && + entry.URL == "https://helm.cilium.io" + }), + mock.Anything, + ). + Return(nil) + + client.EXPECT(). + InstallOrUpgradeChart( + mock.Anything, + mock.MatchedBy(func(spec *helm.ChartSpec) bool { + if spec == nil { + return false + } + + assert.Equal(t, "cilium", spec.ReleaseName) + assert.Equal(t, "cilium/cilium", spec.ChartName) + assert.Equal(t, "kube-system", spec.Namespace) + assert.Equal(t, "https://helm.cilium.io", spec.RepoURL) + assert.False(t, spec.CreateNamespace) + assert.True(t, spec.Atomic) + assert.True(t, spec.Silent) + assert.True(t, spec.UpgradeCRDs) + assert.True(t, spec.Wait) + assert.True(t, spec.WaitForJobs) + assert.Equal(t, 2*time.Minute, spec.Timeout) + + return true + }), + ). + Return(nil, installErr) +} diff --git a/pkg/svc/installer/flux/export_test.go b/pkg/svc/installer/flux/export_test.go new file mode 100644 index 000000000..22fa936d3 --- /dev/null +++ b/pkg/svc/installer/flux/export_test.go @@ -0,0 +1,62 @@ +package fluxinstaller + +import ( + "context" + "time" + + "github.com/devantler-tech/ksail/v5/pkg/apis/cluster/v1alpha1" + corev1 "k8s.io/api/core/v1" +) + +// Exported functions for testing purposes. +// These wrappers allow the _test package to access internal functions. + +// BuildDockerConfigJSON exports buildDockerConfigJSON for testing. +func BuildDockerConfigJSON(registry, username, password string) ([]byte, error) { + return buildDockerConfigJSON(registry, username, password) +} + +// BuildExternalRegistryURL exports buildExternalRegistryURL for testing. +func BuildExternalRegistryURL(localRegistry v1alpha1.LocalRegistry) (string, string, string) { + return buildExternalRegistryURL(localRegistry) +} + +// BuildLocalRegistryURL exports buildLocalRegistryURL for testing. +func BuildLocalRegistryURL( + localRegistry v1alpha1.LocalRegistry, + clusterCfg *v1alpha1.Cluster, + clusterName string, +) string { + return buildLocalRegistryURL(localRegistry, clusterCfg, clusterName) +} + +// BuildFluxInstance exports buildFluxInstance for testing. +func BuildFluxInstance(clusterCfg *v1alpha1.Cluster, clusterName string) (*FluxInstance, error) { + return buildFluxInstance(clusterCfg, clusterName) +} + +// BuildRegistrySecret exports buildRegistrySecret for testing. 
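The export_test.go added here compiles into package fluxinstaller itself rather than the external fluxinstaller_test package, the standard Go trick for exposing unexported helpers to black-box tests without widening the production API. For orientation, the .dockerconfigjson payload that buildDockerConfigJSON is asserted to produce (see resources_test.go below) has the well-known docker-config shape; a sketch of what the helper presumably marshals, not the repository's code:

```go
package fluxinstaller

import (
	"encoding/base64"
	"encoding/json"
)

// buildDockerConfigJSON (sketched) renders
// {"auths":{"<registry>":{"username":...,"password":...,"auth":base64(user:pass)}}}.
func buildDockerConfigJSON(registry, username, password string) ([]byte, error) {
	auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))

	return json.Marshal(map[string]any{
		"auths": map[string]any{
			registry: map[string]string{
				"username": username,
				"password": password,
				"auth":     auth,
			},
		},
	})
}
```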
+func BuildRegistrySecret(clusterCfg *v1alpha1.Cluster) (*corev1.Secret, error) { + return buildRegistrySecret(clusterCfg) +} + +// IsTransientAPIError exports isTransientAPIError for testing. +func IsTransientAPIError(err error) bool { + return isTransientAPIError(err) +} + +// NormalizeFluxPath exports normalizeFluxPath for testing. +func NormalizeFluxPath() string { + return normalizeFluxPath() +} + +// PollUntilReady exports pollUntilReady for testing. +func PollUntilReady( + ctx context.Context, + timeout time.Duration, + interval time.Duration, + resourceDesc string, + checkFn func() (bool, error), +) error { + return pollUntilReady(ctx, timeout, interval, resourceDesc, checkFn) +} diff --git a/pkg/svc/installer/flux/fluxinstance_types_test.go b/pkg/svc/installer/flux/fluxinstance_types_test.go new file mode 100644 index 000000000..0d3b769ae --- /dev/null +++ b/pkg/svc/installer/flux/fluxinstance_types_test.go @@ -0,0 +1,267 @@ +package fluxinstaller_test + +import ( + "testing" + "time" + + fluxinstaller "github.com/devantler-tech/ksail/v5/pkg/svc/installer/flux" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Test strings. +const ( + modifiedValue = "modified" + testSyncName = "test-sync" +) + +func TestFluxInstance_DeepCopy(t *testing.T) { + t.Parallel() + + original := &fluxinstaller.FluxInstance{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-instance", + Namespace: "test-namespace", + }, + Spec: fluxinstaller.FluxInstanceSpec{ + Distribution: fluxinstaller.Distribution{ + Version: "2.x", + Registry: "ghcr.io/fluxcd", + Artifact: "oci://example.com/flux", + }, + Sync: &fluxinstaller.Sync{ + Name: "test-sync", + Kind: "OCIRepository", + URL: "oci://example.com/repo", + Ref: "dev", + Path: "./", + Provider: "generic", + Interval: &metav1.Duration{Duration: time.Minute}, + }, + }, + Status: fluxinstaller.FluxInstanceStatus{ + Conditions: []metav1.Condition{ + { + Type: "Ready", + Status: metav1.ConditionTrue, + }, + }, + }, + } + + copied := original.DeepCopy() + + require.NotNil(t, copied) + assert.Equal(t, original.Name, copied.Name) + assert.Equal(t, original.Namespace, copied.Namespace) + assert.Equal(t, original.Spec.Distribution.Version, copied.Spec.Distribution.Version) + assert.Equal(t, original.Spec.Sync.URL, copied.Spec.Sync.URL) + assert.Len(t, copied.Status.Conditions, 1) + + // Verify deep copy - modifications to copy don't affect original + copied.Name = modifiedValue + copied.Spec.Distribution.Version = modifiedValue + assert.NotEqual(t, original.Name, copied.Name) + assert.NotEqual(t, original.Spec.Distribution.Version, copied.Spec.Distribution.Version) +} + +func TestFluxInstance_DeepCopy_Nil(t *testing.T) { + t.Parallel() + + var original *fluxinstaller.FluxInstance + + copied := original.DeepCopy() + + assert.Nil(t, copied) +} + +func TestFluxInstance_DeepCopyObject(t *testing.T) { + t.Parallel() + + original := &fluxinstaller.FluxInstance{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + } + + obj := original.DeepCopyObject() + + require.NotNil(t, obj) + copied, ok := obj.(*fluxinstaller.FluxInstance) + require.True(t, ok) + assert.Equal(t, original.Name, copied.Name) +} + +func TestFluxInstanceList_DeepCopy(t *testing.T) { + t.Parallel() + + original := &fluxinstaller.FluxInstanceList{ + Items: []fluxinstaller.FluxInstance{ + { + ObjectMeta: metav1.ObjectMeta{Name: "item1"}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "item2"}, + }, + }, + } + + copied := 
original.DeepCopy() + + require.NotNil(t, copied) + assert.Len(t, copied.Items, 2) + assert.Equal(t, "item1", copied.Items[0].Name) + assert.Equal(t, "item2", copied.Items[1].Name) + + // Verify deep copy + copied.Items[0].Name = modifiedValue + assert.NotEqual(t, original.Items[0].Name, copied.Items[0].Name) +} + +func TestFluxInstanceList_DeepCopy_Nil(t *testing.T) { + t.Parallel() + + var original *fluxinstaller.FluxInstanceList + + copied := original.DeepCopy() + + assert.Nil(t, copied) +} + +func TestFluxInstanceList_DeepCopyObject(t *testing.T) { + t.Parallel() + + original := &fluxinstaller.FluxInstanceList{ + Items: []fluxinstaller.FluxInstance{ + {ObjectMeta: metav1.ObjectMeta{Name: "test"}}, + }, + } + + obj := original.DeepCopyObject() + + require.NotNil(t, obj) + copied, ok := obj.(*fluxinstaller.FluxInstanceList) + require.True(t, ok) + assert.Len(t, copied.Items, 1) +} + +func TestFluxInstanceSpec_DeepCopyInto(t *testing.T) { + t.Parallel() + + original := fluxinstaller.FluxInstanceSpec{ + Distribution: fluxinstaller.Distribution{ + Version: "2.x", + }, + Sync: &fluxinstaller.Sync{ + Name: "test", + Interval: &metav1.Duration{Duration: 5 * time.Minute}, + }, + } + + var copied fluxinstaller.FluxInstanceSpec + original.DeepCopyInto(&copied) + + assert.Equal(t, original.Distribution.Version, copied.Distribution.Version) + require.NotNil(t, copied.Sync) + assert.Equal(t, original.Sync.Name, copied.Sync.Name) + assert.NotNil(t, copied.Sync.Interval) +} + +func TestFluxInstanceSpec_DeepCopyInto_NilSync(t *testing.T) { + t.Parallel() + + original := fluxinstaller.FluxInstanceSpec{ + Distribution: fluxinstaller.Distribution{ + Version: "2.x", + }, + Sync: nil, + } + + var copied fluxinstaller.FluxInstanceSpec + original.DeepCopyInto(&copied) + + assert.Nil(t, copied.Sync) +} + +func TestSync_DeepCopyInto(t *testing.T) { + t.Parallel() + + original := fluxinstaller.Sync{ + Name: "test", + Kind: "OCIRepository", + URL: "oci://example.com", + Ref: "main", + Path: "./", + PullSecret: "secret", + Provider: "generic", + Interval: &metav1.Duration{Duration: time.Minute}, + } + + var copied fluxinstaller.Sync + original.DeepCopyInto(&copied) + + assert.Equal(t, original.Name, copied.Name) + assert.Equal(t, original.URL, copied.URL) + require.NotNil(t, copied.Interval) + assert.Equal(t, original.Interval.Duration, copied.Interval.Duration) + + // Verify deep copy of interval + copied.Interval.Duration = 2 * time.Minute + assert.NotEqual(t, original.Interval.Duration, copied.Interval.Duration) +} + +func TestSync_DeepCopyInto_NilInterval(t *testing.T) { + t.Parallel() + + original := fluxinstaller.Sync{ + Name: "test", + Interval: nil, + } + + var copied fluxinstaller.Sync + original.DeepCopyInto(&copied) + + assert.Nil(t, copied.Interval) +} + +func TestFluxInstanceStatus_DeepCopy(t *testing.T) { + t.Parallel() + + original := &fluxinstaller.FluxInstanceStatus{ + Conditions: []metav1.Condition{ + {Type: "Ready", Status: metav1.ConditionTrue}, + {Type: "Healthy", Status: metav1.ConditionFalse}, + }, + } + + copied := original.DeepCopy() + + require.NotNil(t, copied) + assert.Len(t, copied.Conditions, 2) + assert.Equal(t, "Ready", copied.Conditions[0].Type) + assert.Equal(t, "Healthy", copied.Conditions[1].Type) +} + +func TestFluxInstanceStatus_DeepCopy_Nil(t *testing.T) { + t.Parallel() + + var original *fluxinstaller.FluxInstanceStatus + + copied := original.DeepCopy() + + assert.Nil(t, copied) +} + +func TestFluxInstanceStatus_DeepCopy_NilConditions(t *testing.T) { + t.Parallel() + + 
original := &fluxinstaller.FluxInstanceStatus{ + Conditions: nil, + } + + copied := original.DeepCopy() + + require.NotNil(t, copied) + assert.Nil(t, copied.Conditions) +} diff --git a/pkg/svc/installer/flux/resources.go b/pkg/svc/installer/flux/resources.go index be8202a59..e64b2f233 100644 --- a/pkg/svc/installer/flux/resources.go +++ b/pkg/svc/installer/flux/resources.go @@ -451,7 +451,6 @@ func buildLocalRegistryURL( ) } -//nolint:unparam // error return kept for consistency with resource building patterns func buildFluxInstance(clusterCfg *v1alpha1.Cluster, clusterName string) (*FluxInstance, error) { localRegistry := clusterCfg.Spec.Cluster.LocalRegistry diff --git a/pkg/svc/installer/flux/resources_test.go b/pkg/svc/installer/flux/resources_test.go new file mode 100644 index 000000000..802df5df3 --- /dev/null +++ b/pkg/svc/installer/flux/resources_test.go @@ -0,0 +1,521 @@ +// Package fluxinstaller_test provides unit tests for the flux installer package. +// +//nolint:err113,funlen // Tests use dynamic errors for mock behaviors and table-driven tests are naturally long +package fluxinstaller_test + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/devantler-tech/ksail/v5/pkg/apis/cluster/v1alpha1" + fluxinstaller "github.com/devantler-tech/ksail/v5/pkg/svc/installer/flux" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func TestBuildDockerConfigJSON(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + registry string + username string + password string + wantContains []string + wantNotContain []string + }{ + { + name: "basic credentials", + registry: "ghcr.io", + username: "user", + password: "pass", + wantContains: []string{ + `"auths"`, + `"ghcr.io"`, + `"username":"user"`, + `"password":"pass"`, + `"auth"`, + }, + }, + { + name: "custom registry", + registry: "registry.example.com:5000", + username: "admin", + password: "secret123", + wantContains: []string{ + `"registry.example.com:5000"`, + `"username":"admin"`, + `"password":"secret123"`, + }, + }, + { + name: "special characters in password", + registry: "docker.io", + username: "user@example.com", + password: "p@ss:w0rd!", + wantContains: []string{ + `"username":"user@example.com"`, + `"password":"p@ss:w0rd!"`, + }, + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + data, err := fluxinstaller.BuildDockerConfigJSON( + testCase.registry, + testCase.username, + testCase.password, + ) + require.NoError(t, err) + require.NotEmpty(t, data) + + jsonStr := string(data) + for _, want := range testCase.wantContains { + assert.Contains(t, jsonStr, want) + } + + for _, notWant := range testCase.wantNotContain { + assert.NotContains(t, jsonStr, notWant) + } + }) + } +} + +func TestBuildExternalRegistryURL(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + localRegistry v1alpha1.LocalRegistry + wantURL string + wantSecret string + wantTagContain string + }{ + { + name: "external registry without credentials", + localRegistry: v1alpha1.LocalRegistry{ + Registry: "ghcr.io/example/repo", + }, + wantURL: "oci://ghcr.io/example/repo", + wantSecret: "", + }, + { + name: "external registry with credentials", + localRegistry: v1alpha1.LocalRegistry{ + Registry: "user:pass@ghcr.io/example/repo", + }, + wantURL: "oci://ghcr.io/example/repo", + wantSecret: fluxinstaller.ExternalRegistrySecretName, + }, + { + 
name: "external registry with tag", + localRegistry: v1alpha1.LocalRegistry{ + Registry: "user:pass@ghcr.io/example/repo:v1.0.0", + }, + wantURL: "oci://ghcr.io/example/repo", + wantSecret: fluxinstaller.ExternalRegistrySecretName, + wantTagContain: "v1.0.0", + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + url, secret, tag := fluxinstaller.BuildExternalRegistryURL(testCase.localRegistry) + assert.Equal(t, testCase.wantURL, url) + assert.Equal(t, testCase.wantSecret, secret) + + if testCase.wantTagContain != "" { + assert.Equal(t, testCase.wantTagContain, tag) + } + }) + } +} + +func TestBuildLocalRegistryURL(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + localRegistry v1alpha1.LocalRegistry + clusterCfg *v1alpha1.Cluster + clusterName string + wantContains []string + }{ + { + name: "default local registry enabled", + localRegistry: v1alpha1.LocalRegistry{}, + clusterCfg: &v1alpha1.Cluster{ + Spec: v1alpha1.Spec{ + Workload: v1alpha1.WorkloadSpec{ + SourceDirectory: "k8s", + }, + }, + }, + clusterName: "test-cluster", + wantContains: []string{ + "oci://", + }, + }, + { + name: "custom source directory", + localRegistry: v1alpha1.LocalRegistry{}, + clusterCfg: &v1alpha1.Cluster{ + Spec: v1alpha1.Spec{ + Workload: v1alpha1.WorkloadSpec{ + SourceDirectory: "manifests/kubernetes", + }, + }, + }, + clusterName: "my-cluster", + wantContains: []string{ + "oci://", + "manifests-kubernetes", + }, + }, + { + name: "empty source directory uses default", + localRegistry: v1alpha1.LocalRegistry{}, + clusterCfg: &v1alpha1.Cluster{ + Spec: v1alpha1.Spec{ + Workload: v1alpha1.WorkloadSpec{ + SourceDirectory: "", + }, + }, + }, + clusterName: "cluster", + wantContains: []string{"oci://"}, + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + url := fluxinstaller.BuildLocalRegistryURL( + testCase.localRegistry, + testCase.clusterCfg, + testCase.clusterName, + ) + for _, want := range testCase.wantContains { + assert.Contains(t, url, want) + } + }) + } +} + +func TestBuildFluxInstance(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + clusterCfg *v1alpha1.Cluster + clusterName string + wantName string + }{ + { + name: "local registry enabled", + clusterCfg: &v1alpha1.Cluster{ + Spec: v1alpha1.Spec{ + Cluster: v1alpha1.ClusterSpec{ + LocalRegistry: v1alpha1.LocalRegistry{}, + }, + Workload: v1alpha1.WorkloadSpec{ + SourceDirectory: "k8s", + }, + }, + }, + clusterName: "test-cluster", + wantName: "flux", + }, + { + name: "external registry", + clusterCfg: &v1alpha1.Cluster{ + Spec: v1alpha1.Spec{ + Cluster: v1alpha1.ClusterSpec{ + LocalRegistry: v1alpha1.LocalRegistry{ + Registry: "ghcr.io/example/repo", + }, + }, + Workload: v1alpha1.WorkloadSpec{ + SourceDirectory: "k8s", + }, + }, + }, + clusterName: "test-cluster", + wantName: "flux", + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + instance, err := fluxinstaller.BuildFluxInstance( + testCase.clusterCfg, + testCase.clusterName, + ) + require.NoError(t, err) + require.NotNil(t, instance) + + assert.Equal(t, testCase.wantName, instance.GetName()) + assert.Equal(t, "flux-system", instance.GetNamespace()) + assert.NotNil(t, instance.Spec.Sync) + assert.Equal(t, "OCIRepository", instance.Spec.Sync.Kind) + assert.NotEmpty(t, instance.Spec.Sync.URL) + }) + } +} + +func TestBuildRegistrySecret(t *testing.T) { + t.Parallel() + + tests := []struct { + 
name string + clusterCfg *v1alpha1.Cluster + wantName string + }{ + { + name: "external registry with credentials", + clusterCfg: &v1alpha1.Cluster{ + Spec: v1alpha1.Spec{ + Cluster: v1alpha1.ClusterSpec{ + LocalRegistry: v1alpha1.LocalRegistry{ + Registry: "user:pass@ghcr.io/example/repo", + }, + }, + }, + }, + wantName: fluxinstaller.ExternalRegistrySecretName, + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + secret, err := fluxinstaller.BuildRegistrySecret(testCase.clusterCfg) + require.NoError(t, err) + require.NotNil(t, secret) + + assert.Equal(t, testCase.wantName, secret.Name) + assert.Equal(t, "flux-system", secret.Namespace) + assert.Contains(t, secret.Labels, "app.kubernetes.io/managed-by") + assert.Equal(t, "ksail", secret.Labels["app.kubernetes.io/managed-by"]) + assert.NotEmpty(t, secret.Data[".dockerconfigjson"]) + }) + } +} + +func TestIsTransientAPIError(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + err error + wantRetry bool + }{ + { + name: "nil error", + err: nil, + wantRetry: false, + }, + { + name: "generic error", + err: errors.New("some error"), + wantRetry: false, + }, + { + name: "service unavailable", + err: apierrors.NewServiceUnavailable("service unavailable"), + wantRetry: true, + }, + { + name: "timeout error", + err: apierrors.NewTimeoutError("timeout", 1), + wantRetry: true, + }, + { + name: "too many requests", + err: apierrors.NewTooManyRequestsError("too many requests"), + wantRetry: true, + }, + { + name: "conflict error", + err: apierrors.NewConflict( + schema.GroupResource{Group: "", Resource: "pods"}, + "test", + errors.New("conflict"), + ), + wantRetry: true, + }, + { + name: "server could not find resource", + err: errors.New("the server could not find the requested resource"), + wantRetry: true, + }, + { + name: "no matches for kind", + err: errors.New("no matches for kind \"FluxInstance\" in version"), + wantRetry: true, + }, + { + name: "connection refused", + err: errors.New("connection refused"), + wantRetry: true, + }, + { + name: "connection reset", + err: errors.New("connection reset by peer"), + wantRetry: true, + }, + } + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + result := fluxinstaller.IsTransientAPIError(testCase.err) + assert.Equal(t, testCase.wantRetry, result) + }) + } +} + +func TestNormalizeFluxPath(t *testing.T) { + t.Parallel() + + path := fluxinstaller.NormalizeFluxPath() + assert.Equal(t, "./", path) +} + +func TestPollUntilReady_Success(t *testing.T) { + t.Parallel() + + callCount := 0 + checkFn := func() (bool, error) { + callCount++ + if callCount >= 2 { + return true, nil + } + + return false, nil + } + + err := fluxinstaller.PollUntilReady( + context.Background(), + 5*time.Second, + 10*time.Millisecond, + "test resource", + checkFn, + ) + + require.NoError(t, err) + assert.GreaterOrEqual(t, callCount, 2) +} + +func TestPollUntilReady_Timeout(t *testing.T) { + t.Parallel() + + checkFn := func() (bool, error) { + return false, errors.New("not ready") + } + + err := fluxinstaller.PollUntilReady( + context.Background(), + 50*time.Millisecond, + 10*time.Millisecond, + "test resource", + checkFn, + ) + + require.Error(t, err) + assert.Contains(t, err.Error(), "timed out") + assert.Contains(t, err.Error(), "test resource") +} + +func TestPollUntilReady_ContextCanceled(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Cancel immediately + 
+ checkFn := func() (bool, error) { + return false, nil + } + + err := fluxinstaller.PollUntilReady( + ctx, + 5*time.Second, + 10*time.Millisecond, + "test resource", + checkFn, + ) + + require.Error(t, err) + assert.Contains(t, err.Error(), "timed out") +} + +func TestPollUntilReady_ImmediateSuccess(t *testing.T) { + t.Parallel() + + callCount := 0 + checkFn := func() (bool, error) { + callCount++ + + return true, nil + } + + err := fluxinstaller.PollUntilReady( + context.Background(), + 5*time.Second, + 10*time.Millisecond, + "test resource", + checkFn, + ) + + require.NoError(t, err) + assert.Equal(t, 1, callCount) +} + +func TestEnsureDefaultResources_NilConfig(t *testing.T) { + t.Parallel() + + err := fluxinstaller.EnsureDefaultResources( + context.Background(), + "", + nil, + "test-cluster", + ) + + require.Error(t, err) + assert.Contains(t, err.Error(), "cluster configuration is required") +} + +func TestBuildLocalRegistryURL_CustomPort(t *testing.T) { + t.Parallel() + + localRegistry := v1alpha1.LocalRegistry{ + Registry: "localhost:8080", + } + + clusterCfg := &v1alpha1.Cluster{ + Spec: v1alpha1.Spec{ + Workload: v1alpha1.WorkloadSpec{ + SourceDirectory: "k8s", + }, + }, + } + + url := fluxinstaller.BuildLocalRegistryURL(localRegistry, clusterCfg, "test") + + // Should use the resolved host:port from the local registry ref + assert.Contains(t, url, "oci://") +} diff --git a/pkg/svc/provider/provider_test.go b/pkg/svc/provider/provider_test.go new file mode 100644 index 000000000..5de1385a8 --- /dev/null +++ b/pkg/svc/provider/provider_test.go @@ -0,0 +1,251 @@ +package provider_test + +import ( + "context" + "errors" + "testing" + + "github.com/devantler-tech/ksail/v5/pkg/svc/provider" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +// errListFailed is a test error for list operations. +var errListFailed = errors.New("list failed") + +// testClusterName is a constant for test cluster name. +const testClusterName = "test-cluster" + +// mockAvailableProvider implements AvailableProvider for testing. 
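The hand-rolled testify mock declared next, together with the tests that follow it, pins the helper's full contract: an unavailable provider yields ErrProviderUnavailable, a listing failure is wrapped as "failed to list nodes", and success passes the slice through untouched. A sketch consistent with that contract, assuming the types already defined in pkg/svc/provider:

```go
package provider

import (
	"context"
	"fmt"
)

// EnsureAvailableAndListNodes (sketched) guards ListNodes behind an
// availability check and wraps listing failures with %w.
func EnsureAvailableAndListNodes(
	ctx context.Context,
	prov AvailableProvider,
	clusterName string,
) ([]NodeInfo, error) {
	if !prov.IsAvailable() {
		return nil, ErrProviderUnavailable
	}

	nodes, err := prov.ListNodes(ctx, clusterName)
	if err != nil {
		return nil, fmt.Errorf("failed to list nodes: %w", err)
	}

	return nodes, nil
}
```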
+type mockAvailableProvider struct { + mock.Mock +} + +func (m *mockAvailableProvider) IsAvailable() bool { + args := m.Called() + + return args.Bool(0) +} + +func (m *mockAvailableProvider) ListNodes( + ctx context.Context, + clusterName string, +) ([]provider.NodeInfo, error) { + args := m.Called(ctx, clusterName) + + result, ok := args.Get(0).([]provider.NodeInfo) + if !ok { + return nil, args.Error(1) //nolint:wrapcheck // mock + } + + return result, args.Error(1) //nolint:wrapcheck // mock +} + +func TestEnsureAvailableAndListNodes_ProviderUnavailable(t *testing.T) { + t.Parallel() + + ctx := context.Background() + clusterName := testClusterName + + mockProv := new(mockAvailableProvider) + mockProv.On("IsAvailable").Return(false) + + nodes, err := provider.EnsureAvailableAndListNodes(ctx, mockProv, clusterName) + + require.Error(t, err) + require.ErrorIs(t, err, provider.ErrProviderUnavailable) + assert.Nil(t, nodes) + mockProv.AssertExpectations(t) +} + +func TestEnsureAvailableAndListNodes_ListNodesFails(t *testing.T) { + t.Parallel() + + ctx := context.Background() + clusterName := testClusterName + + mockProv := new(mockAvailableProvider) + mockProv.On("IsAvailable").Return(true) + mockProv.On("ListNodes", ctx, clusterName).Return(nil, errListFailed) + + nodes, err := provider.EnsureAvailableAndListNodes(ctx, mockProv, clusterName) + + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to list nodes") + assert.Nil(t, nodes) + mockProv.AssertExpectations(t) +} + +func TestEnsureAvailableAndListNodes_ReturnsNodes(t *testing.T) { + t.Parallel() + + ctx := context.Background() + clusterName := testClusterName + expectedNodes := []provider.NodeInfo{ + {Name: "node1", ClusterName: clusterName, Role: "control-plane", State: "running"}, + {Name: "node2", ClusterName: clusterName, Role: "worker", State: "running"}, + } + + mockProv := new(mockAvailableProvider) + mockProv.On("IsAvailable").Return(true) + mockProv.On("ListNodes", ctx, clusterName).Return(expectedNodes, nil) + + nodes, err := provider.EnsureAvailableAndListNodes(ctx, mockProv, clusterName) + + require.NoError(t, err) + assert.Equal(t, expectedNodes, nodes) + mockProv.AssertExpectations(t) +} + +func TestEnsureAvailableAndListNodes_ReturnsEmptySlice(t *testing.T) { + t.Parallel() + + ctx := context.Background() + clusterName := testClusterName + + mockProv := new(mockAvailableProvider) + mockProv.On("IsAvailable").Return(true) + mockProv.On("ListNodes", ctx, clusterName).Return([]provider.NodeInfo{}, nil) + + nodes, err := provider.EnsureAvailableAndListNodes(ctx, mockProv, clusterName) + + require.NoError(t, err) + assert.Empty(t, nodes) + mockProv.AssertExpectations(t) +} + +func TestNodeInfo(t *testing.T) { + t.Parallel() + + node := provider.NodeInfo{ + Name: "test-node", + ClusterName: testClusterName, + Role: "control-plane", + State: "running", + } + + assert.Equal(t, "test-node", node.Name) + assert.Equal(t, testClusterName, node.ClusterName) + assert.Equal(t, "control-plane", node.Role) + assert.Equal(t, "running", node.State) +} + +func TestErrors(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + err error + expected string + }{ + {"ErrNoNodes", provider.ErrNoNodes, "no nodes found for cluster"}, + {"ErrProviderUnavailable", provider.ErrProviderUnavailable, "provider is not available"}, + {"ErrUnknownLabelScheme", provider.ErrUnknownLabelScheme, "unknown label scheme"}, + {"ErrSkipAction", provider.ErrSkipAction, "skip action"}, + } + + for _, testCase := range tests { + 
t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + assert.Equal(t, testCase.expected, testCase.err.Error()) + }) + } +} + +func TestMockProvider_StartNodes(t *testing.T) { + t.Parallel() + + ctx := context.Background() + clusterName := testClusterName + + mockProv := provider.NewMockProvider() + mockProv.On("StartNodes", ctx, clusterName).Return(nil) + + err := mockProv.StartNodes(ctx, clusterName) + + require.NoError(t, err) + mockProv.AssertExpectations(t) +} + +func TestMockProvider_StopNodes(t *testing.T) { + t.Parallel() + + ctx := context.Background() + clusterName := testClusterName + + mockProv := provider.NewMockProvider() + mockProv.On("StopNodes", ctx, clusterName).Return(nil) + + err := mockProv.StopNodes(ctx, clusterName) + + require.NoError(t, err) + mockProv.AssertExpectations(t) +} + +func TestMockProvider_ListNodes(t *testing.T) { + t.Parallel() + + ctx := context.Background() + clusterName := testClusterName + expectedNodes := []provider.NodeInfo{ + {Name: "node1", ClusterName: clusterName}, + } + + mockProv := provider.NewMockProvider() + mockProv.On("ListNodes", ctx, clusterName).Return(expectedNodes, nil) + + nodes, err := mockProv.ListNodes(ctx, clusterName) + + require.NoError(t, err) + assert.Equal(t, expectedNodes, nodes) + mockProv.AssertExpectations(t) +} + +func TestMockProvider_ListAllClusters(t *testing.T) { + t.Parallel() + + ctx := context.Background() + expectedClusters := []string{"cluster1", "cluster2"} + + mockProv := provider.NewMockProvider() + mockProv.On("ListAllClusters", ctx).Return(expectedClusters, nil) + + clusters, err := mockProv.ListAllClusters(ctx) + + require.NoError(t, err) + assert.Equal(t, expectedClusters, clusters) + mockProv.AssertExpectations(t) +} + +func TestMockProvider_NodesExist(t *testing.T) { + t.Parallel() + + ctx := context.Background() + clusterName := testClusterName + + mockProv := provider.NewMockProvider() + mockProv.On("NodesExist", ctx, clusterName).Return(true, nil) + + exists, err := mockProv.NodesExist(ctx, clusterName) + + require.NoError(t, err) + assert.True(t, exists) + mockProv.AssertExpectations(t) +} + +func TestMockProvider_DeleteNodes(t *testing.T) { + t.Parallel() + + ctx := context.Background() + clusterName := testClusterName + + mockProv := provider.NewMockProvider() + mockProv.On("DeleteNodes", ctx, clusterName).Return(nil) + + err := mockProv.DeleteNodes(ctx, clusterName) + + require.NoError(t, err) + mockProv.AssertExpectations(t) +} diff --git a/pkg/svc/provisioner/cluster/errors/errors_test.go b/pkg/svc/provisioner/cluster/errors/errors_test.go new file mode 100644 index 000000000..a6785fc28 --- /dev/null +++ b/pkg/svc/provisioner/cluster/errors/errors_test.go @@ -0,0 +1,78 @@ +package clustererrors_test + +import ( + "testing" + + clustererrors "github.com/devantler-tech/ksail/v5/pkg/svc/provisioner/cluster/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestErrorVariables(t *testing.T) { + t.Parallel() + + type testCase struct { + name string + err error + contains string + } + + tests := []testCase{ + { + name: "ErrClusterNotFound", + err: clustererrors.ErrClusterNotFound, + contains: "cluster not found", + }, + { + name: "ErrProviderNotSet", + err: clustererrors.ErrProviderNotSet, + contains: "infrastructure provider not set", + }, + { + name: "ErrNoNodesFound", + err: clustererrors.ErrNoNodesFound, + contains: "no nodes found for cluster", + }, + { + name: "ErrNotHetznerProvider", + err: clustererrors.ErrNotHetznerProvider, + 
contains: "infrastructure provider is not a Hetzner provider", + }, + { + name: "ErrNoControlPlaneNodes", + err: clustererrors.ErrNoControlPlaneNodes, + contains: "no control-plane nodes found for cluster", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + require.Error(t, tc.err) + assert.Contains(t, tc.err.Error(), tc.contains) + }) + } +} + +func TestErrorsAreDistinct(t *testing.T) { + t.Parallel() + + errs := []error{ + clustererrors.ErrClusterNotFound, + clustererrors.ErrProviderNotSet, + clustererrors.ErrNoNodesFound, + clustererrors.ErrNotHetznerProvider, + clustererrors.ErrNoControlPlaneNodes, + } + + // Verify all errors are distinct + for i, err1 := range errs { + for j, err2 := range errs { + if i != j { + assert.NotErrorIs(t, err1, err2, + "error %q should not match %q", err1, err2) + } + } + } +} diff --git a/pkg/utils/envvar/expand_test.go b/pkg/utils/envvar/expand_test.go index 92a2a863a..25b89c008 100644 --- a/pkg/utils/envvar/expand_test.go +++ b/pkg/utils/envvar/expand_test.go @@ -7,6 +7,8 @@ import ( "github.com/stretchr/testify/assert" ) +// Note: Tests using t.Setenv cannot be run in parallel, so we run them sequentially. + type expandTestCase struct { name string input string @@ -14,46 +16,153 @@ type expandTestCase struct { expected string } -func expandTestCases() []expandTestCase { +func getExpandTestCasesWithNoEnvVars() []expandTestCase { return []expandTestCase{ - {name: "empty string", input: "", envVars: nil, expected: ""}, - {name: "no placeholders", input: "hello world", envVars: nil, expected: "hello world"}, { - name: "single placeholder with value", input: "hello ${NAME}", - envVars: map[string]string{"NAME": "world"}, expected: "hello world", + name: "empty string", + input: "", + envVars: nil, + expected: "", + }, + { + name: "no placeholders", + input: "plain text without any variables", + envVars: nil, + expected: "plain text without any variables", + }, + { + name: "single placeholder without value", + input: "Hello ${UNDEFINED_VAR}!", + envVars: nil, + expected: "Hello !", + }, + { + name: "invalid placeholder syntax - no braces", + input: "$NAME", + envVars: nil, + expected: "$NAME", + }, + { + name: "invalid placeholder syntax - single brace", + input: "${NAME", + envVars: nil, + expected: "${NAME", }, { - name: "single placeholder without value", input: "hello ${MISSING}", - envVars: nil, expected: "hello ", + name: "empty placeholder", + input: "${}", + envVars: nil, + expected: "${}", }, + } +} + +func getExpandTestCasesWithEnvVarsBasic() []expandTestCase { + return []expandTestCase{ { - name: "multiple placeholders", - input: "${GREETING} ${NAME}!", - envVars: map[string]string{"GREETING": "Hello", "NAME": "World"}, + name: "single placeholder with value", + input: "Hello ${TEST_NAME}!", + envVars: map[string]string{"TEST_NAME": "World"}, expected: "Hello World!", }, { - name: "placeholder with underscores", input: "${MY_VAR_NAME}", - envVars: map[string]string{"MY_VAR_NAME": "value"}, expected: "value", + name: "multiple placeholders", + input: "${TEST_GREETING} ${TEST_TARGET}, welcome to ${TEST_PLACE}", + envVars: map[string]string{ + "TEST_GREETING": "Hello", + "TEST_TARGET": "User", + "TEST_PLACE": "Home", + }, + expected: "Hello User, welcome to Home", + }, + { + name: "mixed defined and undefined", + input: "${TEST_DEFINED} and ${TEST_UNDEFINED_XYZ}", + envVars: map[string]string{"TEST_DEFINED": "value"}, + expected: "value and ", + }, + { + name: "variable with underscore", + input: 
"${TEST_MY_VAR_NAME}", + envVars: map[string]string{"TEST_MY_VAR_NAME": "test"}, + expected: "test", + }, + { + name: "variable with numbers", + input: "${TEST_VAR123}", + envVars: map[string]string{"TEST_VAR123": "numeric"}, + expected: "numeric", + }, + } +} + +func getExpandTestCasesWithEnvVarsAdvanced() []expandTestCase { + return []expandTestCase{ + { + name: "variable starting with underscore", + input: "${_TEST_PRIVATE}", + envVars: map[string]string{"_TEST_PRIVATE": "secret"}, + expected: "secret", + }, + { + name: "adjacent placeholders", + input: "${TEST_A}${TEST_B}${TEST_C}", + envVars: map[string]string{"TEST_A": "1", "TEST_B": "2", "TEST_C": "3"}, + expected: "123", }, { - name: "placeholder with numbers", input: "${VAR123}", - envVars: map[string]string{"VAR123": "numeric"}, expected: "numeric", + name: "placeholder in path", + input: "/home/${TEST_USER}/config/${TEST_APP_NAME}.yaml", + envVars: map[string]string{"TEST_USER": "developer", "TEST_APP_NAME": "ksail"}, + expected: "/home/developer/config/ksail.yaml", }, { - name: "invalid placeholder format - no braces", input: "$VAR", - envVars: map[string]string{"VAR": "value"}, expected: "$VAR", + name: "URL with placeholder", + input: "https://${TEST_HOST}:${TEST_PORT}/api", + envVars: map[string]string{"TEST_HOST": "localhost", "TEST_PORT": "8080"}, + expected: "https://localhost:8080/api", }, { - name: "mixed content", input: "prefix-${VAR}-suffix", - envVars: map[string]string{"VAR": "middle"}, expected: "prefix-middle-suffix", + name: "placeholder with special chars not matching regex", + input: "${VAR-NAME}", + envVars: map[string]string{"VAR-NAME": "value"}, + expected: "${VAR-NAME}", }, + { + name: "nested braces - inner variable expanded", + input: "${${TEST_INNER}}", + envVars: map[string]string{"TEST_INNER": "nested"}, + expected: "${nested}", + }, + } +} + +func getExpandTestCasesWithEnvVars() []expandTestCase { + return append(getExpandTestCasesWithEnvVarsBasic(), getExpandTestCasesWithEnvVarsAdvanced()...) +} + +func TestExpand_NoEnvVars(t *testing.T) { + t.Parallel() + + tests := getExpandTestCasesWithNoEnvVars() + + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + result := envvar.Expand(testCase.input) + assert.Equal(t, testCase.expected, result) + }) } } -func TestExpand(t *testing.T) { - for _, testCase := range expandTestCases() { +func TestExpand_WithEnvVars(t *testing.T) { + // Note: Cannot use t.Parallel() when using t.Setenv() + tests := getExpandTestCasesWithEnvVars() + + for _, testCase := range tests { t.Run(testCase.name, func(t *testing.T) { + // Set environment variables for this test for key, value := range testCase.envVars { t.Setenv(key, value) } @@ -63,3 +172,17 @@ func TestExpand(t *testing.T) { }) } } + +func TestExpand_HomePath(t *testing.T) { + t.Setenv("TEST_HOME", "/test/home") + + result := envvar.Expand("${TEST_HOME}/config") + assert.Equal(t, "/test/home/config", result) +} + +func TestExpand_PathLikeVariable(t *testing.T) { + t.Setenv("TEST_PATH", "/usr/bin:/usr/local/bin") + + result := envvar.Expand("Paths: ${TEST_PATH}") + assert.Equal(t, "Paths: /usr/bin:/usr/local/bin", result) +}