diff --git a/applications/wg-easy/README.md b/applications/wg-easy/README.md index 291ab2b2..ffe5f040 100644 --- a/applications/wg-easy/README.md +++ b/applications/wg-easy/README.md @@ -48,6 +48,7 @@ Use tools to automate repetitive tasks, reducing human error and increasing deve - Task-based workflow automation - Helmfile for orchestration +- Container-based task running for consistency - Automated validation and testing - Streamlined release process diff --git a/applications/wg-easy/Taskfile.yaml b/applications/wg-easy/Taskfile.yaml index 8d5dc9f5..68066cb7 100644 --- a/applications/wg-easy/Taskfile.yaml +++ b/applications/wg-easy/Taskfile.yaml @@ -1,45 +1,12 @@ version: "3" -includes: - utils: ./taskfiles/utils.yml - dev: ./taskfiles/container.yml - -vars: - # Application configuration - APP_NAME: '{{.REPLICATED_APP | default "wg-easy"}}' - APP_SLUG: '{{.REPLICATED_APP_SLUG | default "wg-easy-cre"}}' - - # Release configuration - RELEASE_CHANNELd: '{{.RELEASE_CHANNEL | default "Unstable"}}' - RELEASE_VERSION: '{{.RELEASE_VERSION | default "0.0.1"}}' - RELEASE_NOTES: '{{.RELEASE_NOTES | default "Release created via task release-create"}}' - - # Cluster configuration - CLUSTER_NAME: '{{.CLUSTER_NAME | default "test-cluster"}}' - K8S_VERSION: '{{.K8S_VERSION | default "1.32.2"}}' - DISK_SIZE: '{{.DISK_SIZE | default "100"}}' - INSTANCE_TYPE: '{{.INSTANCE_TYPE | default "r1.small"}}' - DISTRIBUTION: '{{.DISTRIBUTION | default "k3s"}}' - KUBECONFIG_FILE: './{{.CLUSTER_NAME}}.kubeconfig' - - # Ports configuration - EXPOSE_PORTS: - - port: 30443 - protocol: https - - port: 30080 - protocol: http +# This is a shim taskfile for running tasks inside Podman +# The actual taskfile that is mounted within the container lives in taskfiles/internal.yaml - # GCP default configuration - GCP_PROJECT: '{{.GCP_PROJECT | default "replicated-qa"}}' - GCP_ZONE: '{{.GCP_ZONE | default "us-central1-a"}}' - VM_NAME: '{{.VM_NAME | default (printf "%s-dev" (or (env "GUSER") "user"))}}' - - # Container workflow configuration - DEV_CONTAINER_REGISTRY: '{{.DEV_CONTAINER_REGISTRY | default "ghcr.io"}}' - DEV_CONTAINER_IMAGE: '{{.DEV_CONTAINER_IMAGE | default "replicatedhq/platform-examples/wg-easy-tools"}}' - DEV_CONTAINER_TAG: '{{.DEV_CONTAINER_TAG | default "latest"}}' - DEV_CONTAINER_NAME: '{{.DEV_CONTAINER_NAME | default "wg-easy-tools"}}' - CONTAINER_RUNTIME: '{{.CONTAINER_RUNTIME | default "podman"}}' +includes: + container: + internal: true + taskfile: ./taskfiles/container.yml tasks: default: @@ -48,441 +15,205 @@ tasks: cmds: - task --list + helm: + desc: 'Run helm with args inside the container (example: "task helm -- show chart ./cert-manager")' + cmds: + - task: container:exec + vars: + CMD: 'helm {{.CLI_ARGS}}' + + kubectl: + desc: 'Run kubectl with args inside the container (example: "task kubectl -- version --client")' + cmds: + - task: container:exec + vars: + CMD: 'kubectl {{.CLI_ARGS}}' + cluster-create: desc: Create a test cluster using Replicated Compatibility Matrix (use EMBEDDED=true for embedded clusters) - run: once - silent: false - vars: - EMBEDDED: '{{.EMBEDDED | default "false"}}' - LICENSE_ID: '{{if eq .EMBEDDED "true"}}{{.LICENSE_ID | default "2cmqT1dBVHZ3aSH21kPxWtgoYGr"}}{{end}}' - TIMEOUT: '{{if eq .EMBEDDED "true"}}420{{else}}300{{end}}' - TTL: '{{.TTL | default "4h"}}' - status: - - replicated cluster ls --output json | jq -e '.[] | select(.name == "{{.CLUSTER_NAME}}")' > /dev/null cmds: - - | - if [ "{{.EMBEDDED}}" = "true" ]; then - echo "Creating embedded cluster {{.CLUSTER_NAME}} 
with license ID {{.LICENSE_ID}}..." - replicated cluster create --distribution embedded-cluster --name {{.CLUSTER_NAME}} --license-id {{.LICENSE_ID}} --ttl {{.TTL}} - else - echo "Creating cluster {{.CLUSTER_NAME}} with distribution {{.DISTRIBUTION}}..." - replicated cluster create --name {{.CLUSTER_NAME}} --distribution {{.DISTRIBUTION}} --version {{.K8S_VERSION}} --disk {{.DISK_SIZE}} --instance-type {{.INSTANCE_TYPE}} --ttl {{.TTL}} - fi - - task: utils:wait-for-cluster + - task: container:exec vars: - TIMEOUT: "{{.TIMEOUT}}" + CMD: 'task {{.TASK}} {{.EMBEDDED}} {{.LICENSE_ID}} {{.TIMEOUT}} {{.TTL}} {{.DISTRIBUTION}} {{.K8S_VERSION}} {{.DISK_SIZE}} {{.INSTANCE_TYPE}}' + vars: + EMBEDDED: "{{if .EMBEDDED}}EMBEDDED={{.EMBEDDED}}{{end}}" + LICENSE_ID: "{{if .LICENSE_ID}}LICENSE_ID={{.LICENSE_ID}}{{end}}" + TIMEOUT: "{{if .TIMEOUT}}TIMEOUT={{.TIMEOUT}}{{end}}" + TTL: "{{if .TTL}}TTL={{.TTL}}{{end}}" + DISTRIBUTION: "{{if .DISTRIBUTION}}DISTRIBUTION={{.DISTRIBUTION}}{{end}}" + K8S_VERSION: "{{if .K8S_VERSION}}K8S_VERSION={{.K8S_VERSION}}{{end}}" + DISK_SIZE: "{{if .DISK_SIZE}}DISK_SIZE={{.DISK_SIZE}}{{end}}" + INSTANCE_TYPE: "{{if .INSTANCE_TYPE}}INSTANCE_TYPE={{.INSTANCE_TYPE}}{{end}}" cluster-list: desc: List the cluster cmds: - - | - CLUSTER_ID=$(replicated cluster ls --output json | jq -r '.[] | select(.name == "{{.CLUSTER_NAME}}") | .id') - EXPIRES=$(replicated cluster ls --output json | jq -r '.[] | select(.name == "{{.CLUSTER_NAME}}") | .expires_at') - echo "{{.CLUSTER_NAME}} Cluster ID: ($CLUSTER_ID) Expires: ($EXPIRES)" + - task: container:exec + vars: + CMD: 'task {{.TASK}} {{.CLUSTER_NAME}}' + vars: + CLUSTER_NAME: "{{if .CLUSTER_NAME}}CLUSTER_NAME={{.CLUSTER_NAME}}{{end}}" test: desc: Run a basic test suite - silent: false cmds: - - echo "Running basic tests..." - - echo "This is a placeholder for actual tests" - - sleep 5 - - echo "Tests completed!" 
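+      # {{.TASK}} is Task's built-in variable holding the name of the current
+      # task, so each of these shims re-runs the same-named task inside the container.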
+      - task: container:exec
+        vars:
+          CMD: 'task {{.TASK}}'
 
   verify-kubeconfig:
     desc: Verify kubeconfig
-    silent: false
-    run: once
     cmds:
-      - |
-        if [ -f {{.KUBECONFIG_FILE}} ]; then
-          echo "Getting Cluster ID From Replicated Cluster list"
-          CLUSTER_ID=$(replicated cluster ls --output json | jq -r '.[] | select(.name == "{{.CLUSTER_NAME}}") | .id')
-          echo "Getting Cluster ID From Kubeconfig"
-          CLUSTER_ID_KUBECONFIG=$(grep "current-context:" {{.KUBECONFIG_FILE}} | cut -d'-' -f3)
-          if [ "$CLUSTER_ID" != "$CLUSTER_ID_KUBECONFIG" ]; then
-            echo "{{.CLUSTER_NAME}} Cluster ID between Replicated ($CLUSTER_ID) and Kubeconfig ($CLUSTER_ID_KUBECONFIG) mismatch"
-            echo "Removing old kubeconfig file"
-            rm -f {{.KUBECONFIG_FILE}}
-          fi
-        fi
+      - task: container:exec
+        vars:
+          CMD: 'task {{.TASK}} {{.CLUSTER_NAME}}'
+    vars:
+      CLUSTER_NAME: "{{if .CLUSTER_NAME}}CLUSTER_NAME={{.CLUSTER_NAME}}{{end}}"
 
   setup-kubeconfig:
     desc: Get kubeconfig and prepare cluster for application deployment
-    silent: false
-    run: once
     cmds:
-      - task: utils:get-kubeconfig
-      - task: utils:remove-k3s-traefik
-    status:
-      - |
-        # Check if kubeconfig exists
-        test -f {{.KUBECONFIG_FILE}} && \
-        # For k3s, also check if traefik is removed
-        if [ "{{.DISTRIBUTION}}" = "k3s" ]; then
-          KUBECONFIG={{.KUBECONFIG_FILE}} helm list -n kube-system -o json | \
-          jq -e 'map(select(.name == "traefik" or .name == "traefik-crd")) | length == 0' >/dev/null
-        else
-          true
-        fi
-    deps:
-      - cluster-create
-      - verify-kubeconfig
+      - task: container:exec
+        vars:
+          CMD: 'task {{.TASK}} {{.DISTRIBUTION}} {{.EMBEDDED}} {{.LICENSE_ID}} {{.TIMEOUT}} {{.TTL}} {{.K8S_VERSION}} {{.DISK_SIZE}} {{.INSTANCE_TYPE}} {{.CLUSTER_NAME}}'
+    vars:
+      DISTRIBUTION: "{{if .DISTRIBUTION}}DISTRIBUTION={{.DISTRIBUTION}}{{end}}"
+      # from deps cluster-create
+      EMBEDDED: "{{if .EMBEDDED}}EMBEDDED={{.EMBEDDED}}{{end}}"
+      LICENSE_ID: "{{if .LICENSE_ID}}LICENSE_ID={{.LICENSE_ID}}{{end}}"
+      TIMEOUT: "{{if .TIMEOUT}}TIMEOUT={{.TIMEOUT}}{{end}}"
+      TTL: "{{if .TTL}}TTL={{.TTL}}{{end}}"
+      K8S_VERSION: "{{if .K8S_VERSION}}K8S_VERSION={{.K8S_VERSION}}{{end}}"
+      DISK_SIZE: "{{if .DISK_SIZE}}DISK_SIZE={{.DISK_SIZE}}{{end}}"
+      INSTANCE_TYPE: "{{if .INSTANCE_TYPE}}INSTANCE_TYPE={{.INSTANCE_TYPE}}{{end}}"
+      # from deps verify-kubeconfig
+      CLUSTER_NAME: "{{if .CLUSTER_NAME}}CLUSTER_NAME={{.CLUSTER_NAME}}{{end}}"
 
   dependencies-update:
     desc: Update Helm dependencies for all charts
-    silent: false
-    run: once
     cmds:
-      - echo "Updating Helm dependencies for all charts..."
-      - |
-        # Find all charts and update their dependencies
-        for chart_dir in $(find charts/ -maxdepth 2 -name "Chart.yaml" | xargs dirname); do
-          echo "Updating dependency $chart_dir"
-          helm dependency update --skip-refresh "$chart_dir"
-        done
-      - echo "All dependencies updated!"
+      - task: container:exec
+        vars:
+          CMD: 'task {{.TASK}}'
 
   cluster-ports-expose:
-    desc: Expose configured ports for a cluster and capture exposed URLs
-    silent: false
-    run: once
-    status:
-      - |
-        CLUSTER_ID=$(replicated cluster ls --output json | jq -r '.[] | select(.name == "{{.CLUSTER_NAME}}") | .id')
-        if [ -z "$CLUSTER_ID" ]; then
-          exit 1
-        fi
-
-        # Check if all ports are already exposed
-        expected_count={{len .EXPOSE_PORTS}}
-        port_checks=""
-        {{range $i, $port := .EXPOSE_PORTS}}
-        port_checks="${port_checks}(.upstream_port == {{$port.port}} and .exposed_ports[0].protocol == \"{{$port.protocol}}\") or "
-        {{end}}
-        # Remove trailing "or "
-        port_checks="${port_checks% or }"
-
-        PORT_COUNT=$(replicated cluster port ls $CLUSTER_ID --output json | jq -r ".[] | select($port_checks) | .upstream_port" | wc -l | tr -d ' ')
-        [ "$PORT_COUNT" -eq "$expected_count" ]
+    desc: Expose configured ports and capture exposed URLs
     cmds:
-      - task: utils:port-operations
+      - task: container:exec
         vars:
-          OPERATION: "expose"
-    deps:
-      - cluster-create
+          CMD: 'task {{.TASK}} {{.CLUSTER_NAME}} {{.OPERATION}}'
+    vars:
+      CLUSTER_NAME: "{{if .CLUSTER_NAME}}CLUSTER_NAME={{.CLUSTER_NAME}}{{end}}"
+      # from task: utils:port-operations
+      OPERATION: "{{if .OPERATION}}OPERATION={{.OPERATION}}{{end}}"
 
   helm-install:
-    desc: Install all charts using helmfile
-    silent: false
+    desc: Deploy all charts using helmfile
     cmds:
-      - echo "Installing all charts via helmfile"
-      - |
-        # Get cluster ID
-        CLUSTER_ID=$(replicated cluster ls --output json | jq -r '.[] | select(.name == "{{.CLUSTER_NAME}}") | .id')
-        if [ -z "$CLUSTER_ID" ]; then
-          echo "Error: Could not find cluster with name {{.CLUSTER_NAME}}"
-          exit 1
-        fi
-
-        # Get exposed URLs
-        ENV_VARS=$(task utils:port-operations OPERATION=getenv CLUSTER_NAME={{.CLUSTER_NAME}})
-
-        # Deploy with helmfile
-        echo "Using $ENV_VARS"
-        eval "KUBECONFIG={{.KUBECONFIG_FILE}} $ENV_VARS helmfile sync --wait"
-      - echo "All charts installed!"
-    deps:
-      - setup-kubeconfig
-      - cluster-ports-expose
+      - task: container:exec
+        vars:
+          CMD: 'task {{.TASK}} {{.DISTRIBUTION}} {{.EMBEDDED}} {{.LICENSE_ID}} {{.TIMEOUT}} {{.TTL}} {{.K8S_VERSION}} {{.DISK_SIZE}} {{.INSTANCE_TYPE}} {{.CLUSTER_NAME}} {{.OPERATION}}'
+    vars:
+      # from deps setup-kubeconfig
+      DISTRIBUTION: "{{if .DISTRIBUTION}}DISTRIBUTION={{.DISTRIBUTION}}{{end}}"
+      # from deps cluster-create
+      EMBEDDED: "{{if .EMBEDDED}}EMBEDDED={{.EMBEDDED}}{{end}}"
+      LICENSE_ID: "{{if .LICENSE_ID}}LICENSE_ID={{.LICENSE_ID}}{{end}}"
+      TIMEOUT: "{{if .TIMEOUT}}TIMEOUT={{.TIMEOUT}}{{end}}"
+      TTL: "{{if .TTL}}TTL={{.TTL}}{{end}}"
+      K8S_VERSION: "{{if .K8S_VERSION}}K8S_VERSION={{.K8S_VERSION}}{{end}}"
+      DISK_SIZE: "{{if .DISK_SIZE}}DISK_SIZE={{.DISK_SIZE}}{{end}}"
+      INSTANCE_TYPE: "{{if .INSTANCE_TYPE}}INSTANCE_TYPE={{.INSTANCE_TYPE}}{{end}}"
+      # from deps verify-kubeconfig and cluster-ports-expose
+      CLUSTER_NAME: "{{if .CLUSTER_NAME}}CLUSTER_NAME={{.CLUSTER_NAME}}{{end}}"
+      # from task: utils:port-operations
+      OPERATION: "{{if .OPERATION}}OPERATION={{.OPERATION}}{{end}}"
 
   cluster-delete:
     desc: Delete all test clusters with matching name and clean up kubeconfig
-    silent: false
     cmds:
-      - echo "Deleting clusters named {{.CLUSTER_NAME}}..."
- - | - CLUSTER_IDS=$(replicated cluster ls | grep "{{.CLUSTER_NAME}}" | awk '{print $1}') - if [ -z "$CLUSTER_IDS" ]; then - echo "No clusters found with name {{.CLUSTER_NAME}}" - exit 0 - fi - - for id in $CLUSTER_IDS; do - echo "Deleting cluster ID: $id" - replicated cluster rm "$id" - done - - | - # Clean up kubeconfig file - if [ -f "{{.KUBECONFIG_FILE}}" ]; then - echo "Removing kubeconfig file {{.KUBECONFIG_FILE}}" - rm "{{.KUBECONFIG_FILE}}" - fi - - echo "All matching clusters deleted and kubeconfig cleaned up!" + - task: container:exec + vars: + CMD: 'task {{.TASK}}' release-prepare: desc: Prepare release files by copying replicated YAML files and packaging Helm charts - silent: false cmds: - - echo "Preparing release files..." - - rm -rf ./release - - mkdir -p ./release - - # Copy all non-config.yaml files - - echo "Copying non-config YAML files to release folder..." - - find . -path '*/replicated/*.yaml' -not -name 'config.yaml' -exec cp {} ./release/ \; - - find ./replicated -name '*.yaml' -not -name 'config.yaml' -exec cp {} ./release/ \; 2>/dev/null || true - - # extract namespaces from helmChart files - - yq ea '[.spec.namespace] | unique' */replicated/helmChart-*.yaml | yq '.spec.additionalNamespaces *= load("/dev/stdin") | .spec.additionalNamespaces += "*" ' replicated/application.yaml > release/application.yaml.new - - mv release/application.yaml.new release/application.yaml - - # set helmChart versions from associated helm Chart.yaml - - echo "Setting helmChart versions..." - - | - while read directory; do - - echo $directory - parent=$(basename $(dirname $directory)) - - helmChartName="helmChart-$parent.yaml" - export version=$(yq -r '.version' $parent/Chart.yaml ) - - yq '.spec.chart.chartVersion = strenv(version) | .spec.chart.chartVersion style="single"' $directory/$helmChartName | tee release/$helmChartName - - done < <(find . -maxdepth 2 -mindepth 2 -type d -name replicated) - - # Merge config.yaml files - - echo "Merging config.yaml files..." - - | - # Start with an empty config file - echo "{}" > ./release/config.yaml - - # Merge all app config.yaml files first (excluding root replicated) - for config_file in $(find . -path '*/replicated/config.yaml' | grep -v "^./replicated/"); do - echo "Merging $config_file..." - yq eval-all '. as $item ireduce ({}; . * $item)' ./release/config.yaml "$config_file" > ./release/config.yaml.new - mv ./release/config.yaml.new ./release/config.yaml - done - - # Merge root config.yaml last - if [ -f "./replicated/config.yaml" ]; then - echo "Merging root config.yaml last..." - yq eval-all '. as $item ireduce ({}; . * $item)' ./release/config.yaml "./replicated/config.yaml" > ./release/config.yaml.new - mv ./release/config.yaml.new ./release/config.yaml - fi - - # Package Helm charts - - echo "Packaging Helm charts..." - - | - # Find top-level directories containing Chart.yaml files - for chart_dir in $(find charts/ -maxdepth 2 -name "Chart.yaml" | xargs dirname); do - echo "Packaging chart: $chart_dir" - # Navigate to chart directory, package it, and move the resulting .tgz to release folder - (cd "$chart_dir" && helm package . 
&& mv *.tgz ../../release/) - done - - - echo "Release files prepared in ./release/ directory" - deps: - - dependencies-update + - task: container:exec + vars: + CMD: 'task {{.TASK}}' release-create: desc: Create and promote a release using the Replicated CLI - silent: false - run: once - vars: - CHANNEL: '{{.CHANNEL | default "Unstable"}}' - VERSION: '{{.VERSION | default "0.0.1"}}' - RELEASE_NOTES: '{{.RELEASE_NOTES | default "Release created via task release-create"}}' - requires: - vars: [APP_SLUG, VERSION] cmds: - - echo "Creating and promoting release for {{.APP_SLUG}} to channel {{.CHANNEL}}..." - - | - # Create and promote the release in one step - echo "Creating release from files in ./release directory..." - replicated release create --app {{.APP_SLUG}} --yaml-dir ./release --release-notes "{{.RELEASE_NOTES}}" --promote {{.CHANNEL}} --version {{.VERSION}} - echo "Release version {{.VERSION}} created and promoted to channel {{.CHANNEL}}" - deps: - - release-prepare + - task: container:exec + vars: + CMD: 'task {{.TASK}}' customer-create: desc: Create a new customer or get existing customer with matching name and return their ID - silent: false - run: once - vars: - CUSTOMER_NAME: '{{.CUSTOMER_NAME | default "test-customer"}}' - CUSTOMER_EMAIL: '{{.CUSTOMER_EMAIL | default "test@example.com"}}' - CHANNEL: '{{.CHANNEL | default "Unstable"}}' - LICENSE_TYPE: '{{.LICENSE_TYPE | default "dev"}}' - EXPIRES_IN: '{{.EXPIRES_IN | default ""}}' - requires: - vars: [APP_SLUG] cmds: - - | - # First check if customer already exists - echo "Looking for existing customer {{.CUSTOMER_NAME}} for app {{.APP_SLUG}}..." - EXISTING_CUSTOMER=$(replicated customer ls --app {{.APP_SLUG}} --output json | jq -r '.[] | select(.name=="{{.CUSTOMER_NAME}}") | .id' | head -1) - - if [ -n "$EXISTING_CUSTOMER" ]; then - echo "Found existing customer {{.CUSTOMER_NAME}} with ID: $EXISTING_CUSTOMER" - echo "$EXISTING_CUSTOMER" - exit 0 - fi - - # No existing customer found, create a new one - echo "Creating new customer {{.CUSTOMER_NAME}} for app {{.APP_SLUG}}..." 
-
-        # Build the command with optional expiration
-        CMD="replicated customer create \
-          --app {{.APP_SLUG}} \
-          --name {{.CUSTOMER_NAME}} \
-          --email {{.CUSTOMER_EMAIL}} \
-          --channel {{.CHANNEL}} \
-          --type {{.LICENSE_TYPE}} \
-          --output json"
-
-        # Add expiration if specified
-        if [ -n "{{.EXPIRES_IN}}" ]; then
-          CMD="$CMD --expires-in {{.EXPIRES_IN}}"
-        fi
-
-        # Create the customer and capture the output
-        CUSTOMER_JSON=$($CMD)
-
-        # Extract and output just the customer ID
-        echo "$CUSTOMER_JSON" | jq -r '.id'
+      - task: container:exec
+        vars:
+          CMD: 'task {{.TASK}} {{.CUSTOMER_NAME}} {{.CUSTOMER_EMAIL}} {{.LICENSE_TYPE}} {{.EXPIRES_IN}}'
+    vars:
+      CUSTOMER_NAME: "{{if .CUSTOMER_NAME}}CUSTOMER_NAME={{.CUSTOMER_NAME}}{{end}}"
+      CUSTOMER_EMAIL: "{{if .CUSTOMER_EMAIL}}CUSTOMER_EMAIL={{.CUSTOMER_EMAIL}}{{end}}"
+      LICENSE_TYPE: "{{if .LICENSE_TYPE}}LICENSE_TYPE={{.LICENSE_TYPE}}{{end}}"
+      EXPIRES_IN: "{{if .EXPIRES_IN}}EXPIRES_IN={{.EXPIRES_IN}}{{end}}"
 
   gcp-vm-create:
     desc: Create a simple GCP VM instance
-    silent: false
-    vars:
-      GCP_MACHINE_TYPE: '{{.GCP_MACHINE_TYPE | default "e2-standard-2"}}'
-      GCP_DISK_SIZE: '{{.GCP_DISK_SIZE | default "100"}}'
-      GCP_DISK_TYPE: '{{.GCP_DISK_TYPE | default "pd-standard"}}'
-      GCP_IMAGE_FAMILY: '{{.GCP_IMAGE_FAMILY | default "ubuntu-2204-lts"}}'
-      GCP_IMAGE_PROJECT: '{{.GCP_IMAGE_PROJECT | default "ubuntu-os-cloud"}}'
-    status:
-      - gcloud compute instances describe {{.VM_NAME}} --project={{.GCP_PROJECT}} --zone={{.GCP_ZONE}} &>/dev/null
     cmds:
-      - task: utils:gcp-operations
+      - task: container:exec
         vars:
-          OPERATION: "create"
-          GCP_MACHINE_TYPE: '{{.GCP_MACHINE_TYPE}}'
-          GCP_DISK_SIZE: '{{.GCP_DISK_SIZE}}'
-          GCP_DISK_TYPE: '{{.GCP_DISK_TYPE}}'
-          GCP_IMAGE_FAMILY: '{{.GCP_IMAGE_FAMILY}}'
-          GCP_IMAGE_PROJECT: '{{.GCP_IMAGE_PROJECT}}'
+          CMD: 'task {{.TASK}}'
 
   gcp-vm-delete:
     desc: Delete the GCP VM instance for K8s and VPN
-    silent: false
-    status:
-      - "! gcloud compute instances describe {{.VM_NAME}} --project={{.GCP_PROJECT}} --zone={{.GCP_ZONE}} &>/dev/null"
     cmds:
-      - task: utils:gcp-operations
+      - task: container:exec
         vars:
-          OPERATION: "delete"
-          GCP_PROJECT: '{{.GCP_PROJECT}}'
-          GCP_ZONE: '{{.GCP_ZONE}}'
-          VM_NAME: '{{.VM_NAME}}'
+          CMD: 'task {{.TASK}}'
 
   embedded-cluster-setup:
     desc: Setup Replicated embedded cluster on the GCP VM
-    silent: false
-    vars:
-      CHANNEL: '{{.CHANNEL | default "Unstable"}}'
-      AUTH_TOKEN: '{{.AUTH_TOKEN | default "2usDXzovcJNcpn54yS5tFQVNvCq"}}'
-    deps:
-      - gcp-vm-create
-    status:
-      - |
-        # Check if the application tarball has already been downloaded and extracted
-        gcloud compute ssh {{.VM_NAME}} --project={{.GCP_PROJECT}} --zone={{.GCP_ZONE}} --command="test -d ./{{.APP_SLUG}}" &>/dev/null
     cmds:
-      - task: utils:gcp-operations
+      - task: container:exec
         vars:
-          OPERATION: "setup-embedded"
-          APP_SLUG: '{{.APP_SLUG}}'
-          CHANNEL: '{{.CHANNEL}}'
-          AUTH_TOKEN: '{{.AUTH_TOKEN}}'
-          GCP_PROJECT: '{{.GCP_PROJECT}}'
-          GCP_ZONE: '{{.GCP_ZONE}}'
-          VM_NAME: '{{.VM_NAME}}'
+          CMD: 'task {{.TASK}}'
 
   customer-ls:
     desc: List customers for the application
-    silent: false
-    vars:
-      OUTPUT_FORMAT: '{{.OUTPUT_FORMAT | default "table"}}'
-    requires:
-      vars: [APP_SLUG]
     cmds:
-      - echo "Listing customers for app {{.APP_SLUG}}..."
-      - replicated customer ls --app {{.APP_SLUG}} --output {{.OUTPUT_FORMAT}}
+      - task: container:exec
+        vars:
+          CMD: 'task {{.TASK}}'
 
   customer-delete:
     desc: Archive a customer by ID
-    silent: false
-    vars:
-      CUSTOMER_ID: '{{.CUSTOMER_ID}}'
-    requires:
-      vars: [APP_SLUG, CUSTOMER_ID]
     cmds:
-      - echo "Archiving customer with ID {{.CUSTOMER_ID}} from app {{.APP_SLUG}}..."
-      - |
-        # Verify customer exists before attempting to archive
-        CUSTOMER_EXISTS=$(replicated customer ls --app {{.APP_SLUG}} --output json | jq -r '.[] | select(.id=="{{.CUSTOMER_ID}}") | .id')
-        if [ -z "$CUSTOMER_EXISTS" ]; then
-          echo "Error: Customer with ID {{.CUSTOMER_ID}} not found for app {{.APP_SLUG}}"
-          exit 1
-        fi
-
-        # Get customer name for confirmation message
-        CUSTOMER_NAME=$(replicated customer ls --app {{.APP_SLUG}} --output json | jq -r '.[] | select(.id=="{{.CUSTOMER_ID}}") | .name')
-
-        # Archive the customer
-        replicated customer archive {{.CUSTOMER_ID}} --app {{.APP_SLUG}}
-
-        # Confirm archiving
-        echo "Customer '$CUSTOMER_NAME' (ID: {{.CUSTOMER_ID}}) successfully archived"
+      - task: container:exec
+        vars:
+          CMD: 'task {{.TASK}} {{.CUSTOMER_ID}}'
+    vars:
+      # forwarded because the internal customer-delete task requires CUSTOMER_ID
+      CUSTOMER_ID: "{{if .CUSTOMER_ID}}CUSTOMER_ID={{.CUSTOMER_ID}}{{end}}"
 
   clean:
     desc: Remove temporary Helm directories, chart dependencies, and release folder
-    silent: false
     cmds:
-      - echo "Cleaning temporary directories and dependencies..."
-      - |
-        # Remove the release directory
-        if [ -d "./release" ]; then
-          echo "Removing release directory..."
-          rm -rf ./release
-        fi
-
-        # Find and remove tmpcharts-* directories in charts/
-        echo "Removing temporary chart directories..."
-        find charts/ -type d -name "tmpcharts-*" -print
-        find charts/ -type d -name "tmpcharts-*" -exec rm -rf {} \; 2>/dev/null || true
-
-        # Clean up chart dependencies (.tgz files) in charts/*/charts/
-        echo "Removing chart dependencies..."
-        find charts/ -path "*/charts/*.tgz" -type f -print
-        find charts/ -path "*/charts/*.tgz" -type f -delete
-
-        # Clean up any tmpcharts directories in subdirectories
-        echo "Cleaning up any remaining tmpcharts directories..."
-        find . -type d -name "tmpcharts-*" -print
-        find . -type d -name "tmpcharts-*" -exec rm -rf {} \; 2>/dev/null || true
-      - echo "Cleaning complete!"
+      - task: container:exec
+        vars:
+          CMD: 'task {{.TASK}}'
 
   full-test-cycle:
     desc: Create cluster, get kubeconfig, expose ports, update dependencies, deploy charts, test, and delete
-    silent: false
     cmds:
-      - task: cluster-create
-      - task: setup-kubeconfig
-      - task: cluster-ports-expose
-      - task: dependencies-update
-      - task: helm-install
-      - task: test
-      - task: cluster-delete
+      - task: container:exec
+        vars:
+          CMD: 'task {{.TASK}}'
diff --git a/applications/wg-easy/container/Containerfile b/applications/wg-easy/container/Containerfile
index c628dfbf..8ea116c4 100644
--- a/applications/wg-easy/container/Containerfile
+++ b/applications/wg-easy/container/Containerfile
@@ -63,7 +63,14 @@ RUN curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | b
     | cut -d : -f 2,3 \
     | tr -d \") -o replicated.tar.gz \
     && tar xf replicated.tar.gz replicated && rm replicated.tar.gz \
-    && mv replicated /usr/local/bin/replicated
+    && mv replicated /usr/local/bin/replicated \
+    # Install yq
+    && BINARY=yq_linux_amd64 \
+    && VERSION=v4.45.1 \
+    && curl -Ls https://github.com/mikefarah/yq/releases/download/${VERSION}/${BINARY}.tar.gz -O \
+    && tar xf ${BINARY}.tar.gz && rm ${BINARY}.tar.gz \
+    && mv ${BINARY} /usr/local/bin/yq
 
 # Create a non-root user for better security
 RUN groupadd -r devuser && useradd -r -g devuser -m -s /bin/bash devuser
diff --git a/applications/wg-easy/docs/development-workflow.md b/applications/wg-easy/docs/development-workflow.md
index 29e359dc..a7eea4e6 100644
--- a/applications/wg-easy/docs/development-workflow.md
+++ b/applications/wg-easy/docs/development-workflow.md
@@ -20,6 +20,18 @@ Before starting the development workflow, ensure you have the following tools in
 
 All other tools will be automatically provided through task commands and containers.
 
+## Running helm and kubectl commands inside the container
+
+You can run consistent versions of `helm` and `kubectl` within the dev container using the tasks of the same name.
+
+See the descriptions of these tasks:
+
+```console
+$ task --list | grep -E 'helm:|kubectl'
+* helm: Run helm with args inside the container (example: "task helm -- show chart ./cert-manager")
+* kubectl: Run kubectl with args inside the container (example: "task kubectl -- version --client")
+```
+
 ## Workflow Stages
 
 ### Stage 1: Chart Dependencies and Verification
@@ -44,7 +56,7 @@ Begin by defining and verifying chart dependencies.
    ```bash
    task dependencies-update
    # Or for a single chart:
-   helm dependency update ./cert-manager
+   task helm -- dependency update ./cert-manager
    ```
 
 3. Verify charts were downloaded:
@@ -90,23 +102,13 @@ Configure chart values and create or modify templates.
 
 ### Stage 3: Local Validation with helm template
 
-> [!IMPORTANT]
-> Tools required by tasks in this project will be made available in a container. Run the commands below to start the dev environment
-
-```
-# Open shell to execute tasks
-task dev:shell
-
-# Start/restart tools container. Idempotent.
-task dev:restart
-```
-
 Validate chart templates locally without deploying to a cluster.
 
-1. Run helm template to render the chart and inspect manifests:
+1. Run helm template to render the chart and inspect manifests (quit with `:q!`):
 
    ```bash
-   helm template ./cert-manager | less
+   task helm -- template ./cert-manager | vim -
    ```
+
+   Note that this is equivalent to running `helm template ./cert-manager | vim -` outside of the container.
 
 **Validation point**: Generated Kubernetes manifests should be valid and contain the expected resources.
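+
+As an optional extra check (a sketch, not a task defined in this repo), you can summarize what a chart renders by piping the manifests through `yq`, assuming yq v4 is available on your host (it is also installed in the tools container):
+
+```bash
+# Print each rendered resource as kind/name, one entry per manifest (illustrative)
+task helm -- template ./cert-manager | yq -N '.kind + "/" + .metadata.name' -
+```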
@@ -124,27 +126,27 @@ Deploy individual charts to a test cluster to verify functionality. 2. Install a single chart: ```bash - helm install cert-manager ./cert-manager -n cert-manager --create-namespace + task helm -- install cert-manager ./cert-manager -n cert-manager --create-namespace ``` 3. Verify the deployment: ```bash - kubectl get pods -n cert-manager + task kubectl -- get pods -n cert-manager ``` 4. Test chart functionality: ```bash # Example: Test cert-manager with a test certificate - kubectl apply -f ./some-test-certificate.yaml - kubectl get certificate -A + task kubectl -- apply -f ./some-test-certificate.yaml + task kubectl -- get certificate -A ``` 5. Uninstall when done or making changes and repeat step 2: ```bash - helm uninstall cert-manager -n cert-manager + task helm -- uninstall cert-manager -n cert-manager ``` **Validation point**: Chart should deploy successfully and function as expected. @@ -179,11 +181,10 @@ Test multiple charts working together using Helmfile orchestration. ```bash # Check if issuers are correctly using cert-manager - kubectl get clusterissuers - kubectl get issuers -A + task kubectl -- get clusterissuers # Verify Traefik routes - kubectl get ingressroutes -A + task kubectl -- get ingressroutes -A ``` **Validation point**: All components should deploy in the correct order and work together. diff --git a/applications/wg-easy/taskfiles/container.yml b/applications/wg-easy/taskfiles/container.yml index 77e94ca8..5d299d52 100644 --- a/applications/wg-easy/taskfiles/container.yml +++ b/applications/wg-easy/taskfiles/container.yml @@ -1,90 +1,69 @@ version: "3" -# Development environment tasks -tasks: - build-image: - desc: Build development container image - vars: - IMAGE_NAME: '{{.DEV_CONTAINER_REGISTRY}}/{{.DEV_CONTAINER_IMAGE}}:{{.DEV_CONTAINER_TAG}}' - CONTAINERFILE: '{{.CONTAINERFILE | default "./container/Containerfile"}}' - BUILD_ARGS: '{{.BUILD_ARGS | default ""}}' - requires: - vars: [DEV_CONTAINER_REGISTRY, DEV_CONTAINER_IMAGE, CONTAINERFILE] +vars: + CONTAINER_REGISTRY: '{{.CONTAINER_REGISTRY | default "ghcr.io"}}' + CONTAINER_IMAGE: '{{.CONTAINER_IMAGE | default "replicatedhq/platform-examples/wg-easy-tools"}}' + CONTAINER_TAG: '{{.CONTAINER_TAG | default "latest"}}' + CONTAINER_NAME: '{{.CONTAINER_NAME | default "wg-easy-tools"}}' + CONTAINER_RUNTIME: '{{.CONTAINER_RUNTIME | default "podman"}}' - cmds: - - '{{.CONTAINER_RUNTIME}} build -t {{.IMAGE_NAME}} -f {{.CONTAINERFILE}} .' - - # Start development container in background. - start: - desc: Start development container in background +tasks: + exec: + desc: Run command in container silent: true run: once - vars: - IMAGE_NAME: '{{.DEV_CONTAINER_REGISTRY}}/{{.DEV_CONTAINER_IMAGE}}:{{.DEV_CONTAINER_TAG}}' - CONTAINERFILE: '{{.CONTAINERFILE | default "./container/Containerfile"}}' - BUILD_ARGS: '{{.BUILD_ARGS | default ""}}' requires: - vars: [DEV_CONTAINER_REGISTRY, DEV_CONTAINER_IMAGE, DEV_CONTAINER_TAG, DEV_CONTAINER_NAME, REPLICATED_API_TOKEN] - - status: - - '{{.CONTAINER_RUNTIME}} ps | grep -q "{{.DEV_CONTAINER_NAME}}"' + vars: [CONTAINER_NAME, IMAGE_NAME] + deps: + - task: check-image-exists cmds: - | - # Check if the image exists locally - # If not, pull it from the registry - # If that fails, build it locally - if ! {{.CONTAINER_RUNTIME}} image exists {{.IMAGE_NAME}}; then - echo "Image {{.IMAGE_NAME}} not found locally." - echo "Attempting to pull the image..." - if ! {{.CONTAINER_RUNTIME}} pull {{.IMAGE_NAME}}; then - echo "Failed to pull image. Building it locally..." 
- {{.CONTAINER_RUNTIME}} build -t {{.IMAGE_NAME}} -f {{.CONTAINERFILE}} . - fi - fi - - # Start container with host networking for kubectl port-forward compatibility - CONTAINER_ID=$({{.CONTAINER_RUNTIME}} run --rm --name {{.DEV_CONTAINER_NAME}} -d \ + {{.CONTAINER_RUNTIME}} run --rm \ -v $(pwd):/workspace \ + -v $(pwd)/taskfiles/internal.yaml:/workspace/Taskfile.yaml \ -e REPLICATED_API_TOKEN={{ .REPLICATED_API_TOKEN }} \ - {{.IMAGE_NAME}} bash -c 'trap "exit 0" TERM; sleep infinity & wait') - - if [ $? -eq 0 ]; then - echo "Development container started successfully with ID: $CONTAINER_ID" - else - echo "Failed to start development container" - exit 1 - fi + {{.IMAGE_NAME}} {{.CMD}} + vars: + CMD: "{{if .CMD}}bash -c '{{.CMD}}'{{else}}/bin/bash{{end}}" + IMAGE_NAME: '{{.CONTAINER_REGISTRY}}/{{.CONTAINER_IMAGE}}:{{.CONTAINER_TAG}}' - shell: - desc: Attach to development container shell - silent: true - requires: - vars: [DEV_CONTAINER_NAME] - deps: - - task: start + build: + run: once + desc: Build container image + vars: + IMAGE_NAME: '{{.CONTAINER_REGISTRY}}/{{.CONTAINER_IMAGE}}:{{.CONTAINER_TAG}}' + CONTAINERFILE: '{{.CONTAINERFILE | default "./container/Containerfile"}}' + BUILD_ARGS: '{{.BUILD_ARGS | default ""}}' + MESSAGE: "{{.MESSAGE}}" cmds: - - echo "Connecting to {{.DEV_CONTAINER_NAME}}..." - - '{{.CONTAINER_RUNTIME}} exec -it {{.DEV_CONTAINER_NAME}} /bin/bash' + - '{{if .MESSAGE}}echo "{{.MESSAGE}}"{{end}}' + - '{{.CONTAINER_RUNTIME}} build -t {{.IMAGE_NAME}} -f {{.CONTAINERFILE}} .' + status: + - '{{.CONTAINER_RUNTIME}} image exists {{.IMAGE_NAME}}' - stop: - desc: Stop development container - silent: true - requires: - vars: [DEV_CONTAINER_NAME] + pull: + run: once + desc: Pull container image + vars: + IMAGE_NAME: '{{.CONTAINER_REGISTRY}}/{{.CONTAINER_IMAGE}}:{{.CONTAINER_TAG}}' + CONTAINERFILE: '{{.CONTAINERFILE | default "./container/Containerfile"}}' cmds: - - | - if {{.CONTAINER_RUNTIME}} ps | grep -q "{{.DEV_CONTAINER_NAME}}"; then - echo "Stopping {{.DEV_CONTAINER_NAME}} development container..." - {{.CONTAINER_RUNTIME}} stop {{.DEV_CONTAINER_NAME}} - else - echo "Container {{.DEV_CONTAINER_NAME}} is not running" - fi + - cmd: "{{.CONTAINER_RUNTIME}} pull {{.IMAGE_NAME}}" + ignore_error: true + status: + - '{{.CONTAINER_RUNTIME}} image exists {{.IMAGE_NAME}}' - restart: - desc: Restart development container - silent: true - requires: - vars: [DEV_CONTAINER_NAME] + # Replaces long if/else commands with idempotent tasks and task-color user feedback + check-image-exists: + desc: If image does not exist, tries to pull. If pull fails, tries to build + run: always + vars: + IMAGE_NAME: '{{.CONTAINER_REGISTRY}}/{{.CONTAINER_IMAGE}}:{{.CONTAINER_TAG}}' cmds: - - task: stop - - task: start + - echo "Image '{{.IMAGE_NAME}}' does not exist. Trying pull" + - task: pull + - task: build + vars: + MESSAGE: "Pull failed. 
Trying build" + status: + - '{{.CONTAINER_RUNTIME}} image exists {{.IMAGE_NAME}}' diff --git a/applications/wg-easy/taskfiles/internal.yaml b/applications/wg-easy/taskfiles/internal.yaml new file mode 100644 index 00000000..059e7956 --- /dev/null +++ b/applications/wg-easy/taskfiles/internal.yaml @@ -0,0 +1,494 @@ +version: "3" + +includes: + utils: ./taskfiles/utils.yml + +## TO-DO: clean our vars usage +# - some are global, but unused +# - some are global, but also re-stated as task vars +# - some are "requires.vars[]", even though they're set as global vars +# - some are "requires.vars[]", even though they're set as task vars +# - some are only set as task vars +vars: + # Application configuration + APP_NAME: '{{.REPLICATED_APP | default "wg-easy"}}' + APP_SLUG: '{{.APP_SLUG | default "wg-easy-cre"}}' + + # Release configuration + RELEASE_CHANNEL: '{{.RELEASE_CHANNEL | default "Unstable"}}' + RELEASE_VERSION: '{{.RELEASE_VERSION | default "0.0.1"}}' + RELEASE_NOTES: '{{.RELEASE_NOTES | default "Release created via task release-create"}}' + + # Cluster configuration + CLUSTER_NAME: '{{.CLUSTER_NAME | default "test-cluster"}}' + K8S_VERSION: '{{.K8S_VERSION | default "1.32.2"}}' + DISK_SIZE: '{{.DISK_SIZE | default "100"}}' + INSTANCE_TYPE: '{{.INSTANCE_TYPE | default "r1.small"}}' + DISTRIBUTION: '{{.DISTRIBUTION | default "k3s"}}' + # can not override + KUBECONFIG_FILE: './{{.CLUSTER_NAME}}.kubeconfig' + + # Ports configuration + EXPOSE_PORTS: + - port: 30443 + protocol: https + - port: 30080 + protocol: http + + # GCP default configuration + GCP_PROJECT: '{{.GCP_PROJECT | default "replicated-qa"}}' + GCP_ZONE: '{{.GCP_ZONE | default "us-central1-a"}}' + VM_NAME: '{{.VM_NAME | default (printf "%s-dev" (or (env "GUSER") "user"))}}' + + # Container workflow configuration + DEV_CONTAINER_REGISTRY: '{{.DEV_CONTAINER_REGISTRY | default "ghcr.io"}}' + DEV_CONTAINER_IMAGE: '{{.DEV_CONTAINER_IMAGE | default "replicatedhq/platform-examples/wg-easy-tools"}}' + DEV_CONTAINER_TAG: '{{.DEV_CONTAINER_TAG | default "latest"}}' + DEV_CONTAINER_NAME: '{{.DEV_CONTAINER_NAME | default "wg-easy-tools"}}' + CONTAINER_RUNTIME: '{{.CONTAINER_RUNTIME | default "podman"}}' + +tasks: + default: + desc: Show available tasks + silent: true + cmds: + - task --list + + cluster-create: + desc: Create a test cluster using Replicated Compatibility Matrix (use EMBEDDED=true for embedded clusters) + run: once + silent: false + vars: + EMBEDDED: '{{.EMBEDDED | default "false"}}' + LICENSE_ID: '{{if eq .EMBEDDED "true"}}{{.LICENSE_ID | default "2cmqT1dBVHZ3aSH21kPxWtgoYGr"}}{{end}}' + TIMEOUT: '{{if eq .EMBEDDED "true"}}420{{else}}300{{end}}' + TTL: '{{.TTL | default "4h"}}' + status: + - replicated cluster ls --output json | jq -e '.[] | select(.name == "{{.CLUSTER_NAME}}")' > /dev/null + cmds: + - | + if [ "{{.EMBEDDED}}" = "true" ]; then + echo "Creating embedded cluster {{.CLUSTER_NAME}} with license ID {{.LICENSE_ID}}..." + replicated cluster create --distribution embedded-cluster --name {{.CLUSTER_NAME}} --license-id {{.LICENSE_ID}} --ttl {{.TTL}} + else + echo "Creating cluster {{.CLUSTER_NAME}} with distribution {{.DISTRIBUTION}}..." 
+ replicated cluster create --name {{.CLUSTER_NAME}} --distribution {{.DISTRIBUTION}} --version {{.K8S_VERSION}} --disk {{.DISK_SIZE}} --instance-type {{.INSTANCE_TYPE}} --ttl {{.TTL}} + fi + - task: utils:wait-for-cluster + vars: + TIMEOUT: "{{.TIMEOUT}}" + + cluster-list: + desc: List the cluster + cmds: + - | + CLUSTER_ID=$(replicated cluster ls --output json | jq -r '.[] | select(.name == "{{.CLUSTER_NAME}}") | .id') + EXPIRES=$(replicated cluster ls --output json | jq -r '.[] | select(.name == "{{.CLUSTER_NAME}}") | .expires_at') + echo "{{.CLUSTER_NAME}} Cluster ID: ($CLUSTER_ID) Expires: ($EXPIRES)" + + test: + desc: Run a basic test suite + silent: false + cmds: + - echo "Running basic tests..." + - echo "This is a placeholder for actual tests" + - sleep 5 + - echo "Tests completed!" + + verify-kubeconfig: + desc: Verify kubeconfig + silent: false + run: once + cmds: + - | + if [ -f {{.KUBECONFIG_FILE}} ]; then + echo "Getting Cluster ID From Replicated Cluster list" + CLUSTER_ID=$(replicated cluster ls --output json | jq -r '.[] | select(.name == "{{.CLUSTER_NAME}}") | .id') + echo "Getting Cluster ID From Kubeconfig" + CLUSTER_ID_KUBECONFIG=$(grep "current-context:" {{.KUBECONFIG_FILE}} | cut -d'-' -f3) + if [ "$CLUSTER_ID" != "$CLUSTER_ID_KUBECONFIG" ]; then + echo "{{.CLUSTER_NAME}} Cluster ID between Replicated ($CLUSTER_ID) and Kubeconfig ($CLUSTER_ID_KUBECONFIG) mismatch" + echo "Removing old kubeconfig file" + rm -f {{.KUBECONFIG_FILE}} + fi + fi + + setup-kubeconfig: + desc: Get kubeconfig and prepare cluster for application deployment + silent: false + run: once + cmds: + - task: utils:get-kubeconfig + - task: utils:remove-k3s-traefik + status: + - | + # Check if kubeconfig exists + test -f {{.KUBECONFIG_FILE}} && \ + # For k3s, also check if traefik is removed + if [ "{{.DISTRIBUTION}}" = "k3s" ]; then + KUBECONFIG={{.KUBECONFIG_FILE}} helm list -n kube-system -o json | \ + jq -e 'map(select(.name == "traefik" or .name == "traefik-crd")) | length == 0' >/dev/null + else + true + fi + deps: + - cluster-create + - verify-kubeconfig + + dependencies-update: + desc: Update Helm dependencies for all charts + silent: false + run: once + cmds: + - echo "Updating Helm dependencies for all charts..." + - | + # Find all charts and update their dependencies + for chart_dir in $(find charts/ -maxdepth 2 -name "Chart.yaml" | xargs dirname); do + echo "Updating dependency $chart_dir" + helm dependency update "$chart_dir" + done + - echo "All dependencies updated!" 
+ + cluster-ports-expose: + desc: Expose configured ports for a cluster and capture exposed URLs + silent: false + run: once + status: + - | + CLUSTER_ID=$(replicated cluster ls --output json | jq -r '.[] | select(.name == "{{.CLUSTER_NAME}}") | .id') + if [ -z "$CLUSTER_ID" ]; then + exit 1 + fi + + # Check if all ports are already exposed + expected_count={{len .EXPOSE_PORTS}} + port_checks="" + {{range $i, $port := .EXPOSE_PORTS}} + port_checks="${port_checks}(.upstream_port == {{$port.port}} and .exposed_ports[0].protocol == \"{{$port.protocol}}\") or " + {{end}} + # Remove trailing "or " + port_checks="${port_checks% or }" + + PORT_COUNT=$(replicated cluster port ls $CLUSTER_ID --output json | jq -r ".[] | select($port_checks) | .upstream_port" | wc -l | tr -d ' ') + [ "$PORT_COUNT" -eq "$expected_count" ] + cmds: + - task: utils:port-operations + vars: + OPERATION: "expose" + deps: + - cluster-create + + helm-install: + desc: Install all charts using helmfile + silent: false + cmds: + - echo "Installing all charts via helmfile" + - | + # Get cluster ID + CLUSTER_ID=$(replicated cluster ls --output json | jq -r '.[] | select(.name == "{{.CLUSTER_NAME}}") | .id') + if [ -z "$CLUSTER_ID" ]; then + echo "Error: Could not find cluster with name {{.CLUSTER_NAME}}" + exit 1 + fi + + # Get exposed URLs + ENV_VARS=$(task utils:port-operations OPERATION=getenv CLUSTER_NAME={{.CLUSTER_NAME}}) + + # Deploy with helmfile + echo "Using $ENV_VARS" + eval "KUBECONFIG={{.KUBECONFIG_FILE}} $ENV_VARS helmfile sync --wait" + - echo "All charts installed!" + deps: + - setup-kubeconfig + - cluster-ports-expose + + cluster-delete: + desc: Delete all test clusters with matching name and clean up kubeconfig + silent: false + cmds: + - echo "Deleting clusters named {{.CLUSTER_NAME}}..." + - | + CLUSTER_IDS=$(replicated cluster ls | grep "{{.CLUSTER_NAME}}" | awk '{print $1}') + if [ -z "$CLUSTER_IDS" ]; then + echo "No clusters found with name {{.CLUSTER_NAME}}" + exit 0 + fi + + for id in $CLUSTER_IDS; do + echo "Deleting cluster ID: $id" + replicated cluster rm "$id" + done + - | + # Clean up kubeconfig file + if [ -f "{{.KUBECONFIG_FILE}}" ]; then + echo "Removing kubeconfig file {{.KUBECONFIG_FILE}}" + rm "{{.KUBECONFIG_FILE}}" + fi + - echo "All matching clusters deleted and kubeconfig cleaned up!" + + release-prepare: + desc: Prepare release files by copying replicated YAML files and packaging Helm charts + silent: false + cmds: + - echo "Preparing release files..." + - rm -rf ./release + - mkdir -p ./release + + # Copy all non-config.yaml files + - echo "Copying non-config YAML files to release folder..." + - find . -path '*/replicated/*.yaml' -not -name 'config.yaml' -exec cp {} ./release/ \; + - find ./replicated -name '*.yaml' -not -name 'config.yaml' -exec cp {} ./release/ \; 2>/dev/null || true + + # extract namespaces from helmChart files + - yq ea '[.spec.namespace] | unique' */replicated/helmChart-*.yaml | yq '.spec.additionalNamespaces *= load("/dev/stdin") | .spec.additionalNamespaces += "*" ' replicated/application.yaml > release/application.yaml.new + - mv release/application.yaml.new release/application.yaml + + # set helmChart versions from associated helm Chart.yaml + - echo "Setting helmChart versions..." 
+ - | + while read directory; do + + echo $directory + parent=$(basename $(dirname $directory)) + + helmChartName="helmChart-$parent.yaml" + export version=$(yq -r '.version' $parent/Chart.yaml ) + + yq '.spec.chart.chartVersion = strenv(version) | .spec.chart.chartVersion style="single"' $directory/$helmChartName | tee release/$helmChartName + + done < <(find . -maxdepth 2 -mindepth 2 -type d -name replicated) + + # Merge config.yaml files + - echo "Merging config.yaml files..." + - | + # Start with an empty config file + echo "{}" > ./release/config.yaml + + # Merge all app config.yaml files first (excluding root replicated) + for config_file in $(find . -path '*/replicated/config.yaml' | grep -v "^./replicated/"); do + echo "Merging $config_file..." + yq eval-all '. as $item ireduce ({}; . * $item)' ./release/config.yaml "$config_file" > ./release/config.yaml.new + mv ./release/config.yaml.new ./release/config.yaml + done + + # Merge root config.yaml last + if [ -f "./replicated/config.yaml" ]; then + echo "Merging root config.yaml last..." + yq eval-all '. as $item ireduce ({}; . * $item)' ./release/config.yaml "./replicated/config.yaml" > ./release/config.yaml.new + mv ./release/config.yaml.new ./release/config.yaml + fi + + # Package Helm charts + - echo "Packaging Helm charts..." + - | + # Find top-level directories containing Chart.yaml files + for chart_dir in $(find charts/ -maxdepth 2 -name "Chart.yaml" | xargs dirname); do + echo "Packaging chart: $chart_dir" + # Navigate to chart directory, package it, and move the resulting .tgz to release folder + helm package "$chart_dir" && mv *.tgz release/ + done + + - echo "Release files prepared in ./release/ directory" + deps: + - dependencies-update + + release-create: + desc: Create and promote a release using the Replicated CLI + silent: false + run: once + vars: + CHANNEL: '{{.CHANNEL | default "Unstable"}}' + VERSION: '{{.VERSION | default "0.0.1"}}' + RELEASE_NOTES: '{{.RELEASE_NOTES | default "Release created via task release-create"}}' + requires: + vars: [APP_SLUG, VERSION] + cmds: + - echo "Creating and promoting release for {{.APP_SLUG}} to channel {{.RELEASE_CHANNEL}}..." + - | + # Create and promote the release in one step + echo "Creating release from files in ./release directory..." + replicated release create --app {{.APP_SLUG}} --yaml-dir ./release --release-notes "{{.RELEASE_NOTES}}" --promote {{.RELEASE_CHANNEL}} --version {{.RELEASE_VERSION}} + echo "Release version {{.RELEASE_VERSION}} created and promoted to channel {{.RELEASE_CHANNEL}}" + deps: + - release-prepare + + customer-create: + desc: Create a new customer or get existing customer with matching name and return their ID + silent: false + run: once + vars: + CUSTOMER_NAME: '{{.CUSTOMER_NAME | default "test-customer"}}' + CUSTOMER_EMAIL: '{{.CUSTOMER_EMAIL | default "test@example.com"}}' + CHANNEL: '{{.CHANNEL | default "Unstable"}}' + LICENSE_TYPE: '{{.LICENSE_TYPE | default "dev"}}' + EXPIRES_IN: '{{.EXPIRES_IN | default ""}}' + requires: + vars: [APP_SLUG] + cmds: + - | + # First check if customer already exists + echo "Looking for existing customer {{.CUSTOMER_NAME}} for app {{.APP_SLUG}}..." 
+ EXISTING_CUSTOMER=$(replicated customer ls --app {{.APP_SLUG}} --output json | jq -r '.[] | select(.name=="{{.CUSTOMER_NAME}}") | .id' | head -1) + + if [ -n "$EXISTING_CUSTOMER" ]; then + echo "Found existing customer {{.CUSTOMER_NAME}} with ID: $EXISTING_CUSTOMER" + echo "$EXISTING_CUSTOMER" + exit 0 + fi + + # No existing customer found, create a new one + echo "Creating new customer {{.CUSTOMER_NAME}} for app {{.APP_SLUG}}..." + + # Build the command with optional expiration + CMD="replicated customer create \ + --app {{.APP_SLUG}} \ + --name {{.CUSTOMER_NAME}} \ + --email {{.CUSTOMER_EMAIL}} \ + --channel {{.RELEASE_CHANNEL}} \ + --type {{.LICENSE_TYPE}} \ + --output json" + + # Add expiration if specified + if [ -n "{{.EXPIRES_IN}}" ]; then + CMD="$CMD --expires-in {{.EXPIRES_IN}}" + fi + + # Create the customer and capture the output + CUSTOMER_JSON=$($CMD) + + # Extract and output just the customer ID + echo "$CUSTOMER_JSON" | jq -r '.id' + + gcp-vm-create: + desc: Create a simple GCP VM instance + silent: false + vars: + GCP_MACHINE_TYPE: '{{.GCP_MACHINE_TYPE | default "e2-standard-2"}}' + GCP_DISK_SIZE: '{{.GCP_DISK_SIZE | default "100"}}' + GCP_DISK_TYPE: '{{.GCP_DISK_TYPE | default "pd-standard"}}' + GCP_IMAGE_FAMILY: '{{.GCP_IMAGE_FAMILY | default "ubuntu-2204-lts"}}' + GCP_IMAGE_PROJECT: '{{.GCP_IMAGE_PROJECT | default "ubuntu-os-cloud"}}' + status: + - gcloud compute instances describe {{.VM_NAME}} --project={{.GCP_PROJECT}} --zone={{.GCP_ZONE}} &>/dev/null + cmds: + - task: utils:gcp-operations + vars: + OPERATION: "create" + GCP_MACHINE_TYPE: '{{.GCP_MACHINE_TYPE}}' + GCP_DISK_SIZE: '{{.GCP_DISK_SIZE}}' + GCP_DISK_TYPE: '{{.GCP_DISK_TYPE}}' + GCP_IMAGE_FAMILY: '{{.GCP_IMAGE_FAMILY}}' + GCP_IMAGE_PROJECT: '{{.GCP_IMAGE_PROJECT}}' + + gcp-vm-delete: + desc: Delete the GCP VM instance for K8s and VPN + silent: false + status: + - "! gcloud compute instances describe {{.VM_NAME}} --project={{.GCP_PROJECT}} --zone={{.GCP_ZONE}} &>/dev/null" + cmds: + - task: utils:gcp-operations + vars: + OPERATION: "delete" + GCP_PROJECT: '{{.GCP_PROJECT}}' + GCP_ZONE: '{{.GCP_ZONE}}' + VM_NAME: '{{.VM_NAME}}' + + embedded-cluster-setup: + desc: Setup Replicated embedded cluster on the GCP VM + silent: false + vars: + CHANNEL: '{{.CHANNEL | default "Unstable"}}' + AUTH_TOKEN: '{{.AUTH_TOKEN | default "2usDXzovcJNcpn54yS5tFQVNvCq"}}' + deps: + - gcp-vm-create + status: + - | + # Check if the application tarball has already been downloaded and extracted + gcloud compute ssh {{.VM_NAME}} --project={{.GCP_PROJECT}} --zone={{.GCP_ZONE}} --command="test -d ./{{.APP_SLUG}}" &>/dev/null + cmds: + - task: utils:gcp-operations + vars: + OPERATION: "setup-embedded" + APP_SLUG: '{{.APP_SLUG}}' + CHANNEL: '{{.CHANNEL}}' + AUTH_TOKEN: '{{.AUTH_TOKEN}}' + GCP_PROJECT: '{{.GCP_PROJECT}}' + GCP_ZONE: '{{.GCP_ZONE}}' + VM_NAME: '{{.VM_NAME}}' + + customer-ls: + desc: List customers for the application + silent: false + vars: + OUTPUT_FORMAT: '{{.OUTPUT_FORMAT | default "table"}}' + requires: + vars: [APP_SLUG] + cmds: + - echo "Listing customers for app {{.APP_SLUG}}..." + - replicated customer ls --app {{.APP_SLUG}} --output {{.OUTPUT_FORMAT}} + + customer-delete: + desc: Archive a customer by ID + silent: false + vars: + CUSTOMER_ID: '{{.CUSTOMER_ID}}' + requires: + vars: [APP_SLUG, CUSTOMER_ID] + cmds: + - echo "Archiving customer with ID {{.CUSTOMER_ID}} from app {{.APP_SLUG}}..." 
+ - | + # Verify customer exists before attempting to archive + CUSTOMER_EXISTS=$(replicated customer ls --app {{.APP_SLUG}} --output json | jq -r '.[] | select(.id=="{{.CUSTOMER_ID}}") | .id') + if [ -z "$CUSTOMER_EXISTS" ]; then + echo "Error: Customer with ID {{.CUSTOMER_ID}} not found for app {{.APP_SLUG}}" + exit 1 + fi + + # Get customer name for confirmation message + CUSTOMER_NAME=$(replicated customer ls --app {{.APP_SLUG}} --output json | jq -r '.[] | select(.id=="{{.CUSTOMER_ID}}") | .name') + + # Archive the customer + replicated customer archive {{.CUSTOMER_ID}} --app {{.APP_SLUG}} + + # Confirm archiving + echo "Customer '$CUSTOMER_NAME' (ID: {{.CUSTOMER_ID}}) successfully archived" + + clean: + desc: Remove temporary Helm directories, chart dependencies, and release folder + silent: false + cmds: + - echo "Cleaning temporary directories and dependencies..." + - | + # Remove the release directory + if [ -d "./release" ]; then + echo "Removing release directory..." + rm -rf ./release + fi + + # Find and remove tmpcharts-* directories in charts/ + echo "Removing temporary chart directories..." + find charts/ -type d -name "tmpcharts-*" -print + find charts/ -type d -name "tmpcharts-*" -exec rm -rf {} \; 2>/dev/null || true + + # Clean up chart dependencies (.tgz files) in charts/*/charts/ + echo "Removing chart dependencies..." + find charts/ -path "*/charts/*.tgz" -type f -print + find charts/ -path "*/charts/*.tgz" -type f -delete + + # Clean up any tmpcharts directories in subdirectories + echo "Cleaning up any remaining tmpcharts directories..." + find . -type d -name "tmpcharts-*" -print + find . -type d -name "tmpcharts-*" -exec rm -rf {} \; 2>/dev/null || true + - echo "Cleaning complete!" + + full-test-cycle: + desc: Create cluster, get kubeconfig, expose ports, update dependencies, deploy charts, test, and delete + silent: false + cmds: + - task: cluster-create + - task: setup-kubeconfig + - task: cluster-ports-expose + - task: dependencies-update + - task: helm-install + - task: test + - task: cluster-delete diff --git a/applications/wg-easy/taskfiles/utils.yml b/applications/wg-easy/taskfiles/utils.yml index 67e26e6f..2b2d88e7 100644 --- a/applications/wg-easy/taskfiles/utils.yml +++ b/applications/wg-easy/taskfiles/utils.yml @@ -271,10 +271,10 @@ tasks: gcloud compute ssh {{.VM_NAME}} --project={{.GCP_PROJECT}} --zone={{.GCP_ZONE}} --command=" set -e echo 'Downloading {{.APP_NAME}} installer...' - curl -f 'https://replicated.app/embedded/{{.APP_NAME}}/{{.CHANNEL}}' -H 'Authorization: {{.AUTH_TOKEN}}' -o {{.APP_NAME}}-{{.CHANNEL}}.tgz + curl -f 'https://replicated.app/embedded/{{.APP_NAME}}/{{.RELEASE_CHANNEL}}' -H 'Authorization: {{.AUTH_TOKEN}}' -o {{.APP_NAME}}-{{.RELEASE_CHANNEL}}.tgz echo 'Extracting installer...' - tar -xvzf {{.APP_NAME}}-{{.CHANNEL}}.tgz + tar -xvzf {{.APP_NAME}}-{{.RELEASE_CHANNEL}}.tgz " echo "Embedded cluster setup initiated on VM {{.VM_NAME}}"