---
# Smoke test for the Cluster API Operator.
#
# Builds the operator image and Helm charts from this checkout, stands up a
# kind management cluster, installs the operator (via either the legacy
# single-chart path or the recommended operator + providers charts, driven by
# the `install-method` matrix), deploys the CAPI/CAPD providers, and finally
# creates and verifies a Docker-infrastructure workload cluster.
name: Smoke Test

on:
  pull_request:
    branches: [main, 'release-*']
  push:
    branches: [main]
  workflow_dispatch:

permissions:
  contents: read

env:
  # Name of the CAPD-provisioned workload cluster.
  CLUSTER_NAME: capi-quickstart
  # Name of the kind management cluster.
  KIND_CLUSTER_NAME: capi-operator-smoke-test
  KUBERNETES_VERSION: v1.33.0
  CONTROLLER_IMG: cluster-api-operator
  TAG: smoke-test

jobs:
  smoke-test:
    name: Smoke Test (${{ matrix.install-method }})
    runs-on: ubuntu-latest
    strategy:
      matrix:
        install-method: [legacy, recommended]
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'

      - name: Install tools
        run: |
          # kubectl
          curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
          chmod +x kubectl && sudo mv kubectl /usr/local/bin/

          # yq
          wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O yq
          chmod +x yq && sudo mv yq /usr/local/bin/

          # helm
          curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash

          # clusterctl
          curl -L https://github.com/kubernetes-sigs/cluster-api/releases/latest/download/clusterctl-linux-amd64 -o clusterctl
          chmod +x clusterctl && sudo mv clusterctl /usr/local/bin/

      - name: Build Docker image
        run: |
          make docker-build
          # docker-build produces an arch-suffixed tag; re-tag it to the plain
          # name the charts reference below.
          docker tag ${CONTROLLER_IMG}-amd64:${TAG} ${CONTROLLER_IMG}:${TAG}

      - name: Build charts
        run: |
          make release-chart
          # Extract the chart version the Makefile computed so later steps can
          # locate the packaged .tgz files.
          echo "HELM_CHART_TAG=$(make -s -f Makefile -p | grep '^HELM_CHART_TAG :=' | cut -d' ' -f3)" >> $GITHUB_ENV

      - name: Create kind cluster
        run: |
          chmod +x ./hack/ensure-kind.sh
          ./hack/ensure-kind.sh

          # Mount the host Docker socket so the CAPD provider can manage
          # workload-cluster containers from inside the management cluster.
          cat <<EOF > /tmp/kind-config.yaml
          kind: Cluster
          apiVersion: kind.x-k8s.io/v1alpha4
          networking:
            ipFamily: ipv4
          nodes:
          - role: control-plane
            extraMounts:
            - hostPath: /var/run/docker.sock
              containerPath: /var/run/docker.sock
          containerdConfigPatches:
          - |-
            [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
              endpoint = ["https://mirror.gcr.io", "https://registry-1.docker.io"]
          EOF

          kind create cluster --name ${KIND_CLUSTER_NAME} --config /tmp/kind-config.yaml --wait 5m
          kind load docker-image ${CONTROLLER_IMG}:${TAG} --name ${KIND_CLUSTER_NAME}

      - name: Install cert-manager
        run: |
          helm repo add jetstack https://charts.jetstack.io
          helm repo update
          helm install cert-manager jetstack/cert-manager \
            --namespace cert-manager \
            --create-namespace \
            --set installCRDs=true \
            --wait \
            --timeout 5m

      - name: Install Cluster API Operator (Recommended)
        if: matrix.install-method == 'recommended'
        run: |
          CHART_PACKAGE="out/package/cluster-api-operator-${HELM_CHART_TAG}.tgz"
          helm install capi-operator "$CHART_PACKAGE" \
            --create-namespace \
            -n capi-operator-system \
            --set image.manager.repository=${CONTROLLER_IMG} \
            --set image.manager.tag=${TAG} \
            --set image.manager.pullPolicy=IfNotPresent \
            --wait \
            --timeout 90s

      - name: Prepare providers values
        run: |
          cat <<EOF > /tmp/providers-values.yaml
          core:
            cluster-api:
              namespace: capi-system
          bootstrap:
            kubeadm:
              namespace: capi-kubeadm-bootstrap-system
          controlPlane:
            kubeadm:
              namespace: capi-kubeadm-control-plane-system
          infrastructure:
            docker:
              namespace: capd-system
          manager:
            featureGates:
              core:
                ClusterTopology: true
                ClusterResourceSet: true
                MachinePool: true
              kubeadm:
                ClusterTopology: true
                MachinePool: true
              docker:
                ClusterTopology: true
          EOF

          # Add cluster-api-operator configuration for legacy installation
          if [ "${{ matrix.install-method }}" = "legacy" ]; then
            cat <<EOF >> /tmp/providers-values.yaml
          cluster-api-operator:
            install: true
            image:
              manager:
                repository: ${CONTROLLER_IMG}
                tag: ${TAG}
                pullPolicy: IfNotPresent
          EOF
          fi

      - name: Deploy providers (Recommended)
        if: matrix.install-method == 'recommended'
        run: |
          PROVIDERS_CHART_PACKAGE="out/package/cluster-api-operator-providers-${HELM_CHART_TAG}.tgz"
          helm install capi-providers "$PROVIDERS_CHART_PACKAGE" \
            -f /tmp/providers-values.yaml \
            --set cluster-api-operator.install=false \
            --set enableHelmHook=false \
            --wait

      - name: Deploy providers (Legacy)
        if: matrix.install-method == 'legacy'
        run: |
          PROVIDERS_CHART_PACKAGE="out/package/cluster-api-operator-providers-${HELM_CHART_TAG}.tgz"
          helm install capi-providers "$PROVIDERS_CHART_PACKAGE" \
            --create-namespace \
            -n capi-operator-system \
            -f /tmp/providers-values.yaml \
            --wait \
            --timeout 300s

      - name: Wait for providers
        run: |
          kubectl wait --for=condition=Ready --timeout=300s -n capi-system coreprovider/cluster-api
          kubectl wait --for=condition=Ready --timeout=300s -n capi-kubeadm-bootstrap-system bootstrapprovider/kubeadm
          kubectl wait --for=condition=Ready --timeout=300s -n capi-kubeadm-control-plane-system controlplaneprovider/kubeadm
          kubectl wait --for=condition=Ready --timeout=300s -n capd-system infrastructureprovider/docker

          kubectl wait --for=condition=Available --timeout=300s -n capi-system deployment/capi-controller-manager
          kubectl wait --for=condition=Available --timeout=300s -n capi-kubeadm-bootstrap-system deployment/capi-kubeadm-bootstrap-controller-manager
          kubectl wait --for=condition=Available --timeout=300s -n capi-kubeadm-control-plane-system deployment/capi-kubeadm-control-plane-controller-manager
          kubectl wait --for=condition=Available --timeout=300s -n capd-system deployment/capd-controller-manager

      - name: Verify providers
        run: |
          kubectl get coreprovider,bootstrapprovider,controlplaneprovider,infrastructureprovider -A
          kubectl get pods -A | grep -E "(capi-|capd-)"

      - name: Create workload cluster
        run: |
          clusterctl generate cluster ${CLUSTER_NAME} \
            --infrastructure docker:v1.10.0 \
            --flavor development \
            --kubernetes-version ${KUBERNETES_VERSION} \
            --control-plane-machine-count=1 \
            --worker-machine-count=2 \
            > capi-quickstart.yaml

          kubectl apply -f capi-quickstart.yaml

      - name: Get workload cluster kubeconfig
        run: |
          timeout 300s bash -c "until kubectl get secret ${CLUSTER_NAME}-kubeconfig -n default &>/dev/null; do sleep 2; done"
          clusterctl get kubeconfig ${CLUSTER_NAME} --namespace default > ${CLUSTER_NAME}.kubeconfig
          # Subsequent steps talk to the workload cluster by default.
          echo "KUBECONFIG=$(pwd)/${CLUSTER_NAME}.kubeconfig" >> $GITHUB_ENV

      - name: Wait for workload cluster API server
        run: |
          timeout 300s bash -c "until kubectl cluster-info &>/dev/null; do sleep 5; done"

      - name: Install CNI
        run: |
          kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/calico.yaml
          kubectl wait --for=condition=Ready --timeout=300s pods -n tigera-operator -l app.kubernetes.io/name=tigera-operator || true
          kubectl wait --for=condition=Ready --timeout=300s pods -n calico-system --all || true

      - name: Wait for nodes
        run: |
          kubectl wait --for=condition=Ready --timeout=300s nodes --all
          kubectl get nodes -o wide

      - name: Verify cluster
        run: |
          kubectl get po -A
          kubectl wait --for=condition=Ready --timeout=300s pods -n kube-system -l k8s-app=kube-proxy
          kubectl wait --for=condition=Ready --timeout=300s pods -n kube-system -l component=kube-apiserver
          kubectl wait --for=condition=Ready --timeout=300s pods -n kube-system -l component=kube-controller-manager
          kubectl wait --for=condition=Ready --timeout=300s pods -n kube-system -l component=kube-scheduler

      - name: Collect logs on failure
        if: failure()
        run: |
          echo "=== Installation Method: ${{ matrix.install-method }} ==="
          echo "=== Recent Events ==="
          kubectl get events -A --sort-by='.lastTimestamp' | tail -50

          echo -e "\n=== Provider Logs ==="
          kubectl logs -n capi-operator-system deployment/capi-operator-cluster-api-operator --tail=50 || true
          kubectl logs -n capi-system deployment/capi-controller-manager --tail=50 || true
          kubectl logs -n capd-system deployment/capd-controller-manager --tail=50 || true

          echo -e "\n=== Cluster Resources ==="
          kubectl get cluster,dockercluster,kubeadmcontrolplane,machine,dockermachine -A -o wide || true

          echo -e "\n=== Failed Pods ==="
          kubectl get pods -A | grep -v Running | grep -v Completed || true

          if [ -f "${CLUSTER_NAME}.kubeconfig" ]; then
            export KUBECONFIG=$(pwd)/${CLUSTER_NAME}.kubeconfig
            echo -e "\n=== Workload Cluster Status ==="
            kubectl get nodes -o wide || true
            kubectl get pods -A --field-selector=status.phase!=Running,status.phase!=Succeeded || true
          fi

      - name: Cleanup
        if: always()
        run: |
          # CAPD workload-cluster nodes are kind-compatible containers, so
          # `kind delete cluster` removes them too.
          kind delete cluster --name ${CLUSTER_NAME} || true
          kind delete cluster --name ${KIND_CLUSTER_NAME} || true