@@ -102,7 +102,7 @@ setup() {
     export CCM_COUNT="${CCM_COUNT:-1}"
     export WORKER_MACHINE_COUNT="${WORKER_MACHINE_COUNT:-2}"
     export EXP_CLUSTER_RESOURCE_SET="true"
-
+
     # TODO figure out a better way to account for expected Windows node count
     if [[ -n "${TEST_WINDOWS:-}" ]]; then
         export WINDOWS_WORKER_MACHINE_COUNT="${WINDOWS_WORKER_MACHINE_COUNT:-2}"
@@ -142,11 +142,11 @@ create_cluster() {
 # and any statement must be idempotent so that subsequent retry attempts can make forward progress.
 get_cidrs() {
     # Get cluster CIDRs from Cluster object
-    CIDR0=$(${KUBECTL} --kubeconfig "${REPO_ROOT}/${KIND_CLUSTER_NAME}.kubeconfig" get cluster "${CLUSTER_NAME}" -o=jsonpath='{.spec.clusterNetwork.pods.cidrBlocks[0]}')
+    CIDR0=$(${KUBECTL} --kubeconfig "${REPO_ROOT}/${KIND_CLUSTER_NAME}.kubeconfig" get cluster "${CLUSTER_NAME}" -o=jsonpath='{.spec.clusterNetwork.pods.cidrBlocks[0]}') || return 1
     export CIDR0
-    CIDR_LENGTH=$(${KUBECTL} --kubeconfig "${REPO_ROOT}/${KIND_CLUSTER_NAME}.kubeconfig" get cluster "${CLUSTER_NAME}" -o=jsonpath='{.spec.clusterNetwork.pods.cidrBlocks}' | jq '. | length')
+    CIDR_LENGTH=$(${KUBECTL} --kubeconfig "${REPO_ROOT}/${KIND_CLUSTER_NAME}.kubeconfig" get cluster "${CLUSTER_NAME}" -o=jsonpath='{.spec.clusterNetwork.pods.cidrBlocks}' | jq '. | length') || return 1
     if [[ "${CIDR_LENGTH}" == "2" ]]; then
-        CIDR1=$(${KUBECTL} get cluster --kubeconfig "${REPO_ROOT}/${KIND_CLUSTER_NAME}.kubeconfig" "${CLUSTER_NAME}" -o=jsonpath='{.spec.clusterNetwork.pods.cidrBlocks[1]}')
+        CIDR1=$(${KUBECTL} get cluster --kubeconfig "${REPO_ROOT}/${KIND_CLUSTER_NAME}.kubeconfig" "${CLUSTER_NAME}" -o=jsonpath='{.spec.clusterNetwork.pods.cidrBlocks[1]}') || return 1
         export CIDR1
     fi
 }
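
The `|| return 1` guards give get_cidrs a meaningful exit status, so the retry loop named in the script's comments can actually detect failure. A minimal caller-side sketch, following the `until ... sleep 5` pattern those comments describe (the attempt budget is an illustrative assumption, not part of this change):

    # retry until the Cluster object is queryable; every statement in
    # get_cidrs is idempotent, so repeated attempts are safe
    attempts=0
    until get_cidrs; do
        attempts=$((attempts + 1))
        if [[ "${attempts}" -ge 24 ]]; then
            echo "timed out waiting for cluster CIDRs" >&2
            exit 1
        fi
        sleep 5
    done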
@@ -156,7 +156,7 @@ get_cidrs() {
 # retry it using a `until get_cloud_provider; do sleep 5; done` pattern;
 # and any statement must be idempotent so that subsequent retry attempts can make forward progress.
 get_cloud_provider() {
-    CLOUD_PROVIDER=$("${KUBECTL}" --kubeconfig "${REPO_ROOT}/${KIND_CLUSTER_NAME}.kubeconfig" get kubeadmcontrolplane -l cluster.x-k8s.io/cluster-name="${CLUSTER_NAME}" -o=jsonpath='{.items[0].spec.kubeadmConfigSpec.clusterConfiguration.controllerManager.extraArgs.cloud-provider}')
+    CLOUD_PROVIDER=$("${KUBECTL}" --kubeconfig "${REPO_ROOT}/${KIND_CLUSTER_NAME}.kubeconfig" get kubeadmcontrolplane -l cluster.x-k8s.io/cluster-name="${CLUSTER_NAME}" -o=jsonpath='{.items[0].spec.kubeadmConfigSpec.clusterConfiguration.controllerManager.extraArgs.cloud-provider}') || return 1
     if [[ "${CLOUD_PROVIDER:-}" = "azure" ]]; then
         IN_TREE="true"
         export IN_TREE
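
These guards work because a bash assignment of the form `VAR=$(cmd)` takes its exit status from `cmd`: without `|| return 1`, a failed kubectl call would leave the variable empty while the function still returned 0, and the retry loop would never re-run it. A self-contained illustration with hypothetical function names (run in a shell without `set -e`, which would change the unguarded behavior):

    # unguarded: the function keeps going and exits 0 despite the failure
    f_unguarded() { OUT=$(false); echo "still runs, OUT is empty"; }
    # guarded: the function stops and reports failure, so callers can retry
    f_guarded() { OUT=$(false) || return 1; echo "never reached"; }

    f_unguarded
    f_guarded || echo "f_guarded failed as expected"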
@@ -171,9 +171,11 @@ install_calico() {
     # Copy the kubeadm configmap to the calico-system namespace.
     # This is a workaround needed for the calico-node-windows daemonset
     # to be able to run in the calico-system namespace.
-    "${KUBECTL}" create namespace calico-system --dry-run=client -o yaml | kubectl apply -f -
+    # First, validate that the kubeadm-config configmap has been created.
+    "${KUBECTL}" get configmap kubeadm-config --namespace=kube-system -o yaml || return 1
+    "${KUBECTL}" create namespace calico-system --dry-run=client -o yaml | kubectl apply -f - || return 1
     if ! "${KUBECTL}" get configmap kubeadm-config --namespace=calico-system; then
-        "${KUBECTL}" get configmap kubeadm-config --namespace=kube-system -o yaml | sed 's/namespace: kube-system/namespace: calico-system/' | "${KUBECTL}" apply -f -
+        "${KUBECTL}" get configmap kubeadm-config --namespace=kube-system -o yaml | sed 's/namespace: kube-system/namespace: calico-system/' | "${KUBECTL}" apply -f - || return 1
     fi
     # install Calico CNI
     echo "Installing Calico CNI via helm"
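
The namespace creation above relies on the `create --dry-run=client -o yaml | apply` idiom, which keeps the statement idempotent: plain `create` would fail on a retry once the namespace exists, while `apply` succeeds either way. A generic sketch of the idiom, using a hypothetical namespace name:

    # render the Namespace manifest locally, then apply it; this succeeds
    # whether or not the namespace already exists, so retries make progress
    kubectl create namespace example-ns --dry-run=client -o yaml | kubectl apply -f -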
@@ -190,7 +192,7 @@ install_calico() {
         CALICO_VALUES_FILE="${REPO_ROOT}/templates/addons/calico/values.yaml"
         CIDR_STRING_VALUES="installation.calicoNetwork.ipPools[0].cidr=${CIDR0}"
     fi
-    "${HELM}" upgrade calico --install --repo https://docs.tigera.io/calico/charts tigera-operator -f "${CALICO_VALUES_FILE}" --set-string "${CIDR_STRING_VALUES}" --namespace calico-system
+    "${HELM}" upgrade calico --install --repo https://docs.tigera.io/calico/charts tigera-operator -f "${CALICO_VALUES_FILE}" --set-string "${CIDR_STRING_VALUES}" --namespace calico-system || return 1
 }
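
On the helm side, `upgrade --install` is the analogous idempotent form: it installs the release when absent and upgrades it in place otherwise, so the guarded statement can be re-run safely. A read-only verification sketch (an optional follow-up, not part of the change):

    # confirm the release landed and the operator's pods are scheduled
    "${HELM}" status calico --namespace calico-system
    "${KUBECTL}" get pods --namespace calico-system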

 # install_cloud_provider_azure installs OOT cloud-provider-azure componentry onto the Cluster.
@@ -205,7 +207,7 @@ install_cloud_provider_azure() {
         CLOUD_CONFIG=""
         CONFIG_SECRET_NAME="azure-cloud-provider"
         ENABLE_DYNAMIC_RELOADING=true
-        copy_secret
+        copy_secret || return 1
     fi

     CCM_CLUSTER_CIDR="${CIDR0}"
@@ -223,7 +225,7 @@ install_cloud_provider_azure() {
         --set cloudControllerManager.cloudConfig="${CLOUD_CONFIG}" \
         --set cloudControllerManager.cloudConfigSecretName="${CONFIG_SECRET_NAME}" \
         --set cloudControllerManager.logVerbosity="${CCM_LOG_VERBOSITY}" \
-        --set-string cloudControllerManager.clusterCIDR="${CCM_CLUSTER_CIDR}" "${CCM_IMG_ARGS[@]}"
+        --set-string cloudControllerManager.clusterCIDR="${CCM_CLUSTER_CIDR}" "${CCM_IMG_ARGS[@]}" || return 1
 }

 # wait_for_nodes returns when all nodes in the workload cluster are Ready.
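
The body of wait_for_nodes is not shown in this diff; a minimal sketch of such a wait, assuming `kubectl wait` semantics (the 15-minute timeout is an illustrative assumption):

    # block until every node reports Ready, or give up after 15 minutes
    "${KUBECTL}" wait --for=condition=Ready node --all --timeout=900s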
@@ -291,11 +293,11 @@ install_addons() {

 copy_secret() {
     # point at the management cluster
-    "${KUBECTL}" --kubeconfig "${REPO_ROOT}/${KIND_CLUSTER_NAME}.kubeconfig" get secret "${CLUSTER_NAME}-control-plane-azure-json" -o jsonpath='{.data.control-plane-azure\.json}' | base64 --decode > azure_json
+    "${KUBECTL}" --kubeconfig "${REPO_ROOT}/${KIND_CLUSTER_NAME}.kubeconfig" get secret "${CLUSTER_NAME}-control-plane-azure-json" -o jsonpath='{.data.control-plane-azure\.json}' | base64 --decode > azure_json || return 1

     # create the secret on the workload cluster
     "${KUBECTL}" create secret generic "${CONFIG_SECRET_NAME}" -n kube-system \
-        --from-file=cloud-config=azure_json
+        --from-file=cloud-config=azure_json || return 1
     rm azure_json
 }
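
One caveat with the guarded pipeline in copy_secret: `cmd1 | cmd2 || return 1` tests only the last command in the pipe, so a failed `kubectl get secret` would go unnoticed whenever `base64 --decode` still exits 0 on the resulting empty input. Unless the script already sets it near the top (an assumption worth checking), `pipefail` closes that gap:

    # with pipefail, a failure anywhere in the pipeline trips the
    # || return 1 guard, not just a failure of the final command
    set -o pipefail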