Skip to content

Commit 302f0f8

Browse files
Merge pull request #209 from jacobweinstock/use-tinkerbell-tinkerbell
Update CAPT playground to use tinkerbell/tinkerbell: ## Description <!--- Please describe what this PR is going to change --> This deploys the playground using the new single binary Tinkerbell stack and its Helm chart. This works with a locally built CAPT that has been updated to also use the latest Tinkerbell APIs. ## Why is this needed <!--- Link to issue you have raised --> Fixes: # ## How Has This Been Tested? <!--- Please describe in detail how you tested your changes. --> <!--- Include details of your testing environment, and the tests you ran to --> <!--- see how your change affects other areas of the code, etc. --> ## How are existing users impacted? What migration steps/scripts do we need? <!--- Fixes a bug, unblocks installation, removes a component of the stack etc --> <!--- Requires a DB migration script, etc. --> ## Checklist: I have: - [ ] updated the documentation and/or roadmap (if required) - [ ] added unit or e2e tests - [ ] provided instructions on how to upgrade
2 parents c7bb9bc + 11f56f3 commit 302f0f8

12 files changed: +252 −54 lines changed

capt/Taskfile.yaml

Lines changed: 15 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ includes:
55
delete: ./tasks/Taskfile-delete.yaml
66
vbmc: ./tasks/Taskfile-vbmc.yaml
77
capi: ./tasks/Taskfile-capi.yaml
8+
capi-pivot: ./tasks/Taskfile-capi-pivot.yaml
89

910
vars:
1011
OUTPUT_DIR:
@@ -14,6 +15,8 @@ vars:
1415
STATE_FILE: ".state"
1516
STATE_FILE_FQ_PATH:
1617
sh: echo {{joinPath .CURR_DIR .STATE_FILE}}
18+
CONFIG_FILE_FQ_PATH:
19+
sh: echo {{joinPath .CURR_DIR "config.yaml"}}
1720

1821
tasks:
1922
create-playground:
@@ -108,20 +111,27 @@ tasks:
108111
echo The workload cluster kubeconfig is located at: {{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig
109112
echo
110113
echo 1. Watch and wait for the first control plane node to be provisioned successfully: STATE_SUCCESS
111-
echo "KUBECONFIG={{.KIND_KUBECONFIG}} kubectl get workflows -n {{.NAMESPACE}} -w"
114+
echo "KUBECONFIG={{.KIND_KUBECONFIG}} kubectl get workflows -n {{.NAMESPACE}} -o wide -w"
112115
echo
113116
echo
114117
echo 2. Watch and wait for the Kubernetes API server to be ready and responding:
115-
echo "until KUBECONFIG={{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig kubectl get node; do echo 'Waiting for Kube API server to respond...'; sleep 5; done"
118+
echo "until KUBECONFIG={{.CURR_DIR}}/{{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig kubectl get node; do echo 'Waiting for Kube API server to respond...'; sleep 5; done"
116119
echo
117120
echo 3. Deploy a CNI
118121
echo Cilium
119-
echo "KUBECONFIG={{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig cilium install"
122+
echo "KUBECONFIG={{.CURR_DIR}}/{{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig cilium install"
120123
echo or KUBEROUTER
121-
echo "KUBECONFIG={{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml"
124+
echo "KUBECONFIG={{.CURR_DIR}}/{{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml"
122125
echo
123126
echo 4. Watch and wait for all nodes to join the cluster and be ready:
124-
echo "KUBECONFIG={{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig kubectl get nodes -w"
127+
echo "KUBECONFIG={{.CURR_DIR}}/{{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig kubectl get nodes -w"
125128
- touch {{.OUTPUT_DIR}}/.next-steps-displayed
126129
status:
127130
- echo ;[ -f {{.OUTPUT_DIR}}/.next-steps-displayed ]
131+
132+
pivot:
133+
silent: true
134+
summary: |
135+
Pivot the workload cluster to the permanent management cluster.
136+
cmds:
137+
- task: capi-pivot:ordered

capt/config.yaml

Lines changed: 10 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,23 +1,26 @@
11
---
22
clusterName: "capt-playground"
33
outputDir: "output"
4-
namespace: "tink"
4+
namespace: "tinkerbell"
55
counts:
66
controlPlanes: 1
77
workers: 1
88
spares: 1
99
versions:
10-
capt: v0.6.1
11-
chart: 0.6.1
12-
kube: v1.29.4
10+
capt: v0.6.5
11+
chart: v0.20.1
12+
kube: v1.31.3
1313
os: 20.04
14-
kubevip: 0.8.7
14+
kubevip: 0.9.1
1515
capt:
1616
providerRepository: "https://github.com/tinkerbell/cluster-api-provider-tinkerbell/releases"
1717
#providerRepository: "/home/tink/repos/tinkerbell/cluster-api-provider-tinkerbell/out/release/infrastructure-tinkerbell"
1818
chart:
19-
location: "oci://ghcr.io/tinkerbell/charts/stack"
20-
#location: "/home/tink/repos/tinkerbell/charts/tinkerbell/stack"
19+
location: "oci://ghcr.io/tinkerbell/charts/tinkerbell"
20+
#location: "/home/tink/repos/tinkerbell/tinkerbell/helm/tinkerbell"
21+
extraVars:
22+
# - deployment.imagePullPolicy=Always
23+
- optional.hookos.downloadURL=https://github.com/tinkerbell/hook/releases/download/latest
2124
os:
2225
registry: ghcr.io/tinkerbell/cluster-api-provider-tinkerbell
2326
distro: ubuntu

capt/scripts/generate_state.sh

Lines changed: 43 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -5,22 +5,32 @@
55
cat <<EOF >/dev/null
66
---
77
clusterName: "capt-playground"
8-
outputDir: "/home/tink/repos/tinkerbell/cluster-api-provider-tinkerbell/playground/output"
8+
outputDir: "/home/tink/repos/tinkerbell/playground/capt/output"
99
namespace: "tink"
1010
counts:
1111
controlPlanes: 1
1212
workers: 1
1313
spares: 1
1414
versions:
15-
capt: 0.5.3
16-
chart: 0.5.0
17-
kube: v1.28.8
18-
os: 22.04
15+
capt: v0.6.5
16+
chart: v0.19.0
17+
kube: v1.29.4
18+
os: 20.04
19+
kubevip: 0.9.1
20+
capt:
21+
providerRepository: "https://github.com/tinkerbell/cluster-api-provider-tinkerbell/releases"
22+
chart:
23+
location: "oci://ghcr.io/tinkerbell/charts/tinkerbell"
24+
extraVars:
25+
- deployment.image=custom.registry/tinkerbell/tinkerbell
26+
- deployment.imageTag=v0.19.1
27+
- deployment.agentImageTag=latest
1928
os:
20-
registry: reg.weinstocklabs.com/tinkerbell/cluster-api-provider-tinkerbell
29+
registry: ghcr.io/tinkerbell/cluster-api-provider-tinkerbell
2130
distro: ubuntu
22-
sshKey: ""
23-
version: "2204"
31+
sshKey: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIH9a2GwjgVfnpjOvIqNuJTwazS3tqJ9xzcepXzKMccVf capt-playground"
32+
sshKeyAutoGenerated: true
33+
version: "2004"
2434
vm:
2535
baseName: "node"
2636
cpusPerVM: 2
@@ -51,23 +61,24 @@ vm:
5161
gateway: 172.18.0.1
5262
virtualBMC:
5363
containerName: "virtualbmc"
54-
image: ghcr.io/jacobweinstock/virtualbmc
64+
image: ghcr.io/jacobweinstock/virtualbmc:latest
5565
user: "root"
5666
pass: "calvin"
5767
ip: 172.18.0.3
68+
bootMode: netboot
5869
totalNodes: 3
5970
kind:
60-
kubeconfig: /home/tink/repos/tinkerbell/cluster-api-provider-tinkerbell/playground/output/kind.kubeconfig
71+
kubeconfig: /home/tink/repos/tinkerbell/playground/capt/output/kind.kubeconfig
6172
gatewayIP: 172.18.0.1
6273
nodeIPBase: 172.18.10.20
63-
bridgeName: br-d086780dac6b
74+
bridgeName: br-3d1549d4f99f
6475
tinkerbell:
6576
vip: 172.18.10.74
77+
hookosVip: 172.18.10.73
6678
cluster:
6779
controlPlane:
6880
vip: 172.18.10.75
6981
podCIDR: 172.100.0.0/16
70-
bootMode: netboot
7182
EOF
7283

7384
set -euo pipefail
@@ -128,6 +139,26 @@ function main() {
128139
os_version=$(yq eval '.versions.os' "$state_file")
129140
os_version=$(echo "$os_version" | tr -d '.')
130141
yq e -i '.os.version = "'$os_version'"' "$state_file"
142+
143+
# if the sshKey is not set, generate a default one
144+
ssh_key=$(yq eval '.os.sshKey' "$config_file")
145+
if [[ -z $ssh_key ]]; then
146+
rm -rf "$output_dir"/capt-ssh-key* >>"$output_dir"/error.log 2>&1
147+
ssh-keygen -t ed25519 -f "$output_dir"/capt-ssh-key -N "" -C "capt-playground" >>"$output_dir"/error.log 2>&1
148+
if [[ $? -ne 0 ]]; then
149+
echo "Error generating SSH key. Check error.log for details." >>"$output_dir"/error.log 2>&1
150+
exit 1
151+
fi
152+
ssh_key=$(cat "$output_dir/capt-ssh-key.pub" | tr -d '\n')
153+
if [[ -z $ssh_key ]]; then
154+
echo "Error reading SSH key from $output_dir/capt-ssh-key.pub" >>"$output_dir"/error.log 2>&1
155+
exit 1
156+
fi
157+
yq e -i ".os.sshKey = \"$ssh_key\"" "$state_file"
158+
yq e -i ".os.sshKeyAutoGenerated = true" "$state_file"
159+
else
160+
yq e -i ".os.sshKeyAutoGenerated = false" "$state_file"
161+
fi
131162
}
132163

133164
main "$@"

capt/scripts/update_state.sh

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,10 @@ function main() {
3030
t_lb=$(echo "$NODE_IP_BASE" | awk -F"." '{print $1"."$2"."$3}').$((IP_LAST_OCTET + idx + offset))
3131
yq e -i '.tinkerbell.vip = "'$t_lb'"' "$STATE_FILE"
3232

33+
# set the Tinkerbell HookOS VIP
34+
hookos_vip=$(echo "$NODE_IP_BASE" | awk -F"." '{print $1"."$2"."$3}').$((IP_LAST_OCTET + idx + offset - 1))
35+
yq e -i '.tinkerbell.hookosVip = "'$hookos_vip'"' "$STATE_FILE"
36+
3337
# set the cluster control plane load balancer IP (VIP)
3438
cp_lb=$(echo "$NODE_IP_BASE" | awk -F"." '{print $1"."$2"."$3}').$((IP_LAST_OCTET + idx + offset + 1))
3539
yq e -i '.cluster.controlPlane.vip = "'$cp_lb'"' "$STATE_FILE"

capt/tasks/Taskfile-capi-pivot.yaml

Lines changed: 135 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,135 @@
1+
version: "3"
2+
3+
includes:
4+
delete: ./Taskfile-delete.yaml
5+
6+
vars:
7+
OUTPUT_DIR:
8+
sh: yq eval '.outputDir' config.yaml
9+
CURR_DIR:
10+
sh: pwd
11+
STATE_FILE: ".state"
12+
STATE_FILE_FQ_PATH:
13+
sh: echo {{joinPath .CURR_DIR .STATE_FILE}}
14+
CONFIG_FILE_FQ_PATH:
15+
sh: echo {{joinPath .CURR_DIR "config.yaml"}}
16+
CLUSTER_NAME:
17+
sh: yq eval '.clusterName' config.yaml
18+
MGMT_KUBECONFIG:
19+
sh: echo {{list .CLUSTER_NAME "kubeconfig" | join "." | joinPath .CURR_DIR .OUTPUT_DIR}}
20+
KIND_KUBECONFIG:
21+
sh: echo {{ joinPath .CURR_DIR .OUTPUT_DIR "kind.kubeconfig"}}
22+
23+
tasks:
24+
ordered:
25+
summary: |
26+
CAPI pivot tasks run in order of dependency.
27+
vars:
28+
KUBECONFIG: "{{.MGMT_KUBECONFIG}}"
29+
cmds:
30+
- task: deploy-tinkerbell-helm-chart
31+
- task: init
32+
- task: pivot
33+
- task: remove-kind-cluster
34+
35+
deploy-tinkerbell-helm-chart:
36+
run: once
37+
summary: |
38+
Deploy the Tinkerbell Helm chart.
39+
vars:
40+
KUBECONFIG: "{{.MGMT_KUBECONFIG}}"
41+
LB_IP:
42+
sh: yq eval '.tinkerbell.vip' {{.STATE_FILE_FQ_PATH}}
43+
LB_IP2:
44+
sh: yq eval '.tinkerbell.hookosVip' {{.STATE_FILE_FQ_PATH}}
45+
TRUSTED_PROXIES:
46+
sh: KUBECONFIG={{.KUBECONFIG}} kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}' | tr ' ' ','
47+
STACK_CHART_VERSION:
48+
sh: yq eval '.versions.chart' {{.STATE_FILE_FQ_PATH}}
49+
NAMESPACE:
50+
sh: yq eval '.namespace' {{.STATE_FILE_FQ_PATH}}
51+
LOCATION:
52+
sh: yq eval '.chart.location' {{.STATE_FILE_FQ_PATH}}
53+
CHART_NAME: tinkerbell
54+
BOOTMODE:
55+
sh: yq eval '.bootMode' {{.STATE_FILE_FQ_PATH}}
56+
GLOBAL_VARS:
57+
- trustedProxies={"{{.TRUSTED_PROXIES}}"}
58+
- publicIP={{.LB_IP}}
59+
- artifactsFileServer=http://{{.LB_IP2}}:7173
60+
ISO_VARS:
61+
- deployment.envs.smee.dhcpEnabled=false
62+
- deployment.envs.smee.isoUpstreamURL=http://{{.LB_IP2}}:7173/hook-latest-lts-x86_64-efi-initrd.iso
63+
- optional.hookos.extension=both
64+
EXTRA_VARS:
65+
sh: yq eval '.chart.extraVars | .[]' {{.STATE_FILE_FQ_PATH}} | xargs
66+
cmds:
67+
- KUBECONFIG="{{.MGMT_KUBECONFIG}}" helm upgrade --install {{.CHART_NAME}} {{.LOCATION}} --version "{{.STACK_CHART_VERSION}}" --create-namespace --namespace {{.NAMESPACE}} --wait {{range .GLOBAL_VARS}}--set "{{.}}" {{end}} {{- if eq .BOOTMODE "isoboot" }} {{- range .ISO_VARS }}--set "{{.}}" {{ end }} {{ end }} {{- if .EXTRA_VARS }} {{- range (splitList " " .EXTRA_VARS ) }}--set "{{.}}" {{ end }} {{ end }}
68+
status:
69+
- helm_status=$(KUBECONFIG="{{.KUBECONFIG}}" helm status -n {{.NAMESPACE}} {{.CHART_NAME}} -o yaml | yq .info.status); [[ "$helm_status" == "deployed" ]]
70+
71+
init:
72+
run: once
73+
deps: [deploy-tinkerbell-helm-chart]
74+
summary: |
75+
Initialize the cluster.
76+
env:
77+
TINKERBELL_IP:
78+
sh: yq eval '.tinkerbell.vip' {{.STATE_FILE_FQ_PATH}}
79+
CLUSTERCTL_DISABLE_VERSIONCHECK: true
80+
XDG_CONFIG_HOME: "{{.OUTPUT_DIR}}/xdg"
81+
XDG_CONFIG_DIRS: "{{.OUTPUT_DIR}}/xdg"
82+
XDG_STATE_HOME: "{{.OUTPUT_DIR}}/xdg"
83+
XDG_CACHE_HOME: "{{.OUTPUT_DIR}}/xdg"
84+
XDG_RUNTIME_DIR: "{{.OUTPUT_DIR}}/xdg"
85+
XDG_DATA_HOME: "{{.OUTPUT_DIR}}/xdg"
86+
XDG_DATA_DIRS: "{{.OUTPUT_DIR}}/xdg"
87+
vars:
88+
OUTPUT_DIR:
89+
sh: echo $(yq eval '.outputDir' config.yaml)
90+
KIND_GATEWAY_IP:
91+
sh: yq eval '.kind.gatewayIP' {{.STATE_FILE_FQ_PATH}}
92+
KUBECONFIG: "{{.MGMT_KUBECONFIG}}"
93+
cmds:
94+
- KUBECONFIG="{{.KUBECONFIG}}" clusterctl --config {{.OUTPUT_DIR}}/clusterctl.yaml init --infrastructure tinkerbell
95+
status:
96+
- expected=1; got=$(KUBECONFIG="{{.KUBECONFIG}}" kubectl get pods -n capt-system |grep -ce "capt-controller"); [[ "$got" == "$expected" ]]
97+
98+
pivot:
99+
run: once
100+
deps: [init]
101+
summary: |
102+
Pivot the workload cluster (the initial mgmt cluster) to the permanent management cluster.
103+
env:
104+
CLUSTERCTL_DISABLE_VERSIONCHECK: true
105+
XDG_CONFIG_HOME: "{{.OUTPUT_DIR}}/xdg"
106+
XDG_CONFIG_DIRS: "{{.OUTPUT_DIR}}/xdg"
107+
XDG_STATE_HOME: "{{.OUTPUT_DIR}}/xdg"
108+
XDG_CACHE_HOME: "{{.OUTPUT_DIR}}/xdg"
109+
XDG_RUNTIME_DIR: "{{.OUTPUT_DIR}}/xdg"
110+
XDG_DATA_HOME: "{{.OUTPUT_DIR}}/xdg"
111+
XDG_DATA_DIRS: "{{.OUTPUT_DIR}}/xdg"
112+
vars:
113+
OUTPUT_DIR:
114+
sh: echo $(yq eval '.outputDir' config.yaml)
115+
NAMESPACE:
116+
sh: yq eval '.namespace' {{.STATE_FILE_FQ_PATH}}
117+
cmds:
118+
- KUBECONFIG="{{.KIND_KUBECONFIG}}" clusterctl move --to-kubeconfig="{{.MGMT_KUBECONFIG}}" --config {{.OUTPUT_DIR}}/clusterctl.yaml --kubeconfig "{{.KIND_KUBECONFIG}}" -n {{.NAMESPACE}}
119+
status:
120+
- expected=1; result=$(KUBECONFIG="{{.KIND_KUBECONFIG}}" kubectl get hw,machine.bmc -A | grep -i -e "hardware" -e "machine" && echo $? || echo $?); [[ "$result" == "$expected" ]]
121+
- KUBECONFIG="{{.MGMT_KUBECONFIG}}" kubectl get hw,machine.bmc -A | grep -i -e "hardware" -e "machine"
122+
123+
prompt-remove-kind-cluster:
124+
deps: [pivot]
125+
prompt: Should the KinD cluster be deleted? Press `y` to delete the KinD cluster. Press `n` to exit.
126+
cmds:
127+
- echo 'Deleting the KinD cluster...'
128+
129+
remove-kind-cluster:
130+
run: once
131+
deps: [prompt-remove-kind-cluster]
132+
summary: |
133+
Remove the kind cluster.
134+
cmds:
135+
- task: delete:kind-cluster

capt/tasks/Taskfile-capi.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -129,7 +129,7 @@ tasks:
129129
BOOTMODE:
130130
sh: yq eval '.bootMode' {{.STATE_FILE_FQ_PATH}}
131131
KUSTOMIZE_FILE:
132-
sh: "[[ {{.BOOTMODE}} == 'iso' ]] && echo kustomization-iso.tmpl || echo kustomization-netboot.tmpl"
132+
sh: "[[ {{.BOOTMODE}} == 'isoboot' ]] && echo kustomization-iso.tmpl || echo kustomization-netboot.tmpl"
133133
sources:
134134
- config.yaml
135135
generates:

capt/tasks/Taskfile-create.yaml

Lines changed: 14 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -105,31 +105,33 @@ tasks:
105105
sh: yq eval '.kind.kubeconfig' {{.STATE_FILE_FQ_PATH}}
106106
LB_IP:
107107
sh: yq eval '.tinkerbell.vip' {{.STATE_FILE_FQ_PATH}}
108+
LB_IP2:
109+
sh: yq eval '.tinkerbell.hookosVip' {{.STATE_FILE_FQ_PATH}}
108110
TRUSTED_PROXIES:
109-
sh: KUBECONFIG={{.KUBECONFIG}} kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}'
111+
sh: KUBECONFIG={{.KUBECONFIG}} kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}' | tr ' ' ','
110112
STACK_CHART_VERSION:
111113
sh: yq eval '.versions.chart' {{.STATE_FILE_FQ_PATH}}
112114
NAMESPACE:
113115
sh: yq eval '.namespace' {{.STATE_FILE_FQ_PATH}}
114116
LOCATION:
115117
sh: yq eval '.chart.location' {{.STATE_FILE_FQ_PATH}}
116-
CHART_NAME: tink-stack
118+
CHART_NAME: tinkerbell
117119
BOOTMODE:
118120
sh: yq eval '.bootMode' {{.STATE_FILE_FQ_PATH}}
119121
GLOBAL_VARS:
120-
- global.trustedProxies={"{{.TRUSTED_PROXIES}}"}
121-
- global.publicIP={{.LB_IP}}
122+
- trustedProxies={"{{.TRUSTED_PROXIES}}"}
123+
- publicIP={{.LB_IP}}
124+
- artifactsFileServer=http://{{.LB_IP2}}:7173
122125
ISO_VARS:
123-
- stack.hook.extension=both
124-
- smee.iso.enabled=true
125-
- smee.iso.url=http://{{.LB_IP}}:8080/hook-latest-lts-x86_64-efi-initrd.iso
126-
- smee.iso.staticIPAMEnabled=true
127-
- smee.dhcp.enabled=false
128-
- stack.relay.enabled=false
126+
- deployment.envs.smee.dhcpEnabled=false
127+
- deployment.envs.smee.isoUpstreamURL=http://{{.LB_IP2}}:7173/hook-latest-lts-x86_64-efi-initrd.iso
128+
- optional.hookos.extension=both
129+
EXTRA_VARS:
130+
sh: yq eval '.chart.extraVars | .[]' {{.STATE_FILE_FQ_PATH}} | xargs
129131
cmds:
130-
- KUBECONFIG="{{.KUBECONFIG}}" helm install {{.CHART_NAME}} {{.LOCATION}} --version "{{.STACK_CHART_VERSION}}" --create-namespace --namespace {{.NAMESPACE}} --wait {{range .GLOBAL_VARS}}--set "{{.}}" {{end}} {{- if eq .BOOTMODE "iso" }} {{- range .ISO_VARS }}--set "{{.}}" {{end}} {{end}}
132+
- KUBECONFIG="{{.KUBECONFIG}}" helm upgrade --install {{.CHART_NAME}} {{.LOCATION}} --version "{{.STACK_CHART_VERSION}}" --create-namespace --namespace {{.NAMESPACE}} --wait {{range .GLOBAL_VARS}}--set "{{.}}" {{end}} {{- if eq .BOOTMODE "isoboot" }} {{- range .ISO_VARS }}--set "{{.}}" {{ end }} {{ end }} {{- if .EXTRA_VARS }} {{- range (splitList " " .EXTRA_VARS ) }}--set "{{.}}" {{ end }} {{ end }}
131133
status:
132-
- KUBECONFIG="{{.KUBECONFIG}}" helm list -n {{.NAMESPACE}} | grep -q {{.CHART_NAME}}
134+
- helm_status=$(KUBECONFIG="{{.KUBECONFIG}}" helm status -n {{.NAMESPACE}} {{.CHART_NAME}} -o yaml | yq .info.status); [[ "$helm_status" == "deployed" ]]
133135

134136
vms:
135137
run: once

0 commit comments

Comments (0)