Merged
20 changes: 15 additions & 5 deletions capt/Taskfile.yaml
@@ -5,6 +5,7 @@ includes:
delete: ./tasks/Taskfile-delete.yaml
vbmc: ./tasks/Taskfile-vbmc.yaml
capi: ./tasks/Taskfile-capi.yaml
+capi-pivot: ./tasks/Taskfile-capi-pivot.yaml

vars:
OUTPUT_DIR:
@@ -14,6 +15,8 @@ vars:
STATE_FILE: ".state"
STATE_FILE_FQ_PATH:
sh: echo {{joinPath .CURR_DIR .STATE_FILE}}
+CONFIG_FILE_FQ_PATH:
+sh: echo {{joinPath .CURR_DIR "config.yaml"}}

tasks:
create-playground:
@@ -108,20 +111,27 @@ tasks:
echo The workload cluster kubeconfig is located at: {{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig
echo
echo 1. Watch and wait for the first control plane node to be provisioned successfully: STATE_SUCCESS
echo "KUBECONFIG={{.KIND_KUBECONFIG}} kubectl get workflows -n {{.NAMESPACE}} -w"
echo "KUBECONFIG={{.KIND_KUBECONFIG}} kubectl get workflows -n {{.NAMESPACE}} -o wide -w"
echo
echo
echo 2. Watch and wait for the Kubernetes API server to be ready and responding:
echo "until KUBECONFIG={{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig kubectl get node; do echo 'Waiting for Kube API server to respond...'; sleep 5; done"
echo "until KUBECONFIG={{.CURR_DIR}}/{{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig kubectl get node; do echo 'Waiting for Kube API server to respond...'; sleep 5; done"
echo
echo 3. Deploy a CNI
echo Cilium
echo "KUBECONFIG={{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig cilium install"
echo "KUBECONFIG={{.CURR_DIR}}/{{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig cilium install"
echo or KUBEROUTER
echo "KUBECONFIG={{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml"
echo "KUBECONFIG={{.CURR_DIR}}/{{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml"
echo
echo 4. Watch and wait for all nodes to join the cluster and be ready:
echo "KUBECONFIG={{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig kubectl get nodes -w"
echo "KUBECONFIG={{.CURR_DIR}}/{{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig kubectl get nodes -w"
- touch {{.OUTPUT_DIR}}/.next-steps-displayed
status:
- echo ;[ -f {{.OUTPUT_DIR}}/.next-steps-displayed ]

+pivot:
+silent: true
+summary: |
+Pivot the workload cluster to the permanent management cluster.
+cmds:
+- task: capi-pivot:ordered
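
The new top-level `pivot` task is a thin wrapper that hands off to the `capi-pivot:ordered` task in the newly included Taskfile. A minimal usage sketch, assuming go-task v3 and that you run from the `capt` directory after the playground is up (the task names come from this Taskfile; nothing else is implied):

```bash
cd capt
task create-playground   # stand up KinD, Tinkerbell, and the workload cluster
task pivot               # runs capi-pivot:ordered: chart deploy, clusterctl init, move, KinD teardown
```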
17 changes: 10 additions & 7 deletions capt/config.yaml
@@ -1,23 +1,26 @@
---
clusterName: "capt-playground"
outputDir: "output"
namespace: "tink"
namespace: "tinkerbell"
counts:
controlPlanes: 1
workers: 1
spares: 1
versions:
-capt: v0.6.1
-chart: 0.6.1
-kube: v1.29.4
+capt: v0.6.5
+chart: v0.20.1
+kube: v1.31.3
os: 20.04
-kubevip: 0.8.7
+kubevip: 0.9.1
capt:
providerRepository: "https://github.com/tinkerbell/cluster-api-provider-tinkerbell/releases"
#providerRepository: "/home/tink/repos/tinkerbell/cluster-api-provider-tinkerbell/out/release/infrastructure-tinkerbell"
chart:
location: "oci://ghcr.io/tinkerbell/charts/stack"
#location: "/home/tink/repos/tinkerbell/charts/tinkerbell/stack"
location: "oci://ghcr.io/tinkerbell/charts/tinkerbell"
#location: "/home/tink/repos/tinkerbell/tinkerbell/helm/tinkerbell"
extraVars:
# - deployment.imagePullPolicy=Always
- optional.hookos.downloadURL=https://github.com/tinkerbell/hook/releases/download/latest
os:
registry: ghcr.io/tinkerbell/cluster-api-provider-tinkerbell
distro: ubuntu
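
These values are read at run time with yq v4, matching the `yq eval` calls used throughout the Taskfiles. A quick sketch of how the bumped versions and new chart location surface (expected output shown as comments, taken from the file above):

```bash
yq eval '.versions.chart' config.yaml          # v0.20.1
yq eval '.chart.location' config.yaml          # oci://ghcr.io/tinkerbell/charts/tinkerbell
yq eval '.chart.extraVars | .[]' config.yaml   # one --set value per line
```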
55 changes: 43 additions & 12 deletions capt/scripts/generate_state.sh
@@ -5,22 +5,32 @@
cat <<EOF >/dev/null
---
clusterName: "capt-playground"
outputDir: "/home/tink/repos/tinkerbell/cluster-api-provider-tinkerbell/playground/output"
outputDir: "/home/tink/repos/tinkerbell/playground/capt/output"
namespace: "tink"
counts:
controlPlanes: 1
workers: 1
spares: 1
versions:
-capt: 0.5.3
-chart: 0.5.0
-kube: v1.28.8
-os: 22.04
+capt: v0.6.5
+chart: v0.19.0
+kube: v1.29.4
+os: 20.04
+kubevip: 0.9.1
+capt:
+providerRepository: "https://github.com/tinkerbell/cluster-api-provider-tinkerbell/releases"
+chart:
+location: "oci://ghcr.io/tinkerbell/charts/tinkerbell"
+extraVars:
+- deployment.image=custom.registry/tinkerbell/tinkerbell
+- deployment.imageTag=v0.19.1
+- deployment.agentImageTag=latest
os:
-registry: reg.weinstocklabs.com/tinkerbell/cluster-api-provider-tinkerbell
+registry: ghcr.io/tinkerbell/cluster-api-provider-tinkerbell
distro: ubuntu
sshKey: ""
version: "2204"
sshKey: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIH9a2GwjgVfnpjOvIqNuJTwazS3tqJ9xzcepXzKMccVf capt-playground"
sshKeyAutoGenerated: true
version: "2004"
vm:
baseName: "node"
cpusPerVM: 2
@@ -51,23 +61,24 @@ vm:
gateway: 172.18.0.1
virtualBMC:
containerName: "virtualbmc"
-image: ghcr.io/jacobweinstock/virtualbmc
+image: ghcr.io/jacobweinstock/virtualbmc:latest
user: "root"
pass: "calvin"
ip: 172.18.0.3
bootMode: netboot
totalNodes: 3
kind:
-kubeconfig: /home/tink/repos/tinkerbell/cluster-api-provider-tinkerbell/playground/output/kind.kubeconfig
+kubeconfig: /home/tink/repos/tinkerbell/playground/capt/output/kind.kubeconfig
gatewayIP: 172.18.0.1
nodeIPBase: 172.18.10.20
-bridgeName: br-d086780dac6b
+bridgeName: br-3d1549d4f99f
tinkerbell:
vip: 172.18.10.74
+hookosVip: 172.18.10.73
cluster:
controlPlane:
vip: 172.18.10.75
podCIDR: 172.100.0.0/16
bootMode: netboot
EOF

set -euo pipefail
@@ -128,6 +139,26 @@ function main() {
os_version=$(yq eval '.versions.os' "$state_file")
os_version=$(echo "$os_version" | tr -d '.')
yq e -i '.os.version = "'$os_version'"' "$state_file"

+# if the sshKey is not set, generate a default one
+ssh_key=$(yq eval '.os.sshKey' "$config_file")
+if [[ -z $ssh_key ]]; then
+rm -rf "$output_dir"/capt-ssh-key* >>"$output_dir"/error.log 2>&1
+ssh-keygen -t ed25519 -f "$output_dir"/capt-ssh-key -N "" -C "capt-playground" >>"$output_dir"/error.log 2>&1
+if [[ $? -ne 0 ]]; then
+echo "Error generating SSH key. Check error.log for details." >>"$output_dir"/error.log 2>&1
+exit 1
+fi
+ssh_key=$(cat "$output_dir/capt-ssh-key.pub" | tr -d '\n')
+if [[ -z $ssh_key ]]; then
+echo "Error reading SSH key from $output_dir/capt-ssh-key.pub" >>"$output_dir"/error.log 2>&1
+exit 1
+fi
+yq e -i ".os.sshKey = \"$ssh_key\"" "$state_file"
+yq e -i ".os.sshKeyAutoGenerated = true" "$state_file"
+else
+yq e -i ".os.sshKeyAutoGenerated = false" "$state_file"
+fi
}

main "$@"
4 changes: 4 additions & 0 deletions capt/scripts/update_state.sh
@@ -30,6 +30,10 @@ function main() {
t_lb=$(echo "$NODE_IP_BASE" | awk -F"." '{print $1"."$2"."$3}').$((IP_LAST_OCTET + idx + offset))
yq e -i '.tinkerbell.vip = "'$t_lb'"' "$STATE_FILE"

+# set the Tinkerbell HookOS VIP
+hookos_vip=$(echo "$NODE_IP_BASE" | awk -F"." '{print $1"."$2"."$3}').$((IP_LAST_OCTET + idx + offset - 1))
+yq e -i '.tinkerbell.hookosVip = "'$hookos_vip'"' "$STATE_FILE"

# set the cluster control plane load balancer IP (VIP)
cp_lb=$(echo "$NODE_IP_BASE" | awk -F"." '{print $1"."$2"."$3}').$((IP_LAST_OCTET + idx + offset + 1))
yq e -i '.cluster.controlPlane.vip = "'$cp_lb'"' "$STATE_FILE"
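
All three VIPs are carved out of the same /24 as the node IPs: the new HookOS artifacts VIP sits one address below the Tinkerbell VIP, and the control-plane VIP one address above it. A sketch of the arithmetic with hypothetical inputs (the real values come from `.state` and the surrounding script):

```bash
NODE_IP_BASE="172.18.10.20"; IP_LAST_OCTET=20; idx=3; offset=50
prefix=$(echo "$NODE_IP_BASE" | awk -F"." '{print $1"."$2"."$3}')
echo "hookosVip: $prefix.$((IP_LAST_OCTET + idx + offset - 1))"   # 172.18.10.72
echo "tinkVip:   $prefix.$((IP_LAST_OCTET + idx + offset))"       # 172.18.10.73
echo "cpVip:     $prefix.$((IP_LAST_OCTET + idx + offset + 1))"   # 172.18.10.74
```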
135 changes: 135 additions & 0 deletions capt/tasks/Taskfile-capi-pivot.yaml
@@ -0,0 +1,135 @@
version: "3"

includes:
delete: ./Taskfile-delete.yaml

vars:
OUTPUT_DIR:
sh: yq eval '.outputDir' config.yaml
CURR_DIR:
sh: pwd
STATE_FILE: ".state"
STATE_FILE_FQ_PATH:
sh: echo {{joinPath .CURR_DIR .STATE_FILE}}
CONFIG_FILE_FQ_PATH:
sh: echo {{joinPath .CURR_DIR "config.yaml"}}
CLUSTER_NAME:
sh: yq eval '.clusterName' config.yaml
MGMT_KUBECONFIG:
sh: echo {{list .CLUSTER_NAME "kubeconfig" | join "." | joinPath .CURR_DIR .OUTPUT_DIR}}
KIND_KUBECONFIG:
sh: echo {{ joinPath .CURR_DIR .OUTPUT_DIR "kind.kubeconfig"}}

tasks:
ordered:
summary: |
CAPI pivot tasks run in order of dependency.
vars:
KUBECONFIG: "{{.MGMT_KUBECONFIG}}"
cmds:
- task: deploy-tinkerbell-helm-chart
- task: init
- task: pivot
- task: remove-kind-cluster

deploy-tinkerbell-helm-chart:
run: once
summary: |
Deploy the Tinkerbell Helm chart.
vars:
KUBECONFIG: "{{.MGMT_KUBECONFIG}}"
LB_IP:
sh: yq eval '.tinkerbell.vip' {{.STATE_FILE_FQ_PATH}}
LB_IP2:
sh: yq eval '.tinkerbell.hookosVip' {{.STATE_FILE_FQ_PATH}}
TRUSTED_PROXIES:
sh: KUBECONFIG={{.KUBECONFIG}} kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}' | tr ' ' ','
STACK_CHART_VERSION:
sh: yq eval '.versions.chart' {{.STATE_FILE_FQ_PATH}}
NAMESPACE:
sh: yq eval '.namespace' {{.STATE_FILE_FQ_PATH}}
LOCATION:
sh: yq eval '.chart.location' {{.STATE_FILE_FQ_PATH}}
CHART_NAME: tinkerbell
BOOTMODE:
sh: yq eval '.bootMode' {{.STATE_FILE_FQ_PATH}}
GLOBAL_VARS:
- trustedProxies={"{{.TRUSTED_PROXIES}}"}
- publicIP={{.LB_IP}}
- artifactsFileServer=http://{{.LB_IP2}}:7173
ISO_VARS:
- deployment.envs.smee.dhcpEnabled=false
- deployment.envs.smee.isoUpstreamURL=http://{{.LB_IP2}}:7173/hook-latest-lts-x86_64-efi-initrd.iso
- optional.hookos.extension=both
EXTRA_VARS:
sh: yq eval '.chart.extraVars | .[]' {{.STATE_FILE_FQ_PATH}} | xargs
cmds:
- KUBECONFIG="{{.MGMT_KUBECONFIG}}" helm upgrade --install {{.CHART_NAME}} {{.LOCATION}} --version "{{.STACK_CHART_VERSION}}" --create-namespace --namespace {{.NAMESPACE}} --wait {{range .GLOBAL_VARS}}--set "{{.}}" {{end}} {{- if eq .BOOTMODE "isoboot" }} {{- range .ISO_VARS }}--set "{{.}}" {{ end }} {{ end }} {{- if .EXTRA_VARS }} {{- range (splitList " " .EXTRA_VARS ) }}--set "{{.}}" {{ end }} {{ end }}
status:
- helm_status=$(KUBECONFIG="{{.KUBECONFIG}}" helm status -n {{.NAMESPACE}} {{.CHART_NAME}} -o yaml | yq .info.status); [[ "$helm_status" == "deployed" ]]

init:
run: once
deps: [deploy-tinkerbell-helm-chart]
summary: |
Initialize the cluster.
env:
TINKERBELL_IP:
sh: yq eval '.tinkerbell.vip' {{.STATE_FILE_FQ_PATH}}
CLUSTERCTL_DISABLE_VERSIONCHECK: true
XDG_CONFIG_HOME: "{{.OUTPUT_DIR}}/xdg"
XDG_CONFIG_DIRS: "{{.OUTPUT_DIR}}/xdg"
XDG_STATE_HOME: "{{.OUTPUT_DIR}}/xdg"
XDG_CACHE_HOME: "{{.OUTPUT_DIR}}/xdg"
XDG_RUNTIME_DIR: "{{.OUTPUT_DIR}}/xdg"
XDG_DATA_HOME: "{{.OUTPUT_DIR}}/xdg"
XDG_DATA_DIRS: "{{.OUTPUT_DIR}}/xdg"
vars:
OUTPUT_DIR:
sh: echo $(yq eval '.outputDir' config.yaml)
KIND_GATEWAY_IP:
sh: yq eval '.kind.gatewayIP' {{.STATE_FILE_FQ_PATH}}
KUBECONFIG: "{{.MGMT_KUBECONFIG}}"
cmds:
- KUBECONFIG="{{.KUBECONFIG}}" clusterctl --config {{.OUTPUT_DIR}}/clusterctl.yaml init --infrastructure tinkerbell
status:
- expected=1; got=$(KUBECONFIG="{{.KUBECONFIG}}" kubectl get pods -n capt-system |grep -ce "capt-controller"); [[ "$got" == "$expected" ]]

pivot:
run: once
deps: [init]
summary: |
Pivot the workload cluster (the initial mgmt cluster) to the permanent management cluster.
env:
CLUSTERCTL_DISABLE_VERSIONCHECK: true
XDG_CONFIG_HOME: "{{.OUTPUT_DIR}}/xdg"
XDG_CONFIG_DIRS: "{{.OUTPUT_DIR}}/xdg"
XDG_STATE_HOME: "{{.OUTPUT_DIR}}/xdg"
XDG_CACHE_HOME: "{{.OUTPUT_DIR}}/xdg"
XDG_RUNTIME_DIR: "{{.OUTPUT_DIR}}/xdg"
XDG_DATA_HOME: "{{.OUTPUT_DIR}}/xdg"
XDG_DATA_DIRS: "{{.OUTPUT_DIR}}/xdg"
vars:
OUTPUT_DIR:
sh: echo $(yq eval '.outputDir' config.yaml)
NAMESPACE:
sh: yq eval '.namespace' {{.STATE_FILE_FQ_PATH}}
cmds:
- KUBECONFIG="{{.KIND_KUBECONFIG}}" clusterctl move --to-kubeconfig="{{.MGMT_KUBECONFIG}}" --config {{.OUTPUT_DIR}}/clusterctl.yaml --kubeconfig "{{.KIND_KUBECONFIG}}" -n {{.NAMESPACE}}
status:
- expected=1; result=$(KUBECONFIG="{{.KIND_KUBECONFIG}}" kubectl get hw,machine.bmc -A | grep -i -e "hardware" -e "machine" && echo $? || echo $?); [[ "$result" == "$expected" ]]
- KUBECONFIG="{{.MGMT_KUBECONFIG}}" kubectl get hw,machine.bmc -A | grep -i -e "hardware" -e "machine"

prompt-remove-kind-cluster:
deps: [pivot]
prompt: Should the KinD cluster be deleted? Press `y` to delete the KinD cluster. Press `n` to exit.
cmds:
- echo 'Deleting the KinD cluster...'

remove-kind-cluster:
run: once
deps: [prompt-remove-kind-cluster]
summary: |
Remove the kind cluster.
cmds:
- task: delete:kind-cluster
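
The `pivot` task above wraps `clusterctl move`, which relocates the Cluster API objects (along with the Tinkerbell Hardware and BMC machine resources) from the temporary KinD bootstrap cluster into the workload cluster, making it self-managing. The manual equivalent is roughly the following sketch, assuming the playground's default output paths:

```bash
KUBECONFIG=output/kind.kubeconfig clusterctl move \
  --kubeconfig output/kind.kubeconfig \
  --to-kubeconfig output/capt-playground.kubeconfig \
  --namespace tinkerbell
```

The two status checks in the `pivot` task encode the same idea: after a successful move, `kubectl get hw,machine.bmc` should come back empty on the KinD side and populated on the new management cluster.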
2 changes: 1 addition & 1 deletion capt/tasks/Taskfile-capi.yaml
@@ -129,7 +129,7 @@ tasks:
BOOTMODE:
sh: yq eval '.bootMode' {{.STATE_FILE_FQ_PATH}}
KUSTOMIZE_FILE:
sh: "[[ {{.BOOTMODE}} == 'iso' ]] && echo kustomization-iso.tmpl || echo kustomization-netboot.tmpl"
sh: "[[ {{.BOOTMODE}} == 'isoboot' ]] && echo kustomization-iso.tmpl || echo kustomization-netboot.tmpl"
sources:
- config.yaml
generates:
26 changes: 14 additions & 12 deletions capt/tasks/Taskfile-create.yaml
@@ -105,31 +105,33 @@ tasks:
sh: yq eval '.kind.kubeconfig' {{.STATE_FILE_FQ_PATH}}
LB_IP:
sh: yq eval '.tinkerbell.vip' {{.STATE_FILE_FQ_PATH}}
+LB_IP2:
+sh: yq eval '.tinkerbell.hookosVip' {{.STATE_FILE_FQ_PATH}}
TRUSTED_PROXIES:
-sh: KUBECONFIG={{.KUBECONFIG}} kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}'
+sh: KUBECONFIG={{.KUBECONFIG}} kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}' | tr ' ' ','
STACK_CHART_VERSION:
sh: yq eval '.versions.chart' {{.STATE_FILE_FQ_PATH}}
NAMESPACE:
sh: yq eval '.namespace' {{.STATE_FILE_FQ_PATH}}
LOCATION:
sh: yq eval '.chart.location' {{.STATE_FILE_FQ_PATH}}
-CHART_NAME: tink-stack
+CHART_NAME: tinkerbell
BOOTMODE:
sh: yq eval '.bootMode' {{.STATE_FILE_FQ_PATH}}
GLOBAL_VARS:
-- global.trustedProxies={"{{.TRUSTED_PROXIES}}"}
-- global.publicIP={{.LB_IP}}
+- trustedProxies={"{{.TRUSTED_PROXIES}}"}
+- publicIP={{.LB_IP}}
+- artifactsFileServer=http://{{.LB_IP2}}:7173
ISO_VARS:
-- stack.hook.extension=both
-- smee.iso.enabled=true
-- smee.iso.url=http://{{.LB_IP}}:8080/hook-latest-lts-x86_64-efi-initrd.iso
-- smee.iso.staticIPAMEnabled=true
-- smee.dhcp.enabled=false
-- stack.relay.enabled=false
+- deployment.envs.smee.dhcpEnabled=false
+- deployment.envs.smee.isoUpstreamURL=http://{{.LB_IP2}}:7173/hook-latest-lts-x86_64-efi-initrd.iso
+- optional.hookos.extension=both
+EXTRA_VARS:
+sh: yq eval '.chart.extraVars | .[]' {{.STATE_FILE_FQ_PATH}} | xargs
cmds:
- KUBECONFIG="{{.KUBECONFIG}}" helm install {{.CHART_NAME}} {{.LOCATION}} --version "{{.STACK_CHART_VERSION}}" --create-namespace --namespace {{.NAMESPACE}} --wait {{range .GLOBAL_VARS}}--set "{{.}}" {{end}} {{- if eq .BOOTMODE "iso" }} {{- range .ISO_VARS }}--set "{{.}}" {{end}} {{end}}
- KUBECONFIG="{{.KUBECONFIG}}" helm upgrade --install {{.CHART_NAME}} {{.LOCATION}} --version "{{.STACK_CHART_VERSION}}" --create-namespace --namespace {{.NAMESPACE}} --wait {{range .GLOBAL_VARS}}--set "{{.}}" {{end}} {{- if eq .BOOTMODE "isoboot" }} {{- range .ISO_VARS }}--set "{{.}}" {{ end }} {{ end }} {{- if .EXTRA_VARS }} {{- range (splitList " " .EXTRA_VARS ) }}--set "{{.}}" {{ end }} {{ end }}
status:
- KUBECONFIG="{{.KUBECONFIG}}" helm list -n {{.NAMESPACE}} | grep -q {{.CHART_NAME}}
- helm_status=$(KUBECONFIG="{{.KUBECONFIG}}" helm status -n {{.NAMESPACE}} {{.CHART_NAME}} -o yaml | yq .info.status); [[ "$helm_status" == "deployed" ]]

vms:
run: once
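
The `| tr ' ' ','` added to TRUSTED_PROXIES matters because kubectl's jsonpath prints one pod CIDR per node separated by spaces, while the chart's `trustedProxies` value wants a comma-separated list. A quick illustration with made-up CIDRs:

```bash
kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}' | tr ' ' ','
# "10.244.0.0/24 10.244.1.0/24" becomes "10.244.0.0/24,10.244.1.0/24"
```

Switching `helm install` to `helm upgrade --install` also makes the task idempotent on re-runs, which pairs with the stricter `helm status`-based check that replaced the old `helm list | grep`.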