diff --git a/capt/Taskfile.yaml b/capt/Taskfile.yaml
index b05e9b7a..af2878b9 100644
--- a/capt/Taskfile.yaml
+++ b/capt/Taskfile.yaml
@@ -5,6 +5,7 @@ includes:
   delete: ./tasks/Taskfile-delete.yaml
   vbmc: ./tasks/Taskfile-vbmc.yaml
   capi: ./tasks/Taskfile-capi.yaml
+  capi-pivot: ./tasks/Taskfile-capi-pivot.yaml
 
 vars:
   OUTPUT_DIR:
@@ -14,6 +15,8 @@ vars:
   STATE_FILE: ".state"
   STATE_FILE_FQ_PATH:
     sh: echo {{joinPath .CURR_DIR .STATE_FILE}}
+  CONFIG_FILE_FQ_PATH:
+    sh: echo {{joinPath .CURR_DIR "config.yaml"}}
 
 tasks:
   create-playground:
@@ -108,20 +111,27 @@ tasks:
         echo The workload cluster kubeconfig is located at: {{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig
         echo
         echo 1. Watch and wait for the first control plane node to be provisioned successfully: STATE_SUCCESS
-        echo "KUBECONFIG={{.KIND_KUBECONFIG}} kubectl get workflows -n {{.NAMESPACE}} -w"
+        echo "KUBECONFIG={{.KIND_KUBECONFIG}} kubectl get workflows -n {{.NAMESPACE}} -o wide -w"
         echo
         echo
         echo 2. Watch and wait for the Kubernetes API server to be ready and responding:
-        echo "until KUBECONFIG={{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig kubectl get node; do echo 'Waiting for Kube API server to respond...'; sleep 5; done"
+        echo "until KUBECONFIG={{.CURR_DIR}}/{{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig kubectl get node; do echo 'Waiting for Kube API server to respond...'; sleep 5; done"
         echo
         echo 3. Deploy a CNI
         echo Cilium
-        echo "KUBECONFIG={{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig cilium install"
+        echo "KUBECONFIG={{.CURR_DIR}}/{{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig cilium install"
         echo or KUBEROUTER
-        echo "KUBECONFIG={{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml"
+        echo "KUBECONFIG={{.CURR_DIR}}/{{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml"
         echo
         echo 4. Watch and wait for all nodes to join the cluster and be ready:
-        echo "KUBECONFIG={{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig kubectl get nodes -w"
+        echo "KUBECONFIG={{.CURR_DIR}}/{{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig kubectl get nodes -w"
       - touch {{.OUTPUT_DIR}}/.next-steps-displayed
     status:
       - echo ;[ -f {{.OUTPUT_DIR}}/.next-steps-displayed ]
+
+  pivot:
+    silent: true
+    summary: |
+      Pivot the workload cluster to the permanent management cluster.
+    cmds:
+      - task: capi-pivot:ordered
diff --git a/capt/config.yaml b/capt/config.yaml
index 27145888..e6ce8b0a 100644
--- a/capt/config.yaml
+++ b/capt/config.yaml
@@ -1,23 +1,26 @@
 ---
 clusterName: "capt-playground"
 outputDir: "output"
-namespace: "tink"
+namespace: "tinkerbell"
 counts:
   controlPlanes: 1
   workers: 1
   spares: 1
 versions:
-  capt: v0.6.1
-  chart: 0.6.1
-  kube: v1.29.4
+  capt: v0.6.5
+  chart: v0.20.1
+  kube: v1.31.3
   os: 20.04
-  kubevip: 0.8.7
+  kubevip: 0.9.1
 capt:
   providerRepository: "https://github.com/tinkerbell/cluster-api-provider-tinkerbell/releases"
   #providerRepository: "/home/tink/repos/tinkerbell/cluster-api-provider-tinkerbell/out/release/infrastructure-tinkerbell"
 chart:
-  location: "oci://ghcr.io/tinkerbell/charts/stack"
-  #location: "/home/tink/repos/tinkerbell/charts/tinkerbell/stack"
+  location: "oci://ghcr.io/tinkerbell/charts/tinkerbell"
+  #location: "/home/tink/repos/tinkerbell/tinkerbell/helm/tinkerbell"
+  extraVars:
+    # - deployment.imagePullPolicy=Always
+    - optional.hookos.downloadURL=https://github.com/tinkerbell/hook/releases/download/latest
 os:
   registry: ghcr.io/tinkerbell/cluster-api-provider-tinkerbell
   distro: ubuntu
diff --git a/capt/scripts/generate_state.sh b/capt/scripts/generate_state.sh
index ee0042ef..8897bb11 100755
--- a/capt/scripts/generate_state.sh
+++ b/capt/scripts/generate_state.sh
@@ -5,22 +5,32 @@ cat <<EOF >/dev/null
 ---
 clusterName: "capt-playground"
-outputDir: "/home/tink/repos/tinkerbell/cluster-api-provider-tinkerbell/playground/output"
+outputDir: "/home/tink/repos/tinkerbell/playground/capt/output"
 namespace: "tink"
 counts:
   controlPlanes: 1
   workers: 1
   spares: 1
 versions:
-  capt: 0.5.3
-  chart: 0.5.0
-  kube: v1.28.8
-  os: 22.04
+  capt: v0.6.5
+  chart: v0.19.0
+  kube: v1.29.4
+  os: 20.04
+  kubevip: 0.9.1
+capt:
+  providerRepository: "https://github.com/tinkerbell/cluster-api-provider-tinkerbell/releases"
+chart:
+  location: "oci://ghcr.io/tinkerbell/charts/tinkerbell"
+  extraVars:
+    - deployment.image=custom.registry/tinkerbell/tinkerbell
+    - deployment.imageTag=v0.19.1
+    - deployment.agentImageTag=latest
 os:
-  registry: reg.weinstocklabs.com/tinkerbell/cluster-api-provider-tinkerbell
+  registry: ghcr.io/tinkerbell/cluster-api-provider-tinkerbell
   distro: ubuntu
-  sshKey: ""
-  version: "2204"
+  sshKey: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIH9a2GwjgVfnpjOvIqNuJTwazS3tqJ9xzcepXzKMccVf capt-playground"
+  sshKeyAutoGenerated: true
+  version: "2004"
 vm:
   baseName: "node"
   cpusPerVM: 2
@@ -51,23 +61,24 @@ vm:
   gateway: 172.18.0.1
 virtualBMC:
   containerName: "virtualbmc"
-  image: ghcr.io/jacobweinstock/virtualbmc
+  image: ghcr.io/jacobweinstock/virtualbmc:latest
   user: "root"
   pass: "calvin"
   ip: 172.18.0.3
+bootMode: netboot
 totalNodes: 3
 kind:
-  kubeconfig: /home/tink/repos/tinkerbell/cluster-api-provider-tinkerbell/playground/output/kind.kubeconfig
+  kubeconfig: /home/tink/repos/tinkerbell/playground/capt/output/kind.kubeconfig
   gatewayIP: 172.18.0.1
   nodeIPBase: 172.18.10.20
-  bridgeName: br-d086780dac6b
+  bridgeName: br-3d1549d4f99f
 tinkerbell:
   vip: 172.18.10.74
+  hookosVip: 172.18.10.73
 cluster:
   controlPlane:
     vip: 172.18.10.75
 podCIDR: 172.100.0.0/16
-bootMode: netboot
 EOF
 
 set -euo pipefail
@@ -128,6 +139,26 @@ function main() {
   os_version=$(yq eval '.versions.os' "$state_file")
   os_version=$(echo "$os_version" | tr -d '.')
   yq e -i '.os.version = "'$os_version'"' "$state_file"
+
+  # if the sshKey is not set, generate a default one
+  ssh_key=$(yq eval '.os.sshKey' "$config_file")
+  if [[ -z $ssh_key ]]; then
+    rm -rf "$output_dir"/capt-ssh-key* >>"$output_dir"/error.log 2>&1
+    ssh-keygen -t ed25519 -f "$output_dir"/capt-ssh-key -N "" -C "capt-playground" >>"$output_dir"/error.log 2>&1
+    if [[ $? -ne 0 ]]; then
+      echo "Error generating SSH key. Check error.log for details." >>"$output_dir"/error.log 2>&1
+      exit 1
+    fi
+    ssh_key=$(cat "$output_dir/capt-ssh-key.pub" | tr -d '\n')
+    if [[ -z $ssh_key ]]; then
+      echo "Error reading SSH key from $output_dir/capt-ssh-key.pub" >>"$output_dir"/error.log 2>&1
+      exit 1
+    fi
+    yq e -i ".os.sshKey = \"$ssh_key\"" "$state_file"
+    yq e -i ".os.sshKeyAutoGenerated = true" "$state_file"
+  else
+    yq e -i ".os.sshKeyAutoGenerated = false" "$state_file"
+  fi
 }
 
 main "$@"
diff --git a/capt/scripts/update_state.sh b/capt/scripts/update_state.sh
index f27a6479..a58627df 100755
--- a/capt/scripts/update_state.sh
+++ b/capt/scripts/update_state.sh
@@ -30,6 +30,10 @@ function main() {
   t_lb=$(echo "$NODE_IP_BASE" | awk -F"." '{print $1"."$2"."$3}').$((IP_LAST_OCTET + idx + offset))
   yq e -i '.tinkerbell.vip = "'$t_lb'"' "$STATE_FILE"
 
+  # set the Tinkerbell HookOS VIP
+  hookos_vip=$(echo "$NODE_IP_BASE" | awk -F"." '{print $1"."$2"."$3}').$((IP_LAST_OCTET + idx + offset - 1))
+  yq e -i '.tinkerbell.hookosVip = "'$hookos_vip'"' "$STATE_FILE"
+
   # set the cluster control plane load balancer IP (VIP)
   cp_lb=$(echo "$NODE_IP_BASE" | awk -F"." '{print $1"."$2"."$3}').$((IP_LAST_OCTET + idx + offset + 1))
   yq e -i '.cluster.controlPlane.vip = "'$cp_lb'"' "$STATE_FILE"
diff --git a/capt/tasks/Taskfile-capi-pivot.yaml b/capt/tasks/Taskfile-capi-pivot.yaml
new file mode 100644
index 00000000..02b05b7b
--- /dev/null
+++ b/capt/tasks/Taskfile-capi-pivot.yaml
@@ -0,0 +1,135 @@
+version: "3"
+
+includes:
+  delete: ./Taskfile-delete.yaml
+
+vars:
+  OUTPUT_DIR:
+    sh: yq eval '.outputDir' config.yaml
+  CURR_DIR:
+    sh: pwd
+  STATE_FILE: ".state"
+  STATE_FILE_FQ_PATH:
+    sh: echo {{joinPath .CURR_DIR .STATE_FILE}}
+  CONFIG_FILE_FQ_PATH:
+    sh: echo {{joinPath .CURR_DIR "config.yaml"}}
+  CLUSTER_NAME:
+    sh: yq eval '.clusterName' config.yaml
+  MGMT_KUBECONFIG:
+    sh: echo {{list .CLUSTER_NAME "kubeconfig" | join "." | joinPath .CURR_DIR .OUTPUT_DIR}}
+  KIND_KUBECONFIG:
+    sh: echo {{ joinPath .CURR_DIR .OUTPUT_DIR "kind.kubeconfig"}}
+
+tasks:
+  ordered:
+    summary: |
+      CAPI pivot tasks run in order of dependency.
+    vars:
+      KUBECONFIG: "{{.MGMT_KUBECONFIG}}"
+    cmds:
+      - task: deploy-tinkerbell-helm-chart
+      - task: init
+      - task: pivot
+      - task: remove-kind-cluster
+
+  deploy-tinkerbell-helm-chart:
+    run: once
+    summary: |
+      Deploy the Tinkerbell Helm chart.
+    vars:
+      KUBECONFIG: "{{.MGMT_KUBECONFIG}}"
+      LB_IP:
+        sh: yq eval '.tinkerbell.vip' {{.STATE_FILE_FQ_PATH}}
+      LB_IP2:
+        sh: yq eval '.tinkerbell.hookosVip' {{.STATE_FILE_FQ_PATH}}
+      TRUSTED_PROXIES:
+        sh: KUBECONFIG={{.KUBECONFIG}} kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}' | tr ' ' ','
+      STACK_CHART_VERSION:
+        sh: yq eval '.versions.chart' {{.STATE_FILE_FQ_PATH}}
+      NAMESPACE:
+        sh: yq eval '.namespace' {{.STATE_FILE_FQ_PATH}}
+      LOCATION:
+        sh: yq eval '.chart.location' {{.STATE_FILE_FQ_PATH}}
+      CHART_NAME: tinkerbell
+      BOOTMODE:
+        sh: yq eval '.bootMode' {{.STATE_FILE_FQ_PATH}}
+      GLOBAL_VARS:
+        - trustedProxies={"{{.TRUSTED_PROXIES}}"}
+        - publicIP={{.LB_IP}}
+        - artifactsFileServer=http://{{.LB_IP2}}:7173
+      ISO_VARS:
+        - deployment.envs.smee.dhcpEnabled=false
+        - deployment.envs.smee.isoUpstreamURL=http://{{.LB_IP2}}:7173/hook-latest-lts-x86_64-efi-initrd.iso
+        - optional.hookos.extension=both
+      EXTRA_VARS:
+        sh: yq eval '.chart.extraVars | .[]' {{.STATE_FILE_FQ_PATH}} | xargs
+    cmds:
+      - KUBECONFIG="{{.MGMT_KUBECONFIG}}" helm upgrade --install {{.CHART_NAME}} {{.LOCATION}} --version "{{.STACK_CHART_VERSION}}" --create-namespace --namespace {{.NAMESPACE}} --wait {{range .GLOBAL_VARS}}--set "{{.}}" {{end}} {{- if eq .BOOTMODE "isoboot" }} {{- range .ISO_VARS }}--set "{{.}}" {{ end }} {{ end }} {{- if .EXTRA_VARS }} {{- range (splitList " " .EXTRA_VARS ) }}--set "{{.}}" {{ end }} {{ end }}
+    status:
+      - helm_status=$(KUBECONFIG="{{.KUBECONFIG}}" helm status -n {{.NAMESPACE}} {{.CHART_NAME}} -o yaml | yq .info.status); [[ "$helm_status" == "deployed" ]]
+
+  init:
+    run: once
+    deps: [deploy-tinkerbell-helm-chart]
+    summary: |
+      Initialize the cluster.
+    env:
+      TINKERBELL_IP:
+        sh: yq eval '.tinkerbell.vip' {{.STATE_FILE_FQ_PATH}}
+      CLUSTERCTL_DISABLE_VERSIONCHECK: true
+      XDG_CONFIG_HOME: "{{.OUTPUT_DIR}}/xdg"
+      XDG_CONFIG_DIRS: "{{.OUTPUT_DIR}}/xdg"
+      XDG_STATE_HOME: "{{.OUTPUT_DIR}}/xdg"
+      XDG_CACHE_HOME: "{{.OUTPUT_DIR}}/xdg"
+      XDG_RUNTIME_DIR: "{{.OUTPUT_DIR}}/xdg"
+      XDG_DATA_HOME: "{{.OUTPUT_DIR}}/xdg"
+      XDG_DATA_DIRS: "{{.OUTPUT_DIR}}/xdg"
+    vars:
+      OUTPUT_DIR:
+        sh: echo $(yq eval '.outputDir' config.yaml)
+      KIND_GATEWAY_IP:
+        sh: yq eval '.kind.gatewayIP' {{.STATE_FILE_FQ_PATH}}
+      KUBECONFIG: "{{.MGMT_KUBECONFIG}}"
+    cmds:
+      - KUBECONFIG="{{.KUBECONFIG}}" clusterctl --config {{.OUTPUT_DIR}}/clusterctl.yaml init --infrastructure tinkerbell
+    status:
+      - expected=1; got=$(KUBECONFIG="{{.KUBECONFIG}}" kubectl get pods -n capt-system |grep -ce "capt-controller"); [[ "$got" == "$expected" ]]
+
+  pivot:
+    run: once
+    deps: [init]
+    summary: |
+      Pivot the workload cluster (the initial mgmt cluster) to the permanent management cluster.
+    env:
+      CLUSTERCTL_DISABLE_VERSIONCHECK: true
+      XDG_CONFIG_HOME: "{{.OUTPUT_DIR}}/xdg"
+      XDG_CONFIG_DIRS: "{{.OUTPUT_DIR}}/xdg"
+      XDG_STATE_HOME: "{{.OUTPUT_DIR}}/xdg"
+      XDG_CACHE_HOME: "{{.OUTPUT_DIR}}/xdg"
+      XDG_RUNTIME_DIR: "{{.OUTPUT_DIR}}/xdg"
+      XDG_DATA_HOME: "{{.OUTPUT_DIR}}/xdg"
+      XDG_DATA_DIRS: "{{.OUTPUT_DIR}}/xdg"
+    vars:
+      OUTPUT_DIR:
+        sh: echo $(yq eval '.outputDir' config.yaml)
+      NAMESPACE:
+        sh: yq eval '.namespace' {{.STATE_FILE_FQ_PATH}}
+    cmds:
+      - KUBECONFIG="{{.KIND_KUBECONFIG}}" clusterctl move --to-kubeconfig="{{.MGMT_KUBECONFIG}}" --config {{.OUTPUT_DIR}}/clusterctl.yaml --kubeconfig "{{.KIND_KUBECONFIG}}" -n {{.NAMESPACE}}
+    status:
+      - expected=1; result=$(KUBECONFIG="{{.KIND_KUBECONFIG}}" kubectl get hw,machine.bmc -A | grep -i -e "hardware" -e "machine" && echo $? || echo $?); [[ "$result" == "$expected" ]]
+      - KUBECONFIG="{{.MGMT_KUBECONFIG}}" kubectl get hw,machine.bmc -A | grep -i -e "hardware" -e "machine"
+
+  prompt-remove-kind-cluster:
+    deps: [pivot]
+    prompt: Should the KinD cluster be deleted? Press `y` to delete the KinD cluster. Press `n` to exit.
+    cmds:
+      - echo 'Deleting the KinD cluster...'
+
+  remove-kind-cluster:
+    run: once
+    deps: [prompt-remove-kind-cluster]
+    summary: |
+      Remove the kind cluster.
+    cmds:
+      - task: delete:kind-cluster
diff --git a/capt/tasks/Taskfile-capi.yaml b/capt/tasks/Taskfile-capi.yaml
index c266d5a4..b18f98f9 100644
--- a/capt/tasks/Taskfile-capi.yaml
+++ b/capt/tasks/Taskfile-capi.yaml
@@ -129,7 +129,7 @@ tasks:
       BOOTMODE:
         sh: yq eval '.bootMode' {{.STATE_FILE_FQ_PATH}}
       KUSTOMIZE_FILE:
-        sh: "[[ {{.BOOTMODE}} == 'iso' ]] && echo kustomization-iso.tmpl || echo kustomization-netboot.tmpl"
+        sh: "[[ {{.BOOTMODE}} == 'isoboot' ]] && echo kustomization-iso.tmpl || echo kustomization-netboot.tmpl"
     sources:
       - config.yaml
     generates:
diff --git a/capt/tasks/Taskfile-create.yaml b/capt/tasks/Taskfile-create.yaml
index ef954f5b..e813d7a5 100644
--- a/capt/tasks/Taskfile-create.yaml
+++ b/capt/tasks/Taskfile-create.yaml
@@ -105,31 +105,33 @@ tasks:
         sh: yq eval '.kind.kubeconfig' {{.STATE_FILE_FQ_PATH}}
       LB_IP:
         sh: yq eval '.tinkerbell.vip' {{.STATE_FILE_FQ_PATH}}
+      LB_IP2:
+        sh: yq eval '.tinkerbell.hookosVip' {{.STATE_FILE_FQ_PATH}}
       TRUSTED_PROXIES:
-        sh: KUBECONFIG={{.KUBECONFIG}} kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}'
+        sh: KUBECONFIG={{.KUBECONFIG}} kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}' | tr ' ' ','
       STACK_CHART_VERSION:
         sh: yq eval '.versions.chart' {{.STATE_FILE_FQ_PATH}}
       NAMESPACE:
         sh: yq eval '.namespace' {{.STATE_FILE_FQ_PATH}}
       LOCATION:
         sh: yq eval '.chart.location' {{.STATE_FILE_FQ_PATH}}
-      CHART_NAME: tink-stack
+      CHART_NAME: tinkerbell
       BOOTMODE:
         sh: yq eval '.bootMode' {{.STATE_FILE_FQ_PATH}}
       GLOBAL_VARS:
-        - global.trustedProxies={"{{.TRUSTED_PROXIES}}"}
-        - global.publicIP={{.LB_IP}}
+        - trustedProxies={"{{.TRUSTED_PROXIES}}"}
+        - publicIP={{.LB_IP}}
+        - artifactsFileServer=http://{{.LB_IP2}}:7173
       ISO_VARS:
-        - stack.hook.extension=both
-        - smee.iso.enabled=true
-        - smee.iso.url=http://{{.LB_IP}}:8080/hook-latest-lts-x86_64-efi-initrd.iso
-        - smee.iso.staticIPAMEnabled=true
-        - smee.dhcp.enabled=false
-        - stack.relay.enabled=false
+        - deployment.envs.smee.dhcpEnabled=false
+        - deployment.envs.smee.isoUpstreamURL=http://{{.LB_IP2}}:7173/hook-latest-lts-x86_64-efi-initrd.iso
+        - optional.hookos.extension=both
+      EXTRA_VARS:
+        sh: yq eval '.chart.extraVars | .[]' {{.STATE_FILE_FQ_PATH}} | xargs
     cmds:
-      - KUBECONFIG="{{.KUBECONFIG}}" helm install {{.CHART_NAME}} {{.LOCATION}} --version "{{.STACK_CHART_VERSION}}" --create-namespace --namespace {{.NAMESPACE}} --wait {{range .GLOBAL_VARS}}--set "{{.}}" {{end}} {{- if eq .BOOTMODE "iso" }} {{- range .ISO_VARS }}--set "{{.}}" {{end}} {{end}}
+      - KUBECONFIG="{{.KUBECONFIG}}" helm upgrade --install {{.CHART_NAME}} {{.LOCATION}} --version "{{.STACK_CHART_VERSION}}" --create-namespace --namespace {{.NAMESPACE}} --wait {{range .GLOBAL_VARS}}--set "{{.}}" {{end}} {{- if eq .BOOTMODE "isoboot" }} {{- range .ISO_VARS }}--set "{{.}}" {{ end }} {{ end }} {{- if .EXTRA_VARS }} {{- range (splitList " " .EXTRA_VARS ) }}--set "{{.}}" {{ end }} {{ end }}
     status:
-      - KUBECONFIG="{{.KUBECONFIG}}" helm list -n {{.NAMESPACE}} | grep -q {{.CHART_NAME}}
+      - helm_status=$(KUBECONFIG="{{.KUBECONFIG}}" helm status -n {{.NAMESPACE}} {{.CHART_NAME}} -o yaml | yq .info.status); [[ "$helm_status" == "deployed" ]]
 
   vms:
     run: once
diff --git a/capt/tasks/Taskfile-delete.yaml b/capt/tasks/Taskfile-delete.yaml
index 4699003d..cd9acba6 100644
--- a/capt/tasks/Taskfile-delete.yaml
+++ b/capt/tasks/Taskfile-delete.yaml
@@ -11,6 +11,7 @@ tasks:
       - task: vms
      - task: default-storage-pool
       - task: output-dir
+      - task: remove-auto-generated-ssh-key
 
   kind-cluster:
     summary: |
@@ -77,3 +78,12 @@ tasks:
       - rm -rf {{.OUTPUT_DIR}}
     status:
       - echo ;[ ! -d {{.OUTPUT_DIR}} ]
+
+  remove-auto-generated-ssh-key:
+    summary: |
+      Remove the auto generated ssh key from the .state file.
+    cmds:
+      - yq e -i '.os.sshKey = ""' {{.STATE_FILE_FQ_PATH}}
+      - yq e -i '.os.sshKeyAutoGenerated = false' {{.STATE_FILE_FQ_PATH}}
+    status:
+      - v=$(yq eval '.os.sshKey' {{.CONFIG_FILE_FQ_PATH}}); [[ "$v" != "" ]] || [[ "$(yq eval '.os.sshKey' {{.STATE_FILE_FQ_PATH}})" == "" ]]
diff --git a/capt/templates/bmc-secret.tmpl b/capt/templates/bmc-secret.tmpl
index 35fa3e9c..85b62c78 100644
--- a/capt/templates/bmc-secret.tmpl
+++ b/capt/templates/bmc-secret.tmpl
@@ -1,9 +1,12 @@
 apiVersion: v1
-data:
-  password: $BMC_PASS_BASE64
-  username: $BMC_USER_BASE64
 kind: Secret
 metadata:
   name: bmc-creds
   namespace: $NAMESPACE
-type: kubernetes.io/basic-auth
\ No newline at end of file
+  labels:
+    clusterctl.cluster.x-k8s.io/move: ""
+    clusterctl.cluster.x-k8s.io: ""
+data:
+  password: $BMC_PASS_BASE64
+  username: $BMC_USER_BASE64
+type: kubernetes.io/basic-auth
diff --git a/capt/templates/kustomization-iso.tmpl b/capt/templates/kustomization-iso.tmpl
index fba20990..88e354a2 100644
--- a/capt/templates/kustomization-iso.tmpl
+++ b/capt/templates/kustomization-iso.tmpl
@@ -14,8 +14,8 @@ patches:
         path: /spec/template/spec
         value:
           bootOptions:
-            bootMode: iso
-            isoURL: "http://$TINKERBELL_VIP:7171/iso/:macAddress/hook.iso"
+            bootMode: isoboot
+            isoURL: "http://$TINKERBELL_VIP:7171/iso/hook.iso"
           hardwareAffinity:
             required:
               - labelSelector:
@@ -54,7 +54,7 @@ patches:
           CONTENTS: |
             datasource:
              Ec2:
-                metadata_urls: ["http://$TINKERBELL_VIP:50061"]
+                metadata_urls: ["http://$TINKERBELL_VIP:7172"]
                 strict_id: false
             system_info:
               default_user:
@@ -117,7 +117,7 @@ patches:
             CONTENTS: |
               datasource: Ec2
         - name: "kexec image"
-          image: ghcr.io/jacobweinstock/waitdaemon:0.2.1
+          image: ghcr.io/jacobweinstock/waitdaemon:0.2.2
           timeout: 90
           pid: host
          environment:
@@ -137,8 +137,8 @@ patches:
         path: /spec/template/spec
         value:
           bootOptions:
-            bootMode: iso
-            isoURL: "http://$TINKERBELL_VIP:7171/iso/:macAddress/hook.iso"
+            bootMode: isoboot
+            isoURL: "http://$TINKERBELL_VIP:7171/iso/hook.iso"
           hardwareAffinity:
             required:
               - labelSelector:
@@ -177,7 +177,7 @@ patches:
           CONTENTS: |
             datasource:
               Ec2:
-                metadata_urls: ["http://$TINKERBELL_VIP:50061"]
+                metadata_urls: ["http://$TINKERBELL_VIP:7172"]
                 strict_id: false
             system_info:
               default_user:
@@ -240,7 +240,7 @@ patches:
             CONTENTS: |
               datasource: Ec2
         - name: "kexec image"
-          image: ghcr.io/jacobweinstock/waitdaemon:0.2.1
+          image: ghcr.io/jacobweinstock/waitdaemon:0.2.2
           timeout: 90
           pid: host
           environment:
diff --git a/capt/templates/kustomization-netboot.tmpl b/capt/templates/kustomization-netboot.tmpl
index ac3429da..44b88dc9 100644
--- a/capt/templates/kustomization-netboot.tmpl
+++ b/capt/templates/kustomization-netboot.tmpl
@@ -53,7 +53,7 @@ patches:
           CONTENTS: |
             datasource:
               Ec2:
-                metadata_urls: ["http://$TINKERBELL_VIP:50061"]
+                metadata_urls: ["http://$TINKERBELL_VIP:7172"]
                 strict_id: false
             system_info:
               default_user:
@@ -78,7 +78,7 @@ patches:
           CONTENTS: |
             datasource: Ec2
         - name: "kexec image"
-          image: ghcr.io/jacobweinstock/waitdaemon:0.2.1
+          image: ghcr.io/jacobweinstock/waitdaemon:0.2.2
           timeout: 90
           pid: host
           environment:
@@ -137,7 +137,7 @@ patches:
           CONTENTS: |
             datasource:
               Ec2:
-                metadata_urls: ["http://$TINKERBELL_VIP:50061"]
+                metadata_urls: ["http://$TINKERBELL_VIP:7172"]
                 strict_id: false
             system_info:
               default_user:
@@ -162,7 +162,7 @@ patches:
           CONTENTS: |
             datasource: Ec2
         - name: "kexec image"
-          image: ghcr.io/jacobweinstock/waitdaemon:0.2.1
+          image: ghcr.io/jacobweinstock/waitdaemon:0.2.2
           timeout: 90
           pid: host
           environment:
diff --git a/stack/vagrant/.env b/stack/vagrant/.env
index 58b1ca47..c3f73dc0 100644
--- a/stack/vagrant/.env
+++ b/stack/vagrant/.env
@@ -6,7 +6,7 @@ MACHINE1_IP=192.168.56.43
 MACHINE1_MAC=08:00:27:9e:f5:3a
 
 # https://github.com/tinkerbell/tinkerbell/pkgs/container/charts%2Ftinkerbell
-HELM_CHART_VERSION=v0.19.2
+HELM_CHART_VERSION=v0.20.1
 KUBECTL_VERSION=1.32.4
 K3D_VERSION=v5.8.3
 HELM_VERSION=v3.17.0
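
Reviewer note (sketch, not part of the patch): with these changes applied, the pivot flow introduced in capt/Taskfile.yaml and capt/tasks/Taskfile-capi-pivot.yaml is driven from the `capt` directory. The walkthrough below assumes a playground already created with `task create-playground` and a reachable workload-cluster API server; the kubeconfig path is the MGMT_KUBECONFIG the Taskfile derives from the config.yaml defaults (outputDir "output", clusterName "capt-playground").

# Run the ordered pivot flow: install the Tinkerbell chart on the workload
# cluster, `clusterctl init` it as a management cluster, `clusterctl move`
# the CAPI/Tinkerbell objects off the KinD bootstrap cluster, then prompt
# before deleting the KinD cluster.
cd capt
task pivot

# Confirm the Hardware and BMC Machine objects now live on the permanent
# management cluster (the same check the pivot task's `status` performs).
KUBECONFIG=output/capt-playground.kubeconfig kubectl get hw,machine.bmc -A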
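
A second note, on the new `chart.extraVars` plumbing: the Taskfiles read the list from the state file with `yq`, flatten it to one space-separated string with `xargs`, then re-split it inside the Go template into repeated `--set` flags. A hypothetical standalone shell equivalent (values containing spaces would break the split, same as in the Taskfile template):

# Sketch: how chart.extraVars becomes repeated `helm --set` flags.
extra_vars=$(yq eval '.chart.extraVars | .[]' .state | xargs)  # -> "k1=v1 k2=v2 ..."
set --                                                         # reset positional params
for kv in $extra_vars; do
  set -- "$@" --set "$kv"                                      # append one --set per entry
done
helm upgrade --install tinkerbell oci://ghcr.io/tinkerbell/charts/tinkerbell \
  --version v0.20.1 --create-namespace --namespace tinkerbell --wait "$@"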