# Cluster Stacks

## Getting started

First, you need a ready Rancher Management Dashboard installation, without any existing downstream clusters that were created as custom clusters (the CSO would break such a management setup).

For Rancher versions < 2.13 you must install Rancher Turtles to enable the preinstalled CAPI: https://ranchermanager.docs.rancher.com/integrations-in-rancher/cluster-api/overview
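
For versions below 2.13, a minimal sketch of the Helm-based Turtles installation, following the quickstart linked above (chart and repository names should be verified against the Turtles docs for your Rancher version):

```sh
# Add the Rancher Turtles chart repository and install the operator
helm repo add turtles https://rancher.github.io/turtles
helm repo update
helm install rancher-turtles turtles/rancher-turtles \
  -n rancher-turtles-system \
  --create-namespace \
  --dependency-update \
  --wait
```
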
For Rancher versions >= 2.13, Rancher Turtles is preinstalled and enabled by default; only the Rancher Turtles UI has to be installed separately: https://turtles.docs.rancher.com/turtles/v0.24/en/tutorials/quickstart.html#_capi_ui_extension_installation

Now install the following providers (via the GUI: Cluster Management > CAPI > Provider > Create). A declarative alternative is sketched below the table.

|Key|Value bootstrap|Value controlplane|Value infrastructure|
|---|---|---|---|
|Namespace|rke2-bootstrap|rke2-controlplane|capo-system|
|Name|rke2-bootstrap|rke2-controlplane|infrastructure-openstack|
|Provider|rke2|rke2|openstack|
|Provider type|bootstrap|controlPlane|infrastructure|
|Features: Enable cluster resource set|yes|yes|yes|
|Features: Enable cluster topology|yes|yes|yes|
|Features: Enable machine pool|yes|yes|yes|
|Variables|EXP_RUNTIME_SDK=true|EXP_RUNTIME_SDK=true|EXP_RUNTIME_SDK=true|
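
If you prefer to create the providers declaratively instead of via the GUI, Turtles exposes a CAPIProvider custom resource. A minimal sketch for the infrastructure provider, with field names assumed from the Turtles CAPIProvider CRD (the bootstrap and control-plane providers follow the same pattern, using the values from the table):

```sh
# Sketch only: verify the CAPIProvider API version against your Turtles release
kubectl create namespace capo-system --dry-run=client -o yaml | kubectl apply -f -
cat <<EOF | kubectl apply -f -
apiVersion: turtles-capi.cattle.io/v1alpha1
kind: CAPIProvider
metadata:
  name: infrastructure-openstack
  namespace: capo-system
spec:
  name: openstack
  type: infrastructure
  features:
    clusterResourceSet: true
    clusterTopology: true
    machinePool: true
  variables:
    EXP_RUNTIME_SDK: "true"
EOF
```
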

```sh
# Init the openstack-resource-controller (ORC)
kubectl apply -f https://github.com/k-orc/openstack-resource-controller/releases/latest/download/install.yaml
```
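
A quick sanity check that the ORC controller came up (the namespace is created by the upstream manifest and may change between releases, hence the broad match):

```sh
# The ORC controller pods should reach the Running state before you continue
kubectl get pods -A | grep -i orc
```
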

```sh
# Install CSO and CSPO
helm upgrade -i cso \
  -n cso-system \
  --create-namespace \
  oci://registry.scs.community/cluster-stacks/cso
```
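
Before continuing, verify that the operator deployments are up:

```sh
# CSO (and the CSPO shipped with the chart) run in the cso-system namespace created above
kubectl -n cso-system get pods
```
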

```sh
# Variables for the workload cluster to be created
export CLUSTER_NAMESPACE=cluster
export CLUSTER_NAME=my-cluster
export CLUSTERSTACK_NAMESPACE=cluster
export CLUSTERSTACK_VERSION=v6
export OS_CLIENT_CONFIG_FILE=${PWD}/clouds.yaml
# Create the namespace and mark it for Rancher auto-import
kubectl create namespace $CLUSTER_NAMESPACE --dry-run=client -o yaml | kubectl apply -f -
kubectl label namespace $CLUSTER_NAMESPACE cluster-api.cattle.io/rancher-auto-import=true
```
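
The rancher-auto-import label is what makes Rancher import the resulting CAPI cluster automatically; you can confirm it is set:

```sh
# The output should include cluster-api.cattle.io/rancher-auto-import=true
kubectl get namespace $CLUSTER_NAMESPACE --show-labels
```
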

```sh
# Create the secret for CAPO
kubectl create secret -n $CLUSTER_NAMESPACE generic openstack --from-file=clouds.yaml=$OS_CLIENT_CONFIG_FILE --dry-run=client -o yaml | kubectl apply -f -

# Prepare the secret as it will be deployed in the workload cluster
kubectl create secret -n kube-system generic clouds-yaml --from-file=clouds.yaml=$OS_CLIENT_CONFIG_FILE --dry-run=client -o yaml > clouds-yaml-secret

# Add the secret to the ClusterResourceSet secret in the management cluster
kubectl create -n $CLUSTER_NAMESPACE secret generic clouds-yaml --from-file=clouds-yaml-secret --type=addons.cluster.x-k8s.io/resource-set --dry-run=client -o yaml | kubectl apply -f -
```

```sh
# Apply a ClusterResourceSet that distributes the clouds-yaml secret to every
# workload cluster carrying the label managed-secret=clouds-yaml (the Cluster
# created below has this label)
cat <<EOF | kubectl apply -f -
apiVersion: addons.cluster.x-k8s.io/v1beta1
kind: ClusterResourceSet
metadata:
  name: clouds-yaml
  namespace: $CLUSTER_NAMESPACE
spec:
  strategy: "Reconcile"
  clusterSelector:
    matchLabels:
      managed-secret: clouds-yaml
  resources:
    - name: clouds-yaml
      kind: Secret
EOF
```
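
You can check the resource set in the management cluster; once a matching cluster exists, a corresponding ClusterResourceSetBinding is created:

```sh
# Binding objects only appear after a cluster with the managed-secret label exists
kubectl -n $CLUSTER_NAMESPACE get clusterresourceset,clusterresourcesetbinding
```
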

```sh
# Apply the ClusterStack resource
cat <<EOF | kubectl apply -f -
apiVersion: clusterstack.x-k8s.io/v1alpha1
kind: ClusterStack
metadata:
  name: openstack
  namespace: $CLUSTERSTACK_NAMESPACE
spec:
  provider: openstack
  name: rke2
  kubernetesVersion: "1.33"
  channel: stable
  autoSubscribe: false
  noProvider: true
  versions:
    - $CLUSTERSTACK_VERSION
EOF
```
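
The CSO now fetches the release assets for the pinned version; watch until the stack and its release are ready (resource names as defined by the cluster-stack-operator CRDs):

```sh
# The ClusterStack and its ClusterStackRelease should become ready
kubectl -n $CLUSTERSTACK_NAMESPACE get clusterstack,clusterstackrelease
```
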

```sh
# Apply the Cluster resource
cat <<EOF | kubectl apply -f -
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: $CLUSTER_NAME
  namespace: $CLUSTER_NAMESPACE
  labels:
    managed-secret: clouds-yaml
spec:
  clusterNetwork:
    pods:
      cidrBlocks:
      - "172.16.0.0/16"
    serviceDomain: cluster.local
    services:
      cidrBlocks:
      - "10.96.0.0/12"
  topology:
    variables:
    - name: clusterCNI
      value: "cilium" # calico is also possible, but must be patched manually after installation: kubectl patch ippools.crd.projectcalico.org default-ipv4-ippool --type='json' -p '[{"op": "replace", "path": "/spec/ipipMode", "value":"CrossSubnet"}]'
    - name: apiServerLoadBalancer
      value: "octavia-ovn"
    - name: imageAddVersion
      value: false
    - name: imageName
      value: "Ubuntu 24.04"
    - name: workerFlavor
      value: "SCS-4V-8"
    - name: controlPlaneFlavor
      value: "SCS-4V-8"
    - name: bastionFlavor
      value: "SCS-2V-4"
    - name: bastionEnabled
      value: true
    class: openstack-rke2-1-33-$CLUSTERSTACK_VERSION
    classNamespace: $CLUSTERSTACK_NAMESPACE
    controlPlane:
      replicas: 1
    version: v1.33.6+rke2r1
    workers:
      machineDeployments:
      - class: default-worker
        name: md-0
        replicas: 1
EOF
```
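
Provisioning takes several minutes; you can follow progress from the management cluster:

```sh
# Show the cluster topology with the state of all machines
clusterctl describe cluster -n $CLUSTER_NAMESPACE $CLUSTER_NAME
kubectl -n $CLUSTER_NAMESPACE get machines
```
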

```sh
# Fetch the kubeconfig of the new workload cluster
clusterctl get kubeconfig -n $CLUSTER_NAMESPACE $CLUSTER_NAME > /tmp/kubeconfig
kubectl get nodes --kubeconfig /tmp/kubeconfig
# Enable the LoadBalancer service of the rke2-ingress-nginx controller
kubectl --kubeconfig /tmp/kubeconfig -n kube-system patch HelmChart.helm.cattle.io rke2-ingress-nginx --type='json' -p '[{"op": "add", "path": "/spec/set/controller.service.enabled", "value":"true"}]'
# Set the LoadBalancer IP of the ingress controller
kubectl --kubeconfig /tmp/kubeconfig -n kube-system patch HelmChart.helm.cattle.io rke2-ingress-nginx --type='json' -p '[{"op": "add", "path": "/spec/set/controller.service.loadBalancerIP", "value":"xxx.xxx.xxx.xxx"}]'
```
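
Afterwards the ingress controller should expose a Service of type LoadBalancer with the configured IP (the service name below is the rke2 default and may differ in your release):

```sh
# EXTERNAL-IP should show the loadBalancerIP configured above
kubectl --kubeconfig /tmp/kubeconfig -n kube-system get svc rke2-ingress-nginx-controller
```
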