Skip to content

Commit a22a9d9

Browse files
authored
Merge pull request #149 from gitgrave/feat/proxmox
samples: add proxmox
2 parents 0346afc + 6f33d5b commit a22a9d9

File tree

3 files changed

+392
-0
lines changed

3 files changed

+392
-0
lines changed

samples/proxmox/README.md

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
# Importing a VM template into Proxmox

A standard KVM-optimized Ubuntu 22.04 image can be imported via:

```
TEMPLATE_URL=https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64-disk-kvm.img
TEMPLATE_VMID=10000
TEMPLATE_NAME=ubuntu-22.04
TEMPLATE_STORAGE=tank
TEMPLATE_DISK_OPTIONS="discard=on,iothread=1,ssd=1"

curl -o template.img ${TEMPLATE_URL}

qm create "${TEMPLATE_VMID}" --name ${TEMPLATE_NAME} --memory 16
qm importdisk "${TEMPLATE_VMID}" template.img "${TEMPLATE_STORAGE}"
qm set "${TEMPLATE_VMID}" \
  --scsihw virtio-scsi-single \
  --scsi0 ${TEMPLATE_STORAGE}:vm-${TEMPLATE_VMID}-disk-0,${TEMPLATE_DISK_OPTIONS} \
  --boot order=scsi0 \
  --cpu host \
  --rng0 source=/dev/urandom \
  --template 1 \
  --agent 1 \
  --onboot 1
```

Then, for Ubuntu to work properly, you have to extend the `preK3sCommands` of both the `KThreesConfigTemplate` and the `KThreesControlPlane` with `apt update && apt -y install qemu-guest-agent && systemctl enable --now qemu-guest-agent`.
Lines changed: 283 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,283 @@
1+
---
2+
apiVersion: cluster.x-k8s.io/v1beta1
3+
kind: Cluster
4+
metadata:
5+
name: "${CLUSTER_NAME}"
6+
spec:
7+
clusterNetwork:
8+
pods:
9+
cidrBlocks:
10+
- 10.42.0.0/16
11+
services:
12+
cidrBlocks:
13+
- 10.43.0.0/16
14+
serviceDomain: cluster.local
15+
infrastructureRef:
16+
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
17+
kind: ProxmoxCluster
18+
name: "${CLUSTER_NAME}"
19+
controlPlaneRef:
20+
apiVersion: controlplane.cluster.x-k8s.io/v1beta2
21+
kind: KThreesControlPlane
22+
name: "${CLUSTER_NAME}-control-plane"
23+
---
24+
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
25+
kind: ProxmoxCluster
26+
metadata:
27+
name: "${CLUSTER_NAME}"
28+
spec:
29+
controlPlaneEndpoint:
30+
host: ${CONTROL_PLANE_ENDPOINT_IP}
31+
port: 6443
32+
ipv4Config:
33+
addresses: ${NODE_IP_RANGES}
34+
prefix: ${IP_PREFIX}
35+
gateway: ${GATEWAY}
36+
dnsServers: ${DNS_SERVERS}
37+
allowedNodes: ${ALLOWED_NODES:=[]}
38+
---
39+
kind: ProxmoxMachineTemplate
40+
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
41+
metadata:
42+
name: "${CLUSTER_NAME}-control-plane"
43+
spec:
44+
template:
45+
spec:
46+
sourceNode: "${PROXMOX_SOURCENODE}"
47+
templateID: ${PROXMOX_TEMPLATE_VMID}
48+
format: "qcow2"
49+
full: true
50+
numSockets: ${NUM_SOCKETS:=1}
51+
numCores: ${NUM_CORES:=2}
52+
memoryMiB: ${MEMORY_MIB:=2048}
53+
disks:
54+
bootVolume:
55+
disk: ${BOOT_VOLUME_DEVICE:=scsi0}
56+
sizeGb: ${BOOT_VOLUME_SIZE:=32}
57+
network:
58+
default:
59+
bridge: ${BRIDGE}
60+
model: virtio
61+
---
62+
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
63+
kind: ProxmoxMachineTemplate
64+
metadata:
65+
name: "${CLUSTER_NAME}-worker"
66+
spec:
67+
template:
68+
spec:
69+
sourceNode: "${PROXMOX_SOURCENODE}"
70+
templateID: ${PROXMOX_TEMPLATE_VMID}
71+
format: "qcow2"
72+
full: true
73+
numSockets: ${NUM_SOCKETS:=1}
74+
numCores: ${NUM_CORES:=1}
75+
memoryMiB: ${MEMORY_MIB:=2048}
76+
disks:
77+
bootVolume:
78+
disk: ${BOOT_VOLUME_DEVICE:=scsi0}
79+
sizeGb: ${BOOT_VOLUME_SIZE:=32}
80+
network:
81+
default:
82+
bridge: ${BRIDGE}
83+
model: virtio
84+
---
85+
apiVersion: controlplane.cluster.x-k8s.io/v1beta2
86+
kind: KThreesControlPlane
87+
metadata:
88+
name: "${CLUSTER_NAME}-control-plane"
89+
spec:
90+
machineTemplate:
91+
infrastructureRef:
92+
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
93+
kind: ProxmoxMachineTemplate
94+
name: "${CLUSTER_NAME}-control-plane"
95+
kthreesConfigSpec:
96+
serverConfig:
97+
# cloudProviderName: "external"
98+
disableCloudController: false
99+
disableComponents: ${K3S_DISABLE_COMPONENTS:=[]}
100+
agentConfig:
101+
nodeName: "{{ ds.meta_data.local_hostname }}"
102+
kubeletArgs:
103+
- "provider-id=proxmox://{{ ds.meta_data.instance_id }}"
104+
files:
105+
- path: /var/lib/rancher/k3s/server/manifests/kube-vip.yaml
106+
owner: root:root
107+
content: |
108+
---
109+
apiVersion: v1
110+
kind: ServiceAccount
111+
metadata:
112+
name: kube-vip
113+
namespace: kube-system
114+
---
115+
apiVersion: rbac.authorization.k8s.io/v1
116+
kind: ClusterRole
117+
metadata:
118+
annotations:
119+
rbac.authorization.kubernetes.io/autoupdate: "true"
120+
name: system:kube-vip-role
121+
rules:
122+
- apiGroups: [""]
123+
resources: ["services/status"]
124+
verbs: ["update"]
125+
- apiGroups: [""]
126+
resources: ["services", "endpoints"]
127+
verbs: ["list","get","watch", "update"]
128+
- apiGroups: [""]
129+
resources: ["nodes"]
130+
verbs: ["list","get","watch", "update", "patch"]
131+
- apiGroups: ["coordination.k8s.io"]
132+
resources: ["leases"]
133+
verbs: ["list", "get", "watch", "update", "create"]
134+
- apiGroups: ["discovery.k8s.io"]
135+
resources: ["endpointslices"]
136+
verbs: ["list","get","watch", "update"]
137+
---
138+
kind: ClusterRoleBinding
139+
apiVersion: rbac.authorization.k8s.io/v1
140+
metadata:
141+
name: system:kube-vip-binding
142+
roleRef:
143+
apiGroup: rbac.authorization.k8s.io
144+
kind: ClusterRole
145+
name: system:kube-vip-role
146+
subjects:
147+
- kind: ServiceAccount
148+
name: kube-vip
149+
namespace: kube-system
150+
---
151+
apiVersion: apps/v1
152+
kind: DaemonSet
153+
metadata:
154+
creationTimestamp: null
155+
labels:
156+
app.kubernetes.io/name: kube-vip-ds
157+
app.kubernetes.io/version: v0.8.7
158+
name: kube-vip-ds
159+
namespace: kube-system
160+
spec:
161+
selector:
162+
matchLabels:
163+
app.kubernetes.io/name: kube-vip-ds
164+
template:
165+
metadata:
166+
creationTimestamp: null
167+
labels:
168+
app.kubernetes.io/name: kube-vip-ds
169+
app.kubernetes.io/version: v0.8.7
170+
spec:
171+
affinity:
172+
nodeAffinity:
173+
requiredDuringSchedulingIgnoredDuringExecution:
174+
nodeSelectorTerms:
175+
- matchExpressions:
176+
- key: node-role.kubernetes.io/master
177+
operator: Exists
178+
- matchExpressions:
179+
- key: node-role.kubernetes.io/control-plane
180+
operator: Exists
181+
containers:
182+
- args:
183+
- manager
184+
env:
185+
- name: vip_arp
186+
value: "true"
187+
- name: vip_nodename
188+
valueFrom:
189+
fieldRef:
190+
fieldPath: spec.nodeName
191+
- name: vip_interface
192+
value: ""
193+
- name: address
194+
value: ${CONTROL_PLANE_ENDPOINT_IP}
195+
- name: port
196+
value: ${CONTROL_PLANE_ENDPOINT_PORT="6443"}
197+
- name: dns_mode
198+
value: first
199+
- name: cp_enable
200+
value: "true"
201+
- name: cp_namespace
202+
value: kube-system
203+
- name: svc_enable
204+
value: "true"
205+
- name: svc_leasename
206+
value: plndr-svcs-lock
207+
- name: vip_leaderelection
208+
value: "true"
209+
- name: vip_leasename
210+
value: plndr-cp-lock
211+
- name: vip_leaseduration
212+
value: "5"
213+
- name: vip_renewdeadline
214+
value: "3"
215+
- name: vip_retryperiod
216+
value: "1"
217+
- name: prometheus_server
218+
value: :2112
219+
- name: enableUPNP
220+
value: "false"
221+
image: ghcr.io/kube-vip/kube-vip:v0.8.7
222+
imagePullPolicy: IfNotPresent
223+
name: kube-vip
224+
resources: {}
225+
securityContext:
226+
capabilities:
227+
add:
228+
- NET_ADMIN
229+
- NET_RAW
230+
hostNetwork: true
231+
serviceAccountName: kube-vip
232+
tolerations:
233+
- effect: NoSchedule
234+
operator: Exists
235+
- effect: NoExecute
236+
operator: Exists
237+
updateStrategy: {}
238+
preK3sCommands:
239+
- mkdir -p /root/.ssh
240+
- chmod 700 /root/.ssh
241+
- echo "${VM_SSH_KEYS}" > /root/.ssh/authorized_keys
242+
- chmod 600 /root/.ssh/authorized_keys
243+
replicas: ${CONTROL_PLANE_MACHINE_COUNT=1}
244+
version: "${KUBERNETES_VERSION}"
245+
---
246+
apiVersion: bootstrap.cluster.x-k8s.io/v1beta2
247+
kind: KThreesConfigTemplate
248+
metadata:
249+
name: "${CLUSTER_NAME}-worker"
250+
spec:
251+
template:
252+
spec:
253+
preK3sCommands:
254+
- mkdir -p /root/.ssh
255+
- chmod 700 /root/.ssh
256+
- echo "${VM_SSH_KEYS}" > /root/.ssh/authorized_keys
257+
- chmod 600 /root/.ssh/authorized_keys
258+
---
259+
apiVersion: cluster.x-k8s.io/v1beta1
260+
kind: MachineDeployment
261+
metadata:
262+
name: "${CLUSTER_NAME}-worker"
263+
spec:
264+
clusterName: "${CLUSTER_NAME}"
265+
replicas: ${WORKER_MACHINE_COUNT=1}
266+
selector:
267+
matchLabels: {}
268+
template:
269+
metadata:
270+
labels:
271+
node-role.kubernetes.io/node: ""
272+
spec:
273+
clusterName: "${CLUSTER_NAME}"
274+
version: "${KUBERNETES_VERSION}"
275+
bootstrap:
276+
configRef:
277+
apiVersion: bootstrap.cluster.x-k8s.io/v1beta2
278+
kind: KThreesConfigTemplate
279+
name: "${CLUSTER_NAME}-worker"
280+
infrastructureRef:
281+
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
282+
kind: ProxmoxMachineTemplate
283+
name: "${CLUSTER_NAME}-worker"

samples/proxmox/setup.sh

Lines changed: 83 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,83 @@
1+
## Configure your Proxmox parameters
2+
3+
if [ -z "${CLUSTER_NAME}" ]; then
4+
echo "Please set CLUSTER_NAME"
5+
exit 0
6+
fi
7+
8+
if [ -z "${KUBERNETES_VERSION}" ]; then
9+
echo "Please set KUBERNETES_VERSION. For ex. v1.31.2+k3s1"
10+
exit 0
11+
fi
12+
13+
if [ -z "${CONTROL_PLANE_ENDPOINT_IP}" ]; then
14+
echo "Please set CONTROL_PLANE_ENDPOINT_IP. For ex. '10.10.10.4'"
15+
exit 0
16+
fi
17+
18+
if [ -z "${NODE_IP_RANGES}" ] || [ -z "${GATEWAY}" ] || [ -z "${IP_PREFIX}" ] || [ -z "${DNS_SERVERS}" ] || [ -z "${BRIDGE}" ]; then
19+
echo "Please set NODE_IP_RANGES. For ex. '[10.10.10.5-10.10.10.50]'"
20+
echo "Please set GATEWAY. For ex. '10.10.10.1'"
21+
echo "Please set IP_PREFIX. For ex. '24'"
22+
echo "Please set DNS_SERVERS. For ex. '[8.8.8.8,8.8.4.4]'"
23+
echo "Please set BRIDGE. For ex. 'vmbr0'"
24+
exit 0
25+
fi
26+
27+
if [ -z "${PROXMOX_URL}" ] || [ -z "${PROXMOX_TOKEN}" ] || [ -z "${PROXMOX_SECRET}" ] || [ -z "${PROXMOX_SOURCENODE}" ] || [ -z "${PROXMOX_TEMPLATE_VMID}" ]; then
28+
echo "Please set PROXMOX_URL, PROXMOX_TOKEN, PROXMOX_SECRET, PROXMOX_SOURCENODE, PROXMOX_TEMPLATE_VMID"
29+
echo "- See https://github.com/ionos-cloud/cluster-api-provider-proxmox/blob/main/docs/Usage.md"
30+
exit 0
31+
fi
32+
33+
# The device used for the boot disk.
34+
export BOOT_VOLUME_DEVICE="scsi0"
35+
# The size of the boot disk in GB.
36+
export BOOT_VOLUME_SIZE="32"
37+
# The number of sockets for the VMs.
38+
export NUM_SOCKETS="1"
39+
# The number of cores for the VMs.
40+
export NUM_CORES="1"
41+
# The memory size for the VMs.
42+
export MEMORY_MIB="4069"
43+
44+
# K3s components to disable
45+
# For example because you plan to use MetalLB over ServiceLB, or Longhorn over local-storage, or...
46+
# export K3S_DISABLE_COMPONENTS="[servicelb,local-storage,traefik,metrics-server,helm-controller]"
47+
48+
## Install your cluser-api-k3s provider correctly
49+
mkdir -p ~/.cluster-api
50+
cat samples/clusterctl.yaml | envsubst > ~/.cluster-api/clusterctl.yaml
51+
52+
cat >> ~/.cluster-api/clusterctl.yaml <<EOC
53+
- name: "in-cluster"
54+
url: https://github.com/kubernetes-sigs/cluster-api-ipam-provider-in-cluster/releases/latest/ipam-components.yaml
55+
type: "IPAMProvider"
56+
EOC
57+
58+
clusterctl init \
59+
--infrastructure proxmox \
60+
--bootstrap k3s \
61+
--control-plane k3s \
62+
--ipam in-cluster
63+
64+
kubectl wait --for=condition=Available --timeout=5m \
65+
-n capi-system deployment/capi-controller-manager
66+
kubectl wait --for=condition=Available --timeout=5m \
67+
-n capi-k3s-control-plane-system deployment/capi-k3s-control-plane-controller-manager
68+
kubectl wait --for=condition=Available --timeout=5m \
69+
-n capi-k3s-bootstrap-system deployment/capi-k3s-bootstrap-controller-manager
70+
kubectl wait --for=condition=Available --timeout=5m \
71+
-n capmox-system deployment/capmox-controller-manager
72+
73+
clusterctl generate cluster \
74+
"${CLUSTER_NAME}" \
75+
--from samples/proxmox/cluster-template-k3s.yaml \
76+
| kubectl apply -f -
77+
78+
echo "Once the cluster is up, run 'clusterctl get kubeconfig $CLUSTER_NAME > k3s.yaml' to retrieve your kubeconfig"
79+
echo "- Run 'kubectl scale kthreescontrolplane $CLUSTER_NAME-control-plane --replicas 3' to enable HA for your control-planes"
80+
echo "- or run 'kubectl scale machinedeployment $CLUSTER_NAME-worker --replicas 3' to deploy worker nodes"
81+
echo "- or to just use the single node cluster, you might need to also run the following commands:"
82+
echo " kubectl taint nodes --all node-role.kubernetes.io/control-plane-"
83+
echo " kubectl taint nodes --all node-role.kubernetes.io/master-"

0 commit comments

Comments
 (0)