
Commit 7a29de5

fixup! test: Add volumeattachment demo

1 parent 1260928 commit 7a29de5

File tree: 2 files changed, +321 -0 lines changed

Lines changed: 212 additions & 0 deletions
@@ -0,0 +1,212 @@
#!/usr/bin/env bash

# Copyright 2025 Nutanix. All rights reserved.
# SPDX-License-Identifier: Apache-2.0

set -euo pipefail
IFS=$'\n\t'

# This script demonstrates:
# 1. Creating an EKS cluster via ClusterClass
# 2. Creating a PersistentVolumeClaim (PVC)
# 3. Populating it with data via a Pod
# 4. Taking a VolumeSnapshot
# 5. Restoring the snapshot to a new PVC
# 6. Attaching the restored volume to a node via VolumeAttachment
# 7. Validating the data is present on the attached volume

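# Prerequisites (assumed, not enforced by this script): kubectl pointed at a
# Cluster API management cluster that can provision EKS clusters, an envsubst
# implementation that supports --no-unset/-i (e.g. the Go implementation), and
# gojq available on the PATH.
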
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly SCRIPT_DIR
cd "${SCRIPT_DIR}"

if [[ -z ${EKS_CLUSTER_NAME:-} ]]; then
  EKS_CLUSTER_NAME="$(kubectl get clusters -l caren.nutanix.com/demo-name="EBSSnapshotVolumeAttach" -o custom-columns=NAME:.metadata.name --no-headers)"
  if [[ -z ${EKS_CLUSTER_NAME} ]]; then
    EKS_CLUSTER_NAME="eks-volumeattach-$(head /dev/urandom | tr -dc a-z0-9 | head -c6)"
  fi
fi
export EKS_CLUSTER_NAME
echo "Using EKS cluster name: ${EKS_CLUSTER_NAME}"

echo
echo "Step 1: Create an EKS cluster via ClusterClass"
envsubst --no-unset -i eks-test.yaml | kubectl apply --server-side -f -
kubectl wait --for=condition=Ready cluster/"${EKS_CLUSTER_NAME}" --timeout=20m

echo "Cluster is ready, getting kubeconfig"
EKS_KUBECONFIG="$(mktemp -p "${TMPDIR:-/tmp}")"
kubectl get secrets "${EKS_CLUSTER_NAME}-user-kubeconfig" -oyaml |
  gojq --yaml-input -r '.data.value | @base64d' >"${EKS_KUBECONFIG}"
export KUBECONFIG="${EKS_KUBECONFIG}"
echo "Using kubeconfig: ${EKS_KUBECONFIG}"
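
# These StorageClass names are expected to match the StorageClasses created by the
# CSI addon in the cluster manifest applied in Step 1 (storageClassConfigs
# "default" and "immediate-binding" for the aws-ebs provider).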
STORAGE_CLASS="${STORAGE_CLASS:-aws-ebs-default}"
STORAGE_CLASS_IMMEDIATE="${STORAGE_CLASS_IMMEDIATE:-aws-ebs-immediate-binding}"
VOLUME_SNAPSHOT_CLASS="${VOLUME_SNAPSHOT_CLASS:-ebs-snapclass}"
ORIGINAL_PVC="${ORIGINAL_PVC:-pvc-demo-original}"
RESTORED_PVC="${RESTORED_PVC:-pvc-demo-restored}"
SNAPSHOT_NAME="${SNAPSHOT_NAME:-pvc-demo-snapshot}"
DATA_POD="${DATA_POD:-data-writer}"
RESTORE_POD="${RESTORE_POD:-data-reader}"

NAMESPACE="$(kubectl get namespace -l caren.nutanix.com/demo-name="EBSSnapshotVolumeAttach" -o custom-columns=NAME:.metadata.name --no-headers)"
if [[ -z ${NAMESPACE} ]]; then
  NAMESPACE="ebs-demo-$(head /dev/urandom | tr -dc a-z0-9 | head -c6)"
  cat <<EOF | kubectl apply --server-side -f -
apiVersion: v1
kind: Namespace
metadata:
  name: ${NAMESPACE}
  labels:
    caren.nutanix.com/demo-name: "EBSSnapshotVolumeAttach"
EOF
fi
echo "Using namespace: ${NAMESPACE}"

echo
echo "Step 2: Create a PersistentVolumeClaim"
cat <<EOF | kubectl apply --server-side -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ${ORIGINAL_PVC}
  namespace: ${NAMESPACE}
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: ${STORAGE_CLASS}
EOF
kubectl -n "${NAMESPACE}" get pvc "${ORIGINAL_PVC}"

echo
echo "Step 3: Create a Pod to write data to the PVC"
cat <<EOF | kubectl apply --server-side -f -
apiVersion: v1
kind: Pod
metadata:
  name: ${DATA_POD}
  namespace: ${NAMESPACE}
spec:
  restartPolicy: Never
  containers:
    - name: writer
      image: busybox
      command: ["/bin/sh", "-c"]
      args:
        - echo "Hello from original PVC!" > /data/hello.txt; sleep 10
      volumeMounts:
        - name: data
          mountPath: /data
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: ${ORIGINAL_PVC}
EOF
kubectl -n "${NAMESPACE}" get pod "${DATA_POD}"

echo "Waiting for PVC to be bound..."
111+
kubectl -n "${NAMESPACE}" wait --for=jsonpath='{.status.phase}'=Bound pvc/"${ORIGINAL_PVC}" --timeout=120s
112+
kubectl -n "${NAMESPACE}" get pvc "${ORIGINAL_PVC}"
113+
114+
echo "Waiting for data writer pod to complete..."
115+
kubectl -n "${NAMESPACE}" wait --for=jsonpath='{.status.phase}'=Succeeded pod/"${DATA_POD}" --timeout=120s
116+
kubectl -n "${NAMESPACE}" get pod "${DATA_POD}"
117+
118+
echo
119+
echo "Step 4: Create a VolumeSnapshotClass"
120+
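# VolumeSnapshotClass is cluster-scoped; deletionPolicy: Delete means the backing
# EBS snapshot is removed when the VolumeSnapshot is deleted.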
cat <<EOF | kubectl apply --server-side -f -
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: ${VOLUME_SNAPSHOT_CLASS}
driver: ebs.csi.aws.com
deletionPolicy: Delete
EOF
kubectl -n "${NAMESPACE}" get volumesnapshotclass "${VOLUME_SNAPSHOT_CLASS}"

echo
echo "Step 5: Take a snapshot of the PVC"
cat <<EOF | kubectl apply --server-side -f -
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: ${SNAPSHOT_NAME}
  namespace: ${NAMESPACE}
spec:
  volumeSnapshotClassName: ${VOLUME_SNAPSHOT_CLASS}
  source:
    persistentVolumeClaimName: ${ORIGINAL_PVC}
EOF
kubectl -n "${NAMESPACE}" get volumesnapshot "${SNAPSHOT_NAME}"

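# readyToUse turns true once the CSI driver reports the underlying EBS snapshot as
# complete, at which point it can be used as a PVC dataSource.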
echo "Waiting for VolumeSnapshot to be ready..."
146+
kubectl -n "${NAMESPACE}" wait --for=jsonpath='{.status.readyToUse}'=true volumesnapshot/"${SNAPSHOT_NAME}" --timeout=120s
147+
kubectl -n "${NAMESPACE}" get volumesnapshot "${SNAPSHOT_NAME}"
148+
149+
echo
150+
echo "Step 6: Restore the snapshot to a new PVC"
151+
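# The restored PVC references the snapshot via dataSource and uses the
# Immediate-binding StorageClass: no Pod will ever consume this claim, so
# WaitForFirstConsumer binding would leave it Pending forever.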
cat <<EOF | kubectl apply --server-side -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ${RESTORED_PVC}
  namespace: ${NAMESPACE}
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: ${STORAGE_CLASS_IMMEDIATE}
  dataSource:
    name: ${SNAPSHOT_NAME}
    kind: VolumeSnapshot
    apiGroup: snapshot.storage.k8s.io
EOF
kubectl -n "${NAMESPACE}" get pvc "${RESTORED_PVC}"

echo "Waiting for restored PVC to be bound..."
kubectl -n "${NAMESPACE}" wait --for=jsonpath='{.status.phase}'=Bound pvc/"${RESTORED_PVC}" --timeout=120s
kubectl -n "${NAMESPACE}" get pvc "${RESTORED_PVC}"

echo
echo "Step 7: Attach the restored volume to a node via VolumeAttachment"
# Find the PV backing the restored PVC
RESTORED_PV="$(kubectl -n "${NAMESPACE}" get pvc "${RESTORED_PVC}" -o jsonpath='{.spec.volumeName}')"
# Find a node to attach to
NODE_NAME=$(kubectl get nodes -o jsonpath='{.items[0].metadata.name}')

ATTACHMENT_NAME="attach-${RESTORED_PV}"

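# VolumeAttachment objects are normally created by the kube-controller-manager
# attach/detach controller when a Pod using the volume is scheduled. Creating one
# directly asks the CSI external-attacher to call ControllerPublishVolume and
# attach the EBS volume to the node without any Pod involved.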
cat <<EOF | kubectl apply --server-side -f -
apiVersion: storage.k8s.io/v1
kind: VolumeAttachment
metadata:
  name: ${ATTACHMENT_NAME}
spec:
  attacher: ebs.csi.aws.com
  nodeName: ${NODE_NAME}
  source:
    persistentVolumeName: ${RESTORED_PV}
EOF
kubectl -n "${NAMESPACE}" get volumeattachment "${ATTACHMENT_NAME}"

echo "Waiting for VolumeAttachment to be attached..."
kubectl wait --for=jsonpath='{.status.attached}'=true volumeattachment/"${ATTACHMENT_NAME}" --timeout=120s
kubectl -n "${NAMESPACE}" get volumeattachment "${ATTACHMENT_NAME}"

echo
echo "Step 8: Show that the restored volume is attached directly to the node with the correct data"
ATTACHMENT_DEVICE_PATH=$(kubectl -n "${NAMESPACE}" get volumeattachment "${ATTACHMENT_NAME}" -o jsonpath='{.status.attachmentMetadata.devicePath}')
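# kubectl debug starts a privileged pod on the node; chroot /host lets us mount the
# newly attached device on the host and read the file written in Step 3.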
kubectl debug "node/${NODE_NAME}" --image=ubuntu -it --profile=sysadmin --quiet -- \
  bash -ec "chroot /host bash -ec \"mkdir -p /tmp/attached; mount \"${ATTACHMENT_DEVICE_PATH}\" /tmp/attached; cat /tmp/attached/hello.txt; umount /tmp/attached\""

echo
echo
echo "When you are ready, clean up the resources created by this demo by running:"
echo
echo "kubectl --kubeconfig=${KUBECONFIG} delete namespace ${NAMESPACE}"
echo "kubectl delete cluster ${EKS_CLUSTER_NAME}"
Lines changed: 109 additions & 0 deletions
@@ -0,0 +1,109 @@
apiVersion: v1
data:
  values.yaml: |-
    cni:
      exclusive: false
    hubble:
      enabled: true
      tls:
        auto:
          enabled: true # enable automatic TLS certificate generation
          method: cronJob # auto generate certificates using the cronJob method
          certValidityDuration: 60 # certificate validity duration in days (default 2 months)
          schedule: "0 0 1 * *" # regenerate certificates on the 1st day of each month
      relay:
        enabled: true
        tls:
          server:
            enabled: true
            mtls: true
        image:
          useDigest: false
        priorityClassName: system-cluster-critical
    image:
      useDigest: false
    operator:
      image:
        useDigest: false
    certgen:
      image:
        useDigest: false
    socketLB:
      hostNamespaceOnly: true
    envoy:
      image:
        useDigest: false
    kubeProxyReplacement: true
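    # The template expressions on the next two lines are assumed to be rendered from
    # the Cluster's controlPlaneEndpoint before these values reach the Cilium chart.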
    k8sServiceHost: "{{ trimPrefix "https://" .Cluster.spec.controlPlaneEndpoint.host }}"
    k8sServicePort: "{{ .Cluster.spec.controlPlaneEndpoint.port }}"
    ipam:
      mode: eni
    enableIPv4Masquerade: false
    eni:
      enabled: true
      awsReleaseExcessIPs: true
    routingMode: native
    endpointRoutes:
      enabled: true
kind: ConfigMap
metadata:
  labels:
    cluster.x-k8s.io/provider: eks
  name: "${EKS_CLUSTER_NAME}-cilium-cni-helm-values-template"
  namespace: default
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  annotations:
    preflight.cluster.caren.nutanix.com/skip: all
  labels:
    cluster.x-k8s.io/provider: eks
    caren.nutanix.com/demo-name: "EBSSnapshotVolumeAttach"
  name: "${EKS_CLUSTER_NAME}"
  namespace: default
spec:
  topology:
    class: eks-quick-start
    controlPlane:
      metadata:
        annotations:
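          # kube-proxy is skipped because Cilium is deployed with kubeProxyReplacement: true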
          controlplane.cluster.x-k8s.io/skip-kube-proxy: ""
    variables:
      - name: clusterConfig
        value:
          addons:
            clusterAutoscaler: {}
            cni:
              provider: Cilium
              values:
                sourceRef:
                  kind: ConfigMap
                  name: ${EKS_CLUSTER_NAME}-cilium-cni-helm-values-template
            csi:
              defaultStorage:
                provider: aws-ebs
                storageClassConfig: default
              providers:
                aws-ebs:
                  storageClassConfigs:
                    default: {}
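                    # Immediate binding lets the demo's restored PVC provision without a
                    # consuming Pod (see Step 6 of the demo script).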
                    immediate-binding:
                      volumeBindingMode: Immediate
              snapshotController: {}
            nfd: {}
          eks:
            region: us-west-2
      - name: workerConfig
        value:
          eks:
            instanceType: m5.2xlarge
    version: v1.32.7
    workers:
      machineDeployments:
        - class: default-worker
          metadata:
            annotations:
              cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "1"
              cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size: "1"
          name: md-0
