|
#!/bin/bash

# Parameters (all optional, positional):
#   $1 - K3s server URL; defaults to https://<first local IP>:6443
#   $2 - K3s join token; defaults to reading this host's server node-token
#        (the sudo cat only runs when $2 is not supplied)
#   $3 - name to give the new worker node
K3S_SERVER_URL=${1:-"https://$(hostname -I | cut -d' ' -f1):6443"} # Default K3S server URL
K3S_TOKEN=${2:-$(sudo cat /var/lib/rancher/k3s/server/node-token)} # Retrieve K3S token
NODE_NAME=${3:-"kube-node"} # Default node name
OS_IMAGE_URL="https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img" # Ubuntu OS image
@@ -37,17 +36,13 @@ check_command cloud-localds |
# Step 1: Create user-data file for cloud-init
log_message "INFO" "Creating user-data file for cloud-init..."
# The heredoc delimiter is deliberately unquoted so $K3S_SERVER_URL and
# $K3S_TOKEN expand now; the join token is therefore written in plain text
# into user-data (and later into the seed ISO).
# NOTE: the "#cloud-config" header on the first line is mandatory — without
# it cloud-init does not treat the file as cloud-config and runcmd is ignored.
cat <<EOF > user-data
#cloud-config
users:
  - name: ubuntu
    sudo: ALL=(ALL) NOPASSWD:ALL
    groups: sudo,containerd,cni
runcmd:
  - curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="agent" K3S_URL="$K3S_SERVER_URL" K3S_TOKEN="$K3S_TOKEN" sh -
EOF
52 | 47 |
|
53 | 48 | # Step 2: Create meta-data file for cloud-init |
|
# Step 3: Build the cloud-init seed ISO from user-data and meta-data
log_message "INFO" "Creating cloud-init ISO..."
cloud-localds "$CLOUD_INIT_ISO" user-data meta-data

# One-shot alternative to Step 4, kept for reference:
# wget $OS_IMAGE_URL -O $QCOW2_IMAGE_FILE && qemu-img resize $QCOW2_IMAGE_FILE 10G

65 | 62 | # Step 4: Download and prepare the OS image |
66 | 63 | if [ ! -f "$QCOW2_IMAGE_FILE" ]; then |
67 | 64 | log_message "INFO" "Downloading and resizing the OS image..." |
@@ -106,4 +103,23 @@ log_message "INFO" "Assigning worker role to the new node..." |
# Give the freshly joined node the "worker" role label so it shows up
# with ROLES=worker in `kubectl get nodes`.
kubectl label node "$NODE_NAME" node-role.kubernetes.io/worker=worker

log_message "SUCCESS" "New node $NODE_NAME has been successfully added to the cluster!"
# Step 8: Deploy and Test Nginx with 2 Replicas (TODO: remove after testing)
log_message "INFO" "Deploying Nginx with 2 replicas..."
kubectl create deployment nginx --image=nginx --replicas=2

log_message "INFO" "Waiting for Nginx pods to become ready..."
kubectl wait --for=condition=ready pod -l app=nginx --timeout=120s

log_message "INFO" "Verifying that Nginx pods are running on both nodes..."
# NOTE(review): assumes the master's Kubernetes node name equals $(hostname)
# — confirm against `kubectl get nodes`.
# --no-headers keeps the column header out of the grep -c count.
PODS_ON_MASTER=$(kubectl get pods -o wide --no-headers --field-selector "spec.nodeName=$(hostname)" -l app=nginx | grep -c Running)
PODS_ON_WORKER=$(kubectl get pods -o wide --no-headers --field-selector "spec.nodeName=$NODE_NAME" -l app=nginx | grep -c Running)

if [[ $PODS_ON_MASTER -gt 0 && $PODS_ON_WORKER -gt 0 ]]; then
  log_message "SUCCESS" "Nginx pods are running correctly: $PODS_ON_MASTER on master, $PODS_ON_WORKER on worker."
else
  log_message "ERROR" "Nginx pods are not running correctly. Master: $PODS_ON_MASTER, Worker: $PODS_ON_WORKER."
  exit 1
fi

log_message "SUCCESS" "Test passed: Nginx pods are running correctly on both nodes."
log_message "INFO" "Use the cleanup script to remove the node when it is no longer needed."
0 commit comments