
Commit 3fae97e

chore: adding unit tests on infra package (#58)
* chore: adding unit tests on infra package. Adds some basic unit tests to the infra package.
* chore: fix some flaky e2e tests. Some e2e tests were failing due to a lack of disk space on the GitHub runner; this commit hopefully solves all of them.
1 parent 3a4a982 commit 3fae97e

11 files changed: +222 additions, -149 deletions

.github/workflows/e2e.yaml

Lines changed: 6 additions & 0 deletions
@@ -5,6 +5,7 @@ jobs:
   run:
     runs-on: ubuntu-latest
     strategy:
+      fail-fast: false
       matrix:
         tests:
           - TestBuildBundle
@@ -45,6 +46,11 @@ jobs:
         uses: actions/setup-go@v4
         with:
           go-version: "1.21.0"
+      - name: Free up runner disk space
+        run: |
+          sudo rm -rf /usr/local/lib/android
+          sudo rm -rf /opt/hostedtoolcache/CodeQL
+          sudo rm -rf /opt/hostedtoolcache/Python
       - name: E2E
         run: |
           make e2e-test TEST_NAME=${{ matrix.tests }}
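Note on the new cleanup step: the three directories removed above (the Android SDK, the CodeQL bundles, and the cached Python toolchains) ship with GitHub-hosted ubuntu-latest images and are not needed by these jobs. A minimal sketch for checking the reclaimed space, assuming the standard hosted image layout, would be:

    # print free space before and after removing the unused toolchains
    df -h /                                   # baseline free space on the root volume
    sudo rm -rf /usr/local/lib/android        # Android SDK (several GB on hosted images)
    sudo rm -rf /opt/hostedtoolcache/CodeQL   # CodeQL bundles
    sudo rm -rf /opt/hostedtoolcache/Python   # cached Python toolchains
    df -h /                                   # free space after cleanup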

Makefile

Lines changed: 1 addition & 1 deletion
@@ -154,7 +154,7 @@ e2e-test: helmvm-linux-amd64
     mkdir -p output/tmp
     rm -rf output/tmp/id_rsa*
     ssh-keygen -t rsa -N "" -C "Integration Test Key" -f output/tmp/id_rsa
-    go test -timeout 30m -v ./e2e -run $(TEST_NAME)$
+    go test -timeout 45m -v ./e2e -run $(TEST_NAME)$
 
 .PHONY: create-e2e-workflows
 create-e2e-workflows:
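The workflow above calls this target once per matrix entry as make e2e-test TEST_NAME=${{ matrix.tests }}. A local invocation would look like the sketch below (the test name is just an example taken from this diff; the target also rebuilds the helmvm-linux-amd64 prerequisite and a throwaway SSH key first):

    # run a single e2e test with the new 45-minute go test timeout
    make e2e-test TEST_NAME=TestSingleNodeInstallation

The trailing $ in -run $(TEST_NAME)$ anchors the pattern, so TestSingleNodeInstallation does not also match TestSingleNodeInstallationRockyLinux8 or the other name-prefixed variants.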

e2e/embed_test.go

Lines changed: 0 additions & 5 deletions
@@ -27,11 +27,6 @@ func TestEmbedAndInstall(t *testing.T) {
     if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil {
         t.Fatalf("fail to install embedded ssh in node 0: %v", err)
     }
-    t.Log("creating deployment mounting pvc")
-    line = []string{"deploy-with-pvc.sh"}
-    if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil {
-        t.Fatalf("fail to create deployment with pvc: %v", err)
-    }
 }
 
 func TestEmbedAddonsOnly(t *testing.T) {

e2e/install_test.go

Lines changed: 0 additions & 19 deletions
@@ -87,11 +87,6 @@ func TestSingleNodeInstallation(t *testing.T) {
     if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil {
         t.Fatalf("fail to install helmvm on node %s: %v", tc.Nodes[0], err)
     }
-    t.Log("creating deployment mounting pvc")
-    line = []string{"deploy-with-pvc.sh"}
-    if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil {
-        t.Fatalf("fail to create deployment with pvc: %v", err)
-    }
 }
 
 func TestMultiNodeInstallation(t *testing.T) {
@@ -145,11 +140,6 @@ func TestSingleNodeInstallationRockyLinux8(t *testing.T) {
     if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil {
         t.Fatalf("fail to install helmvm on node %s: %v", tc.Nodes[0], err)
     }
-    t.Log("creating deployment mounting pvc")
-    line = []string{"deploy-with-pvc.sh"}
-    if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil {
-        t.Fatalf("fail to create deployment with pvc: %v", err)
-    }
 }
 
 func TestSingleNodeInstallationDebian12(t *testing.T) {
@@ -174,10 +164,6 @@ func TestSingleNodeInstallationDebian12(t *testing.T) {
         t.Fatalf("fail to install helmvm on node %s: %v", tc.Nodes[0], err)
     }
     t.Log("creating deployment mounting pvc")
-    line = []string{"deploy-with-pvc.sh"}
-    if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil {
-        t.Fatalf("fail to create deployment with pvc: %v", err)
-    }
 }
 
 func TestSingleNodeInstallationCentos8Stream(t *testing.T) {
@@ -205,11 +191,6 @@ func TestSingleNodeInstallationCentos8Stream(t *testing.T) {
     if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil {
         t.Fatalf("fail to install helmvm on node %s: %v", tc.Nodes[0], err)
     }
-    t.Log("creating deployment mounting pvc")
-    line = []string{"deploy-with-pvc.sh"}
-    if _, _, err := RunCommandOnNode(t, tc, 0, line); err != nil {
-        t.Fatalf("fail to create deployment with pvc: %v", err)
-    }
 }
 
 func TestMultiNodeInteractiveInstallation(t *testing.T) {

e2e/scripts/addons-only.sh

Lines changed: 4 additions & 4 deletions
@@ -53,16 +53,16 @@ embed_helm_chart() {
 }
 
 wait_for_memcached_pods() {
-    ready=$(kubectl get pods -n helmvm | grep -v NotReady | grep Ready | grep -c memcached || true)
+    ready=$(kubectl get pods -n helmvm | grep -c memcached || true)
     counter=0
     while [ -z "$ready" ] || [ "$ready" -lt "1" ]; do
         if [ "$counter" -gt 36 ]; then
             return 1
         fi
         sleep 5
         counter=$((counter+1))
-        echo "Waiting for memcached pods to be ready"
-        ready=$(kubectl get pods -n helmvm | grep Running | grep -c memcached || true)
+        echo "Waiting for memcached pods"
+        ready=$(kubectl get pods -n helmvm | grep -c memcached || true)
         kubectl get pods -n helmvm 2>&1 || true
         echo "$ready"
     done
@@ -97,7 +97,7 @@ main() {
     fi
     echo "waiting for memcached " >> /tmp/log
     if ! wait_for_memcached_pods; then
-        echo "Memcached pods not reporting healthy"
+        echo "Memcached pods not appearing"
         exit 1
     fi
 }
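The relaxed wait_for_memcached_pods check above now counts any pod whose name contains memcached, regardless of phase, instead of only Ready/Running ones, and the log messages were adjusted to match. A small illustration of what the new pipeline counts, using hypothetical kubectl output:

    # hypothetical `kubectl get pods -n helmvm` rows:
    #   memcached-0   0/1   Pending   0   10s
    #   memcached-1   1/1   Running   0   10s
    kubectl get pods -n helmvm | grep -c memcached    # counts both rows -> 2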

e2e/scripts/deploy-with-pvc.sh

Lines changed: 0 additions & 96 deletions
This file was deleted.

e2e/scripts/embed-and-install.sh

Lines changed: 36 additions & 4 deletions
@@ -54,22 +54,50 @@ embed_helm_chart() {
 }
 
 wait_for_memcached_pods() {
-    ready=$(kubectl get pods -n helmvm | grep -v NotReady | grep Ready | grep -c memcached || true)
+    ready=$(kubectl get pods -n helmvm | grep -c memcached || true)
     counter=0
     while [ "$ready" -lt "1" ]; do
         if [ "$counter" -gt 36 ]; then
             return 1
         fi
         sleep 5
         counter=$((counter+1))
-        echo "Waiting for memcached pods to be ready"
-        ready=$(kubectl get pods -n helmvm | grep Running | grep -c memcached || true)
+        echo "Waiting for memcached pods"
+        ready=$(kubectl get pods -n helmvm | grep -c memcached || true)
         kubectl get pods -n helmvm 2>&1 || true
         echo "$ready"
     done
     return 0
 }
 
+wait_for_pods_running() {
+    local timeout="$1"
+    local start_time
+    local current_time
+    local elapsed_time
+    start_time=$(date +%s)
+    while true; do
+        current_time=$(date +%s)
+        elapsed_time=$((current_time - start_time))
+        if [ "$elapsed_time" -ge "$timeout" ]; then
+            kubectl get pods -A -o yaml || true
+            kubectl describe nodes || true
+            echo "Timed out waiting for all pods to be running."
+            return 1
+        fi
+        local non_running_pods
+        non_running_pods=$(kubectl get pods --all-namespaces --no-headers 2>/dev/null | awk '$4 != "Running" && $4 != "Completed" { print $0 }' | wc -l || echo 1)
+        if [ "$non_running_pods" -ne 0 ]; then
+            echo "Not all pods are running. Waiting."
+            kubectl get pods,nodes -A || true
+            sleep 5
+            continue
+        fi
+        echo "All pods are running."
+        return 0
+    done
+}
+
 main() {
     if ! install_helm ; then
         echo "Failed to install helm"
@@ -96,9 +124,13 @@ main() {
         echo "Nodes not reporting healthy"
         exit 1
     fi
+    if ! wait_for_pods_running 900; then
+        echo "Pods not running"
+        exit 1
+    fi
     echo "waiting for memcached " >> /tmp/log
     if ! wait_for_memcached_pods; then
-        echo "Memcached pods not reporting healthy"
+        echo "Memcached pods not present"
         exit 1
     fi
 }

e2e/scripts/single-node-install.sh

Lines changed: 32 additions & 0 deletions
@@ -17,6 +17,34 @@ wait_for_healthy_node() {
     return 0
 }
 
+wait_for_pods_running() {
+    local timeout="$1"
+    local start_time
+    local current_time
+    local elapsed_time
+    start_time=$(date +%s)
+    while true; do
+        current_time=$(date +%s)
+        elapsed_time=$((current_time - start_time))
+        if [ "$elapsed_time" -ge "$timeout" ]; then
+            kubectl get pods -A -o yaml || true
+            kubectl describe nodes || true
+            echo "Timed out waiting for all pods to be running."
+            return 1
+        fi
+        local non_running_pods
+        non_running_pods=$(kubectl get pods --all-namespaces --no-headers 2>/dev/null | awk '$4 != "Running" && $4 != "Completed" { print $0 }' | wc -l || echo 1)
+        if [ "$non_running_pods" -ne 0 ]; then
+            echo "Not all pods are running. Waiting."
+            kubectl get pods,nodes -A || true
+            sleep 5
+            continue
+        fi
+        echo "All pods are running."
+        return 0
+    done
+}
+
 main() {
     if ! helmvm install --no-prompt 2>&1 | tee /tmp/log ; then
         cat /etc/os-release
@@ -31,6 +59,10 @@ main() {
         echo "Failed to install helmvm"
         exit 1
     fi
+    if ! wait_for_pods_running 900; then
+        echo "Failed to install helmvm"
+        exit 1
+    fi
 }
 
 export KUBECONFIG=/root/.helmvm/etc/kubeconfig
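A note on the awk filter in wait_for_pods_running (added here and in embed-and-install.sh above): with --no-headers output, $4 is the STATUS column, so the function counts pods that are neither Running nor Completed and finished Job pods do not block the wait. A hypothetical one-liner alternative, not used by this commit, is kubectl wait; the sketch below shows it, but it would time out if any completed Job pods exist, because Succeeded pods never report the Ready condition:

    # hypothetical alternative (not part of this change)
    kubectl wait --for=condition=Ready pods --all --all-namespaces --timeout=900s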

e2e/utils.go

Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@ func RunCommandOnNode(t *testing.T, cl *cluster.Output, node int, line []string)
         Stdout: stdout,
         Stderr: stderr,
     }
-    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
     defer cancel()
     if err := cluster.Run(ctx, t, cmd); err != nil {
         t.Logf("stdout:\n%s", stdout.String())
