Skip to content

Commit e282c76

Browse files
authored
Merge pull request #9085 from zalando-incubator/karpenter-tests-2
Extend test case to ensure that node pools can scale out after cluster update
2 parents 647bb13 + 7a75c5a commit e282c76

File tree

2 files changed

+39
-1
lines changed

2 files changed

+39
-1
lines changed

cluster/manifests/e2e-resources/pool-reserve.yaml

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,16 @@ spec:
2222
spec:
2323
nodeSelector:
2424
node.kubernetes.io/node-pool: "{{$pool}}"
25+
affinity:
26+
podAntiAffinity:
27+
requiredDuringSchedulingIgnoredDuringExecution:
28+
- labelSelector:
29+
matchExpressions:
30+
- key: pool
31+
operator: In
32+
values:
33+
- "{{$pool}}"
34+
topologyKey: "kubernetes.io/hostname"
2535
tolerations:
2636
{{ if eq $pool "worker-node-tests" }}
2737
- effect: NoSchedule

test/e2e/infra.go

Lines changed: 29 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ import (
1111
admissionapi "k8s.io/pod-security-admission/api"
1212

1313
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
14+
applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1"
1415
"k8s.io/client-go/kubernetes"
1516
"k8s.io/kubernetes/test/e2e/framework"
1617
"k8s.io/kubernetes/test/e2e/framework/deployment"
@@ -35,7 +36,34 @@ var _ = describe("Infrastructure tests", func() {
3536

3637
It("All node pools should be able to run pods [Zalando]", func() {
3738
// When modifying this list, don't forget to modify cluster/manifests/e2e-resources/pool-reserve.yaml
38-
for _, pool := range []string{"default-worker-splitaz", "worker-combined", "worker-limit-az", "worker-instance-storage"} {
39+
nodePools := []string{
40+
"default-worker-splitaz",
41+
"worker-combined",
42+
"worker-limit-az",
43+
"worker-instance-storage",
44+
"worker-node-tests",
45+
"worker-karpenter",
46+
"worker-arm64",
47+
}
48+
49+
for _, pool := range nodePools {
50+
deploy, err := cs.AppsV1().Deployments("default").Get(context.Background(), fmt.Sprintf("pool-reserve-%s", pool), metav1.GetOptions{})
51+
framework.ExpectNoError(err)
52+
53+
err = deployment.WaitForDeploymentComplete(cs, deploy)
54+
framework.ExpectNoError(err)
55+
56+
// Scale out deployment to one more replica. In combination with Pod-Anti-Affinity, this should require one more node.
57+
_, err = cs.AppsV1().Deployments("default").ApplyScale(
58+
context.Background(),
59+
fmt.Sprintf("pool-reserve-%s", pool),
60+
applyconfigurationsautoscalingv1.Scale().WithSpec(applyconfigurationsautoscalingv1.ScaleSpec().WithReplicas(2)),
61+
metav1.ApplyOptions{FieldManager: "e2e.test", Force: true},
62+
)
63+
framework.ExpectNoError(err)
64+
}
65+
66+
for _, pool := range nodePools {
3967
deploy, err := cs.AppsV1().Deployments("default").Get(context.Background(), fmt.Sprintf("pool-reserve-%s", pool), metav1.GetOptions{})
4068
framework.ExpectNoError(err)
4169

0 commit comments

Comments
 (0)