
Commit c903c29

e2e: support admissionapi.LevelRestricted in test/e2e/framework/pod

CreatePod and MakePod only accepted an `isPrivileged` boolean, which made it impossible to write tests with those helpers that work in a default framework.Framework, because the default there is LevelRestricted. The simple boolean is replaced with admissionapi.Level; passing LevelRestricted has the same effect as calling e2epod.MixinRestrictedPodSecurity.

Instead of explicitly passing a constant to these modified helpers, most tests are updated to pass f.NamespacePodSecurityLevel. This has the advantage that if that level gets lowered in the future, tests only need to be updated in one place. In some cases, helpers that took client+namespace+timeouts parameters now take the Framework instance instead, to get access to f.NamespacePodSecurityEnforceLevel; they don't need separate parameters because in practice all they ever used were the values from the Framework instance.
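For orientation only, a minimal before/after fragment (not part of the commit; it assumes the usual Ginkgo setup with f := framework.NewDefaultFramework(...) and omits error handling):

    // Before: the fourth argument was a bare isPrivileged boolean, so a pod
    // built this way is rejected in a namespace that enforces LevelRestricted.
    pod := e2epod.MakePod(f.Namespace.Name, nil, nil, false, "sleep 3600")

    // After: the caller passes an admissionapi.Level. Using the namespace's
    // own enforced level keeps the test working if that default ever changes.
    pod := e2epod.MakePod(f.Namespace.Name, nil, nil, f.NamespacePodSecurityLevel, "sleep 3600")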
1 parent 8c33d3e commit c903c29

24 files changed (+163, -129 lines)

test/e2e/dra/dra.go

Lines changed: 1 addition & 1 deletion
@@ -643,7 +643,7 @@ func (b *builder) parameters(kv ...string) *v1.ConfigMap {
 // makePod returns a simple pod with no resource claims.
 // The pod prints its env and waits.
 func (b *builder) pod() *v1.Pod {
-    pod := e2epod.MakePod(b.f.Namespace.Name, nil, nil, false, "env && sleep 100000")
+    pod := e2epod.MakePod(b.f.Namespace.Name, nil, nil, b.f.NamespacePodSecurityLevel, "env && sleep 100000")
     pod.Labels = make(map[string]string)
     pod.Spec.RestartPolicy = v1.RestartPolicyNever
     // Let kubelet kill the pods quickly. Setting

test/e2e/framework/deployment/fixtures.go

Lines changed: 5 additions & 4 deletions
@@ -30,6 +30,7 @@ import (
     "k8s.io/kubernetes/test/e2e/framework"
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     testutils "k8s.io/kubernetes/test/utils"
+    admissionapi "k8s.io/pod-security-admission/api"
 )
 
 // UpdateDeploymentWithRetries updates the specified deployment with retries.
@@ -71,8 +72,8 @@ func NewDeployment(deploymentName string, replicas int32, podLabels map[string]s
 }
 
 // CreateDeployment creates a deployment.
-func CreateDeployment(ctx context.Context, client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*appsv1.Deployment, error) {
-    deploymentSpec := testDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, false, command)
+func CreateDeployment(ctx context.Context, client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, securityLevel admissionapi.Level, command string) (*appsv1.Deployment, error) {
+    deploymentSpec := testDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, securityLevel, command)
     deployment, err := client.AppsV1().Deployments(namespace).Create(ctx, deploymentSpec, metav1.CreateOptions{})
     if err != nil {
         return nil, fmt.Errorf("deployment %q Create API error: %w", deploymentSpec.Name, err)
@@ -175,7 +176,7 @@ func (o replicaSetsByCreationTimestamp) Less(i, j int) bool {
 
 // testDeployment creates a deployment definition based on the namespace. The deployment references the PVC's
 // name. A slice of BASH commands can be supplied as args to be run by the pod
-func testDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *appsv1.Deployment {
+func testDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, securityLevel admissionapi.Level, command string) *appsv1.Deployment {
     if len(command) == 0 {
         command = "trap exit TERM; while true; do sleep 1; done"
     }
@@ -202,7 +203,7 @@ func testDeployment(replicas int32, podLabels map[string]string, nodeSelector ma
                     Name:            "write-pod",
                     Image:           e2epod.GetDefaultTestImage(),
                     Command:         e2epod.GenerateScriptCmd(command),
-                    SecurityContext: e2epod.GenerateContainerSecurityContext(isPrivileged),
+                    SecurityContext: e2epod.GenerateContainerSecurityContext(securityLevel),
                 },
             },
             RestartPolicy: v1.RestartPolicyAlways,

test/e2e/framework/pod/create.go

Lines changed: 18 additions & 9 deletions
@@ -26,6 +26,7 @@ import (
     "k8s.io/apimachinery/pkg/util/uuid"
     clientset "k8s.io/client-go/kubernetes"
     imageutils "k8s.io/kubernetes/test/utils/image"
+    admissionapi "k8s.io/pod-security-admission/api"
 )
 
 const (
@@ -40,7 +41,7 @@ type Config struct {
     PVCs                []*v1.PersistentVolumeClaim
     PVCsReadOnly        bool
     InlineVolumeSources []*v1.VolumeSource
-    IsPrivileged        bool
+    SecurityLevel       admissionapi.Level
     Command             string
     HostIPC             bool
     HostPID             bool
@@ -52,8 +53,8 @@ type Config struct {
 }
 
 // CreateUnschedulablePod with given claims based on node selector
-func CreateUnschedulablePod(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
-    pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
+func CreateUnschedulablePod(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, securityLevel admissionapi.Level, command string) (*v1.Pod, error) {
+    pod := MakePod(namespace, nodeSelector, pvclaims, securityLevel, command)
     pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
     if err != nil {
         return nil, fmt.Errorf("pod Create API error: %w", err)
@@ -73,12 +74,12 @@ func CreateUnschedulablePod(ctx context.Context, client clientset.Interface, nam
 
 // CreateClientPod defines and creates a pod with a mounted PV. Pod runs infinite loop until killed.
 func CreateClientPod(ctx context.Context, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
-    return CreatePod(ctx, c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
+    return CreatePod(ctx, c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, admissionapi.LevelPrivileged, "")
 }
 
 // CreatePod with given claims based on node selector
-func CreatePod(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
-    pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
+func CreatePod(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, securityLevel admissionapi.Level, command string) (*v1.Pod, error) {
+    pod := MakePod(namespace, nodeSelector, pvclaims, securityLevel, command)
     pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
     if err != nil {
         return nil, fmt.Errorf("pod Create API error: %w", err)
@@ -128,7 +129,7 @@ func CreateSecPodWithNodeSelection(ctx context.Context, client clientset.Interfa
 
 // MakePod returns a pod definition based on the namespace. The pod references the PVC's
 // name. A slice of BASH commands can be supplied as args to be run by the pod
-func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *v1.Pod {
+func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, securityLevel admissionapi.Level, command string) *v1.Pod {
     if len(command) == 0 {
         command = "trap exit TERM; while true; do sleep 1; done"
     }
@@ -147,7 +148,7 @@ func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.Persisten
                 Name:            "write-pod",
                 Image:           GetDefaultTestImage(),
                 Command:         GenerateScriptCmd(command),
-                SecurityContext: GenerateContainerSecurityContext(isPrivileged),
+                SecurityContext: GenerateContainerSecurityContext(securityLevel),
             },
         },
         RestartPolicy: v1.RestartPolicyOnFailure,
@@ -157,6 +158,10 @@ func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.Persisten
     if nodeSelector != nil {
         podSpec.Spec.NodeSelector = nodeSelector
     }
+    if securityLevel == admissionapi.LevelRestricted {
+        podSpec = MustMixinRestrictedPodSecurity(podSpec)
+    }
+
     return podSpec
 }
 
@@ -196,6 +201,10 @@ func MakePodSpec(podConfig *Config) *v1.PodSpec {
     if podConfig.ImageID != imageutils.None {
         image = podConfig.ImageID
     }
+    securityLevel := podConfig.SecurityLevel
+    if securityLevel == "" {
+        securityLevel = admissionapi.LevelBaseline
+    }
     podSpec := &v1.PodSpec{
         HostIPC: podConfig.HostIPC,
         HostPID: podConfig.HostPID,
@@ -205,7 +214,7 @@ func MakePodSpec(podConfig *Config) *v1.PodSpec {
                 Name:            "write-pod",
                 Image:           GetTestImage(image),
                 Command:         GenerateScriptCmd(podConfig.Command),
-                SecurityContext: GenerateContainerSecurityContext(podConfig.IsPrivileged),
+                SecurityContext: GenerateContainerSecurityContext(securityLevel),
             },
         },
         RestartPolicy: v1.RestartPolicyOnFailure,

test/e2e/framework/pod/utils.go

Lines changed: 17 additions & 3 deletions
@@ -20,6 +20,7 @@ import (
     "flag"
     "fmt"
 
+    "github.com/onsi/ginkgo/v2"
     "github.com/onsi/gomega"
 
     v1 "k8s.io/api/core/v1"
@@ -111,12 +112,25 @@ func GeneratePodSecurityContext(fsGroup *int64, seLinuxOptions *v1.SELinuxOption
 // GenerateContainerSecurityContext generates the corresponding container security context with the given inputs
 // If the Node OS is windows, currently we will ignore the inputs and return nil.
 // TODO: Will modify it after windows has its own security context
-func GenerateContainerSecurityContext(privileged bool) *v1.SecurityContext {
+func GenerateContainerSecurityContext(level psaapi.Level) *v1.SecurityContext {
     if NodeOSDistroIs("windows") {
         return nil
     }
-    return &v1.SecurityContext{
-        Privileged: &privileged,
+
+    switch level {
+    case psaapi.LevelBaseline:
+        return &v1.SecurityContext{
+            Privileged: pointer.Bool(false),
+        }
+    case psaapi.LevelPrivileged:
+        return &v1.SecurityContext{
+            Privileged: pointer.Bool(true),
+        }
+    case psaapi.LevelRestricted:
+        return GetRestrictedContainerSecurityContext()
+    default:
+        ginkgo.Fail(fmt.Sprintf("unknown k8s.io/pod-security-admission/policy.Level %q", level))
+        panic("not reached")
     }
 }

test/e2e/framework/volume/fixtures.go

Lines changed: 6 additions & 4 deletions
@@ -59,6 +59,7 @@ import (
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
     imageutils "k8s.io/kubernetes/test/utils/image"
+    admissionapi "k8s.io/pod-security-admission/api"
     uexec "k8s.io/utils/exec"
 
     "github.com/onsi/ginkgo/v2"
@@ -398,8 +399,9 @@ func runVolumeTesterPod(ctx context.Context, client clientset.Interface, timeout
         When SELinux is enabled on the host, client-pod can not read the content, with permission denied.
         Invoking client-pod as privileged, so that it can access the volume content, even when SELinux is enabled on the host.
     */
-    if config.Prefix == "hostpathsymlink" || config.Prefix == "hostpath" {
-        privileged = true
+    securityLevel := admissionapi.LevelBaseline // TODO (#118184): also support LevelRestricted
+    if privileged || config.Prefix == "hostpathsymlink" || config.Prefix == "hostpath" {
+        securityLevel = admissionapi.LevelPrivileged
     }
     command = "while true ; do sleep 2; done "
     seLinuxOptions := &v1.SELinuxOptions{Level: "s0:c0,c1"}
@@ -443,9 +445,9 @@ func runVolumeTesterPod(ctx context.Context, client clientset.Interface, timeout
     // a privileged container, so we don't go privileged for block volumes.
     // https://github.com/moby/moby/issues/35991
    if privileged && test.Mode == v1.PersistentVolumeBlock {
-        privileged = false
+        securityLevel = admissionapi.LevelBaseline
     }
-    clientPod.Spec.Containers[0].SecurityContext = e2epod.GenerateContainerSecurityContext(privileged)
+    clientPod.Spec.Containers[0].SecurityContext = e2epod.GenerateContainerSecurityContext(securityLevel)
 
     if test.Mode == v1.PersistentVolumeBlock {
         clientPod.Spec.Containers[0].VolumeDevices = append(clientPod.Spec.Containers[0].VolumeDevices, v1.VolumeDevice{

test/e2e/storage/flexvolume_mounted_volume_resize.go

Lines changed: 1 addition & 1 deletion
@@ -141,7 +141,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
     framework.ExpectEqual(len(pvs), 1)
 
     ginkgo.By("Creating a deployment with the provisioned volume")
-    deployment, err := e2edeployment.CreateDeployment(ctx, c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
+    deployment, err := e2edeployment.CreateDeployment(ctx, c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, admissionapi.LevelRestricted, "")
     framework.ExpectNoError(err, "Failed creating deployment %v", err)
     ginkgo.DeferCleanup(c.AppsV1().Deployments(ns).Delete, deployment.Name, metav1.DeleteOptions{})

test/e2e/storage/mounted_volume_resize.go

Lines changed: 1 addition & 1 deletion
@@ -111,7 +111,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
     // Keeping pod on same node reproduces the scenario that volume might already be mounted when resize is attempted.
     // We should consider adding a unit test that exercises this better.
     ginkgo.By("Creating a deployment with selected PVC")
-    deployment, err := e2edeployment.CreateDeployment(ctx, c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
+    deployment, err := e2edeployment.CreateDeployment(ctx, c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, admissionapi.LevelRestricted, "")
     framework.ExpectNoError(err, "Failed creating deployment %v", err)
     ginkgo.DeferCleanup(c.AppsV1().Deployments(ns).Delete, deployment.Name, metav1.DeleteOptions{})

test/e2e/storage/nfs_persistent_volume-disruptive.go

Lines changed: 2 additions & 2 deletions
@@ -178,7 +178,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
         framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, f.Timeouts, ns, pv2, pvc2))
 
         ginkgo.By("Attaching both PVC's to a single pod")
-        clientPod, err = e2epod.CreatePod(ctx, c, ns, nil, []*v1.PersistentVolumeClaim{pvc1, pvc2}, true, "")
+        clientPod, err = e2epod.CreatePod(ctx, c, ns, nil, []*v1.PersistentVolumeClaim{pvc1, pvc2}, f.NamespacePodSecurityLevel, "")
         framework.ExpectNoError(err)
     })
 
@@ -301,7 +301,7 @@ func initTestCase(ctx context.Context, f *framework.Framework, c clientset.Inter
         }
     }()
     framework.ExpectNoError(err)
-    pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
+    pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, f.NamespacePodSecurityLevel, "")
     pod.Spec.NodeName = nodeName
     framework.Logf("Creating NFS client pod.")
     pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})

test/e2e/storage/persistent_volumes.go

Lines changed: 3 additions & 3 deletions
@@ -285,7 +285,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
         // (and test) succeed.
         ginkgo.It("should test that a PV becomes Available and is clean after the PVC is deleted.", func(ctx context.Context) {
             ginkgo.By("Writing to the volume.")
-            pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")
+            pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, f.NamespacePodSecurityLevel, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")
             pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
             framework.ExpectNoError(err)
             framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, pod.Name, ns, f.Timeouts.PodStart))
@@ -303,7 +303,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
             // If a file is detected in /mnt, fail the pod and do not restart it.
             ginkgo.By("Verifying the mount has been cleaned.")
             mount := pod.Spec.Containers[0].VolumeMounts[0].MountPath
-            pod = e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
+            pod = e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, f.NamespacePodSecurityLevel, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
             pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
             framework.ExpectNoError(err)
             framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, pod.Name, ns, f.Timeouts.PodStart))
@@ -447,7 +447,7 @@ func makeStatefulSetWithPVCs(ns, cmd string, mounts []v1.VolumeMount, claims []v
 // Has been shown to be necessary using Go 1.7.
 func createWaitAndDeletePod(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, command string) (err error) {
     framework.Logf("Creating nfs test pod")
-    pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, command)
+    pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, admissionapi.LevelPrivileged, command)
     runPod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
     if err != nil {
         return fmt.Errorf("pod Create API error: %w", err)

test/e2e/storage/pvc_protection.go

Lines changed: 2 additions & 2 deletions
@@ -95,7 +95,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
 
         ginkgo.By("Creating a Pod that becomes Running and therefore is actively using the PVC")
         pvcClaims := []*v1.PersistentVolumeClaim{pvc}
-        pod, err = e2epod.CreatePod(ctx, client, nameSpace, nil, pvcClaims, false, "")
+        pod, err = e2epod.CreatePod(ctx, client, nameSpace, nil, pvcClaims, f.NamespacePodSecurityLevel, "")
         framework.ExpectNoError(err, "While creating pod that uses the PVC or waiting for the Pod to become Running")
 
         ginkgo.By("Waiting for PVC to become Bound")
@@ -156,7 +156,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
         framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil)
 
         ginkgo.By("Creating second Pod whose scheduling fails because it uses a PVC that is being deleted")
-        secondPod, err2 := e2epod.CreateUnschedulablePod(ctx, client, nameSpace, nil, []*v1.PersistentVolumeClaim{pvc}, false, "")
+        secondPod, err2 := e2epod.CreateUnschedulablePod(ctx, client, nameSpace, nil, []*v1.PersistentVolumeClaim{pvc}, f.NamespacePodSecurityLevel, "")
         framework.ExpectNoError(err2, "While creating second pod that uses a PVC that is being deleted and that is Unschedulable")
 
         ginkgo.By("Deleting the second pod that uses the PVC that is being deleted")
