
Commit a89bb01

KEP-3619: implement e2e test for SupplementalGroupsPolicy
1 parent f46ecf5 commit a89bb01

File tree

test/e2e/feature/feature.go
test/e2e/node/security_context.go

2 files changed: +172 −0 lines changed
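For orientation, the tests added here exercise the pod-level API introduced by KEP-3619. A minimal sketch of the security context the tests construct (the surrounding pod object is illustrative; the field names, constants, and values come from the diffs below):

	pod.Spec.SecurityContext = &v1.PodSecurityContext{
		RunAsUser:                ptr.To[int64](1000),                      // uid pre-defined in the test image
		SupplementalGroups:       []int64{60000},                           // group requested explicitly in the pod spec
		SupplementalGroupsPolicy: ptr.To(v1.SupplementalGroupsPolicyMerge), // or v1.SupplementalGroupsPolicyStrict; nil behaves like Merge in the tests below
	}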

test/e2e/feature/feature.go

Lines changed: 5 additions & 0 deletions
@@ -334,6 +334,11 @@ var (
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	StorageVersionAPI = framework.WithFeature(framework.ValidFeatures.Add("StorageVersionAPI"))
 
+	// Owner: sig-node
+	// Marks tests that require a cluster with SupplementalGroupsPolicy
+	// (used for testing fine-grained SupplementalGroups control <https://kep.k8s.io/3619>)
+	SupplementalGroupsPolicy = framework.WithFeature(framework.ValidFeatures.Add("SupplementalGroupsPolicy"))
+
 	// Owner: sig-network
 	// Marks tests that require a cluster with Topology Hints enabled.
 	TopologyHints = framework.WithFeature(framework.ValidFeatures.Add("Topology Hints"))
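The tag added above is consumed like the other entries in this file: a spec passes it to SIGDescribe so the test is labeled with the feature and can be selected or skipped accordingly. A hypothetical spec using it (the real consumer added by this commit is in test/e2e/node/security_context.go below; the names here are illustrative):

	var _ = SIGDescribe("SupplementalGroupsPolicy", feature.SupplementalGroupsPolicy, func() {
		ginkgo.It("should run only on clusters providing fine-grained SupplementalGroups control", func(ctx context.Context) {
			// test body
		})
	})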

test/e2e/node/security_context.go

Lines changed: 167 additions & 0 deletions
@@ -25,19 +25,25 @@ package node
 import (
 	"context"
 	"fmt"
+	"reflect"
+	"time"
 
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
+	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
+	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	admissionapi "k8s.io/pod-security-admission/api"
+	ptr "k8s.io/utils/ptr"
 
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
+	"github.com/onsi/gomega/gcustom"
 )
 
 // SeccompProcStatusField is the field of /proc/$PID/status referencing the seccomp filter type.
@@ -114,6 +120,167 @@ var _ = SIGDescribe("Security Context", func() {
 		})
 	})
 
+	SIGDescribe("SupplementalGroupsPolicy", feature.SupplementalGroupsPolicy, func() {
+		timeout := 3 * time.Minute
+
+		agnhostImage := imageutils.GetE2EImage(imageutils.Agnhost)
+		uidInImage := int64(1000)
+		gidDefinedInImage := int64(50000)
+		supplementalGroup := int64(60000)
+
+		supportsSupplementalGroupsPolicy := func(ctx context.Context, f *framework.Framework, nodeName string) bool {
+			node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
+			framework.ExpectNoError(err)
+			gomega.Expect(node).NotTo(gomega.BeNil())
+			if node.Status.Features != nil {
+				supportsSupplementalGroupsPolicy := node.Status.Features.SupplementalGroupsPolicy
+				if supportsSupplementalGroupsPolicy != nil && *supportsSupplementalGroupsPolicy {
+					return true
+				}
+			}
+			return false
+		}
+		mkPod := func(policy *v1.SupplementalGroupsPolicy) *v1.Pod {
+			pod := scTestPod(false, false)
+
+			// In the specified image (the agnhost E2E image),
+			// - user-defined-in-image (uid=1000) is defined, and
+			// - user-defined-in-image belongs to group-defined-in-image (gid=50000).
+			// Thus, the resulting supplementary groups of the container processes should be:
+			// - 1000 : self
+			// - 50000: pre-defined group of self (uid=1000) in the container image (/etc/group)
+			// - 60000: specified in SupplementalGroups
+			// $ id -G
+			// 1000 50000 60000 (if SupplementalGroupsPolicy=Merge or not set)
+			// 1000 60000       (if SupplementalGroupsPolicy=Strict)
+			pod.Spec.SecurityContext.RunAsUser = &uidInImage
+			pod.Spec.SecurityContext.SupplementalGroupsPolicy = policy
+			pod.Spec.SecurityContext.SupplementalGroups = []int64{supplementalGroup}
+			pod.Spec.Containers[0].Image = agnhostImage
+			pod.Spec.Containers[0].Command = []string{"sh", "-c", "id -G; while :; do sleep 1; done"}
+
+			return pod
+		}
+		waitForContainerUser := func(ctx context.Context, f *framework.Framework, podName string, containerName string, expectedContainerUser *v1.ContainerUser) error {
+			return framework.Gomega().Eventually(ctx,
+				framework.RetryNotFound(framework.GetObject(f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get, podName, metav1.GetOptions{}))).
+				WithTimeout(timeout).
+				Should(gcustom.MakeMatcher(func(p *v1.Pod) (bool, error) {
+					for _, s := range p.Status.ContainerStatuses {
+						if s.Name == containerName {
+							return reflect.DeepEqual(s.User, expectedContainerUser), nil
+						}
+					}
+					return false, nil
+				}))
+		}
+		waitForPodLogs := func(ctx context.Context, f *framework.Framework, podName string, containerName string, expectedLog string) error {
+			return framework.Gomega().Eventually(ctx,
+				framework.RetryNotFound(framework.GetObject(f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get, podName, metav1.GetOptions{}))).
+				WithTimeout(timeout).
+				Should(gcustom.MakeMatcher(func(p *v1.Pod) (bool, error) {
+					podLogs, err := e2epod.GetPodLogs(ctx, f.ClientSet, p.Namespace, p.Name, containerName)
+					if err != nil {
+						return false, err
+					}
+					return podLogs == expectedLog, nil
+				}))
+		}
+
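Both wait helpers above poll the pod via framework.Gomega().Eventually and express the per-poll check as a gcustom matcher. A minimal standalone sketch of that matcher pattern (isRunning and somePod are illustrative, not part of this commit):

	isRunning := gcustom.MakeMatcher(func(p *v1.Pod) (bool, error) {
		// succeed once the polled pod reports phase Running
		return p.Status.Phase == v1.PodRunning, nil
	})
	gomega.Expect(somePod).To(isRunning) // somePod is a *v1.Pod obtained elsewhere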
+		ginkgo.When("SupplementalGroupsPolicy was not set", func() {
+			ginkgo.It("if the container's primary UID belongs to some groups in the image, it should add SupplementalGroups to them [LinuxOnly]", func(ctx context.Context) {
+				var pod *v1.Pod
+				ginkgo.By("creating a pod", func() {
+					pod = e2epod.NewPodClient(f).Create(ctx, mkPod(nil))
+					framework.ExpectNoError(e2epod.WaitForPodScheduled(ctx, f.ClientSet, pod.Namespace, pod.Name))
+					var err error
+					pod, err = e2epod.NewPodClient(f).Get(ctx, pod.Name, metav1.GetOptions{})
+					framework.ExpectNoError(err)
+					if !supportsSupplementalGroupsPolicy(ctx, f, pod.Spec.NodeName) {
+						e2eskipper.Skipf("node does not support SupplementalGroupsPolicy")
+					}
+					framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod))
+				})
+				expectedOutput := fmt.Sprintf("%d %d %d", uidInImage, gidDefinedInImage, supplementalGroup)
+				expectedContainerUser := &v1.ContainerUser{
+					Linux: &v1.LinuxContainerUser{
+						UID:                uidInImage,
+						GID:                uidInImage,
+						SupplementalGroups: []int64{uidInImage, gidDefinedInImage, supplementalGroup},
+					},
+				}
+
+				framework.ExpectNoError(waitForContainerUser(ctx, f, pod.Name, pod.Spec.Containers[0].Name, expectedContainerUser))
+				framework.ExpectNoError(waitForPodLogs(ctx, f, pod.Name, pod.Spec.Containers[0].Name, expectedOutput+"\n"))
+
+				stdout := e2epod.ExecCommandInContainer(f, pod.Name, pod.Spec.Containers[0].Name, "id", "-G")
+				gomega.Expect(stdout).To(gomega.Equal(expectedOutput))
+			})
+		})
+		ginkgo.When("SupplementalGroupsPolicy was set to Merge", func() {
+			ginkgo.It("if the container's primary UID belongs to some groups in the image, it should add SupplementalGroups to them [LinuxOnly]", func(ctx context.Context) {
+				var pod *v1.Pod
+				ginkgo.By("creating a pod", func() {
+					pod = e2epod.NewPodClient(f).Create(ctx, mkPod(ptr.To(v1.SupplementalGroupsPolicyMerge)))
+					framework.ExpectNoError(e2epod.WaitForPodScheduled(ctx, f.ClientSet, pod.Namespace, pod.Name))
+					var err error
+					pod, err = e2epod.NewPodClient(f).Get(ctx, pod.Name, metav1.GetOptions{})
+					framework.ExpectNoError(err)
+					if !supportsSupplementalGroupsPolicy(ctx, f, pod.Spec.NodeName) {
+						e2eskipper.Skipf("node does not support SupplementalGroupsPolicy")
+					}
+					framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod))
+				})
+
+				expectedOutput := fmt.Sprintf("%d %d %d", uidInImage, gidDefinedInImage, supplementalGroup)
+				expectedContainerUser := &v1.ContainerUser{
+					Linux: &v1.LinuxContainerUser{
+						UID:                uidInImage,
+						GID:                uidInImage,
+						SupplementalGroups: []int64{uidInImage, gidDefinedInImage, supplementalGroup},
+					},
+				}
+
+				framework.ExpectNoError(waitForContainerUser(ctx, f, pod.Name, pod.Spec.Containers[0].Name, expectedContainerUser))
+				framework.ExpectNoError(waitForPodLogs(ctx, f, pod.Name, pod.Spec.Containers[0].Name, expectedOutput+"\n"))
+
+				stdout := e2epod.ExecCommandInContainer(f, pod.Name, pod.Spec.Containers[0].Name, "id", "-G")
+				gomega.Expect(stdout).To(gomega.Equal(expectedOutput))
+			})
+		})
+		ginkgo.When("SupplementalGroupsPolicy was set to Strict", func() {
+			ginkgo.It("even if the container's primary UID belongs to some groups in the image, it should not add SupplementalGroups to them [LinuxOnly]", func(ctx context.Context) {
+				var pod *v1.Pod
+				ginkgo.By("creating a pod", func() {
+					pod = e2epod.NewPodClient(f).Create(ctx, mkPod(ptr.To(v1.SupplementalGroupsPolicyStrict)))
+					framework.ExpectNoError(e2epod.WaitForPodScheduled(ctx, f.ClientSet, pod.Namespace, pod.Name))
+					var err error
+					pod, err = e2epod.NewPodClient(f).Get(ctx, pod.Name, metav1.GetOptions{})
+					framework.ExpectNoError(err)
+					if !supportsSupplementalGroupsPolicy(ctx, f, pod.Spec.NodeName) {
+						e2eskipper.Skipf("node does not support SupplementalGroupsPolicy")
+					}
+					framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod))
+				})
+
+				expectedOutput := fmt.Sprintf("%d %d", uidInImage, supplementalGroup)
+				expectedContainerUser := &v1.ContainerUser{
+					Linux: &v1.LinuxContainerUser{
+						UID:                uidInImage,
+						GID:                uidInImage,
+						SupplementalGroups: []int64{uidInImage, supplementalGroup},
+					},
+				}
+
+				framework.ExpectNoError(waitForContainerUser(ctx, f, pod.Name, pod.Spec.Containers[0].Name, expectedContainerUser))
+				framework.ExpectNoError(waitForPodLogs(ctx, f, pod.Name, pod.Spec.Containers[0].Name, expectedOutput+"\n"))
+
+				stdout := e2epod.ExecCommandInContainer(f, pod.Name, pod.Spec.Containers[0].Name, "id", "-G")
+				gomega.Expect(stdout).To(gomega.Equal(expectedOutput))
+			})
+		})
+	})
+
 	ginkgo.It("should support pod.Spec.SecurityContext.RunAsUser [LinuxOnly]", func(ctx context.Context) {
 		pod := scTestPod(false, false)
 		userID := int64(1001)
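The waitForContainerUser assertions above read pod.Status.ContainerStatuses[].User, the status field that reports the identity the container processes actually run with. A minimal sketch of inspecting that field outside the e2e framework (clientset, namespace, and pod name are assumptions; the field names match those asserted above):

	// Assumes clientset is a configured kubernetes.Interface and the pod is Running
	// on a node reporting node.Status.Features.SupplementalGroupsPolicy=true.
	pod, err := clientset.CoreV1().Pods("default").Get(ctx, "sgp-demo", metav1.GetOptions{})
	if err != nil {
		// handle the error
	}
	if u := pod.Status.ContainerStatuses[0].User; u != nil && u.Linux != nil {
		// With Merge (or unset) the tests expect UID=1000, GID=1000, SupplementalGroups=[1000 50000 60000];
		// with Strict they expect SupplementalGroups=[1000 60000].
		fmt.Println(u.Linux.UID, u.Linux.GID, u.Linux.SupplementalGroups)
	}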
