
Commit df030f3

Merge pull request kubernetes#130472 from jsafrane/selinux-controller-ignore-recursive
selinux: Ignore pods with Recursive policy
2 parents 90cf6f2 + 052f1fe commit df030f3

File tree: 4 files changed (+98, -57 lines changed)


pkg/controller/volume/selinuxwarning/cache/volumecache_test.go

Lines changed: 6 additions & 43 deletions
@@ -119,21 +119,21 @@ func TestVolumeCache_AddVolumeSendConflicts(t *testing.T) {
 			podNamespace: "ns2",
 			podName:      "pod2-recursive",
 			volumeName:   "vol2",
-			label:        "system_u:system_r:label2",
+			label:        "", // labels on volumes with Recursive policy are cleared, we don't want the controller to report label conflicts on them
 			changePolicy: v1.SELinuxChangePolicyRecursive,
 		},
 		{
 			podNamespace: "ns3",
 			podName:      "pod3-1",
 			volumeName:   "vol3", // vol3 is used by 2 pods with the same label + recursive policy
-			label:        "system_u:system_r:label3",
+			label:        "", // labels on volumes with Recursive policy are cleared, we don't want the controller to report label conflicts on them
 			changePolicy: v1.SELinuxChangePolicyRecursive,
 		},
 		{
 			podNamespace: "ns3",
 			podName:      "pod3-2",
 			volumeName:   "vol3", // vol3 is used by 2 pods with the same label + recursive policy
-			label:        "system_u:system_r:label3",
+			label:        "", // labels on volumes with Recursive policy are cleared, we don't want the controller to report label conflicts on them
 			changePolicy: v1.SELinuxChangePolicyRecursive,
 		},
 		{
@@ -244,34 +244,13 @@ func TestVolumeCache_AddVolumeSendConflicts(t *testing.T) {
 			},
 		},
 		{
-			name:        "existing volume in a new pod with new conflicting policy and existing label",
+			name:        "existing volume in a new pod with new conflicting policy",
 			initialPods: existingPods,
 			podToAdd: podWithVolume{
 				podNamespace: "testns",
 				podName:      "testpod",
 				volumeName:   "vol1",
-				label:        "system_u:system_r:label1",
-				changePolicy: v1.SELinuxChangePolicyRecursive,
-			},
-			expectedConflicts: []Conflict{
-				{
-					PropertyName:       "SELinuxChangePolicy",
-					EventReason:        "SELinuxChangePolicyConflict",
-					Pod:                cache.ObjectName{Namespace: "testns", Name: "testpod"},
-					PropertyValue:      "Recursive",
-					OtherPod:           cache.ObjectName{Namespace: "ns1", Name: "pod1-mountOption"},
-					OtherPropertyValue: "MountOption",
-				},
-			},
-		},
-		{
-			name:        "existing volume in a new pod with new conflicting policy and new conflicting label",
-			initialPods: existingPods,
-			podToAdd: podWithVolume{
-				podNamespace: "testns",
-				podName:      "testpod",
-				volumeName:   "vol1",
-				label:        "system_u:system_r:label-new",
+				label:        "",
 				changePolicy: v1.SELinuxChangePolicyRecursive,
 			},
 			expectedConflicts: []Conflict{
@@ -283,14 +262,6 @@ func TestVolumeCache_AddVolumeSendConflicts(t *testing.T) {
 					OtherPod:           cache.ObjectName{Namespace: "ns1", Name: "pod1-mountOption"},
 					OtherPropertyValue: "MountOption",
 				},
-				{
-					PropertyName:       "SELinuxLabel",
-					EventReason:        "SELinuxLabelConflict",
-					Pod:                cache.ObjectName{Namespace: "testns", Name: "testpod"},
-					PropertyValue:      "system_u:system_r:label-new",
-					OtherPod:           cache.ObjectName{Namespace: "ns1", Name: "pod1-mountOption"},
-					OtherPropertyValue: "system_u:system_r:label1",
-				},
 			},
 		},
 		{
@@ -307,7 +278,7 @@ func TestVolumeCache_AddVolumeSendConflicts(t *testing.T) {
 			expectedConflicts: nil,
 		},
 		{
-			name:        "existing pod is replaced with conflicting policy and label",
+			name:        "existing pod is replaced with conflicting policy",
 			initialPods: existingPods,
 			podToAdd: podWithVolume{
 
@@ -326,14 +297,6 @@ func TestVolumeCache_AddVolumeSendConflicts(t *testing.T) {
 					OtherPod:           cache.ObjectName{Namespace: "ns3", Name: "pod3-2"},
 					OtherPropertyValue: "Recursive",
 				},
-				{
-					PropertyName:       "SELinuxLabel",
-					EventReason:        "SELinuxLabelConflict",
-					Pod:                cache.ObjectName{Namespace: "ns3", Name: "pod3-1"},
-					PropertyValue:      "system_u:system_r:label-new",
-					OtherPod:           cache.ObjectName{Namespace: "ns3", Name: "pod3-2"},
-					OtherPropertyValue: "system_u:system_r:label3",
-				},
 			},
 		},
 		{

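The test changes above all encode the same rule: a pod that opts into the Recursive change policy is tracked in the cache with an empty SELinux label, so the controller can still report SELinuxChangePolicy conflicts for it but never SELinuxLabel conflicts. The following standalone sketch illustrates that rule; the `use` struct and `conflicts` helper are invented for illustration and are not the controller's actual cache code.

```go
package main

import "fmt"

// use describes one pod's use of a volume, roughly as the warning controller's cache sees it.
type use struct {
	pod          string
	label        string // empty for pods with the Recursive change policy
	changePolicy string
}

// conflicts compares a new use of a volume against an existing one and returns
// human-readable descriptions, mirroring the test expectations above:
// differing policies always conflict, labels only conflict when both are set.
func conflicts(existing, added use) []string {
	var out []string
	if existing.changePolicy != added.changePolicy {
		out = append(out, fmt.Sprintf("SELinuxChangePolicy %q conflicts with %q", added.changePolicy, existing.changePolicy))
	}
	if existing.label != "" && added.label != "" && existing.label != added.label {
		out = append(out, fmt.Sprintf("SELinuxLabel %q conflicts with %q", added.label, existing.label))
	}
	return out
}

func main() {
	mountOptionPod := use{pod: "pod1-mountOption", label: "system_u:system_r:label1", changePolicy: "MountOption"}
	recursivePod := use{pod: "testpod", label: "", changePolicy: "Recursive"}

	// Only the policy conflict is reported; the cleared label suppresses any label conflict.
	fmt.Println(conflicts(mountOptionPod, recursivePod))
}
```
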
pkg/controller/volume/selinuxwarning/selinux_warning_controller.go

Lines changed: 3 additions & 4 deletions
@@ -451,10 +451,9 @@ func (c *Controller) syncPod(ctx context.Context, pod *v1.Pod) error {
 			continue
 		}
 
-		// Ignore how the volume is going to be mounted.
-		// Report any errors when a volume is used by two pods with different SELinux labels regardless of their
-		// SELinuxChangePolicy
-		seLinuxLabel := mountInfo.SELinuxProcessLabel
+		// Use the same label as kubelet will use for mount -o context.
+		// If the Pod has opted in to Recursive policy, it will be empty string here and no conflicts will be reported for it.
+		seLinuxLabel := mountInfo.SELinuxMountLabel
 
 		err = c.syncVolume(logger, pod, spec, seLinuxLabel, mountInfo.PluginSupportsSELinuxContextMount)
 		if err != nil {

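The controller-side change is a single assignment: conflict detection is now keyed off the label kubelet will pass to `mount -o context` (`SELinuxMountLabel`), which is empty for Recursive pods, instead of the label the container processes run with (`SELinuxProcessLabel`). A simplified sketch of that distinction follows; the `mountInfo` struct and `mountInfoFor` helper are hypothetical stand-ins for illustration, not the real volume utility API.

```go
package main

import "fmt"

// mountInfo mimics the two labels the diff above distinguishes; the real type
// lives in the volume utilities and has more fields.
type mountInfo struct {
	SELinuxProcessLabel string // label the container processes will run with
	SELinuxMountLabel   string // label used for mount -o context; empty for the Recursive policy
}

// mountInfoFor is a hypothetical helper: the process label always follows the
// pod's SELinux options, while the mount label is only set when the volume will
// actually be mounted with -o context, i.e. not for Recursive pods.
func mountInfoFor(processLabel string, recursivePolicy bool) mountInfo {
	info := mountInfo{SELinuxProcessLabel: processLabel}
	if !recursivePolicy {
		info.SELinuxMountLabel = processLabel
	}
	return info
}

func main() {
	fmt.Printf("%+v\n", mountInfoFor("system_u:object_r:container_file_t:s0:c1,c2", false))
	// Recursive pod: the controller now reads the empty mount label and reports no label conflict.
	fmt.Printf("%+v\n", mountInfoFor("system_u:object_r:container_file_t:s0:c1,c2", true))
}
```
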
pkg/controller/volume/selinuxwarning/selinux_warning_controller_test.go

Lines changed: 75 additions & 1 deletion
@@ -25,14 +25,17 @@ import (
 	storagev1 "k8s.io/api/storage/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
+	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/klog/v2"
 	"k8s.io/klog/v2/ktesting"
 	"k8s.io/kubernetes/pkg/controller"
 	volumecache "k8s.io/kubernetes/pkg/controller/volume/selinuxwarning/cache"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetesting "k8s.io/kubernetes/pkg/volume/testing"
 	"k8s.io/utils/ptr"
@@ -117,7 +120,7 @@ func TestSELinuxWarningController_Sync(t *testing.T) {
 		{
 			volumeName:   "fake-plugin/pv1",
 			podKey:       cache.ObjectName{Namespace: namespace, Name: "pod1"},
-			label:        ":::s0:c1,c2",
+			label:        "", // Label is cleared with the Recursive policy
 			changePolicy: v1.SELinuxChangePolicyRecursive,
 			csiDriver:    "ebs.csi.aws.com", // The PV is a fake EBS volume
 		},
@@ -221,6 +224,75 @@ func TestSELinuxWarningController_Sync(t *testing.T) {
 				`Normal SELinuxLabelConflict SELinuxLabel ":::s0:c98,c99" conflicts with pod pod1 that uses the same volume as this pod with SELinuxLabel ":::s0:c1,c2". If both pods land on the same node, only one of them may access the volume.`,
 			},
 		},
+		{
+			name: "existing pod with Recursive policy does not generate conflicts",
+			existingPVCs: []*v1.PersistentVolumeClaim{
+				pvcBoundToPV("pv1", "pvc1"),
+			},
+			existingPVs: []*v1.PersistentVolume{
+				pvBoundToPVC("pv1", "pvc1"),
+			},
+			existingPods: []*v1.Pod{
+				podWithPVC("pod1", "s0:c1,c2", ptr.To(v1.SELinuxChangePolicyRecursive), "pvc1", "vol1"),
+				pod("pod2", "s0:c98,c99", ptr.To(v1.SELinuxChangePolicyRecursive)),
+			},
+			pod:       cache.ObjectName{Namespace: namespace, Name: "pod1"},
+			conflicts: []volumecache.Conflict{},
+			expectedAddedVolumes: []addedVolume{
+				{
+					volumeName:   "fake-plugin/pv1",
+					podKey:       cache.ObjectName{Namespace: namespace, Name: "pod1"},
+					label:        "", // Label is cleared with the Recursive policy
+					changePolicy: v1.SELinuxChangePolicyRecursive,
+					csiDriver:    "ebs.csi.aws.com", // The PV is a fake EBS volume
+				},
+			},
+		},
+		{
+			name: "existing pod with Recursive policy does not conflict with pod with MountOption policy label, only with the policy",
+			existingPVCs: []*v1.PersistentVolumeClaim{
+				pvcBoundToPV("pv1", "pvc1"),
+			},
+			existingPVs: []*v1.PersistentVolume{
+				pvBoundToPVC("pv1", "pvc1"),
+			},
+			existingPods: []*v1.Pod{
+				podWithPVC("pod1", "s0:c1,c2", ptr.To(v1.SELinuxChangePolicyRecursive), "pvc1", "vol1"),
+				podWithPVC("pod2", "s0:c98,c99", ptr.To(v1.SELinuxChangePolicyMountOption), "pvc1", "vol1"),
+			},
+			pod: cache.ObjectName{Namespace: namespace, Name: "pod1"},
+			conflicts: []volumecache.Conflict{
+				{
+					PropertyName:       "SELinuxChangePolicy",
+					EventReason:        "SELinuxChangePolicyConflict",
+					Pod:                cache.ObjectName{Namespace: namespace, Name: "pod1"},
+					PropertyValue:      string(v1.SELinuxChangePolicyRecursive),
+					OtherPod:           cache.ObjectName{Namespace: namespace, Name: "pod2"},
+					OtherPropertyValue: string(v1.SELinuxChangePolicyMountOption),
+				},
+				{
+					PropertyName:       "SELinuxChangePolicy",
+					EventReason:        "SELinuxChangePolicyConflict",
+					Pod:                cache.ObjectName{Namespace: namespace, Name: "pod2"},
+					PropertyValue:      string(v1.SELinuxChangePolicyMountOption),
+					OtherPod:           cache.ObjectName{Namespace: namespace, Name: "pod1"},
+					OtherPropertyValue: string(v1.SELinuxChangePolicyRecursive),
+				},
+			},
+			expectedAddedVolumes: []addedVolume{
+				{
+					volumeName:   "fake-plugin/pv1",
+					podKey:       cache.ObjectName{Namespace: namespace, Name: "pod1"},
+					label:        "", // Label is cleared with the Recursive policy
+					changePolicy: v1.SELinuxChangePolicyRecursive,
+					csiDriver:    "ebs.csi.aws.com", // The PV is a fake EBS volume
+				},
+			},
+			expectedEvents: []string{
+				`Normal SELinuxChangePolicyConflict SELinuxChangePolicy "Recursive" conflicts with pod pod2 that uses the same volume as this pod with SELinuxChangePolicy "MountOption". If both pods land on the same node, only one of them may access the volume.`,
+				`Normal SELinuxChangePolicyConflict SELinuxChangePolicy "MountOption" conflicts with pod pod1 that uses the same volume as this pod with SELinuxChangePolicy "Recursive". If both pods land on the same node, only one of them may access the volume.`,
+			},
+		},
 		{
 			name: "existing pod with PVC generates conflict, the other pod doesn't exist",
 			existingPVCs: []*v1.PersistentVolumeClaim{
@@ -281,6 +353,8 @@ func TestSELinuxWarningController_Sync(t *testing.T) {
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
+			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SELinuxChangePolicy, true)
+
 			_, ctx := ktesting.NewTestContext(t)
 			_, plugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
 			plugin.SupportsSELinux = true

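Because the Recursive change policy only exists behind the SELinuxChangePolicy feature gate, the unit test now flips that gate on for every test case. The snippet below shows the same pattern in isolation, using only the helpers already imported in the diff above; the test name is made up.

```go
package selinuxwarning_test

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
)

func TestRecursivePolicyBehindGate(t *testing.T) {
	// Enable the gate for this test only; the helper restores the previous
	// value when the test finishes, so other tests are not affected.
	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SELinuxChangePolicy, true)

	// ... exercise code that depends on the SELinuxChangePolicy pod field here ...
}
```
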
test/e2e/storage/csimock/csi_selinux_mount.go

Lines changed: 14 additions & 9 deletions
@@ -72,6 +72,10 @@ import (
 //
 // All other feature gate combinations should be invalid.
 
+const (
+	controllerSELinuxMetricName = "selinux_warning_controller_selinux_volume_conflict"
+)
+
 var (
 	defaultSELinuxLabels = map[string]struct{ defaultProcessLabel, defaultFileLabel string }{
 		"debian": {"svirt_lxc_net_t", "svirt_lxc_file_t"},
@@ -502,7 +506,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC
 			volumeMode:            v1.ReadWriteOnce,
 			waitForSecondPodStart: true,
 			expectNodeIncreases:   sets.New[string]( /* no metric is increased, admitted_total was already increased when the first pod started */ ),
-			expectControllerConflictProperty: "SELinuxLabel", /* SELinuxController does emit a warning for Recursive policy, while kubelet does not! */
+			expectControllerConflictProperty: "", /* SELinuxController does not emit any warning either */
 			testTags: []interface{}{framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod), framework.WithFeatureGate(features.SELinuxChangePolicy), feature.SELinuxMountReadWriteOncePodOnly},
 		},
 		{
@@ -591,7 +595,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC
 			volumeMode:            v1.ReadWriteMany,
 			waitForSecondPodStart: true,
 			expectNodeIncreases:   sets.New[string]( /* no metric is increased, admitted_total was already increased when the first pod started */ ),
-			expectControllerConflictProperty: "SELinuxLabel", /* SELinuxController does emit a warning for Recursive policy, while kubelet does not! */
+			expectControllerConflictProperty: "", /* SELinuxController does not emit any warning either */
 			testTags: []interface{}{framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod), framework.WithFeatureGate(features.SELinuxChangePolicy), framework.WithFeatureGate(features.SELinuxMount)},
 		},
 		{
@@ -713,13 +717,13 @@
 				// We don't need to compare the initial and final KCM metrics,
 				// KCM metrics report exact pod namespaces+names as labels and the metric value is always "1".
 				err = waitForControllerMetric(ctx, grabber, f.Namespace.Name, pod.Name, pod2.Name, t.expectControllerConflictProperty, framework.PodStartShortTimeout)
-				framework.ExpectNoError(err, "failed to get metrics from KCM")
+				framework.ExpectNoError(err, "while waiting for metrics from KCM")
 				// Check the controler generated a conflict event on the first pod
 				err = waitForConflictEvent(ctx, m.cs, pod, pod2, t.expectControllerConflictProperty, f.Timeouts.PodStart)
-				framework.ExpectNoError(err, "failed to receive event on the first pod")
+				framework.ExpectNoError(err, "while waiting for an event on the first pod")
 				// Check the controler generated event on the second pod
 				err = waitForConflictEvent(ctx, m.cs, pod2, pod, t.expectControllerConflictProperty, f.Timeouts.PodStart)
-				framework.ExpectNoError(err, "failed to receive event on the second pod")
+				framework.ExpectNoError(err, "while waiting for an event on the second pod")
 			}
 		}
 		// t.testTags is array and it's not possible to use It("name", func(){xxx}, t.testTags...)
@@ -778,7 +782,7 @@ func grabKCMSELinuxMetrics(ctx context.Context, grabber *e2emetrics.Grabber, nam
 	for i := range samples {
 		// E.g. "selinux_warning_controller_selinux_volume_conflict"
 		metricName := samples[i].Metric[testutil.MetricNameLabel]
-		if metricName != "selinux_warning_controller_selinux_volume_conflict" {
+		if metricName != controllerSELinuxMetricName {
 			continue
 		}
 
@@ -836,7 +840,6 @@ func waitForNodeMetricIncrease(ctx context.Context, grabber *e2emetrics.Grabber,
 }
 
 func waitForControllerMetric(ctx context.Context, grabber *e2emetrics.Grabber, namespace, pod1Name, pod2Name, propertyName string, timeout time.Duration) error {
-	var noIncreaseMetrics sets.Set[string]
 	var metrics map[string]float64
 
 	expectLabels := []string{
@@ -846,6 +849,8 @@ func waitForControllerMetric(ctx context.Context, grabber *e2emetrics.Grabber, n
 		fmt.Sprintf("pod2_namespace=%q", namespace),
 		fmt.Sprintf("property=%q", propertyName),
 	}
+	framework.Logf("Waiting for KCM metric %s{%+v}", controllerSELinuxMetricName, expectLabels)
+
 	err := wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) {
 		var err error
 		metrics, err = grabKCMSELinuxMetrics(ctx, grabber, namespace)
@@ -876,8 +881,8 @@ func waitForControllerMetric(ctx context.Context, grabber *e2emetrics.Grabber, n
 	ginkgo.By("Dumping final KCM metrics")
 	dumpMetrics(metrics)
 
-	if err == context.DeadlineExceeded {
-		return fmt.Errorf("timed out waiting for KCM metrics %v", noIncreaseMetrics.UnsortedList())
+	if err != nil {
+		return fmt.Errorf("error waiting for KCM metrics %s{%+v}: %w", controllerSELinuxMetricName, expectLabels, err)
 	}
 	return err
 }

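Besides hoisting the metric name into a constant, the e2e change fixes a misleading failure path: the old code special-cased context.DeadlineExceeded and printed noIncreaseMetrics, a set that, judging by the diff, was declared but never filled in this function. The new code wraps whatever error the poll loop returned with the metric name and expected labels via %w, so the original cause stays inspectable. A tiny standalone illustration of that wrapping (not the e2e code itself):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	metricName := "selinux_warning_controller_selinux_volume_conflict"
	expectLabels := []string{`property="SELinuxChangePolicy"`}

	// The poll loop typically fails with a plain deadline error...
	cause := context.DeadlineExceeded

	// ...and the test now wraps it with what it was waiting for, keeping the
	// cause inspectable via errors.Is thanks to %w.
	err := fmt.Errorf("error waiting for KCM metrics %s{%+v}: %w", metricName, expectLabels, cause)

	fmt.Println(err)
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}
```
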