@@ -19,6 +19,8 @@ package node
import (
	"context"
	"fmt"
+	"os/exec"
+	"strconv"
	"strings"
	"time"
@@ -43,6 +45,10 @@ import (
var (
	// non-root UID used in tests.
	nonRootTestUserID = int64(1000)
+
+	// kubelet user used for userns mapping.
+	kubeletUserForUsernsMapping = "kubelet"
+	getsubuidsBinary            = "getsubids"
)

var _ = SIGDescribe("Security Context", func() {
@@ -54,6 +60,8 @@ var _ = SIGDescribe("Security Context", func() {
	})

	ginkgo.Context("When creating a pod with HostUsers", func() {
+		e2eskipper.SkipIfNodeOSDistroIs("windows")
+
		containerName := "userns-test"
		makePod := func(hostUsers bool) *v1.Pod {
			return &v1.Pod{
@@ -112,6 +120,74 @@ var _ = SIGDescribe("Security Context", func() {
			}
		})

+ f .It ("must create the user namespace in the configured hostUID/hostGID range [LinuxOnly]" , feature .UserNamespacesSupport , func (ctx context.Context ) {
124
+ // We need to check with the binary "getsubuids" the mappings for the kubelet.
125
+ // If something is not present, we skip the test as the node wasn't configured to run this test.
126
+ id , length , err := kubeletUsernsMappings (getsubuidsBinary )
127
+ if err != nil {
128
+ e2eskipper .Skipf ("node is not setup for userns with kubelet mappings: %v" , err )
129
+ }
130
+
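+			// id is the first host ID of the kubelet's configured range and length is the number of
+			// IDs in it. Each iteration below creates a user-namespaced pod and checks that the host
+			// UID mapped to it falls within that range.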
+			for i := 0; i < 4; i++ {
+				// makePod(false) creates the pod with user namespace
+				podClient := e2epod.PodClientNS(f, f.Namespace.Name)
+				createdPod := podClient.Create(ctx, makePod(false))
+				ginkgo.DeferCleanup(func(ctx context.Context) {
+					ginkgo.By("delete the pods")
+					podClient.DeleteSync(ctx, createdPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
+				})
+				getLogs := func(pod *v1.Pod) (string, error) {
+					err := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, createdPod.Name, f.Namespace.Name, f.Timeouts.PodStart)
+					if err != nil {
+						return "", err
+					}
+					podStatus, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{})
+					if err != nil {
+						return "", err
+					}
+					return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podStatus.Name, containerName)
+				}
+
+				logs, err := getLogs(createdPod)
+				framework.ExpectNoError(err)
+
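+				// The container created by makePod is expected to print its /proc/self/uid_map, so the
+				// logs should contain a single mapping line of the form "<containerID> <hostUID> <length>".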
+				// The hostUID is the second field in the /proc/self/uid_map file.
+				hostMap := strings.Fields(logs)
+				if len(hostMap) != 3 {
+					framework.Failf("can't detect hostUID for container, is the format of /proc/self/uid_map correct?")
+				}
+
+				tmp, err := strconv.ParseUint(hostMap[1], 10, 32)
+				if err != nil {
+					framework.Failf("can't convert hostUID to int: %v", err)
+				}
+				hostUID := uint32(tmp)
+
+				// Here we check that the pod got a userns mapping within the range
+				// configured for the kubelet.
+				// To make sure the pod mapping doesn't fall within the range just by
+				// chance, we do the following:
+				// * The kubelet range is configured as small as possible (just enough
+				//   to fit 110 pods, the kubelet default) to minimize the chance of
+				//   this range being used "by chance" in the node configuration.
+				// * We also run this in a loop, so it is less likely to get lucky
+				//   several times in a row.
+				//
+				// There are 65536 possible ranges and we configured the kubelet to
+				// use 110 of them. If the kubelet were not restricted to the
+				// configured range, the chances of this test passing by chance 4
+				// times in a row would be:
+				//
+				//	(110/65536) ^ 4 = 7.9e-12. IOW, a few parts in a trillion.
+				//
+				// Furthermore, the unit tests would also need to be buggy and not
+				// detect the bug. We expect to catch off-by-one errors there.
+				if hostUID < id || hostUID > id+length {
+					framework.Failf("user namespace created outside of the configured range. Expected range: %v-%v, got: %v", id, id+length, hostUID)
+				}
+			}
+		})
+
		f.It("must not create the user namespace if set to true [LinuxOnly]", feature.UserNamespacesSupport, func(ctx context.Context) {
			// with hostUsers=true the pod must use the host user namespace
			pod := makePod(true)
@@ -124,8 +200,6 @@ var _ = SIGDescribe("Security Context", func() {
		})

		f.It("should mount all volumes with proper permissions with hostUsers=false [LinuxOnly]", feature.UserNamespacesSupport, func(ctx context.Context) {
-			// Create all volume types supported: configmap, secret, downwardAPI, projected.
-
			// Create configmap.
			name := "userns-volumes-test-" + string(uuid.NewUUID())
			configMap := newConfigMap(f, name)
@@ -636,6 +710,8 @@ var _ = SIGDescribe("Security Context", func() {
})

var _ = SIGDescribe("User Namespaces for Pod Security Standards [LinuxOnly]", func() {
+	e2eskipper.SkipIfNodeOSDistroIs("windows")
+
	f := framework.NewDefaultFramework("user-namespaces-pss-test")
	f.NamespacePodSecurityLevel = admissionapi.LevelRestricted
@@ -683,3 +759,57 @@ func waitForFailure(ctx context.Context, f *framework.Framework, name string, ti
		},
	)).To(gomega.Succeed(), "wait for pod %q to fail", name)
}
+
+// parseGetSubIdsOutput parses the output of the `getsubids` tool, which is used to query the
+// subordinate user or group ID ranges configured for a given user or group. getsubids produces
+// one line per configured mapping. Here we expect a single mapping, and that the same values
+// are used for the subordinate user and group ID ranges.
+// The output is something like:
+//	$ getsubids kubelet
+//	0: kubelet 65536 2147483648
+//	$ getsubids -g kubelet
+//	0: kubelet 65536 2147483648
+// XXX: this is a c&p from pkg/kubelet/kubelet_pods.go. It is simpler to c&p than to try to reuse it.
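+// For the example output above, the returned values would be 65536 (the first ID of the
+// subordinate range) and 2147483648 (the number of IDs in the range).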
+func parseGetSubIdsOutput(input string) (uint32, uint32, error) {
+	lines := strings.Split(strings.Trim(input, "\n"), "\n")
+	if len(lines) != 1 {
+		return 0, 0, fmt.Errorf("error parsing output %q: it must contain only one line", input)
+	}
+
+	parts := strings.Fields(lines[0])
+	if len(parts) != 4 {
+		return 0, 0, fmt.Errorf("invalid line %q", input)
+	}
+
+	// Parse the start of the subordinate ID range and its length.
+	num1, err := strconv.ParseUint(parts[2], 10, 32)
+	if err != nil {
+		return 0, 0, fmt.Errorf("error parsing line %q: %w", input, err)
+	}
+
+	num2, err := strconv.ParseUint(parts[3], 10, 32)
+	if err != nil {
+		return 0, 0, fmt.Errorf("error parsing line %q: %w", input, err)
+	}
+
+	return uint32(num1), uint32(num2), nil
+}
+
+func kubeletUsernsMappings(subuidBinary string) (uint32, uint32, error) {
+	cmd, err := exec.LookPath(subuidBinary)
+	if err != nil {
+		return 0, 0, fmt.Errorf("%q binary not found in PATH: %w", subuidBinary, err)
+	}
+	outUids, err := exec.Command(cmd, kubeletUserForUsernsMapping).Output()
+	if err != nil {
+		return 0, 0, fmt.Errorf("no additional uids for user %q: %w", kubeletUserForUsernsMapping, err)
+	}
+	outGids, err := exec.Command(cmd, "-g", kubeletUserForUsernsMapping).Output()
+	if err != nil {
+		return 0, 0, fmt.Errorf("no additional gids for user %q: %w", kubeletUserForUsernsMapping, err)
+	}
+	if string(outUids) != string(outGids) {
+		return 0, 0, fmt.Errorf("mismatched subuids and subgids for user %q", kubeletUserForUsernsMapping)
+	}
+
+	return parseGetSubIdsOutput(string(outUids))
+}