
Commit 55014c7

recover: fix security contexts for bootstrap pods. (#577)
Bootstrapping relies on the bootstrap pods running as root so they can access files on the host machine. Bootkube now runs self-hosted pods as non-root, so recover needs to strip the security context when outputting bootstrap manifests.
1 parent 9d96f00 commit 55014c7
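
For reference, the fields being stripped are the pod-level and container-level RunAsNonRoot/RunAsUser settings. Below is a minimal, self-contained sketch of the idea; the helper name stripNonRoot and the example UID are illustrative, the k8s.io/api/core/v1 import path is the upstream one (bootkube vendors its own copy of these types), and the real logic lives inline in fixUpBootstrapPods in the diff below:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1" // illustrative import path; bootkube vendors these types
)

// stripNonRoot clears the fields that would force a bootstrap pod to run as a
// non-root user, so the rendered static manifest can read root-owned host files.
func stripNonRoot(pod *v1.Pod) {
	if pod.Spec.SecurityContext != nil {
		pod.Spec.SecurityContext.RunAsNonRoot = nil
		pod.Spec.SecurityContext.RunAsUser = nil
	}
	for i := range pod.Spec.Containers {
		if sc := pod.Spec.Containers[i].SecurityContext; sc != nil {
			sc.RunAsNonRoot = nil
			sc.RunAsUser = nil
		}
	}
}

func main() {
	// Arbitrary example values standing in for a self-hosted pod spec.
	nonRoot, uid := true, int64(65534)
	pod := &v1.Pod{Spec: v1.PodSpec{
		SecurityContext: &v1.PodSecurityContext{RunAsNonRoot: &nonRoot, RunAsUser: &uid},
	}}
	stripNonRoot(pod)
	fmt.Println(pod.Spec.SecurityContext.RunAsNonRoot == nil, pod.Spec.SecurityContext.RunAsUser == nil) // true true
}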

File tree (2 files changed: +21 -1 lines changed)

	pkg/recovery/recover.go
	pkg/recovery/recover_test.go

pkg/recovery/recover.go

Lines changed: 15 additions & 1 deletion
@@ -192,14 +192,21 @@ func setBootstrapPodMetadata(pod *v1.Pod, parent metav1.ObjectMeta) error {
 }
 
 // fixUpBootstrapPods modifies extracted bootstrap pod specs to have correct metadata and point to
-// filesystem-mount-based secrets. It returns mappings from configMap and secret names to output
+// filesystem-mount-based secrets, and removes any security contexts that might prevent the pods
+// from accessing those secrets. It returns mappings from configMap and secret names to output
 // paths that must also be rendered in order for the bootstrap pods to be functional.
 // If selfHostedEtcd is true, it also fixes up the etcd servers flag for the API server.
 func fixUpBootstrapPods(pods []v1.Pod, selfHostedEtcd bool) (requiredConfigMaps, requiredSecrets map[string]string) {
 	requiredConfigMaps, requiredSecrets = make(map[string]string), make(map[string]string)
 	for i := range pods {
 		pod := &pods[i]
 
+		// Fix SecurityContext to ensure the pod runs as root.
+		if pod.Spec.SecurityContext != nil {
+			pod.Spec.SecurityContext.RunAsNonRoot = nil
+			pod.Spec.SecurityContext.RunAsUser = nil
+		}
+
 		// Change secret volumes to point to file mounts.
 		for i := range pod.Spec.Volumes {
 			vol := &pod.Spec.Volumes[i]
@@ -219,6 +226,13 @@ func fixUpBootstrapPods(pods []v1.Pod, selfHostedEtcd bool) (requiredConfigMaps,
 		// Make sure the kubeconfig is in the commandline.
 		for i := range pod.Spec.Containers {
 			cn := &pod.Spec.Containers[i]
+
+			// Fix SecurityContext to ensure the container runs as root.
+			if cn.SecurityContext != nil {
+				cn.SecurityContext.RunAsNonRoot = nil
+				cn.SecurityContext.RunAsUser = nil
+			}
+
 			// Assumes the bootkube naming convention is used. Could also just make sure the image uses hyperkube.
 			if _, ok := kubeConfigK8sContainers[cn.Name]; ok {
 				cn.Command = append(cn.Command, "--kubeconfig=/kubeconfig/kubeconfig")

pkg/recovery/recover_test.go

Lines changed: 6 additions & 0 deletions
@@ -207,6 +207,7 @@ func TestFixUpBootstrapPods(t *testing.T) {
 				Namespace: "kube-system",
 			},
 			Spec: v1.PodSpec{
+				SecurityContext: &v1.PodSecurityContext{RunAsNonRoot: boolPtr(true), RunAsUser: int64Ptr(65543)},
 				Containers: []v1.Container{{
 					Name:  "kube-scheduler",
 					Image: "quay.io/coreos/hyperkube:v1.6.4_coreos.0",
@@ -266,6 +267,7 @@ func TestFixUpBootstrapPods(t *testing.T) {
 				Namespace: "kube-system",
 			},
 			Spec: v1.PodSpec{
+				SecurityContext: &v1.PodSecurityContext{},
 				Containers: []v1.Container{{
 					Name:  "kube-scheduler",
 					Image: "quay.io/coreos/hyperkube:v1.6.4_coreos.0",
@@ -378,3 +380,7 @@ func TestSetTypeMeta(t *testing.T) {
 		}
 	}
 }
+
+func boolPtr(b bool) *bool { return &b }
+
+func int64Ptr(i int64) *int64 { return &i }
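
Reading the two test fixture hunks together: the input pod pins the scheduler to a non-root UID (65543), while the expected output keeps an empty PodSecurityContext rather than dropping it, since fixUpBootstrapPods only nils the two fields. A small self-contained illustration of those before/after values; the helper names mirror the ones added above, and the import path is the upstream one rather than bootkube's vendored path:

package main

import (
	"fmt"
	"reflect"

	v1 "k8s.io/api/core/v1" // illustrative import path
)

func boolPtr(b bool) *bool    { return &b }
func int64Ptr(i int64) *int64 { return &i }

func main() {
	// Input fixture: the self-hosted scheduler pod is pinned to a non-root user.
	input := &v1.PodSecurityContext{RunAsNonRoot: boolPtr(true), RunAsUser: int64Ptr(65543)}

	// Expected output: the context object survives, but both root-restricting fields are cleared.
	expected := &v1.PodSecurityContext{}

	// Clearing the fields, as fixUpBootstrapPods does, makes the two specs equal.
	input.RunAsNonRoot, input.RunAsUser = nil, nil
	fmt.Println(reflect.DeepEqual(input, expected)) // true
}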
