diff --git a/pkg/scheduler/webhook.go b/pkg/scheduler/webhook.go
index 5fc866017..b0bd13c2e 100644
--- a/pkg/scheduler/webhook.go
+++ b/pkg/scheduler/webhook.go
@@ -66,12 +66,29 @@ func (h *webhook) Handle(_ context.Context, req admission.Request) admission.Res
 		klog.Infof(template+" - Pod already has different scheduler assigned", req.Namespace, req.Name, req.UID)
 		return admission.Allowed("pod already has different scheduler assigned")
 	}
-	klog.Infof(template, pod.Namespace, pod.Name, pod.UID)
+	klog.Infof(template+" - Checking resources and scheduler assignment", pod.Namespace, pod.Name, pod.UID)
 	hasResource := false
-	for idx, ctr := range pod.Spec.Containers {
+	for idx := range pod.Spec.InitContainers {
+		c := &pod.Spec.InitContainers[idx]
+		if c.SecurityContext != nil {
+			if c.SecurityContext.Privileged != nil && *c.SecurityContext.Privileged {
+				klog.Warningf(template+" - Denying admission as init container %s is privileged", pod.Namespace, pod.Name, pod.UID, c.Name)
+				continue
+			}
+		}
+		for _, val := range device.GetDevices() {
+			found, err := val.MutateAdmission(c, pod)
+			if err != nil {
+				klog.Errorf("validating pod failed:%s", err.Error())
+				return admission.Errored(http.StatusInternalServerError, err)
+			}
+			hasResource = hasResource || found
+		}
+	}
+	for idx := range pod.Spec.Containers {
 		c := &pod.Spec.Containers[idx]
-		if ctr.SecurityContext != nil {
-			if ctr.SecurityContext.Privileged != nil && *ctr.SecurityContext.Privileged {
+		if c.SecurityContext != nil {
+			if c.SecurityContext.Privileged != nil && *c.SecurityContext.Privileged {
 				klog.Warningf(template+" - Denying admission as container %s is privileged", pod.Namespace, pod.Name, pod.UID, c.Name)
 				continue
 			}
diff --git a/pkg/scheduler/webhook_test.go b/pkg/scheduler/webhook_test.go
index 5f0daf0dd..eb96be909 100644
--- a/pkg/scheduler/webhook_test.go
+++ b/pkg/scheduler/webhook_test.go
@@ -241,3 +241,88 @@ func TestPodHasDifferentScheduler(t *testing.T) {
 		t.Errorf("Expected allowed response for pod with different scheduler, but got: %v", resp)
 	}
 }
+
+func TestPodHasInitContainer(t *testing.T) {
+	config.SchedulerName = "hami-scheduler"
+	config.ForceOverwriteDefaultScheduler = true
+	deviceConfig := &device.Config{
+		NvidiaConfig: nvidia.NvidiaConfig{
+			ResourceCountName:            "hami.io/gpu",
+			ResourceMemoryName:           "hami.io/gpumem",
+			ResourceMemoryPercentageName: "hami.io/gpumem-percentage",
+			ResourceCoreName:             "hami.io/gpucores",
+			DefaultMemory:                0,
+			DefaultCores:                 0,
+			DefaultGPUNum:                1,
+		},
+	}
+
+	if err := device.InitDevicesWithConfig(deviceConfig); err != nil {
+		klog.Fatalf("Failed to initialize devices with config: %v", err)
+	}
+
+	pod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test-pod",
+			Namespace: "default",
+		},
+		Spec: corev1.PodSpec{
+			Containers: []corev1.Container{
+				{
+					Name: "container1",
+					SecurityContext: &corev1.SecurityContext{
+						Privileged: nil,
+					},
+					Resources: corev1.ResourceRequirements{
+						Limits: corev1.ResourceList{
+							"hami.io/gpu": resource.MustParse("1"),
+						},
+					},
+				},
+			},
+			InitContainers: []corev1.Container{
+				{
+					Name: "container2",
+					SecurityContext: &corev1.SecurityContext{
+						Privileged: nil,
+					},
+					Resources: corev1.ResourceRequirements{
+						Limits: corev1.ResourceList{
+							"hami.io/gpu": resource.MustParse("1"),
+						},
+					},
+				},
+			},
+		},
+	}
+
+	scheme := runtime.NewScheme()
+	corev1.AddToScheme(scheme)
+	codec := serializer.NewCodecFactory(scheme).LegacyCodec(corev1.SchemeGroupVersion)
+	podBytes, err := runtime.Encode(codec, pod)
+	if err != nil {
+		t.Fatalf("Error encoding pod: %v", err)
+	}
+
+	req := admission.Request{
+		AdmissionRequest: admissionv1.AdmissionRequest{
+			UID:       "test-uid",
+			Namespace: "default",
+			Name:      "test-pod",
+			Object: runtime.RawExtension{
+				Raw: podBytes,
+			},
+		},
+	}
+	wh, err := NewWebHook()
+	if err != nil {
+		t.Fatalf("Error creating WebHook: %v", err)
+	}
+
+	resp := wh.Handle(context.Background(), req)
+
+	if !resp.Allowed {
+		t.Errorf("Expected allowed response, but got: %v", resp)
+	}
+
+}