+package node
+
+import (
+	"context"
+	"fmt"
+	"os"
+
+	g "github.com/onsi/ginkgo/v2"
+	o "github.com/onsi/gomega"
+	exutil "github.com/openshift/origin/test/extended/util"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	admissionapi "k8s.io/pod-security-admission/api"
+	"k8s.io/utils/ptr"
+)
+
+var _ = g.Describe("[Suite:openshift/usernamespace] [sig-node] [FeatureGate:ProcMountType] [FeatureGate:UserNamespacesSupport] nested container", func() {
+	oc := exutil.NewCLIWithPodSecurityLevel("nested-podman", admissionapi.LevelBaseline)
+	g.It("should pass podman localsystem test in baseline mode",
+		func(ctx context.Context) {
+			if !exutil.IsTechPreviewNoUpgrade(ctx, oc.AdminConfigClient()) {
+				g.Skip("skipping, this feature is only supported on TechPreviewNoUpgrade clusters")
+			}
+			runNestedPod(ctx, oc)
+		},
+	)
+})
+
+func runNestedPod(ctx context.Context, oc *exutil.CLI) {
+	g.By("creating custom builder image")
+	name := "baseline-nested-container"
+	customImage := exutil.FixturePath("testdata", "node", "nested_container")
+	err := oc.Run("new-build").Args("--binary", "--strategy=docker", fmt.Sprintf("--name=%s", name)).Execute()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	br, _ := exutil.StartBuildAndWait(oc, name, fmt.Sprintf("--from-dir=%s", customImage))
+	br.AssertSuccess()
+
+	g.By("creating a pod with a nested container")
+	namespace := oc.Namespace()
+	pod := &corev1.Pod{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "v1",
+			Kind:       "Pod",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+			Annotations: map[string]string{
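+				// Ask CRI-O to expose /dev/fuse and /dev/net/tun to the container;
+				// podman inside the pod needs them for fuse-overlayfs storage and networking.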
+				"io.kubernetes.cri-o.Devices": "/dev/fuse,/dev/net/tun",
+			},
+		},
+		Spec: corev1.PodSpec{
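+			// HostUsers=false runs the pod in its own user namespace
+			// (requires the UserNamespacesSupport feature gate).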
+			HostUsers: ptr.To(false),
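+			// Bypass cluster DNS and point the nested containers at a public resolver.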
+			DNSPolicy: corev1.DNSNone,
+			DNSConfig: &corev1.PodDNSConfig{
+				Nameservers: []string{"1.1.1.1"},
+			},
+			RestartPolicy: corev1.RestartPolicyNever,
+			Containers: []corev1.Container{
+				{
+					Name: "nested-podman",
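+					// Image produced by the binary build above, pulled from the internal registry.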
+					Image:           fmt.Sprintf("image-registry.openshift-image-registry.svc:5000/%s/%s", namespace, name),
+					ImagePullPolicy: corev1.PullAlways,
+					Args: []string{
+						"./run_tests.sh",
+					},
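+					// Running podman inside the container needs an unmasked /proc
+					// (ProcMountType feature gate), the SETUID/SETGID capabilities for
+					// user switching, and the container_engine_t SELinux type.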
+					SecurityContext: &corev1.SecurityContext{
+						RunAsUser: ptr.To[int64](1000),
+						ProcMount: ptr.To(corev1.UnmaskedProcMount),
+						Capabilities: &corev1.Capabilities{
+							Add: []corev1.Capability{
+								"SETUID",
+								"SETGID",
+							},
+						},
+						SELinuxOptions: &corev1.SELinuxOptions{
+							Type: "container_engine_t",
+						},
+					},
+				},
+			},
+		},
+	}
+	_, err = oc.AsAdmin().KubeClient().CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
+	o.Expect(err).NotTo(o.HaveOccurred())
+
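+	// run_tests.sh is expected to touch a "done" file once the podman system tests
+	// have finished, and a "fail" file if any of them failed.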
+	g.By("waiting for the tests in the pod to complete")
+	o.Eventually(func() error {
+		_, err := oc.AsAdmin().Run("exec").Args(pod.Name, "--", "[", "-f", "done", "]").Output()
+		return err
+	}, "30m", "10s").Should(o.Succeed())
+
+	// Upload the test results from the podman system tests via the ARTIFACT_DIR env var.
+	// This is not elegant, but there is no other way to pass the artifact directory
+	// to each test case.
+	g.By("uploading results from the podman system tests")
+	artifact := os.Getenv("ARTIFACT_DIR")
+	_, err = oc.AsAdmin().Run("cp").Args(
+		fmt.Sprintf("%s:serial-junit/report.xml", pod.Name),
+		fmt.Sprintf("%s/junit/podman-system-serial.xml", artifact),
+	).Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	_, err = oc.AsAdmin().Run("cp").Args(
+		fmt.Sprintf("%s:parallel-junit/report.xml", pod.Name),
+		fmt.Sprintf("%s/junit/podman-system-parallel.xml", artifact),
+	).Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+	logs, err := oc.AsAdmin().KubeClient().CoreV1().Pods(namespace).GetLogs(pod.Name, &corev1.PodLogOptions{}).Do(ctx).Raw()
+	o.Expect(err).NotTo(o.HaveOccurred())
+
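+	// Fail the test if the "fail" marker exists, attaching the pod logs for debugging.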
+	_, err = oc.AsAdmin().Run("exec").Args(pod.Name, "--", "[", "!", "-f", "fail", "]").Output()
+	o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("one or more of the podman system tests failed:\n%s", logs))
+}