|
| 1 | +// +build e2e |
| 2 | + |
| 3 | +/* |
| 4 | +Copyright 2022 The Kubernetes Authors. |
| 5 | +
|
| 6 | +Licensed under the Apache License, Version 2.0 (the "License"); |
| 7 | +you may not use this file except in compliance with the License. |
| 8 | +You may obtain a copy of the License at |
| 9 | +
|
| 10 | + http://www.apache.org/licenses/LICENSE-2.0 |
| 11 | +
|
| 12 | +Unless required by applicable law or agreed to in writing, software |
| 13 | +distributed under the License is distributed on an "AS IS" BASIS, |
| 14 | +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 15 | +See the License for the specific language governing permissions and |
| 16 | +limitations under the License. |
| 17 | +*/ |
| 18 | + |
| 19 | +package e2e |
| 20 | + |
| 21 | +import ( |
| 22 | + "context" |
| 23 | + "fmt" |
| 24 | + "strconv" |
| 25 | + |
| 26 | + . "github.com/onsi/ginkgo" |
| 27 | + . "github.com/onsi/gomega" |
| 28 | + batchv1 "k8s.io/api/batch/v1" |
| 29 | + corev1 "k8s.io/api/core/v1" |
| 30 | + rbacv1 "k8s.io/api/rbac/v1" |
| 31 | + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" |
| 32 | + "sigs.k8s.io/cluster-api/test/framework" |
| 33 | +) |
| 34 | + |
| 35 | +// KubescapeSpecInput is the input for KubescapeSpec. |
// KubescapeSpecInput is the input for KubescapeSpec.
type KubescapeSpecInput struct {
	// BootstrapClusterProxy is the management-cluster proxy used to look up
	// the workload cluster under test via GetWorkloadCluster.
	BootstrapClusterProxy framework.ClusterProxy
	// Namespace is the namespace on the management cluster that holds the
	// workload cluster resource (not the namespace that gets scanned).
	Namespace *corev1.Namespace
	// ClusterName is the name of the workload cluster to scan.
	ClusterName string
	// FailThreshold is the kubescape failure-threshold percentage, expressed
	// as a string; it must parse via strconv.Atoi to an int in [0, 100].
	FailThreshold string
	// Container is the kubescape container image the scan job runs.
	Container string
	// SkipCleanup, when true, leaves the scan job and its RBAC objects in
	// place after the spec finishes.
	SkipCleanup bool
}
| 44 | + |
| 45 | +// KubescapeSpec implements a test that runs the kubescape security scanner. |
| 46 | +// See https://github.com/armosec/kubescape for details about kubescape. |
| 47 | +func KubescapeSpec(ctx context.Context, inputGetter func() KubescapeSpecInput) { |
| 48 | + var ( |
| 49 | + specName = "kubescape-scan" |
| 50 | + input KubescapeSpecInput |
| 51 | + failThreshold int |
| 52 | + ) |
| 53 | + |
| 54 | + input = inputGetter() |
| 55 | + Expect(input.Namespace).NotTo(BeNil(), "Invalid argument. input.Namespace can't be nil when calling %s spec", specName) |
| 56 | + Expect(input.ClusterName).NotTo(BeEmpty(), "Invalid argument. input.ClusterName can't be empty when calling %s spec", specName) |
| 57 | + failThreshold, err := strconv.Atoi(input.FailThreshold) |
| 58 | + Expect(err).NotTo(HaveOccurred(), "Invalid argument. input.FailThreshold can't be parsed to int when calling %s spec", specName) |
| 59 | + Expect(failThreshold).To(BeNumerically(">=", 0), "Invalid argument. input.FailThreshold can't be less than 0 when calling %s spec", specName) |
| 60 | + Expect(failThreshold).To(BeNumerically("<=", 100), "Invalid argument. input.FailThreshold can't be more than 100 when calling %s spec", specName) |
| 61 | + Expect(input.Container).NotTo(BeEmpty(), "Invalid argument. input.Container can't be empty when calling %s spec", specName) |
| 62 | + |
| 63 | + By("creating a Kubernetes client to the workload cluster") |
| 64 | + clusterProxy := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, input.Namespace.Name, input.ClusterName) |
| 65 | + Expect(clusterProxy).NotTo(BeNil()) |
| 66 | + clientset := clusterProxy.GetClientSet() |
| 67 | + Expect(clientset).NotTo(BeNil()) |
| 68 | + |
| 69 | + By("running a security scan job") |
| 70 | + const ( |
| 71 | + saName = "kubescape-discovery" |
| 72 | + roleName = saName + "-role" |
| 73 | + roleBindingName = roleName + "binding" |
| 74 | + clusterRoleName = saName + "-clusterrole" |
| 75 | + clusterRoleBindingName = clusterRoleName + "binding" |
| 76 | + ) |
| 77 | + |
| 78 | + Log("Creating a service account") |
| 79 | + saClient := clientset.CoreV1().ServiceAccounts(corev1.NamespaceDefault) |
| 80 | + serviceAccount := &corev1.ServiceAccount{ |
| 81 | + ObjectMeta: metav1.ObjectMeta{ |
| 82 | + Name: saName, |
| 83 | + Namespace: corev1.NamespaceDefault, |
| 84 | + Labels: map[string]string{"app": "kubescape"}, |
| 85 | + }, |
| 86 | + } |
| 87 | + _, err = saClient.Create(ctx, serviceAccount, metav1.CreateOptions{}) |
| 88 | + Expect(err).NotTo(HaveOccurred()) |
| 89 | + |
| 90 | + Log("Creating a role") |
| 91 | + rolesClient := clientset.RbacV1().Roles(corev1.NamespaceDefault) |
| 92 | + role := &rbacv1.Role{ |
| 93 | + ObjectMeta: metav1.ObjectMeta{Name: roleName, Namespace: corev1.NamespaceDefault}, |
| 94 | + Rules: []rbacv1.PolicyRule{ |
| 95 | + { |
| 96 | + APIGroups: []string{rbacv1.APIGroupAll}, |
| 97 | + Resources: []string{rbacv1.ResourceAll}, |
| 98 | + Verbs: []string{"get", "list", "describe"}, |
| 99 | + }, |
| 100 | + }, |
| 101 | + } |
| 102 | + _, err = rolesClient.Create(ctx, role, metav1.CreateOptions{}) |
| 103 | + Expect(err).NotTo(HaveOccurred()) |
| 104 | + |
| 105 | + Log("Creating a role binding") |
| 106 | + rolebindingsClient := clientset.RbacV1().RoleBindings(corev1.NamespaceDefault) |
| 107 | + rolebinding := &rbacv1.RoleBinding{ |
| 108 | + ObjectMeta: metav1.ObjectMeta{Name: roleBindingName, Namespace: corev1.NamespaceDefault}, |
| 109 | + RoleRef: rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: "Role", Name: roleName}, |
| 110 | + Subjects: []rbacv1.Subject{{Kind: rbacv1.ServiceAccountKind, Name: saName}}, |
| 111 | + } |
| 112 | + _, err = rolebindingsClient.Create(ctx, rolebinding, metav1.CreateOptions{}) |
| 113 | + Expect(err).NotTo(HaveOccurred()) |
| 114 | + |
| 115 | + Log("Creating a cluster role") |
| 116 | + clusterRolesClient := clientset.RbacV1().ClusterRoles() |
| 117 | + clusterRole := &rbacv1.ClusterRole{ |
| 118 | + ObjectMeta: metav1.ObjectMeta{Name: clusterRoleName}, |
| 119 | + Rules: []rbacv1.PolicyRule{ |
| 120 | + { |
| 121 | + APIGroups: []string{rbacv1.APIGroupAll}, |
| 122 | + Resources: []string{rbacv1.ResourceAll}, |
| 123 | + Verbs: []string{"get", "list", "describe"}, |
| 124 | + }, |
| 125 | + }, |
| 126 | + } |
| 127 | + _, err = clusterRolesClient.Create(ctx, clusterRole, metav1.CreateOptions{}) |
| 128 | + Expect(err).NotTo(HaveOccurred()) |
| 129 | + |
| 130 | + Log("Creating a cluster role binding") |
| 131 | + clusterRolebindingsClient := clientset.RbacV1().ClusterRoleBindings() |
| 132 | + clusterRolebinding := &rbacv1.ClusterRoleBinding{ |
| 133 | + ObjectMeta: metav1.ObjectMeta{Name: clusterRoleBindingName}, |
| 134 | + RoleRef: rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: "ClusterRole", Name: clusterRoleName}, |
| 135 | + Subjects: []rbacv1.Subject{{Kind: rbacv1.ServiceAccountKind, Name: saName, Namespace: corev1.NamespaceDefault}}, |
| 136 | + } |
| 137 | + _, err = clusterRolebindingsClient.Create(ctx, clusterRolebinding, metav1.CreateOptions{}) |
| 138 | + Expect(err).NotTo(HaveOccurred()) |
| 139 | + |
| 140 | + Log("Creating a security scan job") |
| 141 | + jobsClient := clientset.BatchV1().Jobs(corev1.NamespaceDefault) |
| 142 | + args := []string{"scan", "framework", "nsa", "--enable-host-scan", "--exclude-namespaces", "kube-system,kube-public"} |
| 143 | + if failThreshold < 100 { |
| 144 | + args = append(args, "--fail-threshold", strconv.Itoa(failThreshold)) |
| 145 | + } |
| 146 | + scanJob := &batchv1.Job{ |
| 147 | + ObjectMeta: metav1.ObjectMeta{Name: specName, Namespace: corev1.NamespaceDefault}, |
| 148 | + Spec: batchv1.JobSpec{ |
| 149 | + Template: corev1.PodTemplateSpec{ |
| 150 | + Spec: corev1.PodSpec{ |
| 151 | + Containers: []corev1.Container{ |
| 152 | + { |
| 153 | + Name: specName, |
| 154 | + Image: input.Container, |
| 155 | + Args: args, |
| 156 | + }, |
| 157 | + }, |
| 158 | + NodeSelector: map[string]string{corev1.LabelOSStable: "linux"}, |
| 159 | + RestartPolicy: corev1.RestartPolicyNever, |
| 160 | + ServiceAccountName: saName, |
| 161 | + }, |
| 162 | + }, |
| 163 | + }, |
| 164 | + } |
| 165 | + _, err = jobsClient.Create(ctx, scanJob, metav1.CreateOptions{}) |
| 166 | + Expect(err).NotTo(HaveOccurred()) |
| 167 | + scanJobInput := WaitForJobCompleteInput{ |
| 168 | + Getter: jobsClientAdapter{client: jobsClient}, |
| 169 | + Job: scanJob, |
| 170 | + Clientset: clientset, |
| 171 | + } |
| 172 | + WaitForJobComplete(ctx, scanJobInput, e2eConfig.GetIntervals(specName, "wait-job")...) |
| 173 | + |
| 174 | + fmt.Fprint(GinkgoWriter, getJobPodLogs(ctx, scanJobInput)) |
| 175 | + |
| 176 | + if !input.SkipCleanup { |
| 177 | + Log("Cleaning up resources") |
| 178 | + if err := jobsClient.Delete(ctx, specName, metav1.DeleteOptions{}); err != nil { |
| 179 | + Logf("Failed to delete job %s: %v", specName, err) |
| 180 | + } |
| 181 | + if err := clusterRolebindingsClient.Delete(ctx, clusterRoleBindingName, metav1.DeleteOptions{}); err != nil { |
| 182 | + Logf("Failed to delete cluster role binding %s: %v", clusterRoleBindingName, err) |
| 183 | + } |
| 184 | + if err := clusterRolesClient.Delete(ctx, clusterRoleName, metav1.DeleteOptions{}); err != nil { |
| 185 | + Logf("Failed to delete cluster role %s: %v", clusterRoleName, err) |
| 186 | + } |
| 187 | + if err := rolebindingsClient.Delete(ctx, roleBindingName, metav1.DeleteOptions{}); err != nil { |
| 188 | + Logf("Failed to delete role binding %s: %v", roleBindingName, err) |
| 189 | + } |
| 190 | + if err := rolesClient.Delete(ctx, roleName, metav1.DeleteOptions{}); err != nil { |
| 191 | + Logf("Failed to delete role %s: %v", roleName, err) |
| 192 | + } |
| 193 | + if err := saClient.Delete(ctx, saName, metav1.DeleteOptions{}); err != nil { |
| 194 | + Logf("Failed to delete service account %s: %v", saName, err) |
| 195 | + } |
| 196 | + } |
| 197 | +} |
0 commit comments