diff --git a/domain/kube-score.go b/domain/kube-score.go
index a962a7f1..3b5e5c62 100644
--- a/domain/kube-score.go
+++ b/domain/kube-score.go
@@ -27,6 +27,7 @@ type NamedReader interface {
 
 type FileLocation struct {
 	Name string
+	Skip bool
 	Line int
 }
 
diff --git a/parser/parse.go b/parser/parse.go
index ffba172a..6dc9123d 100644
--- a/parser/parse.go
+++ b/parser/parse.go
@@ -23,6 +23,7 @@ import (
 	networkingv1beta1 "k8s.io/api/networking/v1beta1"
 	policyv1 "k8s.io/api/policy/v1"
 	policyv1beta1 "k8s.io/api/policy/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/runtime/serializer"
@@ -247,9 +248,31 @@ func detectFileLocation(fileName string, fileOffset int, fileContents []byte) ks
 	return ks.FileLocation{
 		Name: fileName,
 		Line: fileOffset,
+		Skip: false,
 	}
 }
 
+const (
+	SkippedResourceAnnotation = "kube-score/skip"
+)
+
+// IsSkipped reports whether any annotation map sets kube-score/skip to a
+// YAML boolean true. The errs parameter is kept for compatibility only:
+// appends to a slice argument are never visible to callers, so it is unused.
+func IsSkipped(_ []error, annotations ...map[string]string) bool {
+	skip := false
+	for _, annotation := range annotations {
+		if raw, ok := annotation[SkippedResourceAnnotation]; ok {
+			_ = yaml.Unmarshal([]byte(raw), &skip)
+		}
+	}
+	return skip
+}
+
+func (p *Parser) isSkipped(res metav1.ObjectMetaAccessor, _ parseErrors) bool {
+	return IsSkipped(nil, res.GetObjectMeta().GetAnnotations())
+}
+
 func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersionKind, fileName string, fileOffset int, fileContents []byte) error {
 	addPodSpeccer := func(ps ks.PodSpecer) {
 		s.podspecers = append(s.podspecers, ps)
@@ -268,6 +291,7 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio
 	case corev1.SchemeGroupVersion.WithKind("Pod"):
 		var pod corev1.Pod
 		errs.AddIfErr(p.decode(fileContents, &pod))
+		fileLocation.Skip = 
p.isSkipped(&pod, errs) p := internalpod.Pod{Obj: pod, Location: fileLocation} s.pods = append(s.pods, p) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: pod.TypeMeta, ObjectMeta: pod.ObjectMeta, FileLocationer: p}) @@ -275,11 +299,13 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case batchv1.SchemeGroupVersion.WithKind("Job"): var job batchv1.Job errs.AddIfErr(p.decode(fileContents, &job)) + fileLocation.Skip = p.isSkipped(&job, errs) addPodSpeccer(internal.Batchv1Job{Job: job, Location: fileLocation}) case batchv1beta1.SchemeGroupVersion.WithKind("CronJob"): var cronjob batchv1beta1.CronJob errs.AddIfErr(p.decode(fileContents, &cronjob)) + fileLocation.Skip = p.isSkipped(&cronjob, errs) cjob := internalcronjob.CronJobV1beta1{Obj: cronjob, Location: fileLocation} addPodSpeccer(cjob) s.cronjobs = append(s.cronjobs, cjob) @@ -287,6 +313,7 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case batchv1.SchemeGroupVersion.WithKind("CronJob"): var cronjob batchv1.CronJob errs.AddIfErr(p.decode(fileContents, &cronjob)) + fileLocation.Skip = p.isSkipped(&cronjob, errs) cjob := internalcronjob.CronJobV1{Obj: cronjob, Location: fileLocation} addPodSpeccer(cjob) s.cronjobs = append(s.cronjobs, cjob) @@ -294,6 +321,7 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case appsv1.SchemeGroupVersion.WithKind("Deployment"): var deployment appsv1.Deployment errs.AddIfErr(p.decode(fileContents, &deployment)) + fileLocation.Skip = p.isSkipped(&deployment, errs) deploy := internal.Appsv1Deployment{Obj: deployment, Location: fileLocation} addPodSpeccer(deploy) @@ -302,19 +330,24 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case appsv1beta1.SchemeGroupVersion.WithKind("Deployment"): var deployment appsv1beta1.Deployment errs.AddIfErr(p.decode(fileContents, &deployment)) + fileLocation.Skip = p.isSkipped(&deployment, errs) 
addPodSpeccer(internal.Appsv1beta1Deployment{Deployment: deployment, Location: fileLocation}) case appsv1beta2.SchemeGroupVersion.WithKind("Deployment"): var deployment appsv1beta2.Deployment errs.AddIfErr(p.decode(fileContents, &deployment)) + fileLocation.Skip = p.isSkipped(&deployment, errs) addPodSpeccer(internal.Appsv1beta2Deployment{Deployment: deployment, Location: fileLocation}) case extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment"): var deployment extensionsv1beta1.Deployment errs.AddIfErr(p.decode(fileContents, &deployment)) + fileLocation.Skip = p.isSkipped(&deployment, errs) addPodSpeccer(internal.Extensionsv1beta1Deployment{Deployment: deployment, Location: fileLocation}) case appsv1.SchemeGroupVersion.WithKind("StatefulSet"): var statefulSet appsv1.StatefulSet errs.AddIfErr(p.decode(fileContents, &statefulSet)) + fileLocation.Skip = p.isSkipped(&statefulSet, errs) + sset := internal.Appsv1StatefulSet{Obj: statefulSet, Location: fileLocation} addPodSpeccer(sset) @@ -323,28 +356,36 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSet"): var statefulSet appsv1beta1.StatefulSet errs.AddIfErr(p.decode(fileContents, &statefulSet)) + fileLocation.Skip = p.isSkipped(&statefulSet, errs) + addPodSpeccer(internal.Appsv1beta1StatefulSet{StatefulSet: statefulSet, Location: fileLocation}) case appsv1beta2.SchemeGroupVersion.WithKind("StatefulSet"): var statefulSet appsv1beta2.StatefulSet errs.AddIfErr(p.decode(fileContents, &statefulSet)) + fileLocation.Skip = p.isSkipped(&statefulSet, errs) + addPodSpeccer(internal.Appsv1beta2StatefulSet{StatefulSet: statefulSet, Location: fileLocation}) case appsv1.SchemeGroupVersion.WithKind("DaemonSet"): var daemonset appsv1.DaemonSet errs.AddIfErr(p.decode(fileContents, &daemonset)) + fileLocation.Skip = p.isSkipped(&daemonset, errs) addPodSpeccer(internal.Appsv1DaemonSet{DaemonSet: daemonset, Location: fileLocation}) case 
appsv1beta2.SchemeGroupVersion.WithKind("DaemonSet"): var daemonset appsv1beta2.DaemonSet errs.AddIfErr(p.decode(fileContents, &daemonset)) + fileLocation.Skip = p.isSkipped(&daemonset, errs) addPodSpeccer(internal.Appsv1beta2DaemonSet{DaemonSet: daemonset, Location: fileLocation}) case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSet"): var daemonset extensionsv1beta1.DaemonSet errs.AddIfErr(p.decode(fileContents, &daemonset)) + fileLocation.Skip = p.isSkipped(&daemonset, errs) addPodSpeccer(internal.Extensionsv1beta1DaemonSet{DaemonSet: daemonset, Location: fileLocation}) case networkingv1.SchemeGroupVersion.WithKind("NetworkPolicy"): var netpol networkingv1.NetworkPolicy errs.AddIfErr(p.decode(fileContents, &netpol)) + fileLocation.Skip = p.isSkipped(&netpol, errs) np := internalnetpol.NetworkPolicy{Obj: netpol, Location: fileLocation} s.networkPolicies = append(s.networkPolicies, np) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: netpol.TypeMeta, ObjectMeta: netpol.ObjectMeta, FileLocationer: np}) @@ -352,6 +393,7 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case corev1.SchemeGroupVersion.WithKind("Service"): var service corev1.Service errs.AddIfErr(p.decode(fileContents, &service)) + fileLocation.Skip = p.isSkipped(&service, errs) serv := internalservice.Service{Obj: service, Location: fileLocation} s.services = append(s.services, serv) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: service.TypeMeta, ObjectMeta: service.ObjectMeta, FileLocationer: serv}) @@ -359,12 +401,14 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case policyv1beta1.SchemeGroupVersion.WithKind("PodDisruptionBudget"): var disruptBudget policyv1beta1.PodDisruptionBudget errs.AddIfErr(p.decode(fileContents, &disruptBudget)) + fileLocation.Skip = p.isSkipped(&disruptBudget, errs) dbug := internalpdb.PodDisruptionBudgetV1beta1{Obj: disruptBudget, Location: fileLocation} s.podDisruptionBudgets 
= append(s.podDisruptionBudgets, dbug) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: disruptBudget.TypeMeta, ObjectMeta: disruptBudget.ObjectMeta, FileLocationer: dbug}) case policyv1.SchemeGroupVersion.WithKind("PodDisruptionBudget"): var disruptBudget policyv1.PodDisruptionBudget errs.AddIfErr(p.decode(fileContents, &disruptBudget)) + fileLocation.Skip = p.isSkipped(&disruptBudget, errs) dbug := internalpdb.PodDisruptionBudgetV1{Obj: disruptBudget, Location: fileLocation} s.podDisruptionBudgets = append(s.podDisruptionBudgets, dbug) s.bothMetas = append(s.bothMetas, ks.BothMeta{ @@ -376,6 +420,7 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case extensionsv1beta1.SchemeGroupVersion.WithKind("Ingress"): var ingress extensionsv1beta1.Ingress errs.AddIfErr(p.decode(fileContents, &ingress)) + fileLocation.Skip = p.isSkipped(&ingress, errs) ing := internal.ExtensionsIngressV1beta1{Ingress: ingress, Location: fileLocation} s.ingresses = append(s.ingresses, ing) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: ingress.TypeMeta, ObjectMeta: ingress.ObjectMeta, FileLocationer: ing}) @@ -383,6 +428,7 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case networkingv1beta1.SchemeGroupVersion.WithKind("Ingress"): var ingress networkingv1beta1.Ingress errs.AddIfErr(p.decode(fileContents, &ingress)) + fileLocation.Skip = p.isSkipped(&ingress, errs) ing := internal.IngressV1beta1{Ingress: ingress, Location: fileLocation} s.ingresses = append(s.ingresses, ing) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: ingress.TypeMeta, ObjectMeta: ingress.ObjectMeta, FileLocationer: ing}) @@ -390,6 +436,7 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case networkingv1.SchemeGroupVersion.WithKind("Ingress"): var ingress networkingv1.Ingress errs.AddIfErr(p.decode(fileContents, &ingress)) + fileLocation.Skip = p.isSkipped(&ingress, errs) ing := 
internal.IngressV1{Ingress: ingress, Location: fileLocation} s.ingresses = append(s.ingresses, ing) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: ingress.TypeMeta, ObjectMeta: ingress.ObjectMeta, FileLocationer: ing}) @@ -397,6 +444,7 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case autoscalingv1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): var hpa autoscalingv1.HorizontalPodAutoscaler errs.AddIfErr(p.decode(fileContents, &hpa)) + fileLocation.Skip = p.isSkipped(&hpa, errs) h := internal.HPAv1{HorizontalPodAutoscaler: hpa, Location: fileLocation} s.hpaTargeters = append(s.hpaTargeters, h) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: hpa.TypeMeta, ObjectMeta: hpa.ObjectMeta, FileLocationer: h}) @@ -404,6 +452,7 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case autoscalingv2beta1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): var hpa autoscalingv2beta1.HorizontalPodAutoscaler errs.AddIfErr(p.decode(fileContents, &hpa)) + fileLocation.Skip = p.isSkipped(&hpa, errs) h := internal.HPAv2beta1{HorizontalPodAutoscaler: hpa, Location: fileLocation} s.hpaTargeters = append(s.hpaTargeters, h) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: hpa.TypeMeta, ObjectMeta: hpa.ObjectMeta, FileLocationer: h}) @@ -411,6 +460,7 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case autoscalingv2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): var hpa autoscalingv2beta2.HorizontalPodAutoscaler errs.AddIfErr(p.decode(fileContents, &hpa)) + fileLocation.Skip = p.isSkipped(&hpa, errs) h := internal.HPAv2beta2{HorizontalPodAutoscaler: hpa, Location: fileLocation} s.hpaTargeters = append(s.hpaTargeters, h) s.bothMetas = append(s.bothMetas, ks.BothMeta{ @@ -422,6 +472,7 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case 
autoscalingv2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"):
 		var hpa autoscalingv2.HorizontalPodAutoscaler
 		errs.AddIfErr(p.decode(fileContents, &hpa))
+		fileLocation.Skip = p.isSkipped(&hpa, errs)
 		h := internal.HPAv2{HorizontalPodAutoscaler: hpa, Location: fileLocation}
 		s.hpaTargeters = append(s.hpaTargeters, h)
 		s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: hpa.TypeMeta, ObjectMeta: hpa.ObjectMeta, FileLocationer: h})
diff --git a/parser/parse_test.go b/parser/parse_test.go
index d7c7b8eb..617d46b9 100644
--- a/parser/parse_test.go
+++ b/parser/parse_test.go
@@ -2,7 +2,9 @@ package parser
 
 import (
 	"fmt"
+	"io"
 	"os"
+	"strings"
 	"testing"
 
 	ks "github.com/zegl/kube-score/domain"
@@ -73,3 +75,117 @@ spec:
 	assert.Equal(t, "someName", fl.Name)
 	assert.Equal(t, 123, fl.Line)
 }
+
+type namedReader struct {
+	io.Reader
+	name string
+}
+
+func (n namedReader) Name() string {
+	return n.name
+}
+
+func parse(t *testing.T, doc, name string) ks.AllTypes {
+	p, err := New(nil)
+	assert.NoError(t, err)
+	parsedFiles, err := p.ParseFiles([]ks.NamedReader{
+		namedReader{Reader: strings.NewReader(doc), name: name},
+	})
+	assert.NoError(t, err)
+	return parsedFiles
+}
+
+func TestSkipNo(t *testing.T) {
+	t.Parallel()
+	doc := `kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: foo
+  annotations:
+    kube-score/skip: "No"
+spec:
+  template:
+    metadata:
+      labels:
+        foo: bar`
+
+	location := parse(t, doc, "skip-no.yaml").Deployments()[0].FileLocation()
+	assert.Equal(t, "skip-no.yaml", location.Name)
+	assert.Equal(t, false, location.Skip)
+}
+
+func TestSkipYes(t *testing.T) {
+	t.Parallel()
+	doc := `kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: foo
+  annotations:
+    kube-score/skip: " yes "
+spec:
+  template:
+    metadata:
+      labels:
+        foo: bar`
+
+	location := parse(t, doc, "skip-yes.yaml").Deployments()[0].FileLocation()
+	assert.Equal(t, "skip-yes.yaml", location.Name)
+	assert.Equal(t, true, location.Skip)
+}
+
+func TestSkipTrueUppercase(t 
*testing.T) { + t.Parallel() + doc := `kind: Deployment +apiVersion: apps/v1 +metadata: + name: foo + annotations: + "kube-score/skip": "True" +spec: + template: + metadata: + labels: + foo: bar` + + location := parse(t, doc, "skip-true-uppercase.yaml").Deployments()[0].FileLocation() + assert.Equal(t, "skip-true-uppercase.yaml", location.Name) + assert.Equal(t, true, location.Skip) +} + +func TestSkipTrue(t *testing.T) { + t.Parallel() + doc := `kind: Deployment +apiVersion: apps/v1 +metadata: + name: foo + annotations: + "kube-score/skip": "true" +spec: + template: + metadata: + labels: + foo: bar` + + location := parse(t, doc, "skip-true.yaml").Deployments()[0].FileLocation() + assert.Equal(t, "skip-true.yaml", location.Name) + assert.Equal(t, true, location.Skip) +} + +func TestSkipFalse(t *testing.T) { + t.Parallel() + doc := `kind: Deployment +apiVersion: apps/v1 +metadata: + name: foo + annotations: + "kube-score/skip": "false" +spec: + template: + metadata: + labels: + foo: bar` + + location := parse(t, doc, "skip-false.yaml").Deployments()[0].FileLocation() + assert.Equal(t, "skip-false.yaml", location.Name) + assert.Equal(t, false, location.Skip) +} diff --git a/renderer/human/human.go b/renderer/human/human.go index 4b098625..56158d82 100644 --- a/renderer/human/human.go +++ b/renderer/human/human.go @@ -64,10 +64,22 @@ func Human(scoreCard *scorecard.Scorecard, verboseOutput int, termWidth int, use } } - for _, card := range scoredObject.Checks { - r := outputHumanStep(card, verboseOutput, termWidth) - if _, err := io.Copy(w, r); err != nil { - return nil, fmt.Errorf("failed to copy output: %w", err) + if scoredObject.FileLocation.Skip { + if verboseOutput >= 2 { + // Only print skipped files if verbosity is at least 2 + color.New(color.FgGreen).Fprintf( + w, + " [SKIPPED] %s#L%d\n", + scoredObject.FileLocation.Name, + scoredObject.FileLocation.Line, + ) + } + } else { + for _, card := range scoredObject.Checks { + r := outputHumanStep(card, 
verboseOutput, termWidth) + if _, err := io.Copy(w, r); err != nil { + return nil, fmt.Errorf("failed to copy output: %w", err) + } } } } diff --git a/score/apps_test.go b/score/apps_test.go index f62a3c8e..799efd64 100644 --- a/score/apps_test.go +++ b/score/apps_test.go @@ -123,3 +123,42 @@ func TestStatefulsetTemplateIgnoresNotIgnoredWhenFlagDisabled(t *testing.T) { }, "Container Image Tag") assert.False(t, skipped) } + +func TestStatefulsetTemplateNestedSkip(t *testing.T) { + t.Parallel() + sc, err := testScore( + []ks.NamedReader{testFile("statefulset-nested-skip.yaml")}, + nil, + &config.RunConfiguration{ + UseIgnoreChecksAnnotation: true, + UseOptionalChecksAnnotation: true, + }, + ) + assert.NoError(t, err) + + for _, objectScore := range sc { + for _, s := range objectScore.Checks { + t.Logf("id=%s type=%v skipped=%v\n", s.Check.ID, s.Check.TargetType, s.Skipped) + switch s.Check.TargetType { + case "StatefulSet", "all": + assert.False(t, s.Skipped) + default: + assert.True(t, s.Skipped) + } + } + } +} + +func TestStatefulsetTemplateSkip(t *testing.T) { + skipped := fileWasSkipped( + t, + []ks.NamedReader{testFile("statefulset-skip.yaml")}, + nil, + &config.RunConfiguration{ + UseIgnoreChecksAnnotation: true, + UseOptionalChecksAnnotation: true, + }, + "testdata/statefulset-skip.yaml", + ) + assert.True(t, skipped) +} diff --git a/score/score_test.go b/score/score_test.go index d32d6445..20c009fc 100644 --- a/score/score_test.go +++ b/score/score_test.go @@ -40,6 +40,24 @@ func testExpectedScoreWithConfig(t *testing.T, files []ks.NamedReader, checksCon return nil } +func fileWasSkipped(t *testing.T, files []ks.NamedReader, checksConfig *checks.Config, runConfig *config.RunConfiguration, filename string) bool { + sc, err := testScore(files, checksConfig, runConfig) + assert.NoError(t, err) + for _, objectScore := range sc { + t.Logf("path=%s skip=%v\n", objectScore.FileLocation.Name, objectScore.FileLocation.Skip) + if objectScore.FileLocation.Name == 
filename { + if objectScore.FileLocation.Skip { + // Make sure all checks are skipped too + for _, s := range objectScore.Checks { + assert.Equal(t, s.Skipped, true) + } + } + return objectScore.FileLocation.Skip + } + } + return false +} + func wasSkipped(t *testing.T, files []ks.NamedReader, checksConfig *checks.Config, runConfig *config.RunConfiguration, testcase string) bool { sc, err := testScore(files, checksConfig, runConfig) assert.NoError(t, err) diff --git a/score/testdata/statefulset-nested-skip.yaml b/score/testdata/statefulset-nested-skip.yaml new file mode 100644 index 00000000..00993cc7 --- /dev/null +++ b/score/testdata/statefulset-nested-skip.yaml @@ -0,0 +1,110 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: trivy + namespace: trivy-staging +spec: + podManagementPolicy: Parallel + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/instance: trivy + app.kubernetes.io/name: trivy + serviceName: trivy + template: + metadata: + annotations: + kube-score/skip: "true" + kube-score/ignore: container-image-tag,pod-probes + labels: + app.kubernetes.io/instance: trivy + app.kubernetes.io/name: trivy + spec: + automountServiceAccountToken: false + containers: + - args: + - server + envFrom: + - configMapRef: + name: trivy + - secretRef: + name: trivy + image: aquasec/trivy:latest + imagePullPolicy: Always + livenessProbe: + failureThreshold: 10 + httpGet: + path: /healthz + port: trivy-http + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + name: main + ports: + - containerPort: 4954 + name: trivy-http + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: trivy-http + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: "1" + ephemeral-storage: 128Mi + memory: 1Gi + requests: + cpu: 200m + memory: 512Mi + securityContext: + privileged: false + 
readOnlyRootFilesystem: true + runAsGroup: 65534 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /tmp + name: tmp-data + - mountPath: /home/scanner/.cache + name: data + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: + fsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + serviceAccount: trivy + serviceAccountName: trivy + terminationGracePeriodSeconds: 30 + volumes: + - emptyDir: {} + name: tmp-data + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + creationTimestamp: null + name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + volumeMode: Filesystem + status: + phase: Pending diff --git a/score/testdata/statefulset-skip.yaml b/score/testdata/statefulset-skip.yaml new file mode 100644 index 00000000..0a1f3bac --- /dev/null +++ b/score/testdata/statefulset-skip.yaml @@ -0,0 +1,112 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: trivy + namespace: trivy-staging + annotations: + kube-score/skip: "true" +spec: + podManagementPolicy: Parallel + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/instance: trivy + app.kubernetes.io/name: trivy + serviceName: trivy + template: + metadata: + annotations: + kube-score/skip: "true" + kube-score/ignore: container-image-tag,pod-probes + labels: + app.kubernetes.io/instance: trivy + app.kubernetes.io/name: trivy + spec: + automountServiceAccountToken: false + containers: + - args: + - server + envFrom: + - configMapRef: + name: trivy + - secretRef: + name: trivy + image: aquasec/trivy:latest + imagePullPolicy: Always + livenessProbe: + failureThreshold: 10 + httpGet: + path: /healthz + port: trivy-http + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + name: main 
+ ports: + - containerPort: 4954 + name: trivy-http + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: trivy-http + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: "1" + ephemeral-storage: 128Mi + memory: 1Gi + requests: + cpu: 200m + memory: 512Mi + securityContext: + privileged: false + readOnlyRootFilesystem: true + runAsGroup: 65534 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /tmp + name: tmp-data + - mountPath: /home/scanner/.cache + name: data + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: + fsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + serviceAccount: trivy + serviceAccountName: trivy + terminationGracePeriodSeconds: 30 + volumes: + - emptyDir: {} + name: tmp-data + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + creationTimestamp: null + name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + volumeMode: Filesystem + status: + phase: Pending diff --git a/scorecard/enabled.go b/scorecard/enabled.go index 90c53ba7..e5ceeeb1 100644 --- a/scorecard/enabled.go +++ b/scorecard/enabled.go @@ -13,6 +13,10 @@ func (so *ScoredObject) isEnabled(check ks.Check, annotations, childAnnotations if v == key { return true } + if v == "*" { + // "*" wildcard matches all checks + return true + } if vals, ok := impliedIgnoreAnnotations[v]; ok { for i := range vals { if vals[i] == key { diff --git a/scorecard/scorecard.go b/scorecard/scorecard.go index 6f59e094..8a07ad46 100644 --- a/scorecard/scorecard.go +++ b/scorecard/scorecard.go @@ -5,6 +5,7 @@ import ( "github.com/zegl/kube-score/config" ks "github.com/zegl/kube-score/domain" + "github.com/zegl/kube-score/parser" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1"
 )
@@ -95,8 +96,11 @@ func (so *ScoredObject) Add(ts TestScore, check ks.Check, locationer ks.FileLoca
 	ts.Check = check
 	so.FileLocation = locationer.FileLocation()
 
-	var skip bool
-	if annotations != nil {
+	skip := false
+	skipAll := so.FileLocation.Skip
+
+	if !skipAll && annotations != nil {
+		skipAll = parser.IsSkipped(nil, annotations...)
 		if len(annotations) == 1 && !so.isEnabled(check, annotations[0], nil) {
 			skip = true
 		}
@@ -106,7 +110,13 @@ func (so *ScoredObject) Add(ts TestScore, check ks.Check, locationer ks.FileLoca
 	}
 
 	// This test is ignored (via annotations), don't save the score
-	if skip {
+	if skipAll {
+		ts.Skipped = true
+		ts.Comments = []TestScoreComment{{Summary: fmt.Sprintf(
+			"Skipped because %s#L%d is skipped",
+			so.FileLocation.Name, so.FileLocation.Line,
+		)}}
+	} else if skip {
 		ts.Skipped = true
 		ts.Comments = []TestScoreComment{{Summary: fmt.Sprintf("Skipped because %s is ignored", check.ID)}}
 	}