From f387eba3c9db4100a665790873c52b73ddc4f640 Mon Sep 17 00:00:00 2001 From: romnn Date: Tue, 8 Oct 2024 11:56:28 +0200 Subject: [PATCH 1/6] initial commit --- domain/kube-score.go | 1 + parser/parse.go | 103 ++++++++++++++++++++++++++++++++++++++++ renderer/human/human.go | 21 ++++++-- score/score.go | 54 +++++++++++++++++++++ scorecard/enabled.go | 20 ++++++++ scorecard/scorecard.go | 32 +++++++++++-- 6 files changed, 224 insertions(+), 7 deletions(-) diff --git a/domain/kube-score.go b/domain/kube-score.go index a962a7f..3b5e5c6 100644 --- a/domain/kube-score.go +++ b/domain/kube-score.go @@ -27,6 +27,7 @@ type NamedReader interface { type FileLocation struct { Name string + Skip bool Line int } diff --git a/parser/parse.go b/parser/parse.go index ffba172..810f7a6 100644 --- a/parser/parse.go +++ b/parser/parse.go @@ -23,6 +23,7 @@ import ( networkingv1beta1 "k8s.io/api/networking/v1beta1" policyv1 "k8s.io/api/policy/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -247,9 +248,36 @@ func detectFileLocation(fileName string, fileOffset int, fileContents []byte) ks return ks.FileLocation{ Name: fileName, Line: fileOffset, + Skip: false, } } +func (p *Parser) shouldSkip(res metav1.ObjectMetaAccessor) bool { + skip := false + meta := res.GetObjectMeta() + if skipAnnotation, ok := meta.GetAnnotations()["kube-score/skip"]; ok { + if err := yaml.Unmarshal([]byte(skipAnnotation), &skip); err != nil { + if p.config.VerboseOutput > 1 { + log.Printf( + "Invalid skip annotation: %q for %s in namespace %s", + skipAnnotation, + meta.GetName(), + meta.GetNamespace(), + ) + } + + } + } + if p.config.VerboseOutput > 1 && skip { + log.Printf( + "Skipping %s in namespace %s", + meta.GetName(), + meta.GetNamespace(), + ) + } + return skip +} + func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersionKind, fileName string, fileOffset int, fileContents []byte) error { addPodSpeccer := func(ps ks.PodSpecer) { s.podspecers = append(s.podspecers, ps) @@ -268,103 +296,156 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case corev1.SchemeGroupVersion.WithKind("Pod"): var pod corev1.Pod errs.AddIfErr(p.decode(fileContents, &pod)) + fileLocation.Skip = p.shouldSkip(&pod) + // if !fileLocation.Skip { p := internalpod.Pod{Obj: pod, Location: fileLocation} s.pods = append(s.pods, p) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: pod.TypeMeta, ObjectMeta: pod.ObjectMeta, FileLocationer: p}) + // } case batchv1.SchemeGroupVersion.WithKind("Job"): var job batchv1.Job errs.AddIfErr(p.decode(fileContents, &job)) + fileLocation.Skip = p.shouldSkip(&job) + // if !fileLocation.Skip { addPodSpeccer(internal.Batchv1Job{Job: job, Location: fileLocation}) + // } case batchv1beta1.SchemeGroupVersion.WithKind("CronJob"): var cronjob batchv1beta1.CronJob errs.AddIfErr(p.decode(fileContents, &cronjob)) + fileLocation.Skip = p.shouldSkip(&cronjob) + // if !p.shouldSkip(&cronjob) { cjob := internalcronjob.CronJobV1beta1{Obj: cronjob, Location: fileLocation} addPodSpeccer(cjob) s.cronjobs = append(s.cronjobs, cjob) + // } case batchv1.SchemeGroupVersion.WithKind("CronJob"): var cronjob batchv1.CronJob errs.AddIfErr(p.decode(fileContents, &cronjob)) + fileLocation.Skip = p.shouldSkip(&cronjob) + // if !p.shouldSkip(&cronjob) { cjob := internalcronjob.CronJobV1{Obj: cronjob, Location: fileLocation} 
addPodSpeccer(cjob) s.cronjobs = append(s.cronjobs, cjob) + // } case appsv1.SchemeGroupVersion.WithKind("Deployment"): var deployment appsv1.Deployment errs.AddIfErr(p.decode(fileContents, &deployment)) + fileLocation.Skip = p.shouldSkip(&deployment) + // if !p.shouldSkip(&deployment) { deploy := internal.Appsv1Deployment{Obj: deployment, Location: fileLocation} addPodSpeccer(deploy) // TODO: Support older versions of Deployment as well? s.deployments = append(s.deployments, deploy) + // } case appsv1beta1.SchemeGroupVersion.WithKind("Deployment"): var deployment appsv1beta1.Deployment + // if !p.shouldSkip(&deployment) { errs.AddIfErr(p.decode(fileContents, &deployment)) + fileLocation.Skip = p.shouldSkip(&deployment) addPodSpeccer(internal.Appsv1beta1Deployment{Deployment: deployment, Location: fileLocation}) + // } case appsv1beta2.SchemeGroupVersion.WithKind("Deployment"): var deployment appsv1beta2.Deployment + // if !p.shouldSkip(&deployment) { errs.AddIfErr(p.decode(fileContents, &deployment)) + fileLocation.Skip = p.shouldSkip(&deployment) addPodSpeccer(internal.Appsv1beta2Deployment{Deployment: deployment, Location: fileLocation}) + // } case extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment"): var deployment extensionsv1beta1.Deployment errs.AddIfErr(p.decode(fileContents, &deployment)) + fileLocation.Skip = p.shouldSkip(&deployment) + // if !p.shouldSkip(&deployment) { addPodSpeccer(internal.Extensionsv1beta1Deployment{Deployment: deployment, Location: fileLocation}) + // } case appsv1.SchemeGroupVersion.WithKind("StatefulSet"): var statefulSet appsv1.StatefulSet errs.AddIfErr(p.decode(fileContents, &statefulSet)) + fileLocation.Skip = p.shouldSkip(&statefulSet) + // if !p.shouldSkip(&statefulSet) { sset := internal.Appsv1StatefulSet{Obj: statefulSet, Location: fileLocation} addPodSpeccer(sset) // TODO: Support older versions of StatefulSet as well? 
s.statefulsets = append(s.statefulsets, sset) + // } case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSet"): var statefulSet appsv1beta1.StatefulSet errs.AddIfErr(p.decode(fileContents, &statefulSet)) + fileLocation.Skip = p.shouldSkip(&statefulSet) + // if !p.shouldSkip(&statefulSet) { addPodSpeccer(internal.Appsv1beta1StatefulSet{StatefulSet: statefulSet, Location: fileLocation}) + // } case appsv1beta2.SchemeGroupVersion.WithKind("StatefulSet"): var statefulSet appsv1beta2.StatefulSet errs.AddIfErr(p.decode(fileContents, &statefulSet)) + fileLocation.Skip = p.shouldSkip(&statefulSet) + // if !p.shouldSkip(&statefulSet) { addPodSpeccer(internal.Appsv1beta2StatefulSet{StatefulSet: statefulSet, Location: fileLocation}) + // } case appsv1.SchemeGroupVersion.WithKind("DaemonSet"): var daemonset appsv1.DaemonSet errs.AddIfErr(p.decode(fileContents, &daemonset)) + fileLocation.Skip = p.shouldSkip(&daemonset) + // if !p.shouldSkip(&daemonset) { addPodSpeccer(internal.Appsv1DaemonSet{DaemonSet: daemonset, Location: fileLocation}) + // } case appsv1beta2.SchemeGroupVersion.WithKind("DaemonSet"): var daemonset appsv1beta2.DaemonSet errs.AddIfErr(p.decode(fileContents, &daemonset)) + // if !p.shouldSkip(&daemonset) { + fileLocation.Skip = p.shouldSkip(&daemonset) addPodSpeccer(internal.Appsv1beta2DaemonSet{DaemonSet: daemonset, Location: fileLocation}) + // } case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSet"): var daemonset extensionsv1beta1.DaemonSet errs.AddIfErr(p.decode(fileContents, &daemonset)) + fileLocation.Skip = p.shouldSkip(&daemonset) + // if !p.shouldSkip(&daemonset) { addPodSpeccer(internal.Extensionsv1beta1DaemonSet{DaemonSet: daemonset, Location: fileLocation}) + // } case networkingv1.SchemeGroupVersion.WithKind("NetworkPolicy"): var netpol networkingv1.NetworkPolicy errs.AddIfErr(p.decode(fileContents, &netpol)) + fileLocation.Skip = p.shouldSkip(&netpol) + // if !p.shouldSkip(&netpol) { np := internalnetpol.NetworkPolicy{Obj: netpol, Location: fileLocation} s.networkPolicies = append(s.networkPolicies, np) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: netpol.TypeMeta, ObjectMeta: netpol.ObjectMeta, FileLocationer: np}) + // } case corev1.SchemeGroupVersion.WithKind("Service"): var service corev1.Service errs.AddIfErr(p.decode(fileContents, &service)) + fileLocation.Skip = p.shouldSkip(&service) + // if !p.shouldSkip(&service) { serv := internalservice.Service{Obj: service, Location: fileLocation} s.services = append(s.services, serv) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: service.TypeMeta, ObjectMeta: service.ObjectMeta, FileLocationer: serv}) + // } case policyv1beta1.SchemeGroupVersion.WithKind("PodDisruptionBudget"): var disruptBudget policyv1beta1.PodDisruptionBudget errs.AddIfErr(p.decode(fileContents, &disruptBudget)) + fileLocation.Skip = p.shouldSkip(&disruptBudget) + // if !p.shouldSkip(&disruptBudget) { dbug := internalpdb.PodDisruptionBudgetV1beta1{Obj: disruptBudget, Location: fileLocation} s.podDisruptionBudgets = append(s.podDisruptionBudgets, dbug) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: disruptBudget.TypeMeta, ObjectMeta: disruptBudget.ObjectMeta, FileLocationer: dbug}) + // } case policyv1.SchemeGroupVersion.WithKind("PodDisruptionBudget"): var disruptBudget policyv1.PodDisruptionBudget errs.AddIfErr(p.decode(fileContents, &disruptBudget)) + fileLocation.Skip = p.shouldSkip(&disruptBudget) + // if !p.shouldSkip(&disruptBudget) { dbug := internalpdb.PodDisruptionBudgetV1{Obj: disruptBudget, Location: 
fileLocation} s.podDisruptionBudgets = append(s.podDisruptionBudgets, dbug) s.bothMetas = append(s.bothMetas, ks.BothMeta{ @@ -372,45 +453,63 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio ObjectMeta: disruptBudget.ObjectMeta, FileLocationer: dbug, }) + // } case extensionsv1beta1.SchemeGroupVersion.WithKind("Ingress"): var ingress extensionsv1beta1.Ingress errs.AddIfErr(p.decode(fileContents, &ingress)) + fileLocation.Skip = p.shouldSkip(&ingress) + // if !p.shouldSkip(&ingress) { ing := internal.ExtensionsIngressV1beta1{Ingress: ingress, Location: fileLocation} s.ingresses = append(s.ingresses, ing) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: ingress.TypeMeta, ObjectMeta: ingress.ObjectMeta, FileLocationer: ing}) + // } case networkingv1beta1.SchemeGroupVersion.WithKind("Ingress"): var ingress networkingv1beta1.Ingress errs.AddIfErr(p.decode(fileContents, &ingress)) + fileLocation.Skip = p.shouldSkip(&ingress) + // if !p.shouldSkip(&ingress) { ing := internal.IngressV1beta1{Ingress: ingress, Location: fileLocation} s.ingresses = append(s.ingresses, ing) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: ingress.TypeMeta, ObjectMeta: ingress.ObjectMeta, FileLocationer: ing}) + // } case networkingv1.SchemeGroupVersion.WithKind("Ingress"): var ingress networkingv1.Ingress errs.AddIfErr(p.decode(fileContents, &ingress)) + fileLocation.Skip = p.shouldSkip(&ingress) + // if !p.shouldSkip(&ingress) { ing := internal.IngressV1{Ingress: ingress, Location: fileLocation} s.ingresses = append(s.ingresses, ing) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: ingress.TypeMeta, ObjectMeta: ingress.ObjectMeta, FileLocationer: ing}) + // } case autoscalingv1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): var hpa autoscalingv1.HorizontalPodAutoscaler errs.AddIfErr(p.decode(fileContents, &hpa)) + fileLocation.Skip = p.shouldSkip(&hpa) + // if !p.shouldSkip(&hpa) { h := internal.HPAv1{HorizontalPodAutoscaler: hpa, Location: fileLocation} s.hpaTargeters = append(s.hpaTargeters, h) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: hpa.TypeMeta, ObjectMeta: hpa.ObjectMeta, FileLocationer: h}) + // } case autoscalingv2beta1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): var hpa autoscalingv2beta1.HorizontalPodAutoscaler errs.AddIfErr(p.decode(fileContents, &hpa)) + fileLocation.Skip = p.shouldSkip(&hpa) + // if !p.shouldSkip(&hpa) { h := internal.HPAv2beta1{HorizontalPodAutoscaler: hpa, Location: fileLocation} s.hpaTargeters = append(s.hpaTargeters, h) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: hpa.TypeMeta, ObjectMeta: hpa.ObjectMeta, FileLocationer: h}) + // } case autoscalingv2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): var hpa autoscalingv2beta2.HorizontalPodAutoscaler errs.AddIfErr(p.decode(fileContents, &hpa)) + fileLocation.Skip = p.shouldSkip(&hpa) + // if !p.shouldSkip(&hpa) { h := internal.HPAv2beta2{HorizontalPodAutoscaler: hpa, Location: fileLocation} s.hpaTargeters = append(s.hpaTargeters, h) s.bothMetas = append(s.bothMetas, ks.BothMeta{ @@ -418,13 +517,17 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio ObjectMeta: hpa.ObjectMeta, FileLocationer: h, }) + // } case autoscalingv2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): var hpa autoscalingv2.HorizontalPodAutoscaler errs.AddIfErr(p.decode(fileContents, &hpa)) + fileLocation.Skip = p.shouldSkip(&hpa) + // if !skip { h := internal.HPAv2{HorizontalPodAutoscaler: hpa, Location: 
fileLocation} s.hpaTargeters = append(s.hpaTargeters, h) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: hpa.TypeMeta, ObjectMeta: hpa.ObjectMeta, FileLocationer: h}) + // } default: if p.config.VerboseOutput > 1 { diff --git a/renderer/human/human.go b/renderer/human/human.go index 4b09862..5f203b0 100644 --- a/renderer/human/human.go +++ b/renderer/human/human.go @@ -64,10 +64,23 @@ func Human(scoreCard *scorecard.Scorecard, verboseOutput int, termWidth int, use } } - for _, card := range scoredObject.Checks { - r := outputHumanStep(card, verboseOutput, termWidth) - if _, err := io.Copy(w, r); err != nil { - return nil, fmt.Errorf("failed to copy output: %w", err) + if scoredObject.FileLocation.Skip { + if verboseOutput >= 2 { + // Only print skipped files if verbosity is at least 2 + color.New(color.FgGreen).Fprintf( + w, + " [SKIPPED] %s#L%d\n", + scoredObject.FileLocation.Name, + scoredObject.FileLocation.Line, + ) + } + } else { + for _, card := range scoredObject.Checks { + // fmt.Printf("path=%s check=%s skipped=%v grade=%d\n", scoredObject.FileLocation.Name, card.Check.Name, card.Skipped, card.Grade) + r := outputHumanStep(card, verboseOutput, termWidth) + if _, err := io.Copy(w, r); err != nil { + return nil, fmt.Errorf("failed to copy output: %w", err) + } } } } diff --git a/score/score.go b/score/score.go index 390d7af..d744ac0 100644 --- a/score/score.go +++ b/score/score.go @@ -68,6 +68,27 @@ func (p *podSpeccer) FileLocation() ks.FileLocation { return ks.FileLocation{} } +// func shouldSkip(meta metav1.ObjectMeta) bool { +// // type annotations struct { +// // Skip bool `yaml:"kube-score/skip"` +// // } +// // var annotations +// // if err != nil { +// // log.Fatal("Failed to unmarshall config.yaml:", err) +// // } +// // config, err = setDefaults(config) +// skip := false +// // fmt.Printf("skip? %v\n\n", meta.Annotations) +// if skipAnnotation, ok := meta.Annotations["kube-score/skip"]; ok { +// // fmt.Printf("skip? %v\n\n", skipAnnotation) +// if err := yaml.Unmarshal([]byte(skipAnnotation), &skip); err != nil { +// // TODO: warning +// // fmt.Printf("err? %v\n\n", err) +// } +// } +// return skip +// } + // Score runs a pre-configured list of tests against the files defined in the configuration, and returns a scorecard. // Additional configuration and tuning parameters can be provided via the config. 
func Score(allObjects ks.AllTypes, allChecks *checks.Checks, cnf *config.RunConfiguration) (*scorecard.Scorecard, error) { @@ -86,6 +107,10 @@ func Score(allObjects ks.AllTypes, allChecks *checks.Checks, cnf *config.RunConf } for _, ingress := range allObjects.Ingresses() { + // if shouldSkip(ingress.GetObjectMeta()) { + // fmt.Printf("skipping %v\n\n", ingress.GetObjectMeta().Name) + // continue + // } o := newObject(ingress.GetTypeMeta(), ingress.GetObjectMeta()) for _, test := range allChecks.Ingresses() { fn, err := test.Fn(ingress) @@ -97,6 +122,10 @@ func Score(allObjects ks.AllTypes, allChecks *checks.Checks, cnf *config.RunConf } for _, meta := range allObjects.Metas() { + // if shouldSkip(meta.ObjectMeta) { + // fmt.Printf("skipping %v\n\n", meta.ObjectMeta.Name) + // continue + // } o := newObject(meta.TypeMeta, meta.ObjectMeta) for _, test := range allChecks.Metas() { fn, err := test.Fn(meta) @@ -108,6 +137,11 @@ func Score(allObjects ks.AllTypes, allChecks *checks.Checks, cnf *config.RunConf } for _, pod := range allObjects.Pods() { + // if shouldSkip(pod.Pod().ObjectMeta) { + // fmt.Printf("skipping %v\n\n", pod.Pod().ObjectMeta.Name) + // continue + // } + o := newObject(pod.Pod().TypeMeta, pod.Pod().ObjectMeta) for _, test := range allChecks.Pods() { @@ -126,6 +160,11 @@ func Score(allObjects ks.AllTypes, allChecks *checks.Checks, cnf *config.RunConf } for _, podspecer := range allObjects.PodSpeccers() { + // if shouldSkip(podspecer.GetObjectMeta()) { + // fmt.Printf("skipping %v\n\n", podspecer.GetObjectMeta().Name) + // continue + // } + o := newObject(podspecer.GetTypeMeta(), podspecer.GetObjectMeta()) for _, test := range allChecks.Pods() { score, _ := test.Fn(podspecer) @@ -137,6 +176,11 @@ func Score(allObjects ks.AllTypes, allChecks *checks.Checks, cnf *config.RunConf } for _, service := range allObjects.Services() { + // if shouldSkip(service.Service().ObjectMeta) { + // fmt.Printf("skipping %v\n\n", service.Service().ObjectMeta.Name) + // continue + // } + o := newObject(service.Service().TypeMeta, service.Service().ObjectMeta) for _, test := range allChecks.Services() { fn, err := test.Fn(service.Service()) @@ -148,6 +192,11 @@ func Score(allObjects ks.AllTypes, allChecks *checks.Checks, cnf *config.RunConf } for _, statefulset := range allObjects.StatefulSets() { + // if shouldSkip(statefulset.StatefulSet().ObjectMeta) { + // fmt.Printf("skipping %v\n\n", statefulset.StatefulSet().ObjectMeta.Name) + // continue + // } + o := newObject(statefulset.StatefulSet().TypeMeta, statefulset.StatefulSet().ObjectMeta) for _, test := range allChecks.StatefulSets() { fn, err := test.Fn(statefulset.StatefulSet()) @@ -159,6 +208,11 @@ func Score(allObjects ks.AllTypes, allChecks *checks.Checks, cnf *config.RunConf } for _, deployment := range allObjects.Deployments() { + // if shouldSkip(deployment.Deployment().ObjectMeta) { + // fmt.Printf("skipping %v\n\n", deployment.Deployment().ObjectMeta.Name) + // continue + // } + o := newObject(deployment.Deployment().TypeMeta, deployment.Deployment().ObjectMeta) for _, test := range allChecks.Deployments() { res, err := test.Fn(deployment.Deployment()) diff --git a/scorecard/enabled.go b/scorecard/enabled.go index 90c53ba..d2d9622 100644 --- a/scorecard/enabled.go +++ b/scorecard/enabled.go @@ -1,11 +1,31 @@ package scorecard import ( + "fmt" "strings" ks "github.com/zegl/kube-score/domain" + "gopkg.in/yaml.v3" ) +func (so *ScoredObject) isSkipped(allAnnotations []map[string]string) (bool, error) { + skip := false + for _, annotations 
:= range allAnnotations { + if skipAnnotation, ok := annotations["kube-score/skip"]; ok { + // fmt.Printf("skip: %s\n", skipAnnotation) + if err := yaml.Unmarshal([]byte(skipAnnotation), &skip); err != nil { + return false, fmt.Errorf("invalid skip annotation: %q", skipAnnotation) + } + } + if ignoreAnnotation, ok := annotations["kube-score/ignore"]; ok { + if strings.TrimSpace(ignoreAnnotation) == "*" { + skip = true + } + } + } + return skip, nil +} + func (so *ScoredObject) isEnabled(check ks.Check, annotations, childAnnotations map[string]string) bool { isIn := func(csv string, key string) bool { for _, v := range strings.Split(csv, ",") { diff --git a/scorecard/scorecard.go b/scorecard/scorecard.go index 6f59e09..05adbc3 100644 --- a/scorecard/scorecard.go +++ b/scorecard/scorecard.go @@ -2,6 +2,7 @@ package scorecard import ( "fmt" + "log" "github.com/zegl/kube-score/config" ks "github.com/zegl/kube-score/domain" @@ -95,8 +96,26 @@ func (so *ScoredObject) Add(ts TestScore, check ks.Check, locationer ks.FileLoca ts.Check = check so.FileLocation = locationer.FileLocation() - var skip bool - if annotations != nil { + skipAll := so.FileLocation.Skip + skip := skipAll + if !skip && annotations != nil { + var err error + skipAll, err = so.isSkipped(annotations) + if err != nil { + log.Printf( + "failed to parse %s#L%d", + so.FileLocation.Name, + so.FileLocation.Line, + ) + } + // if skipAll { + // log.Printf("skip all for %s#L%d %v\n", + // so.FileLocation.Name, + // so.FileLocation.Line, + // annotations, + // ) + // } + // skip = skipAll if len(annotations) == 1 && !so.isEnabled(check, annotations[0], nil) { skip = true } @@ -106,7 +125,14 @@ func (so *ScoredObject) Add(ts TestScore, check ks.Check, locationer ks.FileLoca } // This test is ignored (via annotations), don't save the score - if skip { + // ts.Skipped = skip || skipAll + if skipAll { + ts.Skipped = true + ts.Comments = []TestScoreComment{{Summary: fmt.Sprintf( + "Skipped because %s (line %d) is ignored", + so.FileLocation.Name, so.FileLocation.Line, + )}} + } else if skip { ts.Skipped = true ts.Comments = []TestScoreComment{{Summary: fmt.Sprintf("Skipped because %s is ignored", check.ID)}} } From 2b5afc50228a9530865de01c57cbbabfd6331947 Mon Sep 17 00:00:00 2001 From: romnn Date: Tue, 8 Oct 2024 12:19:47 +0200 Subject: [PATCH 2/6] cleanup --- parser/parse.go | 119 +++++++++------------------------------- renderer/human/human.go | 1 - scorecard/enabled.go | 16 +++--- scorecard/scorecard.go | 2 +- 4 files changed, 37 insertions(+), 101 deletions(-) diff --git a/parser/parse.go b/parser/parse.go index 810f7a6..7520047 100644 --- a/parser/parse.go +++ b/parser/parse.go @@ -252,29 +252,14 @@ func detectFileLocation(fileName string, fileOffset int, fileContents []byte) ks } } -func (p *Parser) shouldSkip(res metav1.ObjectMetaAccessor) bool { +func (p *Parser) shouldSkip(res metav1.ObjectMetaAccessor, errs parseErrors) bool { skip := false meta := res.GetObjectMeta() if skipAnnotation, ok := meta.GetAnnotations()["kube-score/skip"]; ok { if err := yaml.Unmarshal([]byte(skipAnnotation), &skip); err != nil { - if p.config.VerboseOutput > 1 { - log.Printf( - "Invalid skip annotation: %q for %s in namespace %s", - skipAnnotation, - meta.GetName(), - meta.GetNamespace(), - ) - } - + errs.AddIfErr(fmt.Errorf("invalid skip annotation %q, must be boolean", skipAnnotation)) } } - if p.config.VerboseOutput > 1 && skip { - log.Printf( - "Skipping %s in namespace %s", - meta.GetName(), - meta.GetNamespace(), - ) - } return skip } @@ -296,156 
+281,121 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case corev1.SchemeGroupVersion.WithKind("Pod"): var pod corev1.Pod errs.AddIfErr(p.decode(fileContents, &pod)) - fileLocation.Skip = p.shouldSkip(&pod) - // if !fileLocation.Skip { + fileLocation.Skip = p.shouldSkip(&pod, errs) p := internalpod.Pod{Obj: pod, Location: fileLocation} s.pods = append(s.pods, p) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: pod.TypeMeta, ObjectMeta: pod.ObjectMeta, FileLocationer: p}) - // } case batchv1.SchemeGroupVersion.WithKind("Job"): var job batchv1.Job errs.AddIfErr(p.decode(fileContents, &job)) - fileLocation.Skip = p.shouldSkip(&job) - // if !fileLocation.Skip { + fileLocation.Skip = p.shouldSkip(&job, errs) addPodSpeccer(internal.Batchv1Job{Job: job, Location: fileLocation}) - // } case batchv1beta1.SchemeGroupVersion.WithKind("CronJob"): var cronjob batchv1beta1.CronJob errs.AddIfErr(p.decode(fileContents, &cronjob)) - fileLocation.Skip = p.shouldSkip(&cronjob) - // if !p.shouldSkip(&cronjob) { + fileLocation.Skip = p.shouldSkip(&cronjob, errs) cjob := internalcronjob.CronJobV1beta1{Obj: cronjob, Location: fileLocation} addPodSpeccer(cjob) s.cronjobs = append(s.cronjobs, cjob) - // } case batchv1.SchemeGroupVersion.WithKind("CronJob"): var cronjob batchv1.CronJob errs.AddIfErr(p.decode(fileContents, &cronjob)) - fileLocation.Skip = p.shouldSkip(&cronjob) - // if !p.shouldSkip(&cronjob) { + fileLocation.Skip = p.shouldSkip(&cronjob, errs) cjob := internalcronjob.CronJobV1{Obj: cronjob, Location: fileLocation} addPodSpeccer(cjob) s.cronjobs = append(s.cronjobs, cjob) - // } case appsv1.SchemeGroupVersion.WithKind("Deployment"): var deployment appsv1.Deployment errs.AddIfErr(p.decode(fileContents, &deployment)) - fileLocation.Skip = p.shouldSkip(&deployment) - // if !p.shouldSkip(&deployment) { + fileLocation.Skip = p.shouldSkip(&deployment, errs) deploy := internal.Appsv1Deployment{Obj: deployment, Location: fileLocation} addPodSpeccer(deploy) // TODO: Support older versions of Deployment as well? 
s.deployments = append(s.deployments, deploy) - // } case appsv1beta1.SchemeGroupVersion.WithKind("Deployment"): var deployment appsv1beta1.Deployment - // if !p.shouldSkip(&deployment) { errs.AddIfErr(p.decode(fileContents, &deployment)) - fileLocation.Skip = p.shouldSkip(&deployment) + fileLocation.Skip = p.shouldSkip(&deployment, errs) addPodSpeccer(internal.Appsv1beta1Deployment{Deployment: deployment, Location: fileLocation}) - // } case appsv1beta2.SchemeGroupVersion.WithKind("Deployment"): var deployment appsv1beta2.Deployment - // if !p.shouldSkip(&deployment) { errs.AddIfErr(p.decode(fileContents, &deployment)) - fileLocation.Skip = p.shouldSkip(&deployment) + fileLocation.Skip = p.shouldSkip(&deployment, errs) addPodSpeccer(internal.Appsv1beta2Deployment{Deployment: deployment, Location: fileLocation}) - // } case extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment"): var deployment extensionsv1beta1.Deployment errs.AddIfErr(p.decode(fileContents, &deployment)) - fileLocation.Skip = p.shouldSkip(&deployment) - // if !p.shouldSkip(&deployment) { + fileLocation.Skip = p.shouldSkip(&deployment, errs) addPodSpeccer(internal.Extensionsv1beta1Deployment{Deployment: deployment, Location: fileLocation}) - // } case appsv1.SchemeGroupVersion.WithKind("StatefulSet"): var statefulSet appsv1.StatefulSet errs.AddIfErr(p.decode(fileContents, &statefulSet)) - fileLocation.Skip = p.shouldSkip(&statefulSet) - // if !p.shouldSkip(&statefulSet) { + fileLocation.Skip = p.shouldSkip(&statefulSet, errs) sset := internal.Appsv1StatefulSet{Obj: statefulSet, Location: fileLocation} addPodSpeccer(sset) // TODO: Support older versions of StatefulSet as well? s.statefulsets = append(s.statefulsets, sset) - // } case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSet"): var statefulSet appsv1beta1.StatefulSet errs.AddIfErr(p.decode(fileContents, &statefulSet)) - fileLocation.Skip = p.shouldSkip(&statefulSet) - // if !p.shouldSkip(&statefulSet) { + fileLocation.Skip = p.shouldSkip(&statefulSet, errs) addPodSpeccer(internal.Appsv1beta1StatefulSet{StatefulSet: statefulSet, Location: fileLocation}) - // } case appsv1beta2.SchemeGroupVersion.WithKind("StatefulSet"): var statefulSet appsv1beta2.StatefulSet errs.AddIfErr(p.decode(fileContents, &statefulSet)) - fileLocation.Skip = p.shouldSkip(&statefulSet) - // if !p.shouldSkip(&statefulSet) { + fileLocation.Skip = p.shouldSkip(&statefulSet, errs) addPodSpeccer(internal.Appsv1beta2StatefulSet{StatefulSet: statefulSet, Location: fileLocation}) - // } case appsv1.SchemeGroupVersion.WithKind("DaemonSet"): var daemonset appsv1.DaemonSet errs.AddIfErr(p.decode(fileContents, &daemonset)) - fileLocation.Skip = p.shouldSkip(&daemonset) - // if !p.shouldSkip(&daemonset) { + fileLocation.Skip = p.shouldSkip(&daemonset, errs) addPodSpeccer(internal.Appsv1DaemonSet{DaemonSet: daemonset, Location: fileLocation}) - // } case appsv1beta2.SchemeGroupVersion.WithKind("DaemonSet"): var daemonset appsv1beta2.DaemonSet errs.AddIfErr(p.decode(fileContents, &daemonset)) - // if !p.shouldSkip(&daemonset) { - fileLocation.Skip = p.shouldSkip(&daemonset) + fileLocation.Skip = p.shouldSkip(&daemonset, errs) addPodSpeccer(internal.Appsv1beta2DaemonSet{DaemonSet: daemonset, Location: fileLocation}) - // } case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSet"): var daemonset extensionsv1beta1.DaemonSet errs.AddIfErr(p.decode(fileContents, &daemonset)) - fileLocation.Skip = p.shouldSkip(&daemonset) - // if !p.shouldSkip(&daemonset) { + fileLocation.Skip = 
p.shouldSkip(&daemonset, errs) addPodSpeccer(internal.Extensionsv1beta1DaemonSet{DaemonSet: daemonset, Location: fileLocation}) - // } case networkingv1.SchemeGroupVersion.WithKind("NetworkPolicy"): var netpol networkingv1.NetworkPolicy errs.AddIfErr(p.decode(fileContents, &netpol)) - fileLocation.Skip = p.shouldSkip(&netpol) - // if !p.shouldSkip(&netpol) { + fileLocation.Skip = p.shouldSkip(&netpol, errs) np := internalnetpol.NetworkPolicy{Obj: netpol, Location: fileLocation} s.networkPolicies = append(s.networkPolicies, np) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: netpol.TypeMeta, ObjectMeta: netpol.ObjectMeta, FileLocationer: np}) - // } case corev1.SchemeGroupVersion.WithKind("Service"): var service corev1.Service errs.AddIfErr(p.decode(fileContents, &service)) - fileLocation.Skip = p.shouldSkip(&service) - // if !p.shouldSkip(&service) { + fileLocation.Skip = p.shouldSkip(&service, errs) serv := internalservice.Service{Obj: service, Location: fileLocation} s.services = append(s.services, serv) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: service.TypeMeta, ObjectMeta: service.ObjectMeta, FileLocationer: serv}) - // } case policyv1beta1.SchemeGroupVersion.WithKind("PodDisruptionBudget"): var disruptBudget policyv1beta1.PodDisruptionBudget errs.AddIfErr(p.decode(fileContents, &disruptBudget)) - fileLocation.Skip = p.shouldSkip(&disruptBudget) - // if !p.shouldSkip(&disruptBudget) { + fileLocation.Skip = p.shouldSkip(&disruptBudget, errs) dbug := internalpdb.PodDisruptionBudgetV1beta1{Obj: disruptBudget, Location: fileLocation} s.podDisruptionBudgets = append(s.podDisruptionBudgets, dbug) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: disruptBudget.TypeMeta, ObjectMeta: disruptBudget.ObjectMeta, FileLocationer: dbug}) - // } case policyv1.SchemeGroupVersion.WithKind("PodDisruptionBudget"): var disruptBudget policyv1.PodDisruptionBudget errs.AddIfErr(p.decode(fileContents, &disruptBudget)) - fileLocation.Skip = p.shouldSkip(&disruptBudget) - // if !p.shouldSkip(&disruptBudget) { + fileLocation.Skip = p.shouldSkip(&disruptBudget, errs) dbug := internalpdb.PodDisruptionBudgetV1{Obj: disruptBudget, Location: fileLocation} s.podDisruptionBudgets = append(s.podDisruptionBudgets, dbug) s.bothMetas = append(s.bothMetas, ks.BothMeta{ @@ -453,63 +403,51 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio ObjectMeta: disruptBudget.ObjectMeta, FileLocationer: dbug, }) - // } case extensionsv1beta1.SchemeGroupVersion.WithKind("Ingress"): var ingress extensionsv1beta1.Ingress errs.AddIfErr(p.decode(fileContents, &ingress)) - fileLocation.Skip = p.shouldSkip(&ingress) - // if !p.shouldSkip(&ingress) { + fileLocation.Skip = p.shouldSkip(&ingress, errs) ing := internal.ExtensionsIngressV1beta1{Ingress: ingress, Location: fileLocation} s.ingresses = append(s.ingresses, ing) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: ingress.TypeMeta, ObjectMeta: ingress.ObjectMeta, FileLocationer: ing}) - // } case networkingv1beta1.SchemeGroupVersion.WithKind("Ingress"): var ingress networkingv1beta1.Ingress errs.AddIfErr(p.decode(fileContents, &ingress)) - fileLocation.Skip = p.shouldSkip(&ingress) - // if !p.shouldSkip(&ingress) { + fileLocation.Skip = p.shouldSkip(&ingress, errs) ing := internal.IngressV1beta1{Ingress: ingress, Location: fileLocation} s.ingresses = append(s.ingresses, ing) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: ingress.TypeMeta, ObjectMeta: ingress.ObjectMeta, FileLocationer: ing}) - // } case 
networkingv1.SchemeGroupVersion.WithKind("Ingress"): var ingress networkingv1.Ingress errs.AddIfErr(p.decode(fileContents, &ingress)) - fileLocation.Skip = p.shouldSkip(&ingress) - // if !p.shouldSkip(&ingress) { + fileLocation.Skip = p.shouldSkip(&ingress, errs) ing := internal.IngressV1{Ingress: ingress, Location: fileLocation} s.ingresses = append(s.ingresses, ing) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: ingress.TypeMeta, ObjectMeta: ingress.ObjectMeta, FileLocationer: ing}) - // } case autoscalingv1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): var hpa autoscalingv1.HorizontalPodAutoscaler errs.AddIfErr(p.decode(fileContents, &hpa)) - fileLocation.Skip = p.shouldSkip(&hpa) - // if !p.shouldSkip(&hpa) { + fileLocation.Skip = p.shouldSkip(&hpa, errs) h := internal.HPAv1{HorizontalPodAutoscaler: hpa, Location: fileLocation} s.hpaTargeters = append(s.hpaTargeters, h) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: hpa.TypeMeta, ObjectMeta: hpa.ObjectMeta, FileLocationer: h}) - // } case autoscalingv2beta1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): var hpa autoscalingv2beta1.HorizontalPodAutoscaler errs.AddIfErr(p.decode(fileContents, &hpa)) - fileLocation.Skip = p.shouldSkip(&hpa) - // if !p.shouldSkip(&hpa) { + fileLocation.Skip = p.shouldSkip(&hpa, errs) h := internal.HPAv2beta1{HorizontalPodAutoscaler: hpa, Location: fileLocation} s.hpaTargeters = append(s.hpaTargeters, h) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: hpa.TypeMeta, ObjectMeta: hpa.ObjectMeta, FileLocationer: h}) - // } case autoscalingv2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): var hpa autoscalingv2beta2.HorizontalPodAutoscaler errs.AddIfErr(p.decode(fileContents, &hpa)) - fileLocation.Skip = p.shouldSkip(&hpa) - // if !p.shouldSkip(&hpa) { + fileLocation.Skip = p.shouldSkip(&hpa, errs) h := internal.HPAv2beta2{HorizontalPodAutoscaler: hpa, Location: fileLocation} s.hpaTargeters = append(s.hpaTargeters, h) s.bothMetas = append(s.bothMetas, ks.BothMeta{ @@ -517,17 +455,14 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio ObjectMeta: hpa.ObjectMeta, FileLocationer: h, }) - // } case autoscalingv2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): var hpa autoscalingv2.HorizontalPodAutoscaler errs.AddIfErr(p.decode(fileContents, &hpa)) - fileLocation.Skip = p.shouldSkip(&hpa) - // if !skip { + fileLocation.Skip = p.shouldSkip(&hpa, errs) h := internal.HPAv2{HorizontalPodAutoscaler: hpa, Location: fileLocation} s.hpaTargeters = append(s.hpaTargeters, h) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: hpa.TypeMeta, ObjectMeta: hpa.ObjectMeta, FileLocationer: h}) - // } default: if p.config.VerboseOutput > 1 { diff --git a/renderer/human/human.go b/renderer/human/human.go index 5f203b0..56158d8 100644 --- a/renderer/human/human.go +++ b/renderer/human/human.go @@ -76,7 +76,6 @@ func Human(scoreCard *scorecard.Scorecard, verboseOutput int, termWidth int, use } } else { for _, card := range scoredObject.Checks { - // fmt.Printf("path=%s check=%s skipped=%v grade=%d\n", scoredObject.FileLocation.Name, card.Check.Name, card.Skipped, card.Grade) r := outputHumanStep(card, verboseOutput, termWidth) if _, err := io.Copy(w, r); err != nil { return nil, fmt.Errorf("failed to copy output: %w", err) diff --git a/scorecard/enabled.go b/scorecard/enabled.go index d2d9622..648e1f9 100644 --- a/scorecard/enabled.go +++ b/scorecard/enabled.go @@ -12,16 +12,15 @@ func (so *ScoredObject) isSkipped(allAnnotations 
[]map[string]string) (bool, err skip := false for _, annotations := range allAnnotations { if skipAnnotation, ok := annotations["kube-score/skip"]; ok { - // fmt.Printf("skip: %s\n", skipAnnotation) if err := yaml.Unmarshal([]byte(skipAnnotation), &skip); err != nil { - return false, fmt.Errorf("invalid skip annotation: %q", skipAnnotation) - } - } - if ignoreAnnotation, ok := annotations["kube-score/ignore"]; ok { - if strings.TrimSpace(ignoreAnnotation) == "*" { - skip = true + return false, fmt.Errorf("invalid skip annotation %q, must be boolean", skipAnnotation) } } + // if ignoreAnnotation, ok := annotations["kube-score/ignore"]; ok { + // if strings.TrimSpace(ignoreAnnotation) == "*" { + // skip = true + // } + // } } return skip, nil } @@ -33,6 +32,9 @@ func (so *ScoredObject) isEnabled(check ks.Check, annotations, childAnnotations if v == key { return true } + if v == "*" { + return true + } if vals, ok := impliedIgnoreAnnotations[v]; ok { for i := range vals { if vals[i] == key { diff --git a/scorecard/scorecard.go b/scorecard/scorecard.go index 05adbc3..88f9187 100644 --- a/scorecard/scorecard.go +++ b/scorecard/scorecard.go @@ -129,7 +129,7 @@ func (so *ScoredObject) Add(ts TestScore, check ks.Check, locationer ks.FileLoca if skipAll { ts.Skipped = true ts.Comments = []TestScoreComment{{Summary: fmt.Sprintf( - "Skipped because %s (line %d) is ignored", + "Skipped because %s#L%d is ignored", so.FileLocation.Name, so.FileLocation.Line, )}} } else if skip { From 453af3f78c432b68febf00fd65e8f1ca4c493a68 Mon Sep 17 00:00:00 2001 From: romnn Date: Tue, 8 Oct 2024 12:27:38 +0200 Subject: [PATCH 3/6] cleanup --- score/score.go | 54 ------------------------------------------ scorecard/enabled.go | 20 +--------------- scorecard/scorecard.go | 24 +++---------------- 3 files changed, 4 insertions(+), 94 deletions(-) diff --git a/score/score.go b/score/score.go index d744ac0..390d7af 100644 --- a/score/score.go +++ b/score/score.go @@ -68,27 +68,6 @@ func (p *podSpeccer) FileLocation() ks.FileLocation { return ks.FileLocation{} } -// func shouldSkip(meta metav1.ObjectMeta) bool { -// // type annotations struct { -// // Skip bool `yaml:"kube-score/skip"` -// // } -// // var annotations -// // if err != nil { -// // log.Fatal("Failed to unmarshall config.yaml:", err) -// // } -// // config, err = setDefaults(config) -// skip := false -// // fmt.Printf("skip? %v\n\n", meta.Annotations) -// if skipAnnotation, ok := meta.Annotations["kube-score/skip"]; ok { -// // fmt.Printf("skip? %v\n\n", skipAnnotation) -// if err := yaml.Unmarshal([]byte(skipAnnotation), &skip); err != nil { -// // TODO: warning -// // fmt.Printf("err? %v\n\n", err) -// } -// } -// return skip -// } - // Score runs a pre-configured list of tests against the files defined in the configuration, and returns a scorecard. // Additional configuration and tuning parameters can be provided via the config. 
func Score(allObjects ks.AllTypes, allChecks *checks.Checks, cnf *config.RunConfiguration) (*scorecard.Scorecard, error) { @@ -107,10 +86,6 @@ func Score(allObjects ks.AllTypes, allChecks *checks.Checks, cnf *config.RunConf } for _, ingress := range allObjects.Ingresses() { - // if shouldSkip(ingress.GetObjectMeta()) { - // fmt.Printf("skipping %v\n\n", ingress.GetObjectMeta().Name) - // continue - // } o := newObject(ingress.GetTypeMeta(), ingress.GetObjectMeta()) for _, test := range allChecks.Ingresses() { fn, err := test.Fn(ingress) @@ -122,10 +97,6 @@ func Score(allObjects ks.AllTypes, allChecks *checks.Checks, cnf *config.RunConf } for _, meta := range allObjects.Metas() { - // if shouldSkip(meta.ObjectMeta) { - // fmt.Printf("skipping %v\n\n", meta.ObjectMeta.Name) - // continue - // } o := newObject(meta.TypeMeta, meta.ObjectMeta) for _, test := range allChecks.Metas() { fn, err := test.Fn(meta) @@ -137,11 +108,6 @@ func Score(allObjects ks.AllTypes, allChecks *checks.Checks, cnf *config.RunConf } for _, pod := range allObjects.Pods() { - // if shouldSkip(pod.Pod().ObjectMeta) { - // fmt.Printf("skipping %v\n\n", pod.Pod().ObjectMeta.Name) - // continue - // } - o := newObject(pod.Pod().TypeMeta, pod.Pod().ObjectMeta) for _, test := range allChecks.Pods() { @@ -160,11 +126,6 @@ func Score(allObjects ks.AllTypes, allChecks *checks.Checks, cnf *config.RunConf } for _, podspecer := range allObjects.PodSpeccers() { - // if shouldSkip(podspecer.GetObjectMeta()) { - // fmt.Printf("skipping %v\n\n", podspecer.GetObjectMeta().Name) - // continue - // } - o := newObject(podspecer.GetTypeMeta(), podspecer.GetObjectMeta()) for _, test := range allChecks.Pods() { score, _ := test.Fn(podspecer) @@ -176,11 +137,6 @@ func Score(allObjects ks.AllTypes, allChecks *checks.Checks, cnf *config.RunConf } for _, service := range allObjects.Services() { - // if shouldSkip(service.Service().ObjectMeta) { - // fmt.Printf("skipping %v\n\n", service.Service().ObjectMeta.Name) - // continue - // } - o := newObject(service.Service().TypeMeta, service.Service().ObjectMeta) for _, test := range allChecks.Services() { fn, err := test.Fn(service.Service()) @@ -192,11 +148,6 @@ func Score(allObjects ks.AllTypes, allChecks *checks.Checks, cnf *config.RunConf } for _, statefulset := range allObjects.StatefulSets() { - // if shouldSkip(statefulset.StatefulSet().ObjectMeta) { - // fmt.Printf("skipping %v\n\n", statefulset.StatefulSet().ObjectMeta.Name) - // continue - // } - o := newObject(statefulset.StatefulSet().TypeMeta, statefulset.StatefulSet().ObjectMeta) for _, test := range allChecks.StatefulSets() { fn, err := test.Fn(statefulset.StatefulSet()) @@ -208,11 +159,6 @@ func Score(allObjects ks.AllTypes, allChecks *checks.Checks, cnf *config.RunConf } for _, deployment := range allObjects.Deployments() { - // if shouldSkip(deployment.Deployment().ObjectMeta) { - // fmt.Printf("skipping %v\n\n", deployment.Deployment().ObjectMeta.Name) - // continue - // } - o := newObject(deployment.Deployment().TypeMeta, deployment.Deployment().ObjectMeta) for _, test := range allChecks.Deployments() { res, err := test.Fn(deployment.Deployment()) diff --git a/scorecard/enabled.go b/scorecard/enabled.go index 648e1f9..e5ceeeb 100644 --- a/scorecard/enabled.go +++ b/scorecard/enabled.go @@ -1,30 +1,11 @@ package scorecard import ( - "fmt" "strings" ks "github.com/zegl/kube-score/domain" - "gopkg.in/yaml.v3" ) -func (so *ScoredObject) isSkipped(allAnnotations []map[string]string) (bool, error) { - skip := false - for _, annotations 
:= range allAnnotations { - if skipAnnotation, ok := annotations["kube-score/skip"]; ok { - if err := yaml.Unmarshal([]byte(skipAnnotation), &skip); err != nil { - return false, fmt.Errorf("invalid skip annotation %q, must be boolean", skipAnnotation) - } - } - // if ignoreAnnotation, ok := annotations["kube-score/ignore"]; ok { - // if strings.TrimSpace(ignoreAnnotation) == "*" { - // skip = true - // } - // } - } - return skip, nil -} - func (so *ScoredObject) isEnabled(check ks.Check, annotations, childAnnotations map[string]string) bool { isIn := func(csv string, key string) bool { for _, v := range strings.Split(csv, ",") { @@ -33,6 +14,7 @@ func (so *ScoredObject) isEnabled(check ks.Check, annotations, childAnnotations return true } if v == "*" { + // "*" wildcard matches all checks return true } if vals, ok := impliedIgnoreAnnotations[v]; ok { diff --git a/scorecard/scorecard.go b/scorecard/scorecard.go index 88f9187..d4895b9 100644 --- a/scorecard/scorecard.go +++ b/scorecard/scorecard.go @@ -2,7 +2,6 @@ package scorecard import ( "fmt" - "log" "github.com/zegl/kube-score/config" ks "github.com/zegl/kube-score/domain" @@ -96,26 +95,10 @@ func (so *ScoredObject) Add(ts TestScore, check ks.Check, locationer ks.FileLoca ts.Check = check so.FileLocation = locationer.FileLocation() + skip := false skipAll := so.FileLocation.Skip - skip := skipAll - if !skip && annotations != nil { - var err error - skipAll, err = so.isSkipped(annotations) - if err != nil { - log.Printf( - "failed to parse %s#L%d", - so.FileLocation.Name, - so.FileLocation.Line, - ) - } - // if skipAll { - // log.Printf("skip all for %s#L%d %v\n", - // so.FileLocation.Name, - // so.FileLocation.Line, - // annotations, - // ) - // } - // skip = skipAll + + if !skipAll && annotations != nil { if len(annotations) == 1 && !so.isEnabled(check, annotations[0], nil) { skip = true } @@ -125,7 +108,6 @@ func (so *ScoredObject) Add(ts TestScore, check ks.Check, locationer ks.FileLoca } // This test is ignored (via annotations), don't save the score - // ts.Skipped = skip || skipAll if skipAll { ts.Skipped = true ts.Comments = []TestScoreComment{{Summary: fmt.Sprintf( From 8781976d70a1cf1c8a1bb5136fd0103e20a24fb8 Mon Sep 17 00:00:00 2001 From: romnn Date: Tue, 8 Oct 2024 19:28:08 +0200 Subject: [PATCH 4/6] add tests for skip --- parser/parse.go | 77 ++++++++----- parser/parse_test.go | 117 ++++++++++++++++++++ score/apps_test.go | 39 +++++++ score/score_test.go | 18 +++ score/testdata/statefulset-nested-skip.yaml | 110 ++++++++++++++++++ score/testdata/statefulset-skip.yaml | 112 +++++++++++++++++++ scorecard/scorecard.go | 7 ++ 7 files changed, 450 insertions(+), 30 deletions(-) create mode 100644 score/testdata/statefulset-nested-skip.yaml create mode 100644 score/testdata/statefulset-skip.yaml diff --git a/parser/parse.go b/parser/parse.go index 7520047..51eb633 100644 --- a/parser/parse.go +++ b/parser/parse.go @@ -252,17 +252,28 @@ func detectFileLocation(fileName string, fileOffset int, fileContents []byte) ks } } -func (p *Parser) shouldSkip(res metav1.ObjectMetaAccessor, errs parseErrors) bool { +const ( + SkippedResourceAnnotation = "kube-score/skip" +) + +func IsSkipped(errs []error, annotations ...map[string]string) bool { skip := false - meta := res.GetObjectMeta() - if skipAnnotation, ok := meta.GetAnnotations()["kube-score/skip"]; ok { - if err := yaml.Unmarshal([]byte(skipAnnotation), &skip); err != nil { - errs.AddIfErr(fmt.Errorf("invalid skip annotation %q, must be boolean", skipAnnotation)) + for _, annotations 
:= range annotations { + // fmt.Printf("annotation: %v => %v\n", annotations, annotations[skippedResourceAnnotation]) + if skipAnnotation, ok := annotations[SkippedResourceAnnotation]; ok { + if err := yaml.Unmarshal([]byte(skipAnnotation), &skip); err != nil { + errs = append(errs, fmt.Errorf("invalid skip annotation %q, must be boolean", skipAnnotation)) + } } } return skip } +func (p *Parser) isSkipped(res metav1.ObjectMetaAccessor, errs parseErrors) bool { + annotations := res.GetObjectMeta().GetAnnotations() + return IsSkipped(errs, annotations) +} + func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersionKind, fileName string, fileOffset int, fileContents []byte) error { addPodSpeccer := func(ps ks.PodSpecer) { s.podspecers = append(s.podspecers, ps) @@ -281,7 +292,7 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case corev1.SchemeGroupVersion.WithKind("Pod"): var pod corev1.Pod errs.AddIfErr(p.decode(fileContents, &pod)) - fileLocation.Skip = p.shouldSkip(&pod, errs) + fileLocation.Skip = p.isSkipped(&pod, errs) p := internalpod.Pod{Obj: pod, Location: fileLocation} s.pods = append(s.pods, p) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: pod.TypeMeta, ObjectMeta: pod.ObjectMeta, FileLocationer: p}) @@ -289,13 +300,13 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case batchv1.SchemeGroupVersion.WithKind("Job"): var job batchv1.Job errs.AddIfErr(p.decode(fileContents, &job)) - fileLocation.Skip = p.shouldSkip(&job, errs) + fileLocation.Skip = p.isSkipped(&job, errs) addPodSpeccer(internal.Batchv1Job{Job: job, Location: fileLocation}) case batchv1beta1.SchemeGroupVersion.WithKind("CronJob"): var cronjob batchv1beta1.CronJob errs.AddIfErr(p.decode(fileContents, &cronjob)) - fileLocation.Skip = p.shouldSkip(&cronjob, errs) + fileLocation.Skip = p.isSkipped(&cronjob, errs) cjob := internalcronjob.CronJobV1beta1{Obj: cronjob, Location: fileLocation} addPodSpeccer(cjob) s.cronjobs = append(s.cronjobs, cjob) @@ -303,7 +314,7 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case batchv1.SchemeGroupVersion.WithKind("CronJob"): var cronjob batchv1.CronJob errs.AddIfErr(p.decode(fileContents, &cronjob)) - fileLocation.Skip = p.shouldSkip(&cronjob, errs) + fileLocation.Skip = p.isSkipped(&cronjob, errs) cjob := internalcronjob.CronJobV1{Obj: cronjob, Location: fileLocation} addPodSpeccer(cjob) s.cronjobs = append(s.cronjobs, cjob) @@ -311,7 +322,7 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case appsv1.SchemeGroupVersion.WithKind("Deployment"): var deployment appsv1.Deployment errs.AddIfErr(p.decode(fileContents, &deployment)) - fileLocation.Skip = p.shouldSkip(&deployment, errs) + fileLocation.Skip = p.isSkipped(&deployment, errs) deploy := internal.Appsv1Deployment{Obj: deployment, Location: fileLocation} addPodSpeccer(deploy) @@ -320,23 +331,25 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case appsv1beta1.SchemeGroupVersion.WithKind("Deployment"): var deployment appsv1beta1.Deployment errs.AddIfErr(p.decode(fileContents, &deployment)) - fileLocation.Skip = p.shouldSkip(&deployment, errs) + fileLocation.Skip = p.isSkipped(&deployment, errs) addPodSpeccer(internal.Appsv1beta1Deployment{Deployment: deployment, Location: fileLocation}) case appsv1beta2.SchemeGroupVersion.WithKind("Deployment"): var deployment appsv1beta2.Deployment errs.AddIfErr(p.decode(fileContents, 
&deployment)) - fileLocation.Skip = p.shouldSkip(&deployment, errs) + fileLocation.Skip = p.isSkipped(&deployment, errs) addPodSpeccer(internal.Appsv1beta2Deployment{Deployment: deployment, Location: fileLocation}) case extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment"): var deployment extensionsv1beta1.Deployment errs.AddIfErr(p.decode(fileContents, &deployment)) - fileLocation.Skip = p.shouldSkip(&deployment, errs) + fileLocation.Skip = p.isSkipped(&deployment, errs) addPodSpeccer(internal.Extensionsv1beta1Deployment{Deployment: deployment, Location: fileLocation}) case appsv1.SchemeGroupVersion.WithKind("StatefulSet"): var statefulSet appsv1.StatefulSet errs.AddIfErr(p.decode(fileContents, &statefulSet)) - fileLocation.Skip = p.shouldSkip(&statefulSet, errs) + fileLocation.Skip = p.isSkipped(&statefulSet, errs) + fmt.Printf("sfs skip=%v\n", fileLocation.Skip) + sset := internal.Appsv1StatefulSet{Obj: statefulSet, Location: fileLocation} addPodSpeccer(sset) @@ -345,34 +358,38 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSet"): var statefulSet appsv1beta1.StatefulSet errs.AddIfErr(p.decode(fileContents, &statefulSet)) - fileLocation.Skip = p.shouldSkip(&statefulSet, errs) + fileLocation.Skip = p.isSkipped(&statefulSet, errs) + fmt.Printf("sfs skip=%v\n", fileLocation.Skip) + addPodSpeccer(internal.Appsv1beta1StatefulSet{StatefulSet: statefulSet, Location: fileLocation}) case appsv1beta2.SchemeGroupVersion.WithKind("StatefulSet"): var statefulSet appsv1beta2.StatefulSet errs.AddIfErr(p.decode(fileContents, &statefulSet)) - fileLocation.Skip = p.shouldSkip(&statefulSet, errs) + fileLocation.Skip = p.isSkipped(&statefulSet, errs) + fmt.Printf("sfs skip=%v\n", fileLocation.Skip) + addPodSpeccer(internal.Appsv1beta2StatefulSet{StatefulSet: statefulSet, Location: fileLocation}) case appsv1.SchemeGroupVersion.WithKind("DaemonSet"): var daemonset appsv1.DaemonSet errs.AddIfErr(p.decode(fileContents, &daemonset)) - fileLocation.Skip = p.shouldSkip(&daemonset, errs) + fileLocation.Skip = p.isSkipped(&daemonset, errs) addPodSpeccer(internal.Appsv1DaemonSet{DaemonSet: daemonset, Location: fileLocation}) case appsv1beta2.SchemeGroupVersion.WithKind("DaemonSet"): var daemonset appsv1beta2.DaemonSet errs.AddIfErr(p.decode(fileContents, &daemonset)) - fileLocation.Skip = p.shouldSkip(&daemonset, errs) + fileLocation.Skip = p.isSkipped(&daemonset, errs) addPodSpeccer(internal.Appsv1beta2DaemonSet{DaemonSet: daemonset, Location: fileLocation}) case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSet"): var daemonset extensionsv1beta1.DaemonSet errs.AddIfErr(p.decode(fileContents, &daemonset)) - fileLocation.Skip = p.shouldSkip(&daemonset, errs) + fileLocation.Skip = p.isSkipped(&daemonset, errs) addPodSpeccer(internal.Extensionsv1beta1DaemonSet{DaemonSet: daemonset, Location: fileLocation}) case networkingv1.SchemeGroupVersion.WithKind("NetworkPolicy"): var netpol networkingv1.NetworkPolicy errs.AddIfErr(p.decode(fileContents, &netpol)) - fileLocation.Skip = p.shouldSkip(&netpol, errs) + fileLocation.Skip = p.isSkipped(&netpol, errs) np := internalnetpol.NetworkPolicy{Obj: netpol, Location: fileLocation} s.networkPolicies = append(s.networkPolicies, np) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: netpol.TypeMeta, ObjectMeta: netpol.ObjectMeta, FileLocationer: np}) @@ -380,7 +397,7 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case 
corev1.SchemeGroupVersion.WithKind("Service"): var service corev1.Service errs.AddIfErr(p.decode(fileContents, &service)) - fileLocation.Skip = p.shouldSkip(&service, errs) + fileLocation.Skip = p.isSkipped(&service, errs) serv := internalservice.Service{Obj: service, Location: fileLocation} s.services = append(s.services, serv) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: service.TypeMeta, ObjectMeta: service.ObjectMeta, FileLocationer: serv}) @@ -388,14 +405,14 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case policyv1beta1.SchemeGroupVersion.WithKind("PodDisruptionBudget"): var disruptBudget policyv1beta1.PodDisruptionBudget errs.AddIfErr(p.decode(fileContents, &disruptBudget)) - fileLocation.Skip = p.shouldSkip(&disruptBudget, errs) + fileLocation.Skip = p.isSkipped(&disruptBudget, errs) dbug := internalpdb.PodDisruptionBudgetV1beta1{Obj: disruptBudget, Location: fileLocation} s.podDisruptionBudgets = append(s.podDisruptionBudgets, dbug) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: disruptBudget.TypeMeta, ObjectMeta: disruptBudget.ObjectMeta, FileLocationer: dbug}) case policyv1.SchemeGroupVersion.WithKind("PodDisruptionBudget"): var disruptBudget policyv1.PodDisruptionBudget errs.AddIfErr(p.decode(fileContents, &disruptBudget)) - fileLocation.Skip = p.shouldSkip(&disruptBudget, errs) + fileLocation.Skip = p.isSkipped(&disruptBudget, errs) dbug := internalpdb.PodDisruptionBudgetV1{Obj: disruptBudget, Location: fileLocation} s.podDisruptionBudgets = append(s.podDisruptionBudgets, dbug) s.bothMetas = append(s.bothMetas, ks.BothMeta{ @@ -407,7 +424,7 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case extensionsv1beta1.SchemeGroupVersion.WithKind("Ingress"): var ingress extensionsv1beta1.Ingress errs.AddIfErr(p.decode(fileContents, &ingress)) - fileLocation.Skip = p.shouldSkip(&ingress, errs) + fileLocation.Skip = p.isSkipped(&ingress, errs) ing := internal.ExtensionsIngressV1beta1{Ingress: ingress, Location: fileLocation} s.ingresses = append(s.ingresses, ing) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: ingress.TypeMeta, ObjectMeta: ingress.ObjectMeta, FileLocationer: ing}) @@ -415,7 +432,7 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case networkingv1beta1.SchemeGroupVersion.WithKind("Ingress"): var ingress networkingv1beta1.Ingress errs.AddIfErr(p.decode(fileContents, &ingress)) - fileLocation.Skip = p.shouldSkip(&ingress, errs) + fileLocation.Skip = p.isSkipped(&ingress, errs) ing := internal.IngressV1beta1{Ingress: ingress, Location: fileLocation} s.ingresses = append(s.ingresses, ing) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: ingress.TypeMeta, ObjectMeta: ingress.ObjectMeta, FileLocationer: ing}) @@ -423,7 +440,7 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case networkingv1.SchemeGroupVersion.WithKind("Ingress"): var ingress networkingv1.Ingress errs.AddIfErr(p.decode(fileContents, &ingress)) - fileLocation.Skip = p.shouldSkip(&ingress, errs) + fileLocation.Skip = p.isSkipped(&ingress, errs) ing := internal.IngressV1{Ingress: ingress, Location: fileLocation} s.ingresses = append(s.ingresses, ing) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: ingress.TypeMeta, ObjectMeta: ingress.ObjectMeta, FileLocationer: ing}) @@ -431,7 +448,7 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case 
autoscalingv1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): var hpa autoscalingv1.HorizontalPodAutoscaler errs.AddIfErr(p.decode(fileContents, &hpa)) - fileLocation.Skip = p.shouldSkip(&hpa, errs) + fileLocation.Skip = p.isSkipped(&hpa, errs) h := internal.HPAv1{HorizontalPodAutoscaler: hpa, Location: fileLocation} s.hpaTargeters = append(s.hpaTargeters, h) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: hpa.TypeMeta, ObjectMeta: hpa.ObjectMeta, FileLocationer: h}) @@ -439,7 +456,7 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case autoscalingv2beta1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): var hpa autoscalingv2beta1.HorizontalPodAutoscaler errs.AddIfErr(p.decode(fileContents, &hpa)) - fileLocation.Skip = p.shouldSkip(&hpa, errs) + fileLocation.Skip = p.isSkipped(&hpa, errs) h := internal.HPAv2beta1{HorizontalPodAutoscaler: hpa, Location: fileLocation} s.hpaTargeters = append(s.hpaTargeters, h) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: hpa.TypeMeta, ObjectMeta: hpa.ObjectMeta, FileLocationer: h}) @@ -447,7 +464,7 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case autoscalingv2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): var hpa autoscalingv2beta2.HorizontalPodAutoscaler errs.AddIfErr(p.decode(fileContents, &hpa)) - fileLocation.Skip = p.shouldSkip(&hpa, errs) + fileLocation.Skip = p.isSkipped(&hpa, errs) h := internal.HPAv2beta2{HorizontalPodAutoscaler: hpa, Location: fileLocation} s.hpaTargeters = append(s.hpaTargeters, h) s.bothMetas = append(s.bothMetas, ks.BothMeta{ @@ -459,7 +476,7 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio case autoscalingv2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): var hpa autoscalingv2.HorizontalPodAutoscaler errs.AddIfErr(p.decode(fileContents, &hpa)) - fileLocation.Skip = p.shouldSkip(&hpa, errs) + fileLocation.Skip = p.isSkipped(&hpa, errs) h := internal.HPAv2{HorizontalPodAutoscaler: hpa, Location: fileLocation} s.hpaTargeters = append(s.hpaTargeters, h) s.bothMetas = append(s.bothMetas, ks.BothMeta{TypeMeta: hpa.TypeMeta, ObjectMeta: hpa.ObjectMeta, FileLocationer: h}) diff --git a/parser/parse_test.go b/parser/parse_test.go index d7c7b8e..9303e21 100644 --- a/parser/parse_test.go +++ b/parser/parse_test.go @@ -2,10 +2,13 @@ package parser import ( "fmt" + "io" "os" + "strings" "testing" ks "github.com/zegl/kube-score/domain" + // "github.com/zegl/kube-score/parser" "github.com/stretchr/testify/assert" ) @@ -73,3 +76,117 @@ spec: assert.Equal(t, "someName", fl.Name) assert.Equal(t, 123, fl.Line) } + +type namedReader struct { + io.Reader + name string +} + +func (n namedReader) Name() string { + return n.name +} + +func parse(t *testing.T, doc, name string) ks.AllTypes { + p, err := New(nil) + assert.NoError(t, err) + parsedFiles, err := p.ParseFiles([]ks.NamedReader{ + namedReader{Reader: strings.NewReader(doc), name: name}, + }) + assert.NoError(t, err) + return parsedFiles +} + +func TestSkipNo(t *testing.T) { + t.Parallel() + doc := `kind: Deployment +apiVersion: apps/v1 +metadata: + name: foo + annotations: + kube-score/skip: "No" +spec: + template: + metadata: + labels: + foo: bar` + + location := parse(t, doc, "skip-yes.yaml").Deployments()[0].FileLocation() + assert.Equal(t, "skip-yes.yaml", location.Name) + assert.Equal(t, false, location.Skip) +} + +func TestSkipYes(t *testing.T) { + t.Parallel() + doc := `kind: Deployment +apiVersion: apps/v1 +metadata: + 
name: foo + annotations: + kube-score/skip: " yes " +spec: + template: + metadata: + labels: + foo: bar` + + location := parse(t, doc, "skip-yes.yaml").Deployments()[0].FileLocation() + assert.Equal(t, "skip-yes.yaml", location.Name) + assert.Equal(t, true, location.Skip) +} + +func TestSkipTrueUppercase(t *testing.T) { + t.Parallel() + doc := `kind: Deployment +apiVersion: apps/v1 +metadata: + name: foo + annotations: + "kube-score/skip": "True" +spec: + template: + metadata: + labels: + foo: bar` + + location := parse(t, doc, "skip-true-uppercase.yaml").Deployments()[0].FileLocation() + assert.Equal(t, "skip-true-uppercase.yaml", location.Name) + assert.Equal(t, true, location.Skip) +} + +func TestSkipTrue(t *testing.T) { + t.Parallel() + doc := `kind: Deployment +apiVersion: apps/v1 +metadata: + name: foo + annotations: + "kube-score/skip": "true" +spec: + template: + metadata: + labels: + foo: bar` + + location := parse(t, doc, "skip-true.yaml").Deployments()[0].FileLocation() + assert.Equal(t, "skip-true.yaml", location.Name) + assert.Equal(t, true, location.Skip) +} + +func TestSkipFalse(t *testing.T) { + t.Parallel() + doc := `kind: Deployment +apiVersion: apps/v1 +metadata: + name: foo + annotations: + "kube-score/skip": "false" +spec: + template: + metadata: + labels: + foo: bar` + + location := parse(t, doc, "skip-false.yaml").Deployments()[0].FileLocation() + assert.Equal(t, "skip-false.yaml", location.Name) + assert.Equal(t, false, location.Skip) +} diff --git a/score/apps_test.go b/score/apps_test.go index f62a3c8..799efd6 100644 --- a/score/apps_test.go +++ b/score/apps_test.go @@ -123,3 +123,42 @@ func TestStatefulsetTemplateIgnoresNotIgnoredWhenFlagDisabled(t *testing.T) { }, "Container Image Tag") assert.False(t, skipped) } + +func TestStatefulsetTemplateNestedSkip(t *testing.T) { + t.Parallel() + sc, err := testScore( + []ks.NamedReader{testFile("statefulset-nested-skip.yaml")}, + nil, + &config.RunConfiguration{ + UseIgnoreChecksAnnotation: true, + UseOptionalChecksAnnotation: true, + }, + ) + assert.NoError(t, err) + + for _, objectScore := range sc { + for _, s := range objectScore.Checks { + t.Logf("id=%s type=%v skipped=%v\n", s.Check.ID, s.Check.TargetType, s.Skipped) + switch s.Check.TargetType { + case "StatefulSet", "all": + assert.False(t, s.Skipped) + default: + assert.True(t, s.Skipped) + } + } + } +} + +func TestStatefulsetTemplateSkip(t *testing.T) { + skipped := fileWasSkipped( + t, + []ks.NamedReader{testFile("statefulset-skip.yaml")}, + nil, + &config.RunConfiguration{ + UseIgnoreChecksAnnotation: true, + UseOptionalChecksAnnotation: true, + }, + "testdata/statefulset-skip.yaml", + ) + assert.True(t, skipped) +} diff --git a/score/score_test.go b/score/score_test.go index d32d644..20c009f 100644 --- a/score/score_test.go +++ b/score/score_test.go @@ -40,6 +40,24 @@ func testExpectedScoreWithConfig(t *testing.T, files []ks.NamedReader, checksCon return nil } +func fileWasSkipped(t *testing.T, files []ks.NamedReader, checksConfig *checks.Config, runConfig *config.RunConfiguration, filename string) bool { + sc, err := testScore(files, checksConfig, runConfig) + assert.NoError(t, err) + for _, objectScore := range sc { + t.Logf("path=%s skip=%v\n", objectScore.FileLocation.Name, objectScore.FileLocation.Skip) + if objectScore.FileLocation.Name == filename { + if objectScore.FileLocation.Skip { + // Make sure all checks are skipped too + for _, s := range objectScore.Checks { + assert.Equal(t, s.Skipped, true) + } + } + return objectScore.FileLocation.Skip + } 
+ } + return false +} + func wasSkipped(t *testing.T, files []ks.NamedReader, checksConfig *checks.Config, runConfig *config.RunConfiguration, testcase string) bool { sc, err := testScore(files, checksConfig, runConfig) assert.NoError(t, err) diff --git a/score/testdata/statefulset-nested-skip.yaml b/score/testdata/statefulset-nested-skip.yaml new file mode 100644 index 0000000..00993cc --- /dev/null +++ b/score/testdata/statefulset-nested-skip.yaml @@ -0,0 +1,110 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: trivy + namespace: trivy-staging +spec: + podManagementPolicy: Parallel + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/instance: trivy + app.kubernetes.io/name: trivy + serviceName: trivy + template: + metadata: + annotations: + kube-score/skip: "true" + kube-score/ignore: container-image-tag,pod-probes + labels: + app.kubernetes.io/instance: trivy + app.kubernetes.io/name: trivy + spec: + automountServiceAccountToken: false + containers: + - args: + - server + envFrom: + - configMapRef: + name: trivy + - secretRef: + name: trivy + image: aquasec/trivy:latest + imagePullPolicy: Always + livenessProbe: + failureThreshold: 10 + httpGet: + path: /healthz + port: trivy-http + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + name: main + ports: + - containerPort: 4954 + name: trivy-http + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: trivy-http + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: "1" + ephemeral-storage: 128Mi + memory: 1Gi + requests: + cpu: 200m + memory: 512Mi + securityContext: + privileged: false + readOnlyRootFilesystem: true + runAsGroup: 65534 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /tmp + name: tmp-data + - mountPath: /home/scanner/.cache + name: data + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: + fsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + serviceAccount: trivy + serviceAccountName: trivy + terminationGracePeriodSeconds: 30 + volumes: + - emptyDir: {} + name: tmp-data + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + creationTimestamp: null + name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + volumeMode: Filesystem + status: + phase: Pending diff --git a/score/testdata/statefulset-skip.yaml b/score/testdata/statefulset-skip.yaml new file mode 100644 index 0000000..0a1f3ba --- /dev/null +++ b/score/testdata/statefulset-skip.yaml @@ -0,0 +1,112 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: trivy + namespace: trivy-staging + annotations: + kube-score/skip: "true" +spec: + podManagementPolicy: Parallel + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/instance: trivy + app.kubernetes.io/name: trivy + serviceName: trivy + template: + metadata: + annotations: + kube-score/skip: "true" + kube-score/ignore: container-image-tag,pod-probes + labels: + app.kubernetes.io/instance: trivy + app.kubernetes.io/name: trivy + spec: + automountServiceAccountToken: false + containers: + - args: + - server + envFrom: + - configMapRef: + name: trivy + - secretRef: + name: trivy + image: aquasec/trivy:latest + 
imagePullPolicy: Always + livenessProbe: + failureThreshold: 10 + httpGet: + path: /healthz + port: trivy-http + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + name: main + ports: + - containerPort: 4954 + name: trivy-http + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: trivy-http + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: "1" + ephemeral-storage: 128Mi + memory: 1Gi + requests: + cpu: 200m + memory: 512Mi + securityContext: + privileged: false + readOnlyRootFilesystem: true + runAsGroup: 65534 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /tmp + name: tmp-data + - mountPath: /home/scanner/.cache + name: data + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: + fsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + serviceAccount: trivy + serviceAccountName: trivy + terminationGracePeriodSeconds: 30 + volumes: + - emptyDir: {} + name: tmp-data + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + creationTimestamp: null + name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + volumeMode: Filesystem + status: + phase: Pending diff --git a/scorecard/scorecard.go b/scorecard/scorecard.go index d4895b9..7b4fe7d 100644 --- a/scorecard/scorecard.go +++ b/scorecard/scorecard.go @@ -5,6 +5,7 @@ import ( "github.com/zegl/kube-score/config" ks "github.com/zegl/kube-score/domain" + "github.com/zegl/kube-score/parser" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -99,6 +100,7 @@ func (so *ScoredObject) Add(ts TestScore, check ks.Check, locationer ks.FileLoca skipAll := so.FileLocation.Skip if !skipAll && annotations != nil { + skipAll = skipAll || parser.IsSkipped([]error{}, annotations...) 
 		if len(annotations) == 1 && !so.isEnabled(check, annotations[0], nil) {
 			skip = true
 		}
@@ -107,6 +109,11 @@ func (so *ScoredObject) Add(ts TestScore, check ks.Check, locationer ks.FileLoca
 		}
 	}
 
+	fmt.Printf(
+		"checking: %s#%d [%s] => %v/%v\n",
+		so.FileLocation.Name, so.FileLocation.Line, check.ID, skipAll, skip,
+	)
+
 	// This test is ignored (via annotations), don't save the score
 	if skipAll {
 		ts.Skipped = true

From 06149d96add89e6edf67c95cabfef78257e6745d Mon Sep 17 00:00:00 2001
From: romnn
Date: Tue, 8 Oct 2024 19:53:28 +0200
Subject: [PATCH 5/6] remove prints

---
 parser/parse.go        | 4 ----
 scorecard/scorecard.go | 5 -----
 2 files changed, 9 deletions(-)

diff --git a/parser/parse.go b/parser/parse.go
index 51eb633..6dc9123 100644
--- a/parser/parse.go
+++ b/parser/parse.go
@@ -259,7 +259,6 @@ const (
 func IsSkipped(errs []error, annotations ...map[string]string) bool {
 	skip := false
 	for _, annotations := range annotations {
-		// fmt.Printf("annotation: %v => %v\n", annotations, annotations[skippedResourceAnnotation])
 		if skipAnnotation, ok := annotations[SkippedResourceAnnotation]; ok {
 			if err := yaml.Unmarshal([]byte(skipAnnotation), &skip); err != nil {
 				errs = append(errs, fmt.Errorf("invalid skip annotation %q, must be boolean", skipAnnotation))
@@ -348,7 +347,6 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio
 		var statefulSet appsv1.StatefulSet
 		errs.AddIfErr(p.decode(fileContents, &statefulSet))
 		fileLocation.Skip = p.isSkipped(&statefulSet, errs)
-		fmt.Printf("sfs skip=%v\n", fileLocation.Skip)
 
 		sset := internal.Appsv1StatefulSet{Obj: statefulSet, Location: fileLocation}
 		addPodSpeccer(sset)
@@ -359,14 +357,12 @@ func (p *Parser) decodeItem(s *parsedObjects, detectedVersion schema.GroupVersio
 		var statefulSet appsv1beta1.StatefulSet
 		errs.AddIfErr(p.decode(fileContents, &statefulSet))
 		fileLocation.Skip = p.isSkipped(&statefulSet, errs)
-		fmt.Printf("sfs skip=%v\n", fileLocation.Skip)
 
 		addPodSpeccer(internal.Appsv1beta1StatefulSet{StatefulSet: statefulSet, Location: fileLocation})
 
 	case appsv1beta2.SchemeGroupVersion.WithKind("StatefulSet"):
 		var statefulSet appsv1beta2.StatefulSet
 		errs.AddIfErr(p.decode(fileContents, &statefulSet))
 		fileLocation.Skip = p.isSkipped(&statefulSet, errs)
-		fmt.Printf("sfs skip=%v\n", fileLocation.Skip)
 
 		addPodSpeccer(internal.Appsv1beta2StatefulSet{StatefulSet: statefulSet, Location: fileLocation})
diff --git a/scorecard/scorecard.go b/scorecard/scorecard.go
index 7b4fe7d..3ade345 100644
--- a/scorecard/scorecard.go
+++ b/scorecard/scorecard.go
@@ -109,11 +109,6 @@ func (so *ScoredObject) Add(ts TestScore, check ks.Check, locationer ks.FileLoca
 		}
 	}
 
-	fmt.Printf(
-		"checking: %s#%d [%s] => %v/%v\n",
-		so.FileLocation.Name, so.FileLocation.Line, check.ID, skipAll, skip,
-	)
-
 	// This test is ignored (via annotations), don't save the score
 	if skipAll {
 		ts.Skipped = true

From 85fc08eb9a9c4c6afd9c27feef5c2145b4529d37 Mon Sep 17 00:00:00 2001
From: romnn
Date: Tue, 8 Oct 2024 20:01:03 +0200
Subject: [PATCH 6/6] wording

---
 parser/parse_test.go   | 1 -
 scorecard/scorecard.go | 2 +-
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/parser/parse_test.go b/parser/parse_test.go
index 9303e21..617d46b 100644
--- a/parser/parse_test.go
+++ b/parser/parse_test.go
@@ -8,7 +8,6 @@ import (
 	"testing"
 
 	ks "github.com/zegl/kube-score/domain"
-	// "github.com/zegl/kube-score/parser"
 
 	"github.com/stretchr/testify/assert"
 )
diff --git a/scorecard/scorecard.go b/scorecard/scorecard.go
index 3ade345..8a07ad4 100644
--- a/scorecard/scorecard.go
+++ b/scorecard/scorecard.go
+++ b/scorecard/scorecard.go
@@ -113,7 +113,7 @@ func (so *ScoredObject) Add(ts TestScore, check ks.Check, locationer ks.FileLoca
 	if skipAll {
 		ts.Skipped = true
 		ts.Comments = []TestScoreComment{{Summary: fmt.Sprintf(
-			"Skipped because %s#L%d is ignored",
+			"Skipped because %s#L%d is skipped",
 			so.FileLocation.Name, so.FileLocation.Line,
 		)}}
 	} else if skip {
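
For reference, the behaviour the new parser tests pin down is that the value of the "kube-score/skip" annotation is decoded as a YAML boolean: "true", "True" and even " yes " mark the object (and all of its checks) as skipped, while "false", "No" or an unparseable value leave it scored. Below is a minimal standalone sketch of that decoding, assuming sigs.k8s.io/yaml as the decoder (a YAML 1.1-style resolver, which is what the "yes"/"No" test cases imply); the helper name skipRequested is illustrative only, the real implementation lives in parser.IsSkipped above.

package main

import (
	"fmt"

	"sigs.k8s.io/yaml" // assumption: a YAML 1.1-style decoder, matching the "yes"/"No" test cases
)

// skipRequested sketches the annotation handling exercised by the tests:
// the annotation value is unmarshalled into a bool, and anything that
// does not parse as a boolean falls back to "do not skip".
func skipRequested(annotations map[string]string) bool {
	v, ok := annotations["kube-score/skip"]
	if !ok {
		return false
	}
	skip := false
	if err := yaml.Unmarshal([]byte(v), &skip); err != nil {
		return false // e.g. "maybe" is invalid, so the object stays scored
	}
	return skip
}

func main() {
	for _, v := range []string{"true", "True", " yes ", "false", "No", "maybe"} {
		fmt.Printf("kube-score/skip=%q => skip=%v\n", v, skipRequested(map[string]string{"kube-score/skip": v}))
	}
}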