From 16b8600eb7a0e24ee7ac99d4d7aafae4e1bb4774 Mon Sep 17 00:00:00 2001 From: Nikita Pivkin Date: Mon, 7 Oct 2024 14:09:05 +0600 Subject: [PATCH 01/17] feat(checks): add secrets leak check in Dockerfile Signed-off-by: Nikita Pivkin --- .../dockerfile/general/AVD-DS-0031/docs.md | 13 ++ checks/docker/README.md | 2 +- checks/docker/leaked_secrets.rego | 162 ++++++++++++++++++ checks/docker/leaked_secrets_test.rego | 106 ++++++++++++ cmd/id/main.go | 3 + 5 files changed, 285 insertions(+), 1 deletion(-) create mode 100644 avd_docs/dockerfile/general/AVD-DS-0031/docs.md create mode 100644 checks/docker/leaked_secrets.rego create mode 100644 checks/docker/leaked_secrets_test.rego diff --git a/avd_docs/dockerfile/general/AVD-DS-0031/docs.md b/avd_docs/dockerfile/general/AVD-DS-0031/docs.md new file mode 100644 index 00000000..bca9038c --- /dev/null +++ b/avd_docs/dockerfile/general/AVD-DS-0031/docs.md @@ -0,0 +1,13 @@ + +Passing secrets via `build-args` or envs or copying secret files can leak them out + +### Impact + + + +{{ remediationActions }} + +### Links +- https://docs.docker.com/build/building/secrets/ + + diff --git a/checks/docker/README.md b/checks/docker/README.md index 4d9a95dc..5ffa6632 100644 --- a/checks/docker/README.md +++ b/checks/docker/README.md @@ -1 +1 @@ -Collection of docker policies +Collection of Docker checks diff --git a/checks/docker/leaked_secrets.rego b/checks/docker/leaked_secrets.rego new file mode 100644 index 00000000..8482db8a --- /dev/null +++ b/checks/docker/leaked_secrets.rego @@ -0,0 +1,162 @@ +# METADATA +# title: Secrets passed via `build-args` or envs or copied secret files +# description: Passing secrets via `build-args` or envs or copying secret files can leak them out +# schemas: +# - input: schema["dockerfile"] +# related_resources: +# - https://docs.docker.com/build/building/secrets/ +# custom: +# id: DS031 +# avd_id: AVD-DS-0031 +# severity: CRITICAL +# short_code: do-not-pass-secrets +# recommended_action: Use secret mount if secrets are needed during image build. Use volume mount if secret files are needed during container runtime. +# input: +# selector: +# - type: dockerfile +package builtin.dockerfile.DS031 + +import rego.v1 + +import data.lib.docker + +final_stage := last(input.Stages) + +# check if env or arg contains secret env +deny contains res if { + some instruction in final_stage.Commands + is_arg_or_env(instruction.Cmd) + [name, _] := retrive_name_and_default(instruction) + is_secret_env(name) + res := result.new( + sprintf("Possible exposure of secret env %q in %s", [name, upper(instruction.Cmd)]), + instruction, + ) +} + +# check if env or arg contains secret file env +deny contains res if { + some instruction in final_stage.Commands + is_arg_or_env(instruction.Cmd) + [name, path] := retrive_name_and_default(instruction) + path != "" + name in secret_file_envs + is_secret_file_copied(path) + res := result.new( + sprintf("Possible exposure of the copied secret env file %q in %s", [name, upper(instruction.Cmd)]), + instruction, + ) +} + +# check if a secret file is copied +deny contains res if { + some instruction in final_stage.Commands + instruction.Cmd == "copy" + count(instruction.Value) == 2 + env := trim_prefix(instruction.Value[1], "$") + env in secret_file_envs + res := result.new( + sprintf("Possible exposure of secret file %q in COPY", [env]), + instruction, + ) +} + +check_args := true + +# TODO: Should arguments be checked? 
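+# ARG values are recorded in the image history (`docker history` shows them), so they
+# can leak secrets just like ENV values; `check_args` allows opting out of checking ARG.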
+is_arg_or_env(cmd) if { + check_args + cmd == "arg" +} + +is_arg_or_env(cmd) if cmd == "env" + +retrive_name_and_default(instruction) := [instruction.Value[0], ""] if { + instruction.Cmd == "env" + count(instruction.Value) == 1 +} + +retrive_name_and_default(instruction) := [instruction.Value[0], instruction.Value[1]] if { + instruction.Cmd == "env" + count(instruction.Value) > 1 +} + +retrive_name_and_default(instruction) := [parts[0], ""] if { + instruction.Cmd == "arg" + parts := split(instruction.Value[0], "=") + count(parts) == 1 +} + +retrive_name_and_default(instruction) := [parts[0], parts[1]] if { + instruction.Cmd == "arg" + parts := split(instruction.Value[0], "=") + count(parts) > 1 +} + +default_envs := { + "AWS_ACCESS_KEY_ID", # https://docs.aws.amazon.com/cli/v1/userguide/cli-configure-envvars.html + "AWS_SECRET_ACCESS_KEY", + "AWS_SESSION_TOKEN", + "AZURE_CLIENT_ID", # https://learn.microsoft.com/en-us/dotnet/api/azure.identity.environmentcredential?view=azure-dotnet + "AZURE_CLIENT_SECRET", + "GITHUB_TOKEN", # https://docs.github.com/en/actions/security-for-github-actions/security-guides/automatic-token-authentication#about-the-github_token-secret + "OPENAI_API_KEY", # https://platform.openai.com/docs/quickstart/create-and-export-an-api-key + "HF_TOKEN", # https://huggingface.co/docs/huggingface_hub/en/package_reference/environment_variables#hftoken +} + +excluded_envs := set() + +included_envs := set() + +envs := (default_envs - excluded_envs) | included_envs + +is_secret_env(str) if str in envs + +env_prefixes := { + "VITE_", # https://v3.vitejs.dev/guide/env-and-mode.html#env-files + "REACT_APP_", # https://create-react-app.dev/docs/adding-custom-environment-variables/ +} + +is_secret_env(str) if { + some prefix in env_prefixes + trim_left(str, prefix) in envs +} + +secret_file_envs := { + "AWS_CONFIG_FILE", # https://docs.aws.amazon.com/cli/v1/userguide/cli-configure-envvars.html + "HF_TOKEN_PATH", # https://huggingface.co/docs/huggingface_hub/en/package_reference/environment_variables#hftokenpath + "GOOGLE_APPLICATION_CREDENTIALS", # https://cloud.google.com/docs/authentication/application-default-credentials#GAC +} + +last(array) := array[count(array) - 1] + +# check only the simple case when the secret file from the copied directory is used +# For example: +# COPY /src /app +# ENV GOOGLE_APPLICATION_CREDENTIALS="./app/google-storage-service.json" +is_secret_file_copied(path) if { + some instruction in final_stage.Commands + instruction.Cmd == "copy" + dst := last(instruction.Value) + is_sub_path(path, dst) +} + +is_sub_path(a, b) if startswith(clean_path(a), clean_path(b)) + +clean_path(path) := remove_trailing_slash(remove_leading_slash(remove_leading_dot(unquote(path)))) + +unquote(s) := cut_prefix(cut_suffix(s, "\""), "\"") + +remove_leading_dot(path) := cut_prefix(path, ".") + +remove_leading_slash(path) := cut_prefix(path, "/") + +remove_trailing_slash(path) := cut_suffix(path, "/") + +cut_prefix(s, prefix) := substring(s, 1, -1) if { + startswith(s, prefix) +} else := s + +cut_suffix(s, suffix) := substring(s, 0, count(s) - 1) if { + endswith(s, suffix) +} else := s diff --git a/checks/docker/leaked_secrets_test.rego b/checks/docker/leaked_secrets_test.rego new file mode 100644 index 00000000..c0733d45 --- /dev/null +++ b/checks/docker/leaked_secrets_test.rego @@ -0,0 +1,106 @@ +package builtin.dockerfile.DS031_test + +import rego.v1 + +import data.builtin.dockerfile.DS031 as check + +test_deny_secret_env_variable if { + res := check.deny with input as 
build_simple_input("env", ["GITHUB_TOKEN"]) + count(res) = 1 +} + +test_deny_secret_env_variable_with_default if { + res := check.deny with input as build_simple_input("env", ["GITHUB_TOKEN", "placeholder", "="]) + count(res) = 1 +} + +test_deny_secret_arg_variable_with_default if { + res := check.deny with input as build_simple_input("arg", ["GITHUB_TOKEN=placeholder"]) + count(res) = 1 +} + +test_deny_secret_arg if { + res := check.deny with input as build_simple_input("arg", ["GITHUB_TOKEN"]) + count(res) = 1 +} + +test_allow_secret_arg_but_argument_checking_disabled if { + inp := build_simple_input("arg", ["GITHUB_TOKEN"]) + res := check.deny with input as inp with check.check_args as false + count(res) = 0 +} + +test_allow_secret_github_env_but_this_env_excluded if { + inp := build_simple_input("env", ["GITHUB_TOKEN"]) + res := check.deny with input as inp with check.excluded_envs as {"GITHUB_TOKEN"} + count(res) = 0 +} + +test_deny_custom_secret_env if { + inp := build_simple_input("env", ["MY_SECRET"]) + res := check.deny with input as inp with check.included_envs as {"MY_SECRET"} + count(res) = 1 +} + +test_deny_secret_arg_with_prefix if { + inp := build_simple_input("arg", ["VITE_AWS_ACCESS_KEY_ID=REPLACE_WITH_YOUR_OWN"]) + res := check.deny with input as inp + count(res) = 1 +} + +test_deny_copy_secret_file if { + inp := build_input([instruction("copy", ["./config", "$AWS_CONFIG_FILE"])]) + res := check.deny with input as inp + count(res) = 1 +} + +test_allow_secret_file_without_copy if { + inp := build_simple_input("env", ["GOOGLE_APPLICATION_CREDENTIALS", "/credentials/google-storage-service.json", "="]) + res := check.deny with input as inp + count(res) = 0 +} + +test_allow_secret_file_copy_with_other_base_path if { + inp := build_input([ + instruction("copy", ["/src", "/src"]), + instruction("env", ["GOOGLE_APPLICATION_CREDENTIALS=./app/google-storage-service.json"]), + ]) + res := check.deny with input as inp + count(res) = 0 +} + +test_deny_secret_file if { + inp := build_input([ + instruction("copy", ["/src", "/app/"]), + instruction("env", ["GOOGLE_APPLICATION_CREDENTIALS", "./app/google-storage-service.json", "="]), + ]) + res := check.deny with input as inp + count(res) = 1 +} + +test_deny_secret_file_quoted_path if { + inp := build_input([ + instruction("copy", [".", "."]), + instruction("env", ["GOOGLE_APPLICATION_CREDENTIALS", "\"./news-extraction.json\"", "="]), + ]) + res := check.deny with input as inp + count(res) = 1 +} + +test_deny_secret_file_in_arg if { + inp := build_input([ + instruction("copy", ["/src", "/app/"]), + instruction("arg", ["GOOGLE_APPLICATION_CREDENTIALS=./app/google-storage-service.json"]), + ]) + res := check.deny with input as inp + count(res) = 1 +} + +instruction(cmd, val) := { + "Cmd": cmd, + "Value": val, +} + +build_simple_input(cmd, val) := build_input([instruction(cmd, val)]) + +build_input(cmds) := {"Stages": [{"Name": "busybox", "Commands": cmds}]} diff --git a/cmd/id/main.go b/cmd/id/main.go index c3376249..6e714b44 100644 --- a/cmd/id/main.go +++ b/cmd/id/main.go @@ -8,12 +8,15 @@ import ( "strings" "github.com/aquasecurity/trivy/pkg/iac/framework" + "github.com/aquasecurity/trivy/pkg/iac/rego" _ "github.com/aquasecurity/trivy/pkg/iac/rego" "github.com/aquasecurity/trivy/pkg/iac/rules" ) func main() { + rules.Reset() + rego.LoadAndRegister() // organise existing rules by provider keyMap := make(map[string][]string) for _, rule := range rules.GetRegistered(framework.ALL) { From d54d9ee9e05ba9c853e9129aff5eff4c30916af5 Mon Sep 17 
00:00:00 2001 From: Nikita Pivkin Date: Mon, 7 Oct 2024 17:00:52 +0600 Subject: [PATCH 02/17] feat: check credentials setup in RUN Signed-off-by: Nikita Pivkin --- checks/docker/leaked_secrets.rego | 30 +++++++++++++++++++++++ checks/docker/leaked_secrets_test.rego | 34 ++++++++++++++++++++++++++ 2 files changed, 64 insertions(+) diff --git a/checks/docker/leaked_secrets.rego b/checks/docker/leaked_secrets.rego index 8482db8a..2b7271b0 100644 --- a/checks/docker/leaked_secrets.rego +++ b/checks/docker/leaked_secrets.rego @@ -160,3 +160,33 @@ cut_prefix(s, prefix) := substring(s, 1, -1) if { cut_suffix(s, suffix) := substring(s, 0, count(s) - 1) if { endswith(s, suffix) } else := s + + +deny contains res if { + some instruction in final_stage.Commands + instruction.Cmd == "run" + not has_secret_mount_arg(instruction) + use_command_to_setup_credentials(instruction) + res := result.new( + "Possible exposure of secret in RUN", + instruction, + ) +} + +has_secret_mount_arg(instruction) if { + some flag in instruction.Flags + startswith(flag, "--mount=type=secret") +} + +setup_creds_commands := { + "aws configure set aws_access_key_id", # https://docs.aws.amazon.com/cli/latest/reference/configure/set.html + "aws configure set aws_secret_access_key", + "gcloud auth activate-service-account", # https://cloud.google.com/sdk/gcloud/reference/auth/activate-service-account + "az login", # TODO: check flags +} + +use_command_to_setup_credentials(instruction) if { + some val in instruction.Value + some cmd in setup_creds_commands + contains(val, cmd) +} \ No newline at end of file diff --git a/checks/docker/leaked_secrets_test.rego b/checks/docker/leaked_secrets_test.rego index c0733d45..5ec9e0fa 100644 --- a/checks/docker/leaked_secrets_test.rego +++ b/checks/docker/leaked_secrets_test.rego @@ -96,6 +96,40 @@ test_deny_secret_file_in_arg if { count(res) = 1 } +test_deny_secret_in_set_command if { + inp := { + "Stages": [{ + "Name": "amazon/aws-cli:latest", + "Commands": [instruction( + "run", + ["aws configure set aws_access_key_id test-id && aws configure set aws_secret_access_key test-key"], + )] + }] + } + + res := check.deny with input as inp + count(res) = 1 +} + +test_allow_secret_in_set_command_with_secret_mount if { + inp := { + "Stages": [{ + "Name": "amazon/aws-cli:latest", + "Commands": [{ + "Cmd": "run", + "Value": ["aws configure set aws_access_key_id $(cat /run/secrets/aws-key-id) && aws configure set aws_secret_access_key $(cat /run/secrets/aws-secret-key)"], + "Flags": [ + "--mount=type=secret,id=aws-key-id,env=AWS_ACCESS_KEY_ID", + "--mount=type=secret,id=aws-secret-key,env=AWS_SECRET_ACCESS_KEY" + ] + }] + }] + } + + res := check.deny with input as inp + count(res) = 0 +} + instruction(cmd, val) := { "Cmd": cmd, "Value": val, From 078a04da84e8d5563d2de142152c5218cf7c9ab2 Mon Sep 17 00:00:00 2001 From: Nikita Pivkin Date: Tue, 8 Oct 2024 16:11:44 +0600 Subject: [PATCH 03/17] refactor: move functions to lib Signed-off-by: Nikita Pivkin --- checks/docker/leaked_secrets.rego | 34 +++++----------------- checks/docker/leaked_secrets_test.rego | 40 ++++++++++++-------------- lib/docker/path.rego | 29 +++++++++++++++++++ 3 files changed, 54 insertions(+), 49 deletions(-) create mode 100644 lib/docker/path.rego diff --git a/checks/docker/leaked_secrets.rego b/checks/docker/leaked_secrets.rego index 2b7271b0..65a51fa7 100644 --- a/checks/docker/leaked_secrets.rego +++ b/checks/docker/leaked_secrets.rego @@ -19,6 +19,7 @@ package builtin.dockerfile.DS031 import rego.v1 import data.lib.docker 
+import data.lib.path final_stage := last(input.Stages) @@ -38,10 +39,10 @@ deny contains res if { deny contains res if { some instruction in final_stage.Commands is_arg_or_env(instruction.Cmd) - [name, path] := retrive_name_and_default(instruction) - path != "" + [name, def] := retrive_name_and_default(instruction) + def != "" name in secret_file_envs - is_secret_file_copied(path) + is_secret_file_copied(def) res := result.new( sprintf("Possible exposure of the copied secret env file %q in %s", [name, upper(instruction.Cmd)]), instruction, @@ -134,34 +135,13 @@ last(array) := array[count(array) - 1] # For example: # COPY /src /app # ENV GOOGLE_APPLICATION_CREDENTIALS="./app/google-storage-service.json" -is_secret_file_copied(path) if { +is_secret_file_copied(p) if { some instruction in final_stage.Commands instruction.Cmd == "copy" dst := last(instruction.Value) - is_sub_path(path, dst) + path.is_sub_path(p, dst) } -is_sub_path(a, b) if startswith(clean_path(a), clean_path(b)) - -clean_path(path) := remove_trailing_slash(remove_leading_slash(remove_leading_dot(unquote(path)))) - -unquote(s) := cut_prefix(cut_suffix(s, "\""), "\"") - -remove_leading_dot(path) := cut_prefix(path, ".") - -remove_leading_slash(path) := cut_prefix(path, "/") - -remove_trailing_slash(path) := cut_suffix(path, "/") - -cut_prefix(s, prefix) := substring(s, 1, -1) if { - startswith(s, prefix) -} else := s - -cut_suffix(s, suffix) := substring(s, 0, count(s) - 1) if { - endswith(s, suffix) -} else := s - - deny contains res if { some instruction in final_stage.Commands instruction.Cmd == "run" @@ -189,4 +169,4 @@ use_command_to_setup_credentials(instruction) if { some val in instruction.Value some cmd in setup_creds_commands contains(val, cmd) -} \ No newline at end of file +} diff --git a/checks/docker/leaked_secrets_test.rego b/checks/docker/leaked_secrets_test.rego index 5ec9e0fa..000a15f2 100644 --- a/checks/docker/leaked_secrets_test.rego +++ b/checks/docker/leaked_secrets_test.rego @@ -97,34 +97,30 @@ test_deny_secret_file_in_arg if { } test_deny_secret_in_set_command if { - inp := { - "Stages": [{ - "Name": "amazon/aws-cli:latest", - "Commands": [instruction( - "run", - ["aws configure set aws_access_key_id test-id && aws configure set aws_secret_access_key test-key"], - )] - }] - } + inp := {"Stages": [{ + "Name": "amazon/aws-cli:latest", + "Commands": [instruction( + "run", + ["aws configure set aws_access_key_id test-id && aws configure set aws_secret_access_key test-key"], + )], + }]} res := check.deny with input as inp count(res) = 1 } test_allow_secret_in_set_command_with_secret_mount if { - inp := { - "Stages": [{ - "Name": "amazon/aws-cli:latest", - "Commands": [{ - "Cmd": "run", - "Value": ["aws configure set aws_access_key_id $(cat /run/secrets/aws-key-id) && aws configure set aws_secret_access_key $(cat /run/secrets/aws-secret-key)"], - "Flags": [ - "--mount=type=secret,id=aws-key-id,env=AWS_ACCESS_KEY_ID", - "--mount=type=secret,id=aws-secret-key,env=AWS_SECRET_ACCESS_KEY" - ] - }] - }] - } + inp := {"Stages": [{ + "Name": "amazon/aws-cli:latest", + "Commands": [{ + "Cmd": "run", + "Value": ["aws configure set aws_access_key_id $(cat /run/secrets/aws-key-id) && aws configure set aws_secret_access_key $(cat /run/secrets/aws-secret-key)"], + "Flags": [ + "--mount=type=secret,id=aws-key-id,env=AWS_ACCESS_KEY_ID", + "--mount=type=secret,id=aws-secret-key,env=AWS_SECRET_ACCESS_KEY", + ], + }], + }]} res := check.deny with input as inp count(res) = 0 diff --git a/lib/docker/path.rego 
b/lib/docker/path.rego new file mode 100644 index 00000000..59274650 --- /dev/null +++ b/lib/docker/path.rego @@ -0,0 +1,29 @@ +# METADATA +# custom: +# library: true +# input: +# selector: +# - type: dockerfile +package lib.path + +import rego.v1 + +is_sub_path(a, b) if startswith(clean_path(a), clean_path(b)) + +clean_path(p) := remove_trailing_slash(remove_leading_slash(remove_leading_dot(unquote(p)))) + +unquote(s) := cut_prefix(cut_suffix(s, "\""), "\"") + +remove_leading_dot(p) := cut_prefix(p, ".") + +remove_leading_slash(p) := cut_prefix(p, "/") + +remove_trailing_slash(p) := cut_suffix(p, "/") + +cut_prefix(s, prefix) := substring(s, 1, -1) if { + startswith(s, prefix) +} else := s + +cut_suffix(s, suffix) := substring(s, 0, count(s) - 1) if { + endswith(s, suffix) +} else := s From 494f867960d9f43abe88c6f9f84dbfd7c1d39721 Mon Sep 17 00:00:00 2001 From: Nikita Pivkin Date: Tue, 8 Oct 2024 16:15:23 +0600 Subject: [PATCH 04/17] check arguments by default Signed-off-by: Nikita Pivkin --- checks/docker/leaked_secrets.rego | 10 +--------- checks/docker/leaked_secrets_test.rego | 6 ------ 2 files changed, 1 insertion(+), 15 deletions(-) diff --git a/checks/docker/leaked_secrets.rego b/checks/docker/leaked_secrets.rego index 65a51fa7..039e9641 100644 --- a/checks/docker/leaked_secrets.rego +++ b/checks/docker/leaked_secrets.rego @@ -62,15 +62,7 @@ deny contains res if { ) } -check_args := true - -# TODO: Should arguments be checked? -is_arg_or_env(cmd) if { - check_args - cmd == "arg" -} - -is_arg_or_env(cmd) if cmd == "env" +is_arg_or_env(cmd) if cmd in {"env", "arg"} retrive_name_and_default(instruction) := [instruction.Value[0], ""] if { instruction.Cmd == "env" diff --git a/checks/docker/leaked_secrets_test.rego b/checks/docker/leaked_secrets_test.rego index 000a15f2..9ef6489b 100644 --- a/checks/docker/leaked_secrets_test.rego +++ b/checks/docker/leaked_secrets_test.rego @@ -24,12 +24,6 @@ test_deny_secret_arg if { count(res) = 1 } -test_allow_secret_arg_but_argument_checking_disabled if { - inp := build_simple_input("arg", ["GITHUB_TOKEN"]) - res := check.deny with input as inp with check.check_args as false - count(res) = 0 -} - test_allow_secret_github_env_but_this_env_excluded if { inp := build_simple_input("env", ["GITHUB_TOKEN"]) res := check.deny with input as inp with check.excluded_envs as {"GITHUB_TOKEN"} From 52b50942d413bf2f65be2724b53597111e315e51 Mon Sep 17 00:00:00 2001 From: Nikita Pivkin Date: Wed, 9 Oct 2024 11:30:14 +0600 Subject: [PATCH 05/17] use included_envs from data Signed-off-by: Nikita Pivkin --- checks/docker/leaked_secrets.rego | 6 +++++- checks/docker/leaked_secrets_test.rego | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/checks/docker/leaked_secrets.rego b/checks/docker/leaked_secrets.rego index 039e9641..21e9b2b6 100644 --- a/checks/docker/leaked_secrets.rego +++ b/checks/docker/leaked_secrets.rego @@ -18,6 +18,7 @@ package builtin.dockerfile.DS031 import rego.v1 +import data.ds031 import data.lib.docker import data.lib.path @@ -99,7 +100,10 @@ default_envs := { excluded_envs := set() -included_envs := set() +included_envs := included if { + is_array(ds031.included_envs) + included := {e | some e in ds031.included_envs} +} else := set() envs := (default_envs - excluded_envs) | included_envs diff --git a/checks/docker/leaked_secrets_test.rego b/checks/docker/leaked_secrets_test.rego index 9ef6489b..e6762483 100644 --- a/checks/docker/leaked_secrets_test.rego +++ b/checks/docker/leaked_secrets_test.rego @@ -32,7 +32,7 @@ 
test_allow_secret_github_env_but_this_env_excluded if { test_deny_custom_secret_env if { inp := build_simple_input("env", ["MY_SECRET"]) - res := check.deny with input as inp with check.included_envs as {"MY_SECRET"} + res := check.deny with input as inp with data.ds031.included_envs as {"MY_SECRET"} count(res) = 1 } From e2836dc2f19e594e777283286f9cd8b250757263 Mon Sep 17 00:00:00 2001 From: Nikita Pivkin Date: Wed, 9 Oct 2024 17:05:23 +0600 Subject: [PATCH 06/17] remove exluded_envs Signed-off-by: Nikita Pivkin --- checks/docker/leaked_secrets.rego | 4 +--- checks/docker/leaked_secrets_test.rego | 8 +------- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/checks/docker/leaked_secrets.rego b/checks/docker/leaked_secrets.rego index 21e9b2b6..ae7a8a5c 100644 --- a/checks/docker/leaked_secrets.rego +++ b/checks/docker/leaked_secrets.rego @@ -98,14 +98,12 @@ default_envs := { "HF_TOKEN", # https://huggingface.co/docs/huggingface_hub/en/package_reference/environment_variables#hftoken } -excluded_envs := set() - included_envs := included if { is_array(ds031.included_envs) included := {e | some e in ds031.included_envs} } else := set() -envs := (default_envs - excluded_envs) | included_envs +envs := default_envs | included_envs is_secret_env(str) if str in envs diff --git a/checks/docker/leaked_secrets_test.rego b/checks/docker/leaked_secrets_test.rego index e6762483..99f3d8a3 100644 --- a/checks/docker/leaked_secrets_test.rego +++ b/checks/docker/leaked_secrets_test.rego @@ -24,15 +24,9 @@ test_deny_secret_arg if { count(res) = 1 } -test_allow_secret_github_env_but_this_env_excluded if { - inp := build_simple_input("env", ["GITHUB_TOKEN"]) - res := check.deny with input as inp with check.excluded_envs as {"GITHUB_TOKEN"} - count(res) = 0 -} - test_deny_custom_secret_env if { inp := build_simple_input("env", ["MY_SECRET"]) - res := check.deny with input as inp with data.ds031.included_envs as {"MY_SECRET"} + res := check.deny with input as inp with data.ds031.included_envs as ["MY_SECRET"] count(res) = 1 } From f88d75f06d15601efc959c8bebd4189abfade161 Mon Sep 17 00:00:00 2001 From: Nikita Pivkin Date: Wed, 9 Oct 2024 21:37:11 +0600 Subject: [PATCH 07/17] use regex to find credential setup commands Signed-off-by: Nikita Pivkin --- checks/docker/leaked_secrets.rego | 9 +++++---- checks/docker/leaked_secrets_test.rego | 4 ++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/checks/docker/leaked_secrets.rego b/checks/docker/leaked_secrets.rego index ae7a8a5c..f15b093f 100644 --- a/checks/docker/leaked_secrets.rego +++ b/checks/docker/leaked_secrets.rego @@ -152,15 +152,16 @@ has_secret_mount_arg(instruction) if { startswith(flag, "--mount=type=secret") } -setup_creds_commands := { +cred_setup_commands := { "aws configure set aws_access_key_id", # https://docs.aws.amazon.com/cli/latest/reference/configure/set.html "aws configure set aws_secret_access_key", "gcloud auth activate-service-account", # https://cloud.google.com/sdk/gcloud/reference/auth/activate-service-account - "az login", # TODO: check flags + `az login.*(-p|--password|--federated-token)\s`, # https://learn.microsoft.com/en-us/cli/azure/reference-index?view=azure-cli-latest#az-login + `doctl auth init.*(-t|--access-token)\s`, # https://docs.digitalocean.com/reference/doctl/reference/auth/init/ } use_command_to_setup_credentials(instruction) if { some val in instruction.Value - some cmd in setup_creds_commands - contains(val, cmd) + some cmd in cred_setup_commands + regex.match(cmd, val) } diff --git 
a/checks/docker/leaked_secrets_test.rego b/checks/docker/leaked_secrets_test.rego index 99f3d8a3..23954fdf 100644 --- a/checks/docker/leaked_secrets_test.rego +++ b/checks/docker/leaked_secrets_test.rego @@ -86,10 +86,10 @@ test_deny_secret_file_in_arg if { test_deny_secret_in_set_command if { inp := {"Stages": [{ - "Name": "amazon/aws-cli:latest", + "Name": "ubuntu:22.04", "Commands": [instruction( "run", - ["aws configure set aws_access_key_id test-id && aws configure set aws_secret_access_key test-key"], + [`if [[-z "$GOOGLE_APPLICATION_CREDENTIALS"]]; then gcloud auth activate-service-account --key-file=${GOOGLE_APPLICATION_CREDENTIALS}; fi`], )], }]} From 728ef8c59162dc23401028d55bb9aff7f2713259 Mon Sep 17 00:00:00 2001 From: Nikita Pivkin Date: Thu, 10 Oct 2024 18:03:49 +0600 Subject: [PATCH 08/17] feat: check secret keywords in arg and env Signed-off-by: Nikita Pivkin --- checks/docker/leaked_secrets.rego | 40 ++++++++++++++++++++++++-- checks/docker/leaked_secrets_test.rego | 24 ++++++++++++++++ 2 files changed, 61 insertions(+), 3 deletions(-) diff --git a/checks/docker/leaked_secrets.rego b/checks/docker/leaked_secrets.rego index f15b093f..96fb20f3 100644 --- a/checks/docker/leaked_secrets.rego +++ b/checks/docker/leaked_secrets.rego @@ -29,7 +29,7 @@ deny contains res if { some instruction in final_stage.Commands is_arg_or_env(instruction.Cmd) [name, _] := retrive_name_and_default(instruction) - is_secret_env(name) + is_secret(name) res := result.new( sprintf("Possible exposure of secret env %q in %s", [name, upper(instruction.Cmd)]), instruction, @@ -105,6 +105,16 @@ included_envs := included if { envs := default_envs | included_envs +is_secret(str) if { + is_secret_env(str) +} + +is_secret(str) if { + not is_secret_env(str) + not str in secret_file_envs + is_secret_key(str) +} + is_secret_env(str) if str in envs env_prefixes := { @@ -156,8 +166,8 @@ cred_setup_commands := { "aws configure set aws_access_key_id", # https://docs.aws.amazon.com/cli/latest/reference/configure/set.html "aws configure set aws_secret_access_key", "gcloud auth activate-service-account", # https://cloud.google.com/sdk/gcloud/reference/auth/activate-service-account - `az login.*(-p|--password|--federated-token)\s`, # https://learn.microsoft.com/en-us/cli/azure/reference-index?view=azure-cli-latest#az-login - `doctl auth init.*(-t|--access-token)\s`, # https://docs.digitalocean.com/reference/doctl/reference/auth/init/ + `az login.*(?:-p|--password|--federated-token)\s`, # https://learn.microsoft.com/en-us/cli/azure/reference-index?view=azure-cli-latest#az-login + `doctl auth init.*(?:-t|--access-token)\s`, # https://docs.digitalocean.com/reference/doctl/reference/auth/init/ } use_command_to_setup_credentials(instruction) if { @@ -165,3 +175,27 @@ use_command_to_setup_credentials(instruction) if { some cmd in cred_setup_commands regex.match(cmd, val) } + +is_secret_key(s) if { + regex.match(deny_secrets_pattern, s) + not regex.match(allow_secrets_pattern, s) +} + +# adopt https://github.com/moby/buildkit/blob/62bda5c1caae9935a2051e96443d554f7ab7ef2d/frontend/dockerfile/dockerfile2llb/convert.go#L2469 +secrets_regex_pattern := `(?i)(?:_|^)(?:%s)(?:_|$)` + +build_secrets_pattern(tokens) := sprintf(secrets_regex_pattern, [concat("|", tokens)]) + +# these tokens cover the following keywords +# https://github.com/danielmiessler/SecLists/blob/master/Discovery/Variables/secret-keywords.txt +deny_secrets_tokens := { + "apikey", "auth", "credential", + "credentials", "key", "password", + "pword", "passwd", 
"secret", "token", +} + +deny_secrets_pattern := build_secrets_pattern(deny_secrets_tokens) + +allow_secrets_tokens := {"public"} + +allow_secrets_pattern := build_secrets_pattern(allow_secrets_tokens) diff --git a/checks/docker/leaked_secrets_test.rego b/checks/docker/leaked_secrets_test.rego index 23954fdf..b0075e23 100644 --- a/checks/docker/leaked_secrets_test.rego +++ b/checks/docker/leaked_secrets_test.rego @@ -114,6 +114,30 @@ test_allow_secret_in_set_command_with_secret_mount if { count(res) = 0 } +test_deny_secret_key if { + # starts with uppercase secret token + case1 := check.deny with input as build_simple_input("env", ["SECRET_PASSPHRASE"]) + count(case1) = 1 + + # starts with lowercase secret token + case2 := check.deny with input as build_simple_input("env", ["password"]) + count(case2) = 1 + + # contains uppercase secret token after underscore + case3 := check.deny with input as build_simple_input("arg", ["AWS_SECRET_ACCESS_KEY"]) + count(case3) = 1 +} + +test_allow_secret_key if { + # starts with uppercase secret token + case1 := check.deny with input as build_simple_input("env", ["PUBLIC_KEY"]) + count(case1) = 0 + + # starts with lowercase secret token + case2 := check.deny with input as build_simple_input("arg", ["public_token"]) + count(case2) = 0 +} + instruction(cmd, val) := { "Cmd": cmd, "Value": val, From 1b65f93d2438ecb5582db0e35dac4cadf3b8719b Mon Sep 17 00:00:00 2001 From: Nikita Pivkin Date: Thu, 10 Oct 2024 18:21:00 +0600 Subject: [PATCH 09/17] add more envs Signed-off-by: Nikita Pivkin --- checks/docker/leaked_secrets.rego | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/checks/docker/leaked_secrets.rego b/checks/docker/leaked_secrets.rego index 96fb20f3..5153546d 100644 --- a/checks/docker/leaked_secrets.rego +++ b/checks/docker/leaked_secrets.rego @@ -93,9 +93,20 @@ default_envs := { "AWS_SESSION_TOKEN", "AZURE_CLIENT_ID", # https://learn.microsoft.com/en-us/dotnet/api/azure.identity.environmentcredential?view=azure-dotnet "AZURE_CLIENT_SECRET", - "GITHUB_TOKEN", # https://docs.github.com/en/actions/security-for-github-actions/security-guides/automatic-token-authentication#about-the-github_token-secret + "GITHUB_TOKEN", # https://docs.github.com/en/actions/security-for-github-actions/security-guides/automatic-token-authentication#about-the-github_token-secret, + "GH_TOKEN", # https://cli.github.com/manual/gh_help_environment + "GH_ENTERPRISE_TOKEN", + "GITHUB_ENTERPRISE_TOKEN", "OPENAI_API_KEY", # https://platform.openai.com/docs/quickstart/create-and-export-an-api-key "HF_TOKEN", # https://huggingface.co/docs/huggingface_hub/en/package_reference/environment_variables#hftoken + "DIGITALOCEAN_ACCESS_TOKEN", # https://github.com/digitalocean/doctl?tab=readme-ov-file#authenticating-with-digitalocean + "DOCKERHUB_PASSWORD", # https://circleci.com/docs/private-images/ + "FIREBASE_TOKEN", # https://firebase.google.com/docs/cli, + "CI_DEPLOY_PASSWORD", # https://docs.gitlab.com/ee/user/project/deploy_tokens/ + "GOOGLE_API_KEY", # https://python.langchain.com/docs/integrations/tools/google_search/ + "LANGSMITH_API_KEY", # https://docs.smith.langchain.com/how_to_guides/setup/create_account_api_key + "LANGCHAIN_API_KEY", + "HEROKU_API_KEY", # https://devcenter.heroku.com/articles/authentication } included_envs := included if { From 4cdfaa691d55ca5df6d641132166cff3bd0216c7 Mon Sep 17 00:00:00 2001 From: Nikita Pivkin Date: Thu, 10 Oct 2024 19:24:20 +0600 Subject: [PATCH 10/17] feat: handle multiple decls in arg and env Signed-off-by: 
Nikita Pivkin --- checks/docker/leaked_secrets.rego | 45 ++++++++++++++++++-------- checks/docker/leaked_secrets_test.rego | 33 +++++++++++++------ 2 files changed, 54 insertions(+), 24 deletions(-) diff --git a/checks/docker/leaked_secrets.rego b/checks/docker/leaked_secrets.rego index 5153546d..5d890d95 100644 --- a/checks/docker/leaked_secrets.rego +++ b/checks/docker/leaked_secrets.rego @@ -28,7 +28,7 @@ final_stage := last(input.Stages) deny contains res if { some instruction in final_stage.Commands is_arg_or_env(instruction.Cmd) - [name, _] := retrive_name_and_default(instruction) + some [name, _] in retrive_name_and_default(instruction) is_secret(name) res := result.new( sprintf("Possible exposure of secret env %q in %s", [name, upper(instruction.Cmd)]), @@ -40,7 +40,7 @@ deny contains res if { deny contains res if { some instruction in final_stage.Commands is_arg_or_env(instruction.Cmd) - [name, def] := retrive_name_and_default(instruction) + some [name, def] in retrive_name_and_default(instruction) def != "" name in secret_file_envs is_secret_file_copied(def) @@ -65,26 +65,43 @@ deny contains res if { is_arg_or_env(cmd) if cmd in {"env", "arg"} -retrive_name_and_default(instruction) := [instruction.Value[0], ""] if { +# returns an array of pairs consisting of environment variable names and their default values +retrive_name_and_default(instruction) := vals if { instruction.Cmd == "env" - count(instruction.Value) == 1 -} -retrive_name_and_default(instruction) := [instruction.Value[0], instruction.Value[1]] if { - instruction.Cmd == "env" - count(instruction.Value) > 1 + count(instruction.Value) % 3 == 0 + count_envs = count(instruction.Value) / 3 + + vals := [ + [name, def] | + some idx in numbers.range(0, count_envs - 1) + + # ENV must have two arguments + # Trivy returns `ENV FOO=bar` as [“FOO”, “bar”, “=”], so we skip the delimiter + name := instruction.Value[idx * 3] + def := instruction.Value[(idx * 3) + 1] + ] } -retrive_name_and_default(instruction) := [parts[0], ""] if { +# returns an array of pairs consisting of the argument names and their default values. 
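+# For example, `ARG FOO=test GITHUB_TOKEN` arrives as Value ["FOO=test", "GITHUB_TOKEN"]
+# and yields [["FOO", "test"], ["GITHUB_TOKEN", ""]].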
+retrive_name_and_default(instruction) := vals if { instruction.Cmd == "arg" - parts := split(instruction.Value[0], "=") + vals := [ + v | + some val in instruction.Value + v := split_args(val) + ] +} + +split_args(arg) := [name, ""] if { + parts := split(arg, "=") count(parts) == 1 + name := parts[0] } -retrive_name_and_default(instruction) := [parts[0], parts[1]] if { - instruction.Cmd == "arg" - parts := split(instruction.Value[0], "=") - count(parts) > 1 +split_args(arg) := parts if { + parts := split(arg, "=") + count(parts) == 2 } default_envs := { diff --git a/checks/docker/leaked_secrets_test.rego b/checks/docker/leaked_secrets_test.rego index b0075e23..0692f468 100644 --- a/checks/docker/leaked_secrets_test.rego +++ b/checks/docker/leaked_secrets_test.rego @@ -5,11 +5,6 @@ import rego.v1 import data.builtin.dockerfile.DS031 as check test_deny_secret_env_variable if { - res := check.deny with input as build_simple_input("env", ["GITHUB_TOKEN"]) - count(res) = 1 -} - -test_deny_secret_env_variable_with_default if { res := check.deny with input as build_simple_input("env", ["GITHUB_TOKEN", "placeholder", "="]) count(res) = 1 } @@ -25,7 +20,7 @@ test_deny_secret_arg if { } test_deny_custom_secret_env if { - inp := build_simple_input("env", ["MY_SECRET"]) + inp := build_simple_input("env", ["MY_SECRET", "test", "="]) res := check.deny with input as inp with data.ds031.included_envs as ["MY_SECRET"] count(res) = 1 } @@ -36,6 +31,24 @@ test_deny_secret_arg_with_prefix if { count(res) = 1 } +test_deny_multiply_args_in_same_line if { + inp := build_simple_input("arg", ["FOO=test", "GITHUB_TOKEN"]) + res := check.deny with input as inp + count(res) = 1 +} + +test_deny_env_legacy_format if { + inp := build_simple_input("env", ["GITHUB_TOKEN", "test", ""]) + res := check.deny with input as inp + count(res) = 1 +} + +test_deny_multiply_envs_in_same_line if { + inp := build_simple_input("env", ["FOO", "test", "=", "GITHUB_TOKEN", "test", "="]) + res := check.deny with input as inp + count(res) = 1 +} + test_deny_copy_secret_file if { inp := build_input([instruction("copy", ["./config", "$AWS_CONFIG_FILE"])]) res := check.deny with input as inp @@ -51,7 +64,7 @@ test_allow_secret_file_without_copy if { test_allow_secret_file_copy_with_other_base_path if { inp := build_input([ instruction("copy", ["/src", "/src"]), - instruction("env", ["GOOGLE_APPLICATION_CREDENTIALS=./app/google-storage-service.json"]), + instruction("env", ["GOOGLE_APPLICATION_CREDENTIALS", "=", "./app/google-storage-service.json"]), ]) res := check.deny with input as inp count(res) = 0 @@ -116,11 +129,11 @@ test_allow_secret_in_set_command_with_secret_mount if { test_deny_secret_key if { # starts with uppercase secret token - case1 := check.deny with input as build_simple_input("env", ["SECRET_PASSPHRASE"]) + case1 := check.deny with input as build_simple_input("env", ["SECRET_PASSPHRASE", "test", "="]) count(case1) = 1 # starts with lowercase secret token - case2 := check.deny with input as build_simple_input("env", ["password"]) + case2 := check.deny with input as build_simple_input("env", ["password", "test", "="]) count(case2) = 1 # contains uppercase secret token after underscore @@ -130,7 +143,7 @@ test_deny_secret_key if { test_allow_secret_key if { # starts with uppercase secret token - case1 := check.deny with input as build_simple_input("env", ["PUBLIC_KEY"]) + case1 := check.deny with input as build_simple_input("env", ["PUBLIC_KEY", "test", "="]) count(case1) = 0 # starts with lowercase secret token From 
986d98497e319839357cd03b92c790a8e38f3482 Mon Sep 17 00:00:00 2001 From: Nikita Pivkin Date: Thu, 10 Oct 2024 21:34:57 +0600 Subject: [PATCH 11/17] update rules Signed-off-by: Nikita Pivkin --- checks/docker/leaked_secrets.rego | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/checks/docker/leaked_secrets.rego b/checks/docker/leaked_secrets.rego index 5d890d95..f2daf95a 100644 --- a/checks/docker/leaked_secrets.rego +++ b/checks/docker/leaked_secrets.rego @@ -42,7 +42,7 @@ deny contains res if { is_arg_or_env(instruction.Cmd) some [name, def] in retrive_name_and_default(instruction) def != "" - name in secret_file_envs + is_secret_file_env(name) is_secret_file_copied(def) res := result.new( sprintf("Possible exposure of the copied secret env file %q in %s", [name, upper(instruction.Cmd)]), @@ -50,13 +50,15 @@ deny contains res if { ) } +is_secret_file_env(name) if name in secret_file_envs + # check if a secret file is copied deny contains res if { some instruction in final_stage.Commands instruction.Cmd == "copy" count(instruction.Value) == 2 env := trim_prefix(instruction.Value[1], "$") - env in secret_file_envs + is_secret_file_env(env) res := result.new( sprintf("Possible exposure of secret file %q in COPY", [env]), instruction, @@ -138,8 +140,8 @@ is_secret(str) if { } is_secret(str) if { - not is_secret_env(str) - not str in secret_file_envs + not is_secret_env(str) # to avoid duplication of results + not is_secret_file_env(str) # files require checking that they have been copied is_secret_key(str) } From 4b62f66d1f07c25f1deffb26c9de4dac18d7a4ef Mon Sep 17 00:00:00 2001 From: Nikita Pivkin Date: Mon, 14 Oct 2024 13:13:24 +0600 Subject: [PATCH 12/17] add usr and psw tokens Signed-off-by: Nikita Pivkin --- checks/docker/leaked_secrets.rego | 1 + checks/docker/leaked_secrets_test.rego | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/checks/docker/leaked_secrets.rego b/checks/docker/leaked_secrets.rego index f2daf95a..c3025ba8 100644 --- a/checks/docker/leaked_secrets.rego +++ b/checks/docker/leaked_secrets.rego @@ -222,6 +222,7 @@ deny_secrets_tokens := { "apikey", "auth", "credential", "credentials", "key", "password", "pword", "passwd", "secret", "token", + "usr", "psw", } deny_secrets_pattern := build_secrets_pattern(deny_secrets_tokens) diff --git a/checks/docker/leaked_secrets_test.rego b/checks/docker/leaked_secrets_test.rego index 0692f468..31909c33 100644 --- a/checks/docker/leaked_secrets_test.rego +++ b/checks/docker/leaked_secrets_test.rego @@ -139,6 +139,12 @@ test_deny_secret_key if { # contains uppercase secret token after underscore case3 := check.deny with input as build_simple_input("arg", ["AWS_SECRET_ACCESS_KEY"]) count(case3) = 1 + + case4 := check.deny with input as build_simple_input("arg", ["ARTIFACTORY_USR"]) + count(case4) = 1 + + case5 := check.deny with input as build_simple_input("arg", ["ARTIFACTORY_PSW"]) + count(case5) = 1 } test_allow_secret_key if { From 9a8d5ab0028f76545809c7bf8726971200628ff0 Mon Sep 17 00:00:00 2001 From: Nikita Pivkin Date: Tue, 15 Oct 2024 11:57:49 +0600 Subject: [PATCH 13/17] fix(ci): exclude Trivy for dependabot Signed-off-by: Nikita Pivkin --- .github/dependabot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 0e7dccd8..dfff7543 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -6,14 +6,14 @@ updates: schedule: interval: "weekly" ignore: - - dependency-name: "github.com/aquasecurity/trivy-*" ## 
`trivy-*` dependencies are updated manually + - dependency-name: "github.com/aquasecurity/trivy" ## `trivy` are updated manually groups: docker: patterns: - "github.com/docker/*" common: exclude-patterns: - - "github.com/aquasecurity/trivy-*" + - "github.com/aquasecurity/trivy" patterns: - "*" - package-ecosystem: github-actions From 4ef77376ceb29815eab17f583b2d1e4980fe2460 Mon Sep 17 00:00:00 2001 From: Nikita Pivkin Date: Tue, 1 Oct 2024 18:34:55 +0600 Subject: [PATCH 14/17] refactor(checks): improve metadata retrieval Signed-off-by: Nikita Pivkin --- .../aws/apigateway/enable_access_logging.rego | 12 +++++- checks/cloud/aws/apigateway/enable_cache.rego | 4 +- .../apigateway/enable_cache_encryption.rego | 5 ++- .../cloud/aws/apigateway/enable_tracing.rego | 4 +- .../aws/apigateway/use_secure_tls_policy.rego | 6 ++- .../aws/athena/enable_at_rest_encryption.rego | 20 +++++++--- .../aws/athena/no_encryption_override.rego | 7 +++- .../cloud/aws/cloudfront/enable_logging.rego | 4 +- .../aws/cloudfront/use_secure_tls_policy.rego | 4 +- .../aws/cloudtrail/enable_all_regions.rego | 7 +++- .../aws/cloudtrail/enable_log_validation.rego | 7 +++- .../cloudtrail/encryption_customer_key.rego | 11 +++++- .../ensure_cloudwatch_integration.rego | 11 +++++- .../require_bucket_access_logging.rego | 7 +++- .../cloudwatch/log_group_customer_key.rego | 11 +++++- .../aws/codebuild/enable_encryption.rego | 19 +++++++--- .../aws/config/aggregate_all_regions.rego | 9 ++++- .../documentdb/enable_storage_encryption.rego | 7 +++- .../documentdb/encryption_customer_key.rego | 22 ++++++++--- .../encryption_customer_key_test.rego | 4 +- .../add_description_to_security_group.rego | 17 ++++++--- ...dd_description_to_security_group_rule.rego | 11 +++++- .../aws/ec2/as_enable_at_rest_encryption.rego | 17 +++++++-- .../aws/ec2/as_enforce_http_token_imds.rego | 19 ++++++---- .../aws/ec2/enable_at_rest_encryption.rego | 17 +++++++-- .../aws/ec2/enable_volume_encryption.rego | 11 ++++-- .../aws/ec2/encryption_customer_key.rego | 13 +++++-- .../aws/ec2/enforce_http_token_imds.rego | 12 ++++-- checks/cloud/aws/ecr/enable_image_scans.rego | 10 +++-- .../aws/ecr/enforce_immutable_repository.rego | 10 +++-- .../aws/ecr/repository_customer_key.rego | 11 +++++- .../aws/ecs/enable_container_insight.rego | 6 ++- .../aws/ecs/enable_in_transit_encryption.rego | 6 ++- .../aws/efs/enable_at_rest_encryption.rego | 9 ++++- .../aws/eks/enable_control_plane_logging.rego | 37 ++++++++++++++----- .../enable_control_plane_logging_test.rego | 4 +- checks/cloud/aws/eks/encrypt_secrets.rego | 22 ++++++++--- .../add_description_for_security_group.rego | 11 +++++- .../enable_at_rest_encryption.rego | 9 ++++- .../enable_in_transit_encryption.rego | 9 ++++- .../enable_domain_encryption.rego | 6 ++- .../elasticsearch/enable_domain_logging.rego | 9 ++++- .../enable_in_transit_encryption.rego | 9 ++++- .../aws/elasticsearch/enforce_https.rego | 9 ++++- .../elasticsearch/use_secure_tls_policy.rego | 11 +++++- checks/cloud/aws/elb/alb_not_public.rego | 13 +++++-- .../cloud/aws/elb/drop_invalid_headers.rego | 9 ++++- .../aws/msk/enable_at_rest_encryption.rego | 4 +- .../cloud/aws/neptune/enable_log_export.rego | 4 +- .../aws/rds/enable_performance_insights.rego | 6 ++- .../aws/rds/encrypt_cluster_storage_data.rego | 6 ++- .../rds/encrypt_instance_storage_data.rego | 4 +- checks/cloud/aws/s3/block_public_acls.rego | 8 ++-- checks/cloud/aws/s3/block_public_policy.rego | 8 ++-- .../aws/s3/enable_bucket_encryption.rego | 4 +- 
checks/cloud/aws/s3/enable_logging.rego | 9 +++-- checks/cloud/aws/s3/enable_versioning.rego | 4 +- checks/cloud/aws/s3/ignore_public_acls.rego | 8 ++-- checks/cloud/aws/s3/no_public_buckets.rego | 8 ++-- checks/cloud/aws/s3/require_mfa_delete.rego | 4 +- .../account_identity_registered.rego | 4 +- .../appservice/authentication_enabled.rego | 4 +- .../cloud/azure/appservice/enable_http2.rego | 4 +- .../appservice/use_secure_tls_policy.rego | 4 +- .../disable_password_authentication.rego | 4 +- .../azure/compute/enable_disk_encryption.rego | 4 +- .../container/configured_network_policy.rego | 4 +- checks/cloud/azure/container/logging.rego | 4 +- .../azure/container/use_rbac_permissions.rego | 4 +- .../azure/database/no_public_access.rego | 3 +- ...s_configuration_connection_throttling.rego | 4 +- ...ostgres_configuration_log_checkpoints.rego | 4 +- ...ostgres_configuration_log_connections.rego | 4 +- .../azure/database/secure_tls_policy.rego | 5 ++- .../azure/keyvault/specify_network_acl.rego | 4 +- .../monitor/activity_log_retention_set.rego | 6 ++- .../azure/network/retention_policy_set.rego | 6 ++- .../queue_services_logging_enabled.rego | 4 +- .../compute/disk_encryption_customer_key.rego | 4 +- .../vm_disk_encryption_customer_key.rego | 4 +- .../google/gke/enable_network_policy.rego | 4 +- 81 files changed, 491 insertions(+), 182 deletions(-) diff --git a/checks/cloud/aws/apigateway/enable_access_logging.rego b/checks/cloud/aws/apigateway/enable_access_logging.rego index 4a85eb4a..11e1c61d 100644 --- a/checks/cloud/aws/apigateway/enable_access_logging.rego +++ b/checks/cloud/aws/apigateway/enable_access_logging.rego @@ -33,16 +33,24 @@ package builtin.aws.apigateway.aws0001 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some stage in input.aws.apigateway.v1.apis[_].stages not logging_is_configured(stage) - res := result.new("Access logging is not configured.", stage.accesslogging.cloudwatchloggrouparn) + res := result.new( + "Access logging is not configured.", + metadata.obj_by_path(stage, ["accesslogging", "cloudwatchloggrouparn"]), + ) } deny contains res if { some stage in input.aws.apigateway.v2.apis[_].stages not logging_is_configured(stage) - res := result.new("Access logging is not configured.", stage.accesslogging.cloudwatchloggrouparn) + res := result.new( + "Access logging is not configured.", + metadata.obj_by_path(stage, ["accesslogging", "cloudwatchloggrouparn"]), + ) } logging_is_configured(stage) if { diff --git a/checks/cloud/aws/apigateway/enable_cache.rego b/checks/cloud/aws/apigateway/enable_cache.rego index 42dfcd62..6cd4d742 100644 --- a/checks/cloud/aws/apigateway/enable_cache.rego +++ b/checks/cloud/aws/apigateway/enable_cache.rego @@ -30,6 +30,8 @@ package builtin.aws.apigateway.aws0190 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some api in input.aws.apigateway.v1.apis isManaged(api) @@ -40,6 +42,6 @@ deny contains res if { not settings.cacheenabled.value res := result.new( "Cache data is not enabled.", - object.get(settings, "cacheenabled", settings), + metadata.obj_by_path(settings, ["cacheenabled"]), ) } diff --git a/checks/cloud/aws/apigateway/enable_cache_encryption.rego b/checks/cloud/aws/apigateway/enable_cache_encryption.rego index 8d39f3e5..ed5d19e8 100644 --- a/checks/cloud/aws/apigateway/enable_cache_encryption.rego +++ b/checks/cloud/aws/apigateway/enable_cache_encryption.rego @@ -28,6 +28,8 @@ package builtin.aws.apigateway.aws0002 import rego.v1 +import data.lib.cloud.metadata + deny contains 
res if { some api in input.aws.apigateway.v1.apis isManaged(api) @@ -37,8 +39,9 @@ deny contains res if { isManaged(settings) settings.cacheenabled.value not settings.cachedataencrypted.value + res := result.new( "Cache data is not encrypted.", - object.get(settings, "cachedataencrypted", settings), + metadata.obj_by_path(settings, ["cachedataencrypted"]), ) } diff --git a/checks/cloud/aws/apigateway/enable_tracing.rego b/checks/cloud/aws/apigateway/enable_tracing.rego index b35c3c19..f3e4bff8 100644 --- a/checks/cloud/aws/apigateway/enable_tracing.rego +++ b/checks/cloud/aws/apigateway/enable_tracing.rego @@ -28,6 +28,8 @@ package builtin.aws.apigateway.aws0003 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some api in input.aws.apigateway.v1.apis isManaged(api) @@ -36,6 +38,6 @@ deny contains res if { not stage.xraytracingenabled.value res := result.new( "X-Ray tracing is not enabled.", - object.get(stage, "xraytracingenabled", stage), + metadata.obj_by_path(stage, ["xraytracingenabled"]), ) } diff --git a/checks/cloud/aws/apigateway/use_secure_tls_policy.rego b/checks/cloud/aws/apigateway/use_secure_tls_policy.rego index 44a9711a..2a291140 100644 --- a/checks/cloud/aws/apigateway/use_secure_tls_policy.rego +++ b/checks/cloud/aws/apigateway/use_secure_tls_policy.rego @@ -30,12 +30,14 @@ package builtin.aws.apigateway.aws0005 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some domain in input.aws.apigateway.v1.domainnames not is_tls_1_2(domain) res := result.new( "Domain name is configured with an outdated TLS policy.", - object.get(domain, "securitypolicy", domain), + metadata.obj_by_path(domain, "securitypolicy"), ) } @@ -44,7 +46,7 @@ deny contains res if { not is_tls_1_2(domain) res := result.new( "Domain name is configured with an outdated TLS policy.", - object.get(domain, "securitypolicy", domain), + metadata.obj_by_path(domain, "securitypolicy"), ) } diff --git a/checks/cloud/aws/athena/enable_at_rest_encryption.rego b/checks/cloud/aws/athena/enable_at_rest_encryption.rego index dd319edd..46d44ddd 100644 --- a/checks/cloud/aws/athena/enable_at_rest_encryption.rego +++ b/checks/cloud/aws/athena/enable_at_rest_encryption.rego @@ -34,20 +34,28 @@ package builtin.aws.athena.aws0006 import rego.v1 +import data.lib.cloud.metadata + encryption_type_none := "" deny contains res if { some workgroup in input.aws.athena.workgroups - is_encryption_type_none(workgroup.encryption) - res := result.new("Workgroup does not have encryption configured.", workgroup) + not is_encrypted(workgroup) + res := result.new( + "Workgroup does not have encryption configured.", + metadata.obj_by_path(workgroup, ["encryption", "type"]), + ) } deny contains res if { some database in input.aws.athena.databases - is_encryption_type_none(database.encryption) - res := result.new("Database does not have encryption configured.", database) + not is_encrypted(database) + res := result.new( + "Database does not have encryption configured.", + metadata.obj_by_path(database, ["encryption", "type"]), + ) } -is_encryption_type_none(encryption) if { - encryption.type.value == encryption_type_none +is_encrypted(obj) if { + obj.encryption.type.value != encryption_type_none } diff --git a/checks/cloud/aws/athena/no_encryption_override.rego b/checks/cloud/aws/athena/no_encryption_override.rego index 078d93eb..17c7a17a 100644 --- a/checks/cloud/aws/athena/no_encryption_override.rego +++ b/checks/cloud/aws/athena/no_encryption_override.rego @@ -33,8 +33,13 @@ package 
builtin.aws.athena.aws0007 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some workgroup in input.aws.athena.workgroups not workgroup.enforceconfiguration.value - res := result.new("The workgroup configuration is not enforced.", workgroup.enforceconfiguration) + res := result.new( + "The workgroup configuration is not enforced.", + metadata.obj_by_path(workgroup, ["enforceconfiguration"]), + ) } diff --git a/checks/cloud/aws/cloudfront/enable_logging.rego b/checks/cloud/aws/cloudfront/enable_logging.rego index bb74205e..4c39ef09 100644 --- a/checks/cloud/aws/cloudfront/enable_logging.rego +++ b/checks/cloud/aws/cloudfront/enable_logging.rego @@ -33,12 +33,14 @@ package builtin.aws.cloudfront.aws0010 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some dist in input.aws.cloudfront.distributions not has_logging_bucket(dist) res := result.new( "Distribution does not have logging enabled", - object.get(dist, ["logging", "bucket"], dist), + metadata.obj_by_path(dist, ["logging", "bucket"]), ) } diff --git a/checks/cloud/aws/cloudfront/use_secure_tls_policy.rego b/checks/cloud/aws/cloudfront/use_secure_tls_policy.rego index 9d65731f..e0874b3f 100644 --- a/checks/cloud/aws/cloudfront/use_secure_tls_policy.rego +++ b/checks/cloud/aws/cloudfront/use_secure_tls_policy.rego @@ -39,13 +39,15 @@ import rego.v1 protocol_version_tls1_2_2021 = "TLSv1.2_2021" +import data.lib.cloud.metadata + deny contains res if { some dist in input.aws.cloudfront.distributions not dist.viewercertificate.cloudfrontdefaultcertificate.value not is_tls_1_2(dist) res := result.new( "Distribution allows unencrypted communications.", - object.get(dist, ["viewercertificate", "minimumprotocolversion"], dist), + metadata.obj_by_path(dist, ["viewercertificate", "minimumprotocolversion"]), ) } diff --git a/checks/cloud/aws/cloudtrail/enable_all_regions.rego b/checks/cloud/aws/cloudtrail/enable_all_regions.rego index 0383c327..7f630754 100644 --- a/checks/cloud/aws/cloudtrail/enable_all_regions.rego +++ b/checks/cloud/aws/cloudtrail/enable_all_regions.rego @@ -38,8 +38,13 @@ package builtin.aws.cloudtrail.aws0014 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some trail in input.aws.cloudtrail.trails not trail.ismultiregion.value - res := result.new("Trail is not enabled across all regions.", trail.ismultiregion) + res := result.new( + "Trail is not enabled across all regions.", + metadata.obj_by_path(trail, ["ismultiregion"]), + ) } diff --git a/checks/cloud/aws/cloudtrail/enable_log_validation.rego b/checks/cloud/aws/cloudtrail/enable_log_validation.rego index ced05965..81f2bfe4 100644 --- a/checks/cloud/aws/cloudtrail/enable_log_validation.rego +++ b/checks/cloud/aws/cloudtrail/enable_log_validation.rego @@ -33,8 +33,13 @@ package builtin.aws.cloudtrail.aws0016 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some trail in input.aws.cloudtrail.trails not trail.enablelogfilevalidation.value - res := result.new("Trail does not have log validation enabled.", trail.enablelogfilevalidation) + res := result.new( + "Trail does not have log validation enabled.", + metadata.obj_by_path(trail, ["enablelogfilevalidation"]), + ) } diff --git a/checks/cloud/aws/cloudtrail/encryption_customer_key.rego b/checks/cloud/aws/cloudtrail/encryption_customer_key.rego index 8c2576a4..c5b586fe 100644 --- a/checks/cloud/aws/cloudtrail/encryption_customer_key.rego +++ b/checks/cloud/aws/cloudtrail/encryption_customer_key.rego @@ -36,8 +36,15 @@ package 
builtin.aws.cloudtrail.aws0015 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some trail in input.aws.cloudtrail.trails - trail.kmskeyid.value == "" - res := result.new("CloudTrail does not use a customer managed key to encrypt the logs.", trail.kmskeyid) + not use_cms(trail) + res := result.new( + "CloudTrail does not use a customer managed key to encrypt the logs.", + metadata.obj_by_path(trail, ["kmskeyid"]), + ) } + +use_cms(trail) if trail.kmskeyid.value != "" diff --git a/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration.rego b/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration.rego index fdfdb964..a587c494 100644 --- a/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration.rego +++ b/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration.rego @@ -46,8 +46,15 @@ package builtin.aws.cloudtrail.aws0162 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some trail in input.aws.cloudtrail.trails - trail.cloudwatchlogsloggrouparn.value == "" - res := result.new("Trail does not have CloudWatch logging configured", trail) + not is_logging_configured(trail) + res := result.new( + "Trail does not have CloudWatch logging configured", + metadata.obj_by_path(trail, ["cloudwatchlogsloggrouparn"]), + ) } + +is_logging_configured(trail) if trail.cloudwatchlogsloggrouparn.value != "" diff --git a/checks/cloud/aws/cloudtrail/require_bucket_access_logging.rego b/checks/cloud/aws/cloudtrail/require_bucket_access_logging.rego index 1fb862be..195da356 100644 --- a/checks/cloud/aws/cloudtrail/require_bucket_access_logging.rego +++ b/checks/cloud/aws/cloudtrail/require_bucket_access_logging.rego @@ -42,6 +42,8 @@ package builtin.aws.cloudtrail.aws0163 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some trail in input.aws.cloudtrail.trails trail.bucketname.value != "" @@ -50,5 +52,8 @@ deny contains res if { bucket.name.value == trail.bucketname.value not bucket.logging.enabled.value - res := result.new("Trail S3 bucket does not have logging enabled", bucket) + res := result.new( + "Trail S3 bucket does not have logging enabled", + metadata.obj_by_path(bucket, ["name"]), + ) } diff --git a/checks/cloud/aws/cloudwatch/log_group_customer_key.rego b/checks/cloud/aws/cloudwatch/log_group_customer_key.rego index f93c2a68..e128b4a8 100644 --- a/checks/cloud/aws/cloudwatch/log_group_customer_key.rego +++ b/checks/cloud/aws/cloudwatch/log_group_customer_key.rego @@ -33,8 +33,15 @@ package builtin.aws.cloudwatch.aws0017 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some group in input.aws.cloudwatch.loggroups - group.kmskeyid.value == "" - res := result.new("Log group is not encrypted.", group) + not has_cms(group) + res := result.new( + "Log group is not encrypted.", + metadata.obj_by_path(group, ["kmskeyid"]), + ) } + +has_cms(group) if group.kmskeyid.value != "" diff --git a/checks/cloud/aws/codebuild/enable_encryption.rego b/checks/cloud/aws/codebuild/enable_encryption.rego index 8e0042e6..5251d88b 100644 --- a/checks/cloud/aws/codebuild/enable_encryption.rego +++ b/checks/cloud/aws/codebuild/enable_encryption.rego @@ -34,16 +34,25 @@ package builtin.aws.codebuild.aws0018 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some project in input.aws.codebuild.projects - encryptionenabled := project.artifactsettings.encryptionenabled - not encryptionenabled.value - res := result.new("Encryption is not enabled for project artifacts.", encryptionenabled) + not 
is_encryption_enabled(project.artifactsettings) + res := result.new( + "Encryption is not enabled for project artifacts.", + metadata.obj_by_path(project, ["artifactsettings", "encryptionenabled"]), + ) } +is_encryption_enabled(settings) if settings.encryptionenabled.value + deny contains res if { some project in input.aws.codebuild.projects some setting in project.secondaryartifactsettings - not setting.encryptionenabled.value - res := result.new("Encryption is not enabled for secondary project artifacts.", setting.encryptionenabled) + not is_encryption_enabled(setting) + res := result.new( + "Encryption is not enabled for secondary project artifacts.", + metadata.obj_by_path(setting, ["encryptionenabled"]), + ) } diff --git a/checks/cloud/aws/config/aggregate_all_regions.rego b/checks/cloud/aws/config/aggregate_all_regions.rego index 305fa36b..8f49a670 100644 --- a/checks/cloud/aws/config/aggregate_all_regions.rego +++ b/checks/cloud/aws/config/aggregate_all_regions.rego @@ -34,9 +34,14 @@ package builtin.aws.config.aws0019 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { cfg_aggregator := input.aws.config.configurationaggregrator - cfg_aggregator.__defsec_metadata.managed + isManaged(cfg_aggregator) not cfg_aggregator.sourceallregions.value - res := result.new("Configuration aggregation is not set to source from all regions.", cfg_aggregator.sourceallregions) + res := result.new( + "Configuration aggregation is not set to source from all regions.", + metadata.obj_by_path(cfg_aggregator, ["sourceallregions"]), + ) } diff --git a/checks/cloud/aws/documentdb/enable_storage_encryption.rego b/checks/cloud/aws/documentdb/enable_storage_encryption.rego index dec97001..1bb7b924 100644 --- a/checks/cloud/aws/documentdb/enable_storage_encryption.rego +++ b/checks/cloud/aws/documentdb/enable_storage_encryption.rego @@ -33,8 +33,13 @@ package builtin.aws.documentdb.aws0021 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some cluster in input.aws.documentdb.clusters not cluster.storageencrypted.value - res := result.new("Cluster storage does not have encryption enabled.", cluster.storageencrypted) + res := result.new( + "Cluster storage does not have encryption enabled.", + metadata.obj_by_path(cluster, ["storageencrypted"]), + ) } diff --git a/checks/cloud/aws/documentdb/encryption_customer_key.rego b/checks/cloud/aws/documentdb/encryption_customer_key.rego index c38427f6..0c58ecb1 100644 --- a/checks/cloud/aws/documentdb/encryption_customer_key.rego +++ b/checks/cloud/aws/documentdb/encryption_customer_key.rego @@ -33,17 +33,27 @@ package builtin.aws.documentdb.aws0022 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some cluster in input.aws.documentdb.clusters - cluster.kmskeyid.value == "" - - res := result.new("Cluster encryption does not use a customer-managed KMS key.", cluster) + isManaged(cluster) + not has_cms(cluster) + res := result.new( + "Cluster encryption does not use a customer-managed KMS key.", + metadata.obj_by_path(cluster, ["kmskeyid"]), + ) } deny contains res if { some cluster in input.aws.documentdb.clusters some instance in cluster.instances - instance.kmskeyid.value == "" - - res := result.new("Instance encryption does not use a customer-managed KMS key.", cluster) + isManaged(instance) + not has_cms(instance) + res := result.new( + "Instance encryption does not use a customer-managed KMS key.", + metadata.obj_by_path(instance, ["kmskeyid"]), + ) } + +has_cms(obj) if obj.kmskeyid.value != "" diff --git 
a/checks/cloud/aws/documentdb/encryption_customer_key_test.rego b/checks/cloud/aws/documentdb/encryption_customer_key_test.rego index 107a4113..18e1f3bd 100644 --- a/checks/cloud/aws/documentdb/encryption_customer_key_test.rego +++ b/checks/cloud/aws/documentdb/encryption_customer_key_test.rego @@ -12,7 +12,7 @@ test_allow_cluster_with_kms_key if { } test_allow_instance_with_kms_key if { - inp := {"aws": {"documentdb": {"clusters": [{"instances": [{"kmskeyid": {"value": "test"}}]}]}}} + inp := {"aws": {"documentdb": {"clusters": [{"kmskeyid": {"value": "test"}, "instances": [{"kmskeyid": {"value": "test"}}]}]}}} test.assert_empty(check.deny) with input as inp } @@ -24,7 +24,7 @@ test_disallow_cluster_without_kms_key if { } test_disallow_instance_without_kms_key if { - inp := {"aws": {"documentdb": {"clusters": [{"instances": [{"kmskeyid": {"value": ""}}]}]}}} + inp := {"aws": {"documentdb": {"clusters": [{"kmskeyid": {"value": "test"}, "instances": [{"kmskeyid": {"value": ""}}]}]}}} test.assert_equal_message("Instance encryption does not use a customer-managed KMS key.", check.deny) with input as inp } diff --git a/checks/cloud/aws/ec2/add_description_to_security_group.rego b/checks/cloud/aws/ec2/add_description_to_security_group.rego index bdaaf474..ae7d812f 100644 --- a/checks/cloud/aws/ec2/add_description_to_security_group.rego +++ b/checks/cloud/aws/ec2/add_description_to_security_group.rego @@ -38,16 +38,23 @@ package builtin.aws.ec2.aws0099 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some sg in input.aws.ec2.securitygroups - sg.__defsec_metadata.managed - sg.description.value == "" - res := result.new("Security group does not have a description.", sg) + isManaged(sg) + not has_description(sg) + res := result.new( + "Security group does not have a description.", + metadata.obj_by_path(sg, ["description"]), + ) } deny contains res if { some sg in input.aws.ec2.securitygroups - sg.__defsec_metadata.managed + isManaged(sg) sg.description.value == "Managed by Terraform" - res := result.new("Security group explicitly uses the default description.", sg) + res := result.new("Security group explicitly uses the default description.", sg.description) } + +has_description(sg) if sg.description.value != "" diff --git a/checks/cloud/aws/ec2/add_description_to_security_group_rule.rego b/checks/cloud/aws/ec2/add_description_to_security_group_rule.rego index dc9f6d50..e23c8a33 100644 --- a/checks/cloud/aws/ec2/add_description_to_security_group_rule.rego +++ b/checks/cloud/aws/ec2/add_description_to_security_group_rule.rego @@ -38,12 +38,19 @@ package builtin.aws.ec2.aws0124 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some group in input.aws.ec2.securitygroups some rule in array.concat( object.get(group, "egressrules", []), object.get(group, "ingressrules", []), ) - rule.description.value == "" - res := result.new("Security group rule does not have a description.", rule.description) + not has_description(rule) + res := result.new( + "Security group rule does not have a description.", + metadata.obj_by_path(rule, ["description"]), + ) } + +has_description(rule) if rule.description.value != "" diff --git a/checks/cloud/aws/ec2/as_enable_at_rest_encryption.rego b/checks/cloud/aws/ec2/as_enable_at_rest_encryption.rego index ffdaf698..41290bbb 100644 --- a/checks/cloud/aws/ec2/as_enable_at_rest_encryption.rego +++ b/checks/cloud/aws/ec2/as_enable_at_rest_encryption.rego @@ -35,15 +35,24 @@ package builtin.aws.ec2.aws0008 import rego.v1 +import 
data.lib.cloud.metadata + deny contains res if { some cfg in input.aws.ec2.launchconfigurations - cfg.rootblockdevice.encrypted.value == false - res := result.new("Root block device is not encrypted.", cfg.rootblockdevice.encrypted) + cfg.rootblockdevice + not cfg.rootblockdevice.encrypted.value + res := result.new( + "Root block device is not encrypted.", + metadata.obj_by_path(cfg, ["rootblockdevice", "encrypted"]), + ) } deny contains res if { some cfg in input.aws.ec2.launchconfigurations some device in cfg.ebsblockdevices - device.encrypted.value == false - res := result.new("EBS block device is not encrypted.", device.encrypted.value) + not device.encrypted.value + res := result.new( + "EBS block device is not encrypted.", + metadata.obj_by_path(device, ["encrypted"]), + ) } diff --git a/checks/cloud/aws/ec2/as_enforce_http_token_imds.rego b/checks/cloud/aws/ec2/as_enforce_http_token_imds.rego index 2c547b49..ed22174e 100644 --- a/checks/cloud/aws/ec2/as_enforce_http_token_imds.rego +++ b/checks/cloud/aws/ec2/as_enforce_http_token_imds.rego @@ -39,25 +39,28 @@ package builtin.aws.ec2.aws0130 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some config in input.aws.ec2.launchconfigurations - opts_do_not_require_token(config.metadataoptions) + not is_tokens_required(config) + not is_endpoint_disabled(config) res := result.new( "Launch configuration does not require IMDS access to require a token", - config.metadataoptions.httptokens, + metadata.obj_by_path(config, ["metadataoptions", "httptokens"]), ) } deny contains res if { some tpl in input.aws.ec2.launchtemplates - opts_do_not_require_token(tpl.instance.metadataoptions) + not is_tokens_required(tpl.instance) + not is_endpoint_disabled(tpl.instance) res := result.new( "Launch template does not require IMDS access to require a token", - tpl.instance.metadataoptions.httptokens, + metadata.obj_by_path(tpl.instance, ["metadataoptions", "httptokens"]), ) } -opts_do_not_require_token(opts) if { - opts.httptokens.value != "required" - opts.httpendpoint.value != "disabled" -} +is_tokens_required(instance) if instance.metadataoptions.httptokens.value == "required" + +is_endpoint_disabled(instance) if instance.metadataoptions.httpendpoint.value == "disabled" diff --git a/checks/cloud/aws/ec2/enable_at_rest_encryption.rego b/checks/cloud/aws/ec2/enable_at_rest_encryption.rego index 74a21486..0a8ba7cf 100644 --- a/checks/cloud/aws/ec2/enable_at_rest_encryption.rego +++ b/checks/cloud/aws/ec2/enable_at_rest_encryption.rego @@ -33,15 +33,24 @@ package builtin.aws.ec2.aws0131 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some instance in input.aws.ec2.instances - instance.rootblockdevice.encrypted.value == false - res := result.new("Root block device is not encrypted.", instance.rootblockdevice.encrypted) + instance.rootblockdevice + not instance.rootblockdevice.encrypted.value + res := result.new( + "Root block device is not encrypted.", + metadata.obj_by_path(instance, ["rootblockdevice", "encrypted"]), + ) } deny contains res if { some instance in input.aws.ec2.instances some ebs in instance.ebsblockdevices - ebs.encrypted.value == false - res := result.new("EBS block device is not encrypted.", ebs.encrypted) + not ebs.encrypted.value + res := result.new( + "EBS block device is not encrypted.", + metadata.obj_by_path(ebs, ["encrypted"]), + ) } diff --git a/checks/cloud/aws/ec2/enable_volume_encryption.rego b/checks/cloud/aws/ec2/enable_volume_encryption.rego index fefa6f60..11ba3667 100644 --- 
a/checks/cloud/aws/ec2/enable_volume_encryption.rego +++ b/checks/cloud/aws/ec2/enable_volume_encryption.rego @@ -35,9 +35,14 @@ package builtin.aws.ec2.aws0026 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some volume in input.aws.ec2.volumes - volume.__defsec_metadata.managed - volume.encryption.enabled.value == false - res := result.new("EBS volume is not encrypted.", volume.encryption.enabled) + isManaged(volume) + not volume.encryption.enabled.value + res := result.new( + "EBS volume is not encrypted.", + metadata.obj_by_path(volume, ["encryption", "enabled"]), + ) } diff --git a/checks/cloud/aws/ec2/encryption_customer_key.rego b/checks/cloud/aws/ec2/encryption_customer_key.rego index 063d205f..2fc2cae7 100644 --- a/checks/cloud/aws/ec2/encryption_customer_key.rego +++ b/checks/cloud/aws/ec2/encryption_customer_key.rego @@ -35,9 +35,16 @@ package builtin.aws.ec2.aws0027 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some volume in input.aws.ec2.volumes - volume.__defsec_metadata.managed - volume.encryption.kmskeyid.value == "" - res := result.new("EBS volume does not use a customer-managed KMS key.", volume.encryption.kmskeyid) + isManaged(volume) + not has_cms(volume) + res := result.new( + "EBS volume does not use a customer-managed KMS key.", + metadata.obj_by_path(volume, ["encryption", "kmskeyid"]), + ) } + +has_cms(volume) if volume.encryption.kmskeyid.value != "" diff --git a/checks/cloud/aws/ec2/enforce_http_token_imds.rego b/checks/cloud/aws/ec2/enforce_http_token_imds.rego index 8c1a1a8f..b8cdc172 100644 --- a/checks/cloud/aws/ec2/enforce_http_token_imds.rego +++ b/checks/cloud/aws/ec2/enforce_http_token_imds.rego @@ -34,12 +34,18 @@ package builtin.aws.ec2.aws0028 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some instance in input.aws.ec2.instances - instance.metadataoptions.httptokens.value != "required" - instance.metadataoptions.httpendpoint.value != "disabled" + not is_tokens_required(instance) + not is_endpoint_disabled(instance) res := result.new( "Instance does not require IMDS access to require a token.", - instance.metadataoptions.httptokens, + metadata.obj_by_path(instance, ["metadataoptions", "httptokens"]), ) } + +is_tokens_required(instance) if instance.metadataoptions.httptokens.value == "required" + +is_endpoint_disabled(instance) if instance.metadataoptions.httpendpoint.value == "disabled" diff --git a/checks/cloud/aws/ecr/enable_image_scans.rego b/checks/cloud/aws/ecr/enable_image_scans.rego index 1d0090d8..d56716f0 100644 --- a/checks/cloud/aws/ecr/enable_image_scans.rego +++ b/checks/cloud/aws/ecr/enable_image_scans.rego @@ -33,9 +33,13 @@ package builtin.aws.ecr.aws0030 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some repo in input.aws.ecr.repositories - repo.imagescanning.scanonpush.value == false - - res := result.new("Image scanning is not enabled", repo.imagescanning.scanonpush) + not repo.imagescanning.scanonpush.value + res := result.new( + "Image scanning is not enabled", + metadata.obj_by_path(repo, ["imagescanning", "scanonpush"]), + ) } diff --git a/checks/cloud/aws/ecr/enforce_immutable_repository.rego b/checks/cloud/aws/ecr/enforce_immutable_repository.rego index 5f26daba..6831538f 100644 --- a/checks/cloud/aws/ecr/enforce_immutable_repository.rego +++ b/checks/cloud/aws/ecr/enforce_immutable_repository.rego @@ -34,9 +34,13 @@ package builtin.aws.ecr.aws0031 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some 
repo in input.aws.ecr.repositories - repo.imagetagsimmutable.value == false - - res := result.new("Repository tags are mutable.", repo.imagetagsimmutable) + not repo.imagetagsimmutable.value + res := result.new( + "Repository tags are mutable.", + metadata.obj_by_path(repo, ["imagetagsimmutable"]), + ) } diff --git a/checks/cloud/aws/ecr/repository_customer_key.rego b/checks/cloud/aws/ecr/repository_customer_key.rego index 7d7b3cf7..a45aca9c 100644 --- a/checks/cloud/aws/ecr/repository_customer_key.rego +++ b/checks/cloud/aws/ecr/repository_customer_key.rego @@ -33,6 +33,8 @@ package builtin.aws.ecr.aws0033 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some repo in input.aws.ecr.repositories not is_encyption_type_kms(repo.encryption.type) @@ -42,8 +44,13 @@ deny contains res if { deny contains res if { some repo in input.aws.ecr.repositories is_encyption_type_kms(repo.encryption.type) - repo.encryption.kmskeyid.value == "" - res := result.new("Repository encryption does not use a customer managed KMS key.", repo.encryption.kmskeyid) + not has_cms(repo) + res := result.new( + "Repository encryption does not use a customer managed KMS key.", + metadata.obj_by_path(repo, ["encryption", "kmskeyid"]), + ) } is_encyption_type_kms(typ) if typ.value == "KMS" + +has_cms(repo) if repo.encryption.kmskeyid.value != "" diff --git a/checks/cloud/aws/ecs/enable_container_insight.rego b/checks/cloud/aws/ecs/enable_container_insight.rego index 428fc58a..6238d65d 100644 --- a/checks/cloud/aws/ecs/enable_container_insight.rego +++ b/checks/cloud/aws/ecs/enable_container_insight.rego @@ -33,11 +33,13 @@ package builtin.aws.ecs.aws0034 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some cluster in input.aws.ecs.clusters - cluster.settings.containerinsightsenabled.value == false + not cluster.settings.containerinsightsenabled.value res := result.new( "Cluster does not have container insights enabled.", - cluster.settings.containerinsightsenabled, + metadata.obj_by_path(cluster, ["settings", "containerinsightsenabled"]), ) } diff --git a/checks/cloud/aws/ecs/enable_in_transit_encryption.rego b/checks/cloud/aws/ecs/enable_in_transit_encryption.rego index 966d381a..b0b6bd22 100644 --- a/checks/cloud/aws/ecs/enable_in_transit_encryption.rego +++ b/checks/cloud/aws/ecs/enable_in_transit_encryption.rego @@ -34,12 +34,14 @@ package builtin.aws.ecs.aws0035 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some task_definition in input.aws.ecs.taskdefinitions some volume in task_definition.volumes - volume.efsvolumeconfiguration.transitencryptionenabled.value == false + not volume.efsvolumeconfiguration.transitencryptionenabled.value res := result.new( "Task definition includes a volume which does not have in-transit-encryption enabled.", - volume.efsvolumeconfiguration.transitencryptionenabled, + metadata.obj_by_path(volume, ["efsvolumeconfiguration", "transitencryptionenabled"]), ) } diff --git a/checks/cloud/aws/efs/enable_at_rest_encryption.rego b/checks/cloud/aws/efs/enable_at_rest_encryption.rego index 673de489..32e04566 100644 --- a/checks/cloud/aws/efs/enable_at_rest_encryption.rego +++ b/checks/cloud/aws/efs/enable_at_rest_encryption.rego @@ -33,8 +33,13 @@ package builtin.aws.efs.aws0037 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some fs in input.aws.efs.filesystems - fs.encrypted.value == false - res := result.new("File system is not encrypted.", fs.encrypted) + not fs.encrypted.value + res := result.new( + 
"File system is not encrypted.", + metadata.obj_by_path(fs, ["encrypted"]), + ) } diff --git a/checks/cloud/aws/eks/enable_control_plane_logging.rego b/checks/cloud/aws/eks/enable_control_plane_logging.rego index 926bac94..f7d1b4ff 100644 --- a/checks/cloud/aws/eks/enable_control_plane_logging.rego +++ b/checks/cloud/aws/eks/enable_control_plane_logging.rego @@ -30,32 +30,49 @@ package builtin.aws.eks.aws0038 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some cluster in input.aws.eks.clusters - cluster.logging.api.value == false - res := result.new("Control plane API logging is not enabled.", cluster.logging.api) + not cluster.logging.api.value + res := result.new( + "Control plane API logging is not enabled.", + metadata.obj_by_path(cluster, ["logging", "api"]), + ) } deny contains res if { some cluster in input.aws.eks.clusters - cluster.logging.audit.value == false - res := result.new("Control plane audit logging is not enabled.", cluster.logging.audit) + not cluster.logging.audit.value + res := result.new( + "Control plane audit logging is not enabled.", + metadata.obj_by_path(cluster, ["logging", "audit"]), + ) } deny contains res if { some cluster in input.aws.eks.clusters - cluster.logging.authenticator.value == false - res := result.new("Control plane authenticator logging is not enabled.", cluster.logging.authenticator) + not cluster.logging.authenticator.value + res := result.new( + "Control plane authenticator logging is not enabled.", + metadata.obj_by_path(cluster, ["logging", "authenticator"]), + ) } deny contains res if { some cluster in input.aws.eks.clusters - cluster.logging.controllermanager.value == false - res := result.new("Control plane controller manager logging is not enabled.", cluster.logging.controllermanager) + not cluster.logging.controllermanager.value + res := result.new( + "Control plane controller manager logging is not enabled.", + metadata.obj_by_path(cluster, ["logging", "controllermanager"]), + ) } deny contains res if { some cluster in input.aws.eks.clusters - cluster.logging.scheduler.value == false - res := result.new("Control plane scheduler logging is not enabled.", cluster.logging.scheduler) + not cluster.logging.scheduler.value + res := result.new( + "Control plane scheduler logging is not enabled.", + metadata.obj_by_path(cluster, ["logging", "scheduler"]), + ) } diff --git a/checks/cloud/aws/eks/enable_control_plane_logging_test.rego b/checks/cloud/aws/eks/enable_control_plane_logging_test.rego index fb3a7996..c0a71266 100644 --- a/checks/cloud/aws/eks/enable_control_plane_logging_test.rego +++ b/checks/cloud/aws/eks/enable_control_plane_logging_test.rego @@ -7,6 +7,7 @@ import data.lib.test test_allow_all_logging_enabled if { inp := {"aws": {"eks": {"clusters": [{"logging": { + "api": {"value": true}, "audit": {"value": true}, "authenticator": {"value": true}, "controllermanager": {"value": true}, @@ -24,11 +25,12 @@ test_deny_all_logging_disabled if { "scheduler": {"value": false}, }}]}}} - test.assert_count(check.deny, 4) with input as inp + test.assert_count(check.deny, 5) with input as inp } test_deny_one_logging_disabled if { inp := {"aws": {"eks": {"clusters": [{"logging": { + "api": {"value": true}, "audit": {"value": false}, "authenticator": {"value": true}, "controllermanager": {"value": true}, diff --git a/checks/cloud/aws/eks/encrypt_secrets.rego b/checks/cloud/aws/eks/encrypt_secrets.rego index ef4087f4..d2e0811c 100644 --- a/checks/cloud/aws/eks/encrypt_secrets.rego +++ 
b/checks/cloud/aws/eks/encrypt_secrets.rego @@ -33,15 +33,27 @@ package builtin.aws.eks.aws0039 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some cluster in input.aws.eks.clusters - cluster.encryption.secrets.value == false - res := result.new("Cluster does not have secret encryption enabled.", cluster.encryption.secrets) + not secret_encryption_enabled(cluster) + res := result.new( + "Cluster does not have secret encryption enabled.", + metadata.obj_by_path(cluster, ["encryption", "secrets"]), + ) } deny contains res if { some cluster in input.aws.eks.clusters - cluster.encryption.secrets.value == true - cluster.encryption.kmskeyid.value == "" - res := result.new("Cluster encryption requires a KMS key ID, which is missing", cluster.encryption.kmskeyid) + secret_encryption_enabled(cluster) + not has_cms(cluster) + res := result.new( + "Cluster encryption requires a KMS key ID, which is missing", + metadata.obj_by_path(cluster, ["encryption", "kmskeyid"]), + ) } + +secret_encryption_enabled(cluster) if cluster.encryption.secrets.value == true + +has_cms(cluster) if cluster.encryption.kmskeyid.value != "" diff --git a/checks/cloud/aws/elasticache/add_description_for_security_group.rego b/checks/cloud/aws/elasticache/add_description_for_security_group.rego index a09687f2..78135fdd 100644 --- a/checks/cloud/aws/elasticache/add_description_for_security_group.rego +++ b/checks/cloud/aws/elasticache/add_description_for_security_group.rego @@ -34,8 +34,15 @@ package builtin.aws.elasticache.aws0049 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some secgroup in input.aws.elasticache.securitygroups - secgroup.description.value == "" - res := result.new("Security group does not have a description.", secgroup.description) + not has_description(secgroup) + res := result.new( + "Security group does not have a description.", + metadata.obj_by_path(secgroup, ["description"]), + ) } + +has_description(sg) if sg.description.value != "" diff --git a/checks/cloud/aws/elasticache/enable_at_rest_encryption.rego b/checks/cloud/aws/elasticache/enable_at_rest_encryption.rego index e3d06bf1..bc126bd1 100644 --- a/checks/cloud/aws/elasticache/enable_at_rest_encryption.rego +++ b/checks/cloud/aws/elasticache/enable_at_rest_encryption.rego @@ -30,8 +30,13 @@ package builtin.aws.elasticache.aws0045 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some group in input.aws.elasticache.replicationgroups - group.atrestencryptionenabled.value == false - res := result.new("Replication group does not have at-rest encryption enabled.", group.atrestencryptionenabled) + not group.atrestencryptionenabled.value + res := result.new( + "Replication group does not have at-rest encryption enabled.", + metadata.obj_by_path(group, ["atrestencryptionenabled"]), + ) } diff --git a/checks/cloud/aws/elasticache/enable_in_transit_encryption.rego b/checks/cloud/aws/elasticache/enable_in_transit_encryption.rego index b1f2b010..b101761b 100644 --- a/checks/cloud/aws/elasticache/enable_in_transit_encryption.rego +++ b/checks/cloud/aws/elasticache/enable_in_transit_encryption.rego @@ -33,8 +33,13 @@ package builtin.aws.elasticache.aws0051 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some group in input.aws.elasticache.replicationgroups - group.transitencryptionenabled.value == false - res := result.new("Replication group does not have transit encryption enabled.", group.transitencryptionenabled) + not group.transitencryptionenabled.value + res := 
result.new( + "Replication group does not have transit encryption enabled.", + metadata.obj_by_path(group, ["transitencryptionenabled"]), + ) } diff --git a/checks/cloud/aws/elasticsearch/enable_domain_encryption.rego b/checks/cloud/aws/elasticsearch/enable_domain_encryption.rego index 2661878f..777dd3b9 100644 --- a/checks/cloud/aws/elasticsearch/enable_domain_encryption.rego +++ b/checks/cloud/aws/elasticsearch/enable_domain_encryption.rego @@ -33,11 +33,13 @@ package builtin.aws.elasticsearch.aws0048 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some domain in input.aws.elasticsearch.domains - domain.atrestencryption.enabled.value == false + not domain.atrestencryption.enabled.value res := result.new( "Domain does not have at-rest encryption enabled.", - domain.atrestencryption.enabled, + metadata.obj_by_path(domain, ["atrestencryption", "enabled"]), ) } diff --git a/checks/cloud/aws/elasticsearch/enable_domain_logging.rego b/checks/cloud/aws/elasticsearch/enable_domain_logging.rego index 7a6387a9..0c25438a 100644 --- a/checks/cloud/aws/elasticsearch/enable_domain_logging.rego +++ b/checks/cloud/aws/elasticsearch/enable_domain_logging.rego @@ -36,8 +36,13 @@ package builtin.aws.elasticsearch.aws0042 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some domain in input.aws.elasticsearch.domains - domain.logpublishing.auditenabled.value == false - res := result.new("Domain audit logging is not enabled.", domain.logpublishing.auditenabled) + not domain.logpublishing.auditenabled.value + res := result.new( + "Domain audit logging is not enabled.", + metadata.obj_by_path(domain, ["logpublishing", "auditenabled"]), + ) } diff --git a/checks/cloud/aws/elasticsearch/enable_in_transit_encryption.rego b/checks/cloud/aws/elasticsearch/enable_in_transit_encryption.rego index 9cdb8011..4041013f 100644 --- a/checks/cloud/aws/elasticsearch/enable_in_transit_encryption.rego +++ b/checks/cloud/aws/elasticsearch/enable_in_transit_encryption.rego @@ -33,8 +33,13 @@ package builtin.aws.elasticsearch.aws0043 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some domain in input.aws.elasticsearch.domains - domain.transitencryption.enabled.value == false - res := result.new("Domain does not have in-transit encryption enabled.", domain.transitencryption.enabled) + not domain.transitencryption.enabled.value + res := result.new( + "Domain does not have in-transit encryption enabled.", + metadata.obj_by_path(domain, ["transitencryption", "enabled"]), + ) } diff --git a/checks/cloud/aws/elasticsearch/enforce_https.rego b/checks/cloud/aws/elasticsearch/enforce_https.rego index 33c84d80..299c0fae 100644 --- a/checks/cloud/aws/elasticsearch/enforce_https.rego +++ b/checks/cloud/aws/elasticsearch/enforce_https.rego @@ -34,8 +34,13 @@ package builtin.aws.elasticsearch.aws0046 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some domain in input.aws.elasticsearch.domains - domain.endpoint.enforcehttps.value == false - res := result.new("Domain does not enforce HTTPS.", domain.endpoint.enforcehttps) + not domain.endpoint.enforcehttps.value + res := result.new( + "Domain does not enforce HTTPS.", + metadata.obj_by_path(domain, ["endpoint", "enforcehttps"]), + ) } diff --git a/checks/cloud/aws/elasticsearch/use_secure_tls_policy.rego b/checks/cloud/aws/elasticsearch/use_secure_tls_policy.rego index 6a299378..c42ef8c1 100644 --- a/checks/cloud/aws/elasticsearch/use_secure_tls_policy.rego +++ 
b/checks/cloud/aws/elasticsearch/use_secure_tls_policy.rego @@ -33,8 +33,15 @@ package builtin.aws.elasticsearch.aws0126 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some domain in input.aws.elasticsearch.domains - domain.endpoint.tlspolicy.value != "Policy-Min-TLS-1-2-2019-07" - res := result.new("Domain does not have a secure TLS policy.", domain.endpoint.tlspolicy) + not is_tls_policy_secure(domain) + res := result.new( + "Domain does not have a secure TLS policy.", + metadata.obj_by_path(domain, ["endpoint", "tlspolicy"]), + ) } + +is_tls_policy_secure(domain) if domain.endpoint.tlspolicy.value == "Policy-Min-TLS-1-2-2019-07" diff --git a/checks/cloud/aws/elb/alb_not_public.rego b/checks/cloud/aws/elb/alb_not_public.rego index 75f35edb..b64eb670 100644 --- a/checks/cloud/aws/elb/alb_not_public.rego +++ b/checks/cloud/aws/elb/alb_not_public.rego @@ -28,10 +28,17 @@ package builtin.aws.elb.aws0053 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some lb in input.aws.elb.loadbalancers - lb.type.value != "gateway" - lb.internal.value == false + not is_gateway(lb) + not lb.internal.value - res := result.new("Load balancer is exposed publicly.", lb.internal) + res := result.new( + "Load balancer is exposed publicly.", + metadata.obj_by_path(lb, ["internal"]), + ) } + +is_gateway(lb) if lb.type.value == "gateway" diff --git a/checks/cloud/aws/elb/drop_invalid_headers.rego b/checks/cloud/aws/elb/drop_invalid_headers.rego index 0939307a..988c67d2 100644 --- a/checks/cloud/aws/elb/drop_invalid_headers.rego +++ b/checks/cloud/aws/elb/drop_invalid_headers.rego @@ -31,9 +31,14 @@ package builtin.aws.elb.aws0052 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some lb in input.aws.elb.loadbalancers lb.type.value == "application" - lb.dropinvalidheaderfields.value == false - res := result.new("Application load balancer is not set to drop invalid headers.", lb.dropinvalidheaderfields) + not lb.dropinvalidheaderfields.value + res := result.new( + "Application load balancer is not set to drop invalid headers.", + metadata.obj_by_path(lb, ["dropinvalidheaderfields"]), + ) } diff --git a/checks/cloud/aws/msk/enable_at_rest_encryption.rego b/checks/cloud/aws/msk/enable_at_rest_encryption.rego index 7dc27a11..ee54f557 100644 --- a/checks/cloud/aws/msk/enable_at_rest_encryption.rego +++ b/checks/cloud/aws/msk/enable_at_rest_encryption.rego @@ -33,11 +33,13 @@ package builtin.aws.msk.aws0179 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some cluster in input.aws.msk.clusters not cluster.encryptionatrest.enabled.value res := result.new( "The cluster is not encrypted at rest.", - object.get(cluster, ["encryptionatrest", "enabled"], cluster), + metadata.obj_by_path(cluster, ["encryptionatrest", "enabled"]), ) } diff --git a/checks/cloud/aws/neptune/enable_log_export.rego b/checks/cloud/aws/neptune/enable_log_export.rego index acf6a71b..8a2754ef 100644 --- a/checks/cloud/aws/neptune/enable_log_export.rego +++ b/checks/cloud/aws/neptune/enable_log_export.rego @@ -33,11 +33,13 @@ package builtin.aws.neptune.aws0075 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some cluster in input.aws.neptune.clusters not cluster.logging.audit.value res := result.new( "Cluster does not have audit logging enabled.", - object.get(cluster.logging, "audit", cluster.logging), + metadata.obj_by_path(cluster, ["logging", "audit"]), ) } diff --git a/checks/cloud/aws/rds/enable_performance_insights.rego 
b/checks/cloud/aws/rds/enable_performance_insights.rego index f6c9569e..99f83923 100644 --- a/checks/cloud/aws/rds/enable_performance_insights.rego +++ b/checks/cloud/aws/rds/enable_performance_insights.rego @@ -35,6 +35,8 @@ package builtin.aws.rds.aws0133 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some cluster in input.aws.rds.clusters some instance in cluster.instances @@ -42,7 +44,7 @@ deny contains res if { not instance.instance.performanceinsights.enabled.value res := result.new( "Instance does not have performance insights enabled.", - object.get(instance.instance.performanceinsights, "enabled", instance.instance.performanceinsights), + metadata.obj_by_path(instance.instance, ["performanceinsights", "enabled"]), ) } @@ -52,6 +54,6 @@ deny contains res if { not instance.performanceinsights.enabled.value res := result.new( "Instance does not have performance insights enabled.", - object.get(instance.performanceinsights, "enabled", instance.performanceinsights), + metadata.obj_by_path(instance, ["performanceinsights", "enabled"]), ) } diff --git a/checks/cloud/aws/rds/encrypt_cluster_storage_data.rego b/checks/cloud/aws/rds/encrypt_cluster_storage_data.rego index 02261ab1..dd4e51a7 100644 --- a/checks/cloud/aws/rds/encrypt_cluster_storage_data.rego +++ b/checks/cloud/aws/rds/encrypt_cluster_storage_data.rego @@ -34,13 +34,15 @@ package builtin.aws.rds.aws0079 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some cluster in input.aws.rds.clusters isManaged(cluster) not encryption_enabled(cluster) res := result.new( "Cluster does not have storage encryption enabled.", - object.get(cluster.encryption, "encryptstorage", cluster.encryption), + metadata.obj_by_path(cluster, ["encryption", "encryptstorage"]), ) } @@ -51,7 +53,7 @@ deny contains res if { not has_kms_key(cluster) res := result.new( "Cluster does not specify a customer managed key for storage encryption.", - object.get(cluster.encryption, "kmskeyid", cluster.encryption), + metadata.obj_by_path(cluster, ["encryption", "kmskeyid"]), ) } diff --git a/checks/cloud/aws/rds/encrypt_instance_storage_data.rego b/checks/cloud/aws/rds/encrypt_instance_storage_data.rego index e167b79d..196202e1 100644 --- a/checks/cloud/aws/rds/encrypt_instance_storage_data.rego +++ b/checks/cloud/aws/rds/encrypt_instance_storage_data.rego @@ -34,13 +34,15 @@ package builtin.aws.rds.aws0080 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some instance in input.aws.rds.instances not has_replication_source_arn(instance) not instance.encryption.encryptstorage.value res := result.new( "Instance does not have storage encryption enabled.", - object.get(instance.encryption, "encryptstorage", instance.encryption), + metadata.obj_by_path(instance, ["encryption", "encryptstorage"]), ) } diff --git a/checks/cloud/aws/s3/block_public_acls.rego b/checks/cloud/aws/s3/block_public_acls.rego index cd56524f..b1065f19 100644 --- a/checks/cloud/aws/s3/block_public_acls.rego +++ b/checks/cloud/aws/s3/block_public_acls.rego @@ -33,6 +33,8 @@ package builtin.aws.s3.aws0086 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some bucket in input.aws.s3.buckets not bucket.publicaccessblock @@ -48,10 +50,6 @@ deny contains res if { not bucket.publicaccessblock.blockpublicacls.value res := result.new( "Public access block does not block public ACLs", - object.get( - bucket.publicaccessblock, - "blockpublicacls", - bucket.publicaccessblock, - ), + metadata.obj_by_path(bucket, 
["publicaccessblock", "blockpublicacls"]), ) } diff --git a/checks/cloud/aws/s3/block_public_policy.rego b/checks/cloud/aws/s3/block_public_policy.rego index ae394ec2..66e94416 100644 --- a/checks/cloud/aws/s3/block_public_policy.rego +++ b/checks/cloud/aws/s3/block_public_policy.rego @@ -33,6 +33,8 @@ package builtin.aws.s3.aws0087 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some bucket in input.aws.s3.buckets not bucket.publicaccessblock @@ -48,10 +50,6 @@ deny contains res if { not bucket.publicaccessblock.blockpublicpolicy.value res := result.new( "Public access block does not block public policies", - object.get( - bucket.publicaccessblock, - "blockpublicpolicy", - bucket.publicaccessblock, - ), + metadata.obj_by_path(bucket, ["publicaccessblock", "blockpublicpolicy"]), ) } diff --git a/checks/cloud/aws/s3/enable_bucket_encryption.rego b/checks/cloud/aws/s3/enable_bucket_encryption.rego index 62331050..10c264f7 100644 --- a/checks/cloud/aws/s3/enable_bucket_encryption.rego +++ b/checks/cloud/aws/s3/enable_bucket_encryption.rego @@ -33,11 +33,13 @@ package builtin.aws.s3.aws0088 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some bucket in input.aws.s3.buckets not bucket.encryption.enabled.value res := result.new( "Bucket does not have encryption enabled", - object.get(bucket, ["encryption", "enabled"], bucket), + metadata.obj_by_path(bucket, ["encryption", "enabled"]), ) } diff --git a/checks/cloud/aws/s3/enable_logging.rego b/checks/cloud/aws/s3/enable_logging.rego index 4b1ba430..4027b830 100644 --- a/checks/cloud/aws/s3/enable_logging.rego +++ b/checks/cloud/aws/s3/enable_logging.rego @@ -31,16 +31,17 @@ # good_examples: checks/cloud/aws/s3/enable_bucket_logging.cf.go package builtin.aws.s3.aws0089 -import future.keywords.if -import future.keywords.in +import rego.v1 -deny[res] { +import data.lib.cloud.metadata + +deny contains res if { bucket := input.aws.s3.buckets[_] not bucket_has_server_logging_access(bucket) not is_logging_enabled(bucket) res := result.new( "Bucket has logging disabled", - object.get(bucket, ["logging", "enabled"], bucket), + metadata.obj_by_path(bucket, ["logging", "enabled"]), ) } diff --git a/checks/cloud/aws/s3/enable_versioning.rego b/checks/cloud/aws/s3/enable_versioning.rego index 56653ded..cbd78948 100644 --- a/checks/cloud/aws/s3/enable_versioning.rego +++ b/checks/cloud/aws/s3/enable_versioning.rego @@ -37,11 +37,13 @@ package builtin.aws.s3.aws0090 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some bucket in input.aws.s3.buckets not bucket.versioning.enabled.value res := result.new( "Bucket does not have versioning enabled", - object.get(bucket, ["versioning", "enabled"], bucket), + metadata.obj_by_path(bucket, ["versioning", "enabled"]), ) } diff --git a/checks/cloud/aws/s3/ignore_public_acls.rego b/checks/cloud/aws/s3/ignore_public_acls.rego index 55cfa87c..e24da3aa 100644 --- a/checks/cloud/aws/s3/ignore_public_acls.rego +++ b/checks/cloud/aws/s3/ignore_public_acls.rego @@ -33,6 +33,8 @@ package builtin.aws.s3.aws0091 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some bucket in input.aws.s3.buckets not bucket.publicaccessblock @@ -48,10 +50,6 @@ deny contains res if { not bucket.publicaccessblock.ignorepublicacls.value res := result.new( "Public access block does not ignore public ACLs", - object.get( - bucket.publicaccessblock, - "ignorepublicacls", - bucket.publicaccessblock, - ), + metadata.obj_by_path(bucket, ["publicaccessblock", 
"ignorepublicacls"]), ) } diff --git a/checks/cloud/aws/s3/no_public_buckets.rego b/checks/cloud/aws/s3/no_public_buckets.rego index c6dd3e9e..a2b948ac 100644 --- a/checks/cloud/aws/s3/no_public_buckets.rego +++ b/checks/cloud/aws/s3/no_public_buckets.rego @@ -33,6 +33,8 @@ package builtin.aws.s3.aws0093 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some bucket in input.aws.s3.buckets not bucket.publicaccessblock @@ -48,10 +50,6 @@ deny contains res if { not bucket.publicaccessblock.restrictpublicbuckets.value res := result.new( "Public access block does not restrict public buckets", - object.get( - bucket.publicaccessblock, - "restrictpublicbuckets", - bucket.publicaccessblock, - ), + metadata.obj_by_path(bucket, ["publicaccessblock", "restrictpublicbuckets"]), ) } diff --git a/checks/cloud/aws/s3/require_mfa_delete.rego b/checks/cloud/aws/s3/require_mfa_delete.rego index ac0da411..eb0af5e7 100644 --- a/checks/cloud/aws/s3/require_mfa_delete.rego +++ b/checks/cloud/aws/s3/require_mfa_delete.rego @@ -33,12 +33,14 @@ package builtin.aws.s3.aws0170 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some bucket in input.aws.s3.buckets isManaged(bucket.versioning.mfadelete) not bucket.versioning.mfadelete.value res := result.new( "Bucket does not have MFA deletion protection enabled", - object.get(bucket, ["versioning", "mfadelete"], bucket), + metadata.obj_by_path(bucket, ["versioning", "mfadelete"]), ) } diff --git a/checks/cloud/azure/appservice/account_identity_registered.rego b/checks/cloud/azure/appservice/account_identity_registered.rego index 9bcd9634..b01c6ada 100644 --- a/checks/cloud/azure/appservice/account_identity_registered.rego +++ b/checks/cloud/azure/appservice/account_identity_registered.rego @@ -28,13 +28,15 @@ package builtin.azure.appservice.azure0002 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some service in input.azure.appservice.services isManaged(service) not has_identity_type(service) res := result.new( "App service does not have an identity type.", - object.get(service, ["identity", "type"], service), + metadata.obj_by_path(service, ["identity", "type"]), ) } diff --git a/checks/cloud/azure/appservice/authentication_enabled.rego b/checks/cloud/azure/appservice/authentication_enabled.rego index 7241a63d..66d7471f 100644 --- a/checks/cloud/azure/appservice/authentication_enabled.rego +++ b/checks/cloud/azure/appservice/authentication_enabled.rego @@ -28,12 +28,14 @@ package builtin.azure.appservice.azure0003 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some service in input.azure.appservice.services isManaged(service) not service.authentication.enabled.value res := result.new( "App service does not have authentication enabled.", - object.get(service, ["authentication", "enabled"], service), + metadata.obj_by_path(service, ["authentication", "enabled"]), ) } diff --git a/checks/cloud/azure/appservice/enable_http2.rego b/checks/cloud/azure/appservice/enable_http2.rego index ee0c91c8..95a312f3 100644 --- a/checks/cloud/azure/appservice/enable_http2.rego +++ b/checks/cloud/azure/appservice/enable_http2.rego @@ -28,12 +28,14 @@ package builtin.azure.appservice.azure0005 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some service in input.azure.appservice.services isManaged(service) not service.site.enablehttp2.value res := result.new( "App service does not have HTTP/2 enabled.", - object.get(service, ["site", "enablehttp2"], service), + 
metadata.obj_by_path(service, ["site", "enablehttp2"]), ) } diff --git a/checks/cloud/azure/appservice/use_secure_tls_policy.rego b/checks/cloud/azure/appservice/use_secure_tls_policy.rego index f76469ad..94a63495 100644 --- a/checks/cloud/azure/appservice/use_secure_tls_policy.rego +++ b/checks/cloud/azure/appservice/use_secure_tls_policy.rego @@ -28,6 +28,8 @@ package builtin.azure.appservice.azure0006 import rego.v1 +import data.lib.cloud.metadata + recommended_tls_version := "1.2" deny contains res if { @@ -36,7 +38,7 @@ deny contains res if { not is_recommended_tls_version(service) res := result.new( "App service does not require a secure TLS version.", - object.get(service, ["site", "minimumtlsversion"], service), + metadata.obj_by_path(service, ["site", "minimumtlsversion"]), ) } diff --git a/checks/cloud/azure/compute/disable_password_authentication.rego b/checks/cloud/azure/compute/disable_password_authentication.rego index 507acfdb..6dc9d9d8 100644 --- a/checks/cloud/azure/compute/disable_password_authentication.rego +++ b/checks/cloud/azure/compute/disable_password_authentication.rego @@ -29,12 +29,14 @@ package builtin.azure.compute.azure0039 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some vm in input.azure.compute.linuxvirtualmachines isManaged(vm) not vm.osprofilelinuxconfig.disablepasswordauthentication.value res := result.new( "Linux virtual machine allows password authentication.", - object.get(vm, ["osprofilelinuxconfig", "disablepasswordauthentication"], vm), + metadata.obj_by_path(vm, ["osprofilelinuxconfig", "disablepasswordauthentication"]), ) } diff --git a/checks/cloud/azure/compute/enable_disk_encryption.rego b/checks/cloud/azure/compute/enable_disk_encryption.rego index f1670d9d..058466f3 100644 --- a/checks/cloud/azure/compute/enable_disk_encryption.rego +++ b/checks/cloud/azure/compute/enable_disk_encryption.rego @@ -30,12 +30,14 @@ package builtin.azure.compute.azure0038 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some disk in input.azure.compute.manageddisks isManaged(disk) not disk.encryption.enabled.value res := result.new( "Managed disk is not encrypted.", - object.get(disk, ["encryption", "enabled"], disk), + metadata.obj_by_path(disk, ["encryption", "enabled"]), ) } diff --git a/checks/cloud/azure/container/configured_network_policy.rego b/checks/cloud/azure/container/configured_network_policy.rego index a96386bb..da371e3e 100644 --- a/checks/cloud/azure/container/configured_network_policy.rego +++ b/checks/cloud/azure/container/configured_network_policy.rego @@ -30,12 +30,14 @@ package builtin.azure.container.azure0043 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some cluster in input.azure.container.kubernetesclusters not has_network_policy(cluster) res := result.new( "Kubernetes cluster does not have a network policy set.", - object.get(cluster, ["networkprofile", "networkpolicy"], cluster), + metadata.obj_by_path(cluster, ["networkprofile", "networkpolicy"]), ) } diff --git a/checks/cloud/azure/container/logging.rego b/checks/cloud/azure/container/logging.rego index 3d99a6be..62682ae8 100644 --- a/checks/cloud/azure/container/logging.rego +++ b/checks/cloud/azure/container/logging.rego @@ -30,12 +30,14 @@ package builtin.azure.container.azure0040 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some cluster in input.azure.container.kubernetesclusters isManaged(cluster) not cluster.addonprofile.omsagent.enabled.value res := result.new( "Cluster 
does not have logging enabled via OMS Agent.", - object.get(cluster, ["addonprofile", "omsagent", "enabled"], cluster), + metadata.obj_by_path(cluster, ["addonprofile", "omsagent", "enabled"]), ) } diff --git a/checks/cloud/azure/container/use_rbac_permissions.rego b/checks/cloud/azure/container/use_rbac_permissions.rego index 0e7b4dbd..fd4f2df2 100644 --- a/checks/cloud/azure/container/use_rbac_permissions.rego +++ b/checks/cloud/azure/container/use_rbac_permissions.rego @@ -30,12 +30,14 @@ package builtin.azure.container.azure0042 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some cluster in input.azure.container.kubernetesclusters isManaged(cluster) not cluster.rolebasedaccesscontrol.enabled.value res := result.new( "RBAC is not enabled on cluster", - object.get(cluster, ["rolebasedaccesscontrol", "enabled"], cluster), + metadata.obj_by_path(cluster, ["rolebasedaccesscontrol", "enabled"]), ) } diff --git a/checks/cloud/azure/database/no_public_access.rego b/checks/cloud/azure/database/no_public_access.rego index 339d4b97..227b392b 100644 --- a/checks/cloud/azure/database/no_public_access.rego +++ b/checks/cloud/azure/database/no_public_access.rego @@ -31,13 +31,14 @@ package builtin.azure.database.azure0022 import rego.v1 import data.lib.azure.database +import data.lib.cloud.metadata deny contains res if { some server in database.all_servers is_public_access_enabled(server) res := result.new( "Database server does not have public access enabled.", - object.get(server, "enablepublicnetworkaccess", server), + metadata.obj_by_path(server, "enablepublicnetworkaccess"), ) } diff --git a/checks/cloud/azure/database/postgres_configuration_connection_throttling.rego b/checks/cloud/azure/database/postgres_configuration_connection_throttling.rego index 1286b413..a4e70773 100644 --- a/checks/cloud/azure/database/postgres_configuration_connection_throttling.rego +++ b/checks/cloud/azure/database/postgres_configuration_connection_throttling.rego @@ -30,6 +30,8 @@ package builtin.azure.database.azure0021 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some server in input.azure.database.postgresqlservers isManaged(server) @@ -37,6 +39,6 @@ deny contains res if { not server.config.connectionthrottling.value res := result.new( "Database server is not configured to throttle connections.", - object.get(server, ["config", "connectionthrottling"], server), + metadata.obj_by_path(server, ["config", "connectionthrottling"]), ) } diff --git a/checks/cloud/azure/database/postgres_configuration_log_checkpoints.rego b/checks/cloud/azure/database/postgres_configuration_log_checkpoints.rego index d409f4c4..c1170d5b 100644 --- a/checks/cloud/azure/database/postgres_configuration_log_checkpoints.rego +++ b/checks/cloud/azure/database/postgres_configuration_log_checkpoints.rego @@ -30,12 +30,14 @@ package builtin.azure.database.azure0024 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some server in input.azure.database.postgresqlservers isManaged(server) not server.config.logcheckpoints.value res := result.new( "Database server is not configured to log checkpoints.", - object.get(server, ["config", "logcheckpoints"], server), + metadata.obj_by_path(server, ["config", "logcheckpoints"]), ) } diff --git a/checks/cloud/azure/database/postgres_configuration_log_connections.rego b/checks/cloud/azure/database/postgres_configuration_log_connections.rego index 10993d76..f35790e4 100644 --- 
a/checks/cloud/azure/database/postgres_configuration_log_connections.rego +++ b/checks/cloud/azure/database/postgres_configuration_log_connections.rego @@ -31,12 +31,14 @@ package builtin.azure.database.azure0019 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some server in input.azure.database.postgresqlservers isManaged(server) not server.config.logconnections.value res := result.new( "Database server is not configured to log connections.", - object.get(server, ["config", "logconnections"], server), + metadata.obj_by_path(server, ["config", "logconnections"]), ) } diff --git a/checks/cloud/azure/database/secure_tls_policy.rego b/checks/cloud/azure/database/secure_tls_policy.rego index 6ce43696..a9d2c51f 100644 --- a/checks/cloud/azure/database/secure_tls_policy.rego +++ b/checks/cloud/azure/database/secure_tls_policy.rego @@ -31,6 +31,7 @@ package builtin.azure.database.azure0026 import rego.v1 import data.lib.azure.database +import data.lib.cloud.metadata recommended_tls_version := "TLS1_2" @@ -41,7 +42,7 @@ deny contains res if { not is_recommended_tls(server) res := result.new( "Database server does not require a secure TLS version.", - object.get(server, "minimumtlsversion", server), + metadata.obj_by_path(server, "minimumtlsversion"), ) } @@ -50,7 +51,7 @@ deny contains res if { not is_recommended_mssql_tls(server) res := result.new( "Database server does not require a secure TLS version.", - object.get(server, "minimumtlsversion", server), + metadata.obj_by_path(server, "minimumtlsversion"), ) } diff --git a/checks/cloud/azure/keyvault/specify_network_acl.rego b/checks/cloud/azure/keyvault/specify_network_acl.rego index ce4b35c3..f4d73b64 100644 --- a/checks/cloud/azure/keyvault/specify_network_acl.rego +++ b/checks/cloud/azure/keyvault/specify_network_acl.rego @@ -32,13 +32,15 @@ package builtin.azure.keyvault.azure0013 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some vault in input.azure.keyvault.vaults isManaged(vault) not block_access_by_default(vault) res := result.new( "Vault network ACL does not block access by default.", - object.get(vault, ["networkacls", "defaultaction"], vault), + metadata.obj_by_path(vault, ["networkacls", "defaultaction"]), ) } diff --git a/checks/cloud/azure/monitor/activity_log_retention_set.rego b/checks/cloud/azure/monitor/activity_log_retention_set.rego index 6b45478e..ca4391cd 100644 --- a/checks/cloud/azure/monitor/activity_log_retention_set.rego +++ b/checks/cloud/azure/monitor/activity_log_retention_set.rego @@ -30,13 +30,15 @@ package builtin.azure.monitor.azure0031 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some profile in input.azure.monitor.logprofiles isManaged(profile) not profile.retentionpolicy.enabled.value res := result.new( "Profile does not enable the log retention policy.", - object.get(profile, ["retentionpolicy", "enabled"], profile), + metadata.obj_by_path(profile, ["retentionpolicy", "enabled"]), ) } @@ -47,7 +49,7 @@ deny contains res if { not is_recommended_retention_policy(profile) res := result.new( "Profile has a log retention policy of less than 1 year.", - object.get(profile, ["retentionpolicy", "days"], profile), + metadata.obj_by_path(profile, ["retentionpolicy", "days"]), ) } diff --git a/checks/cloud/azure/network/retention_policy_set.rego b/checks/cloud/azure/network/retention_policy_set.rego index 6d1658e7..3c81d871 100644 --- a/checks/cloud/azure/network/retention_policy_set.rego +++ 
b/checks/cloud/azure/network/retention_policy_set.rego @@ -34,6 +34,8 @@ package builtin.azure.network.azure0049 import rego.v1 +import data.lib.cloud.metadata + flowlogs := input.azure.network.networkwatcherflowlogs deny contains res if { @@ -43,7 +45,7 @@ deny contains res if { not flowlog.retentionpolicy.enabled.value res := result.new( "Flow log does not enable the log retention policy.", - object.get(flowlog, ["retentionpolicy", "enabled"], flowlog), + metadata.obj_by_path(flowlog, ["retentionpolicy", "enabled"]), ) } @@ -55,6 +57,6 @@ deny contains res if { flowlog.retentionpolicy.days.value < 90 res := result.new( "Flow log has a log retention policy of less than 90 days.", - object.get(flowlog, ["retentionpolicy", "days"], flowlog), + metadata.obj_by_path(flowlog, ["retentionpolicy", "days"]), ) } diff --git a/checks/cloud/azure/storage/queue_services_logging_enabled.rego b/checks/cloud/azure/storage/queue_services_logging_enabled.rego index 98c6b7ed..57dc2749 100644 --- a/checks/cloud/azure/storage/queue_services_logging_enabled.rego +++ b/checks/cloud/azure/storage/queue_services_logging_enabled.rego @@ -32,6 +32,8 @@ package builtin.azure.storage.azure0009 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some account in input.azure.storage.accounts isManaged(account) @@ -39,6 +41,6 @@ deny contains res if { not account.queueproperties.enablelogging.value res := result.new( "Queue services storage account does not have logging enabled.", - object.get(account, ["queueproperties", "enablelogging"], account), + metadata.obj_by_path(account, ["queueproperties", "enablelogging"]), ) } diff --git a/checks/cloud/google/compute/disk_encryption_customer_key.rego b/checks/cloud/google/compute/disk_encryption_customer_key.rego index 82ef98b1..e512fa67 100644 --- a/checks/cloud/google/compute/disk_encryption_customer_key.rego +++ b/checks/cloud/google/compute/disk_encryption_customer_key.rego @@ -28,12 +28,14 @@ package builtin.google.compute.google0034 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some disk in input.google.compute.disks not is_disk_encrypted(disk) res := result.new( "Disk is not encrypted with a customer managed key.", - object.get(disk, ["encryption", "kmskeylink"], disk), + metadata.obj_by_path(disk, ["encryption", "kmskeylink"]), ) } diff --git a/checks/cloud/google/compute/vm_disk_encryption_customer_key.rego b/checks/cloud/google/compute/vm_disk_encryption_customer_key.rego index 75a84b27..53678b83 100644 --- a/checks/cloud/google/compute/vm_disk_encryption_customer_key.rego +++ b/checks/cloud/google/compute/vm_disk_encryption_customer_key.rego @@ -28,6 +28,8 @@ package builtin.google.compute.google0033 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some instance in input.google.compute.instances disks := array.concat( @@ -40,7 +42,7 @@ deny contains res if { not disk_is_encrypted(disk) res := result.new( "Instance disk encryption does not use a customer managed key.", - object.get(disk, ["encryption", "kmskeylink"], disk), + metadata.obj_by_path(disk, ["encryption", "kmskeylink"]), ) } diff --git a/checks/cloud/google/gke/enable_network_policy.rego b/checks/cloud/google/gke/enable_network_policy.rego index 113d6f24..84a4d981 100644 --- a/checks/cloud/google/gke/enable_network_policy.rego +++ b/checks/cloud/google/gke/enable_network_policy.rego @@ -28,6 +28,8 @@ package builtin.google.gke.google0056 import rego.v1 +import data.lib.cloud.metadata + deny contains res if { some cluster in 
input.google.gke.clusters isManaged(cluster) @@ -36,7 +38,7 @@ deny contains res if { not dataplane_v2_enabled(cluster) res := result.new( "Cluster does not have a network policy enabled.", - object.get(cluster.networkpolicy, "enabled", cluster.networkpolicy), + metadata.obj_by_path(cluster, ["networkpolicy", "enabled"]), ) } From 595f1b7794caf029120417c7a641a00c298ec6f3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 16 Oct 2024 00:15:19 +0000 Subject: [PATCH 15/17] chore(deps): bump mvdan.cc/sh/v3 in the common group across 1 directory Bumps the common group with 1 update in the / directory: [mvdan.cc/sh/v3](https://github.com/mvdan/sh). Updates `mvdan.cc/sh/v3` from 3.8.0 to 3.9.0 - [Release notes](https://github.com/mvdan/sh/releases) - [Changelog](https://github.com/mvdan/sh/blob/master/CHANGELOG.md) - [Commits](https://github.com/mvdan/sh/compare/v3.8.0...v3.9.0) --- updated-dependencies: - dependency-name: mvdan.cc/sh/v3 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: common ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f8f869ae..b1fdae8f 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/samber/lo v1.47.0 github.com/stretchr/testify v1.9.0 gopkg.in/yaml.v3 v3.0.1 - mvdan.cc/sh/v3 v3.8.0 + mvdan.cc/sh/v3 v3.9.0 ) require ( diff --git a/go.sum b/go.sum index 3e33a217..9c3aa37c 100644 --- a/go.sum +++ b/go.sum @@ -139,6 +139,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= +github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= @@ -551,8 +553,8 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -mvdan.cc/sh/v3 v3.8.0 h1:ZxuJipLZwr/HLbASonmXtcvvC9HXY9d2lXZHnKGjFc8= -mvdan.cc/sh/v3 v3.8.0/go.mod h1:w04623xkgBVo7/IUK89E0g8hBykgEpN0vgOj3RJr6MY= +mvdan.cc/sh/v3 v3.9.0 h1:it14fyjCdQUk4jf/aYxLO3FG8jFarR9GzMCtnlvvD7c= +mvdan.cc/sh/v3 v3.9.0/go.mod h1:cdBk8bgoiBI7lSZqK5JhUuq7OB64VQ7fgm85xelw3Nk= oras.land/oras-go/v2 v2.3.1 h1:lUC6q8RkeRReANEERLfH86iwGn55lbSWP20egdFHVec= oras.land/oras-go/v2 v2.3.1/go.mod h1:5AQXVEu1X/FKp1F9DMOb5ZItZBOa0y5dha0yCm4NR9c= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= From a9e124b2b9c27bdd7a56940d1b24b1af7f1a226e Mon Sep 17 00:00:00 2001 From: Nikita Pivkin Date: Tue, 15 Oct 2024 17:29:23 +0600 Subject: [PATCH 16/17] downgrade KSV117 severity from High to Medium Signed-off-by: Nikita Pivkin --- 
avd_docs/kubernetes/general/AVD-KSV-0117/docs.md | 2 ++ .../kubernetes/pss/baseline/12_privileged_ports_binding.rego | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/avd_docs/kubernetes/general/AVD-KSV-0117/docs.md b/avd_docs/kubernetes/general/AVD-KSV-0117/docs.md index a563b5ba..bdb8da60 100644 --- a/avd_docs/kubernetes/general/AVD-KSV-0117/docs.md +++ b/avd_docs/kubernetes/general/AVD-KSV-0117/docs.md @@ -10,4 +10,6 @@ The ports which are lower than 1024 receive and transmit various sensitive and p ### Links - https://kubernetes.io/docs/concepts/security/pod-security-standards/ +- https://www.stigviewer.com/stig/kubernetes/2022-12-02/finding/V-242414 + diff --git a/checks/kubernetes/pss/baseline/12_privileged_ports_binding.rego b/checks/kubernetes/pss/baseline/12_privileged_ports_binding.rego index 43f6b41b..9ee098c9 100644 --- a/checks/kubernetes/pss/baseline/12_privileged_ports_binding.rego +++ b/checks/kubernetes/pss/baseline/12_privileged_ports_binding.rego @@ -6,10 +6,11 @@ # - input: schema["kubernetes"] # related_resources: # - https://kubernetes.io/docs/concepts/security/pod-security-standards/ +# - https://www.stigviewer.com/stig/kubernetes/2022-12-02/finding/V-242414 # custom: # id: KSV117 # avd_id: AVD-KSV-0117 -# severity: HIGH +# severity: MEDIUM # short_code: no-privilege-port-binding # recommended_action: "Do not map the container ports to privileged host ports when starting a container." # input: From e9a668ec339b4159a13619f0e0ffef4ccb1cfc48 Mon Sep 17 00:00:00 2001 From: Nikita Pivkin Date: Wed, 16 Oct 2024 12:36:14 +0600 Subject: [PATCH 17/17] refactor: remove references to __defsec_metadata Signed-off-by: Nikita Pivkin --- .../config/aggregate_all_regions_test.rego | 10 ++---- ...dd_description_to_security_group_test.rego | 15 ++------- .../ec2/enable_volume_encryption_test.rego | 5 +-- .../aws/ec2/encryption_customer_key_test.rego | 5 +-- .../aws/sqs/enable_queue_encryption_test.rego | 33 +++++++------------ .../sqs/queue_encryption_with_cmk_test.rego | 2 -- .../compute/project_level_oslogin_test.rego | 10 ------ .../bucket_encryption_customer_key.rego | 2 +- .../bucket_encryption_customer_key_test.rego | 10 ++---- checks/cloud/google/storage/enable_ubla.rego | 2 +- .../google/storage/enable_ubla_test.rego | 10 ++---- .../google/storage/no_public_access.rego | 4 +-- .../google/storage/no_public_access_test.rego | 15 ++------- 13 files changed, 30 insertions(+), 93 deletions(-) diff --git a/checks/cloud/aws/config/aggregate_all_regions_test.rego b/checks/cloud/aws/config/aggregate_all_regions_test.rego index 854ec618..3d592826 100644 --- a/checks/cloud/aws/config/aggregate_all_regions_test.rego +++ b/checks/cloud/aws/config/aggregate_all_regions_test.rego @@ -6,15 +6,9 @@ import data.builtin.aws.config.aws0019 as check import data.lib.test test_allow_all_regions if { - test.assert_empty(check.deny) with input as {"aws": {"config": {"configurationaggregrator": { - "__defsec_metadata": {"managed": true}, - "sourceallregions": {"value": true}, - }}}} + test.assert_empty(check.deny) with input as {"aws": {"config": {"configurationaggregrator": {"sourceallregions": {"value": true}}}}} } test_disallow_all_regions if { - test.assert_equal_message("Configuration aggregation is not set to source from all regions.", check.deny) with input as {"aws": {"config": {"configurationaggregrator": { - "__defsec_metadata": {"managed": true}, - "sourceallregions": {"value": false}, - }}}} + test.assert_equal_message("Configuration aggregation is not set to source from 
all regions.", check.deny) with input as {"aws": {"config": {"configurationaggregrator": {"sourceallregions": {"value": false}}}}} } diff --git a/checks/cloud/aws/ec2/add_description_to_security_group_test.rego b/checks/cloud/aws/ec2/add_description_to_security_group_test.rego index cb9f0e1e..71cdb559 100644 --- a/checks/cloud/aws/ec2/add_description_to_security_group_test.rego +++ b/checks/cloud/aws/ec2/add_description_to_security_group_test.rego @@ -6,28 +6,19 @@ import data.builtin.aws.ec2.aws0099 as check import data.lib.test test_allow_sg_with_description if { - inp := {"aws": {"ec2": {"securitygroups": [{ - "__defsec_metadata": {"managed": true}, - "description": {"value": "test"}, - }]}}} + inp := {"aws": {"ec2": {"securitygroups": [{"description": {"value": "test"}}]}}} test.assert_empty(check.deny) with input as inp } test_disallow_sg_without_description if { - inp := {"aws": {"ec2": {"securitygroups": [{ - "__defsec_metadata": {"managed": true}, - "description": {"value": ""}, - }]}}} + inp := {"aws": {"ec2": {"securitygroups": [{"description": {"value": ""}}]}}} test.assert_equal_message("Security group does not have a description", check.deny) with input as inp } test_disallow_sg_with_default_description if { - inp := {"aws": {"ec2": {"securitygroups": [{ - "__defsec_metadata": {"managed": true}, - "description": {"value": "Managed by Terraform"}, - }]}}} + inp := {"aws": {"ec2": {"securitygroups": [{"description": {"value": "Managed by Terraform"}}]}}} test.assert_equal_message("Security group explicitly uses the default description", check.deny) with input as inp } diff --git a/checks/cloud/aws/ec2/enable_volume_encryption_test.rego b/checks/cloud/aws/ec2/enable_volume_encryption_test.rego index e9b4215d..bbc043a5 100644 --- a/checks/cloud/aws/ec2/enable_volume_encryption_test.rego +++ b/checks/cloud/aws/ec2/enable_volume_encryption_test.rego @@ -15,7 +15,4 @@ test_deny_not_encrypted_volume if { test.assert_equal_message("EBS volume is not encrypted", check.deny) with input as inp } -build_input(encryption) := {"aws": {"ec2": {"volumes": [{ - "__defsec_metadata": {"managed": true}, - "encryption": encryption, -}]}}} +build_input(encryption) := {"aws": {"ec2": {"volumes": [{"encryption": encryption}]}}} diff --git a/checks/cloud/aws/ec2/encryption_customer_key_test.rego b/checks/cloud/aws/ec2/encryption_customer_key_test.rego index 71a22afa..bc6f40da 100644 --- a/checks/cloud/aws/ec2/encryption_customer_key_test.rego +++ b/checks/cloud/aws/ec2/encryption_customer_key_test.rego @@ -15,7 +15,4 @@ test_deny_volume_without_cmk if { test.assert_equal_message("EBS volume does not use a customer-managed KMS key.", check.deny) with input as inp } -build_input(encryption) := {"aws": {"ec2": {"volumes": [{ - "__defsec_metadata": {"managed": true}, - "encryption": encryption, -}]}}} +build_input(encryption) := {"aws": {"ec2": {"volumes": [{"encryption": encryption}]}}} diff --git a/checks/cloud/aws/sqs/enable_queue_encryption_test.rego b/checks/cloud/aws/sqs/enable_queue_encryption_test.rego index dc674d80..10fa999f 100644 --- a/checks/cloud/aws/sqs/enable_queue_encryption_test.rego +++ b/checks/cloud/aws/sqs/enable_queue_encryption_test.rego @@ -6,35 +6,26 @@ import data.builtin.aws.sqs.aws0096 as check import data.lib.test test_allow_encrypted if { - inp := {"aws": {"sqs": {"queues": [{ - "__defsec_metadata": {"managed": true}, - "encryption": { - "kmskeyid": {"value": "alias/key"}, - "managedencryption": {"value": true}, - }, - }]}}} + inp := {"aws": {"sqs": {"queues": [{"encryption": 
{ + "kmskeyid": {"value": "alias/key"}, + "managedencryption": {"value": true}, + }}]}}} test.assert_empty(check.deny) with input as inp } test_allow_without_key_but_managed if { - inp := {"aws": {"sqs": {"queues": [{ - "__defsec_metadata": {"managed": true}, - "encryption": { - "kmskeyid": {"value": ""}, - "managedencryption": {"value": true}, - }, - }]}}} + inp := {"aws": {"sqs": {"queues": [{"encryption": { + "kmskeyid": {"value": ""}, + "managedencryption": {"value": true}, + }}]}}} } test_deny_unencrypted if { - inp := {"aws": {"sqs": {"queues": [{ - "__defsec_metadata": {"managed": true}, - "encryption": { - "kmskeyid": {"value": ""}, - "managedencryption": {"value": false}, - }, - }]}}} + inp := {"aws": {"sqs": {"queues": [{"encryption": { + "kmskeyid": {"value": ""}, + "managedencryption": {"value": false}, + }}]}}} test.assert_equal_message("Queue is not encrypted", check.deny) with input as inp } diff --git a/checks/cloud/aws/sqs/queue_encryption_with_cmk_test.rego b/checks/cloud/aws/sqs/queue_encryption_with_cmk_test.rego index 3b60c7dc..4c731af8 100644 --- a/checks/cloud/aws/sqs/queue_encryption_with_cmk_test.rego +++ b/checks/cloud/aws/sqs/queue_encryption_with_cmk_test.rego @@ -7,7 +7,6 @@ import data.lib.test test_allow_encrypted_with_cmk if { inp := {"aws": {"sqs": {"queues": [{ - "__defsec_metadata": {"managed": true}, "name": "test-queue", "encryption": {"kmskeyid": {"value": "key-id"}}, }]}}} @@ -17,7 +16,6 @@ test_allow_encrypted_with_cmk if { test_deny_unencrypted_with_cmk if { inp := {"aws": {"sqs": {"queues": [{ - "__defsec_metadata": {"managed": true}, "name": "test-queue", "encryption": {"kmskeyid": {"value": "alias/aws/sqs"}}, }]}}} diff --git a/checks/cloud/google/compute/project_level_oslogin_test.rego b/checks/cloud/google/compute/project_level_oslogin_test.rego index e5d96aa0..1c448edc 100644 --- a/checks/cloud/google/compute/project_level_oslogin_test.rego +++ b/checks/cloud/google/compute/project_level_oslogin_test.rego @@ -18,13 +18,3 @@ test_allow_compute_os_login_enabled if { res := check.deny with input as inp res == set() } - -test_allow_compute_os_login_is_not_managed if { - inp := {"google": {"compute": {"projectmetadata": { - "__defsec_metadata": {"managed": false}, - "enableoslogin": {"value": false}, - }}}} - - res := check.deny with input as inp - res == set() -} diff --git a/checks/cloud/google/storage/bucket_encryption_customer_key.rego b/checks/cloud/google/storage/bucket_encryption_customer_key.rego index 0000d12f..db94d3e7 100644 --- a/checks/cloud/google/storage/bucket_encryption_customer_key.rego +++ b/checks/cloud/google/storage/bucket_encryption_customer_key.rego @@ -32,7 +32,7 @@ import rego.v1 deny contains res if { some bucket in input.google.storage.buckets - bucket.__defsec_metadata.managed + isManaged(bucket) bucket.encryption.defaultkmskeyname.value == "" res := result.new("Storage bucket encryption does not use a customer-managed key.", bucket.encryption.defaultkmskeyname) } diff --git a/checks/cloud/google/storage/bucket_encryption_customer_key_test.rego b/checks/cloud/google/storage/bucket_encryption_customer_key_test.rego index f04ef71d..cb918c70 100644 --- a/checks/cloud/google/storage/bucket_encryption_customer_key_test.rego +++ b/checks/cloud/google/storage/bucket_encryption_customer_key_test.rego @@ -6,20 +6,14 @@ import data.builtin.google.storage.google0066 as check import data.lib.test test_allow_bucket_with_customer_key if { - inp := {"google": {"storage": {"buckets": [{ - "__defsec_metadata": {"managed": true}, - 
"encryption": {"defaultkmskeyname": {"value": "key"}}, - }]}}} + inp := {"google": {"storage": {"buckets": [{"encryption": {"defaultkmskeyname": {"value": "key"}}}]}}} res := check.deny with input as inp res == set() } test_deny_bucket_without_customer_key if { - inp := {"google": {"storage": {"buckets": [{ - "__defsec_metadata": {"managed": true}, - "encryption": {"defaultkmskeyname": {"value": ""}}, - }]}}} + inp := {"google": {"storage": {"buckets": [{"encryption": {"defaultkmskeyname": {"value": ""}}}]}}} res := check.deny with input as inp count(res) == 1 diff --git a/checks/cloud/google/storage/enable_ubla.rego b/checks/cloud/google/storage/enable_ubla.rego index 53e5dcdb..b7b229b5 100644 --- a/checks/cloud/google/storage/enable_ubla.rego +++ b/checks/cloud/google/storage/enable_ubla.rego @@ -33,7 +33,7 @@ import rego.v1 deny contains res if { some bucket in input.google.storage.buckets - bucket.__defsec_metadata.managed + isManaged(bucket) bucket.enableuniformbucketlevelaccess.value == false res := result.new("Bucket has uniform bucket level access disabled.", bucket.enableuniformbucketlevelaccess) } diff --git a/checks/cloud/google/storage/enable_ubla_test.rego b/checks/cloud/google/storage/enable_ubla_test.rego index 3428912f..23147a91 100644 --- a/checks/cloud/google/storage/enable_ubla_test.rego +++ b/checks/cloud/google/storage/enable_ubla_test.rego @@ -6,20 +6,14 @@ import data.builtin.google.storage.google0002 as check import data.lib.test test_allow_uniform_bucket_level_access_enabled if { - inp := {"google": {"storage": {"buckets": [{ - "__defsec_metadata": {"managed": true}, - "enableuniformbucketlevelaccess": {"value": true}, - }]}}} + inp := {"google": {"storage": {"buckets": [{"enableuniformbucketlevelaccess": {"value": true}}]}}} res := check.deny with input as inp res == set() } test_deny_uniform_bucket_level_access_disabled if { - inp := {"google": {"storage": {"buckets": [{ - "__defsec_metadata": {"managed": true}, - "enableuniformbucketlevelaccess": {"value": false}, - }]}}} + inp := {"google": {"storage": {"buckets": [{"enableuniformbucketlevelaccess": {"value": false}}]}}} res := check.deny with input as inp count(res) == 1 diff --git a/checks/cloud/google/storage/no_public_access.rego b/checks/cloud/google/storage/no_public_access.rego index 0b01b836..ab6366ef 100644 --- a/checks/cloud/google/storage/no_public_access.rego +++ b/checks/cloud/google/storage/no_public_access.rego @@ -32,7 +32,7 @@ import rego.v1 deny contains res if { some bucket in input.google.storage.buckets - bucket.__defsec_metadata.managed + isManaged(bucket) some member in bucket.bindings[_].members is_member_external(member.value) res := result.new("Bucket allows public access.", member) @@ -40,7 +40,7 @@ deny contains res if { deny contains res if { some bucket in input.google.storage.buckets - bucket.__defsec_metadata.managed + isManaged(bucket) some member in bucket.members is_member_external(member.member.value) res := result.new("Bucket allows public access.", member.member) diff --git a/checks/cloud/google/storage/no_public_access_test.rego b/checks/cloud/google/storage/no_public_access_test.rego index c08a0186..e8dff2e0 100644 --- a/checks/cloud/google/storage/no_public_access_test.rego +++ b/checks/cloud/google/storage/no_public_access_test.rego @@ -6,29 +6,20 @@ import data.builtin.google.storage.google0001 as check import data.lib.test test_allow_bucket_does_not_allow_public_access if { - inp := build_input({ - "__defsec_metadata": {"managed": true}, - "bindings": [{"members": 
[{"value": "user:zKqzW@example.com"}]}], - }) + inp := build_input({"bindings": [{"members": [{"value": "user:zKqzW@example.com"}]}]}) res := check.deny with input as inp res == set() } test_deny_bucket_allows_public_access_members if { - inp := build_input({ - "__defsec_metadata": {"managed": true}, - "bindings": [{"members": [{"value": "allAuthenticatedUsers"}]}], - }) + inp := build_input({"bindings": [{"members": [{"value": "allAuthenticatedUsers"}]}]}) res := check.deny with input as inp count(res) == 1 } test_deny_bucket_allows_public_access_bindings if { - inp := build_input({ - "__defsec_metadata": {"managed": true}, - "bindings": [{"members": [{"value": "allAuthenticatedUsers"}]}], - }) + inp := build_input({"bindings": [{"members": [{"value": "allAuthenticatedUsers"}]}]}) res := check.deny with input as inp count(res) == 1