From 8e0afe243f257d0008b9ca985597bceb0dc6796e Mon Sep 17 00:00:00 2001 From: Nikita Pivkin Date: Mon, 17 Jun 2024 14:53:45 +0700 Subject: [PATCH] refactor(checks): migrate AWS codebuild, config, documentdb, dynamodb to Rego Signed-off-by: Nikita Pivkin --- avd_docs/aws/codebuild/AVD-AWS-0018/docs.md | 3 +- avd_docs/aws/config/AVD-AWS-0019/docs.md | 6 +- avd_docs/aws/documentdb/AVD-AWS-0020/docs.md | 3 +- avd_docs/aws/documentdb/AVD-AWS-0021/docs.md | 5 +- avd_docs/aws/documentdb/AVD-AWS-0022/docs.md | 5 +- avd_docs/aws/dynamodb/AVD-AWS-0023/docs.md | 5 +- avd_docs/aws/dynamodb/AVD-AWS-0024/docs.md | 4 +- avd_docs/aws/dynamodb/AVD-AWS-0025/docs.md | 5 +- .../cloud/aws/codebuild/enable_encryption.go | 3 +- .../aws/codebuild/enable_encryption.rego | 49 +++++++++ .../aws/codebuild/enable_encryption_test.go | 98 ----------------- .../aws/codebuild/enable_encryption_test.rego | 24 +++++ .../cloud/aws/config/aggregate_all_regions.go | 3 +- .../aws/config/aggregate_all_regions.rego | 42 ++++++++ .../aws/config/aggregate_all_regions_test.go | 61 ----------- .../config/aggregate_all_regions_test.rego | 20 ++++ .../cloud/aws/documentdb/enable_log_export.go | 3 +- .../aws/documentdb/enable_log_export.rego | 49 +++++++++ .../aws/documentdb/enable_log_export_test.go | 83 -------------- .../documentdb/enable_log_export_test.rego | 26 +++++ .../documentdb/enable_storage_encryption.go | 3 +- .../documentdb/enable_storage_encryption.rego | 40 +++++++ .../enable_storage_encryption_test.go | 65 ----------- .../enable_storage_encryption_test.rego | 17 +++ .../aws/documentdb/encryption_customer_key.go | 3 +- .../documentdb/encryption_customer_key.rego | 49 +++++++++ .../encryption_customer_key_test.go | 89 --------------- .../encryption_customer_key_test.rego | 30 ++++++ .../aws/dynamodb/enable_at_rest_encryption.go | 3 +- .../dynamodb/enable_at_rest_encryption.rego | 41 +++++++ .../enable_at_rest_encryption_test.go | 71 ------------ .../enable_at_rest_encryption_test.rego | 18 ++++ checks/cloud/aws/dynamodb/enable_recovery.go | 3 +- .../cloud/aws/dynamodb/enable_recovery.rego | 46 ++++++++ .../aws/dynamodb/enable_recovery_test.go | 65 ----------- .../aws/dynamodb/enable_recovery_test.rego | 30 ++++++ .../cloud/aws/dynamodb/table_customer_key.go | 3 +- .../aws/dynamodb/table_customer_key.rego | 44 ++++++++ .../aws/dynamodb/table_customer_key_test.go | 102 ------------------ .../aws/dynamodb/table_customer_key_test.rego | 42 ++++++++ 40 files changed, 604 insertions(+), 657 deletions(-) create mode 100644 checks/cloud/aws/codebuild/enable_encryption.rego delete mode 100644 checks/cloud/aws/codebuild/enable_encryption_test.go create mode 100644 checks/cloud/aws/codebuild/enable_encryption_test.rego create mode 100644 checks/cloud/aws/config/aggregate_all_regions.rego delete mode 100644 checks/cloud/aws/config/aggregate_all_regions_test.go create mode 100644 checks/cloud/aws/config/aggregate_all_regions_test.rego create mode 100644 checks/cloud/aws/documentdb/enable_log_export.rego delete mode 100644 checks/cloud/aws/documentdb/enable_log_export_test.go create mode 100644 checks/cloud/aws/documentdb/enable_log_export_test.rego create mode 100644 checks/cloud/aws/documentdb/enable_storage_encryption.rego delete mode 100644 checks/cloud/aws/documentdb/enable_storage_encryption_test.go create mode 100644 checks/cloud/aws/documentdb/enable_storage_encryption_test.rego create mode 100644 checks/cloud/aws/documentdb/encryption_customer_key.rego delete mode 100644 
checks/cloud/aws/documentdb/encryption_customer_key_test.go create mode 100644 checks/cloud/aws/documentdb/encryption_customer_key_test.rego create mode 100644 checks/cloud/aws/dynamodb/enable_at_rest_encryption.rego delete mode 100644 checks/cloud/aws/dynamodb/enable_at_rest_encryption_test.go create mode 100644 checks/cloud/aws/dynamodb/enable_at_rest_encryption_test.rego create mode 100644 checks/cloud/aws/dynamodb/enable_recovery.rego delete mode 100644 checks/cloud/aws/dynamodb/enable_recovery_test.go create mode 100644 checks/cloud/aws/dynamodb/enable_recovery_test.rego create mode 100644 checks/cloud/aws/dynamodb/table_customer_key.rego delete mode 100644 checks/cloud/aws/dynamodb/table_customer_key_test.go create mode 100644 checks/cloud/aws/dynamodb/table_customer_key_test.rego diff --git a/avd_docs/aws/codebuild/AVD-AWS-0018/docs.md b/avd_docs/aws/codebuild/AVD-AWS-0018/docs.md index 61295520..8c9e4a20 100644 --- a/avd_docs/aws/codebuild/AVD-AWS-0018/docs.md +++ b/avd_docs/aws/codebuild/AVD-AWS-0018/docs.md @@ -1,8 +1,9 @@ All artifacts produced by your CodeBuild project pipeline should always be encrypted + ### Impact -CodeBuild project artifacts are unencrypted + {{ remediationActions }} diff --git a/avd_docs/aws/config/AVD-AWS-0019/docs.md b/avd_docs/aws/config/AVD-AWS-0019/docs.md index 4a4ce16a..eb3fa783 100644 --- a/avd_docs/aws/config/AVD-AWS-0019/docs.md +++ b/avd_docs/aws/config/AVD-AWS-0019/docs.md @@ -1,10 +1,10 @@ -The configuration aggregator should be configured with all_regions for the source. - +Sources that aren't covered by the aggregator are not include in the configuration. The configuration aggregator should be configured with all_regions for the source. This will help limit the risk of any unmonitored configuration in regions that are thought to be unused. + ### Impact -Sources that aren't covered by the aggregator are not include in the configuration + {{ remediationActions }} diff --git a/avd_docs/aws/documentdb/AVD-AWS-0020/docs.md b/avd_docs/aws/documentdb/AVD-AWS-0020/docs.md index f6f10534..4eeac450 100644 --- a/avd_docs/aws/documentdb/AVD-AWS-0020/docs.md +++ b/avd_docs/aws/documentdb/AVD-AWS-0020/docs.md @@ -1,8 +1,9 @@ Document DB does not have auditing by default. To ensure that you are able to accurately audit the usage of your DocumentDB cluster you should enable export logs. + ### Impact -Limited visibility of audit trail for changes to the DocumentDB + {{ remediationActions }} diff --git a/avd_docs/aws/documentdb/AVD-AWS-0021/docs.md b/avd_docs/aws/documentdb/AVD-AWS-0021/docs.md index 28798f39..24f0c5d1 100644 --- a/avd_docs/aws/documentdb/AVD-AWS-0021/docs.md +++ b/avd_docs/aws/documentdb/AVD-AWS-0021/docs.md @@ -1,8 +1,9 @@ -Encryption of the underlying storage used by DocumentDB ensures that if their is compromise of the disks, the data is still protected. +Unencrypted sensitive data is vulnerable to compromise. Encryption of the underlying storage used by DocumentDB ensures that if their is compromise of the disks, the data is still protected. + ### Impact -Unencrypted sensitive data is vulnerable to compromise. + {{ remediationActions }} diff --git a/avd_docs/aws/documentdb/AVD-AWS-0022/docs.md b/avd_docs/aws/documentdb/AVD-AWS-0022/docs.md index c013e4db..a2e329b7 100644 --- a/avd_docs/aws/documentdb/AVD-AWS-0022/docs.md +++ b/avd_docs/aws/documentdb/AVD-AWS-0022/docs.md @@ -1,8 +1,9 @@ -Encryption using AWS keys provides protection for your DocumentDB underlying storage. 
To increase control of the encryption and manage factors like rotation use customer managed keys. +Using AWS managed keys does not allow for fine grained control. Encryption using AWS keys provides protection for your DocumentDB underlying storage. To increase control of the encryption and manage factors like rotation use customer managed keys. + ### Impact -Using AWS managed keys does not allow for fine grained control + {{ remediationActions }} diff --git a/avd_docs/aws/dynamodb/AVD-AWS-0023/docs.md b/avd_docs/aws/dynamodb/AVD-AWS-0023/docs.md index 72d8cbf3..1b2a57ae 100644 --- a/avd_docs/aws/dynamodb/AVD-AWS-0023/docs.md +++ b/avd_docs/aws/dynamodb/AVD-AWS-0023/docs.md @@ -1,8 +1,9 @@ -Amazon DynamoDB Accelerator (DAX) encryption at rest provides an additional layer of data protection by helping secure your data from unauthorized access to the underlying storage. +Data can be freely read if compromised. Amazon DynamoDB Accelerator (DAX) encryption at rest provides an additional layer of data protection by helping secure your data from unauthorized access to the underlying storage. + ### Impact -Data can be freely read if compromised + {{ remediationActions }} diff --git a/avd_docs/aws/dynamodb/AVD-AWS-0024/docs.md b/avd_docs/aws/dynamodb/AVD-AWS-0024/docs.md index 0623a53c..c4251db4 100644 --- a/avd_docs/aws/dynamodb/AVD-AWS-0024/docs.md +++ b/avd_docs/aws/dynamodb/AVD-AWS-0024/docs.md @@ -1,10 +1,10 @@ DynamoDB tables should be protected against accidentally or malicious write/delete actions by ensuring that there is adequate protection. - By enabling point-in-time-recovery you can restore to a known point in the event of loss of data. + ### Impact -Accidental or malicious writes and deletes can't be rolled back + {{ remediationActions }} diff --git a/avd_docs/aws/dynamodb/AVD-AWS-0025/docs.md b/avd_docs/aws/dynamodb/AVD-AWS-0025/docs.md index d9bde7a8..8397b845 100644 --- a/avd_docs/aws/dynamodb/AVD-AWS-0025/docs.md +++ b/avd_docs/aws/dynamodb/AVD-AWS-0025/docs.md @@ -1,8 +1,9 @@ -DynamoDB tables are encrypted by default using AWS managed encryption keys. To increase control of the encryption and control the management of factors like key rotation, use a Customer Managed Key. +Using AWS managed keys does not allow for fine grained control. DynamoDB tables are encrypted by default using AWS managed encryption keys. To increase control of the encryption and control the management of factors like key rotation, use a Customer Managed Key. 
+ ### Impact -Using AWS managed keys does not allow for fine grained control + {{ remediationActions }} diff --git a/checks/cloud/aws/codebuild/enable_encryption.go b/checks/cloud/aws/codebuild/enable_encryption.go index bb0fca17..921ce2d4 100755 --- a/checks/cloud/aws/codebuild/enable_encryption.go +++ b/checks/cloud/aws/codebuild/enable_encryption.go @@ -34,7 +34,8 @@ var CheckEnableEncryption = rules.Register( Links: cloudFormationEnableEncryptionLinks, RemediationMarkdown: cloudFormationEnableEncryptionRemediationMarkdown, }, - Severity: severity.High, + Severity: severity.High, + Deprecated: true, }, func(s *state.State) (results scan.Results) { for _, project := range s.AWS.CodeBuild.Projects { diff --git a/checks/cloud/aws/codebuild/enable_encryption.rego b/checks/cloud/aws/codebuild/enable_encryption.rego new file mode 100644 index 00000000..fd37bab4 --- /dev/null +++ b/checks/cloud/aws/codebuild/enable_encryption.rego @@ -0,0 +1,49 @@ +# METADATA +# title: CodeBuild Project artifacts encryption should not be disabled +# description: | +# All artifacts produced by your CodeBuild project pipeline should always be encrypted +# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codebuild-project-artifacts.html +# - https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codebuild-project.html +# custom: +# id: AVD-AWS-0018 +# avd_id: AVD-AWS-0018 +# provider: aws +# service: codebuild +# severity: HIGH +# short_code: enable-encryption +# recommended_action: Enable encryption for CodeBuild project artifacts +# input: +# selector: +# - type: cloud +# subtypes: +# - service: codebuild +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/codebuild_project#encryption_disabled +# good_examples: checks/cloud/aws/codebuild/enable_encryption.tf.go +# bad_examples: checks/cloud/aws/codebuild/enable_encryption.tf.go +# cloudformation: +# good_examples: checks/cloud/aws/codebuild/enable_encryption.cf.go +# bad_examples: checks/cloud/aws/codebuild/enable_encryption.cf.go +package builtin.aws.codebuild.aws0018 + +import rego.v1 + +deny contains res if { + some project in input.aws.codebuild.projects + encryptionenabled := project.artifactsettings.encryptionenabled + not encryptionenabled.value + res := result.new("Encryption is not enabled for project artifacts.", encryptionenabled) +} + +deny contains res if { + some project in input.aws.codebuild.projects + some setting in project.secondaryartifactsettings + not setting.encryptionenabled.value + res := result.new("Encryption is not enabled for secondary project artifacts.", setting.encryptionenabled) +} diff --git a/checks/cloud/aws/codebuild/enable_encryption_test.go b/checks/cloud/aws/codebuild/enable_encryption_test.go deleted file mode 100644 index 15493589..00000000 --- a/checks/cloud/aws/codebuild/enable_encryption_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package codebuild - -import ( - "testing" - - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - - "github.com/aquasecurity/trivy/pkg/iac/state" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/codebuild" - "github.com/aquasecurity/trivy/pkg/iac/scan" - - "github.com/stretchr/testify/assert" -) - -func TestCheckEnableEncryption(t *testing.T) { - tests := []struct { - name string - input codebuild.CodeBuild - expected bool - }{ - { - name: "AWS Codebuild project with unencrypted 
artifact", - input: codebuild.CodeBuild{ - Projects: []codebuild.Project{ - { - Metadata: trivyTypes.NewTestMetadata(), - ArtifactSettings: codebuild.ArtifactSettings{ - Metadata: trivyTypes.NewTestMetadata(), - EncryptionEnabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: true, - }, - { - name: "AWS Codebuild project with unencrypted secondary artifact", - input: codebuild.CodeBuild{ - Projects: []codebuild.Project{ - { - Metadata: trivyTypes.NewTestMetadata(), - ArtifactSettings: codebuild.ArtifactSettings{ - Metadata: trivyTypes.NewTestMetadata(), - EncryptionEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - }, - SecondaryArtifactSettings: []codebuild.ArtifactSettings{ - { - Metadata: trivyTypes.NewTestMetadata(), - EncryptionEnabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - }, - expected: true, - }, - { - name: "AWS Codebuild with encrypted artifacts", - input: codebuild.CodeBuild{ - Projects: []codebuild.Project{ - { - Metadata: trivyTypes.NewTestMetadata(), - ArtifactSettings: codebuild.ArtifactSettings{ - Metadata: trivyTypes.NewTestMetadata(), - EncryptionEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - }, - SecondaryArtifactSettings: []codebuild.ArtifactSettings{ - { - Metadata: trivyTypes.NewTestMetadata(), - EncryptionEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - }, - expected: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.CodeBuild = test.input - results := CheckEnableEncryption.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableEncryption.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/codebuild/enable_encryption_test.rego b/checks/cloud/aws/codebuild/enable_encryption_test.rego new file mode 100644 index 00000000..a5ea58b1 --- /dev/null +++ b/checks/cloud/aws/codebuild/enable_encryption_test.rego @@ -0,0 +1,24 @@ +package builtin.aws.codebuild.aws0018_test + +import rego.v1 + +import data.builtin.aws.codebuild.aws0018 as check +import data.lib.test + +test_allow_artifact_settings_with_encryption if { + test.assert_empty(check.deny) with input as build_input({"artifactsettings": {"encryptionenabled": {"value": true}}}) +} + +test_allow_secondary_artifact_settings_with_encryption if { + test.assert_empty(check.deny) with input as build_input({"secondaryartifactsettings": [{"encryptionenabled": {"value": true}}]}) +} + +test_disallow_artifact_settings_without_encryption if { + test.assert_equal_message("Encryption is not enabled for project artifacts.", check.deny) with input as build_input({"artifactsettings": {"encryptionenabled": {"value": false}}}) +} + +test_disallow_secondary_artifact_settings_without_encryption if { + test.assert_equal_message("Encryption is not enabled for secondary project artifacts.", check.deny) with input as build_input({"secondaryartifactsettings": [{"encryptionenabled": {"value": false}}]}) +} + +build_input(project) := {"aws": {"codebuild": {"projects": [project]}}} diff --git a/checks/cloud/aws/config/aggregate_all_regions.go b/checks/cloud/aws/config/aggregate_all_regions.go index c534b942..1a9c987e 100755 --- 
a/checks/cloud/aws/config/aggregate_all_regions.go +++ b/checks/cloud/aws/config/aggregate_all_regions.go @@ -35,7 +35,8 @@ This will help limit the risk of any unmonitored configuration in regions that a Links: cloudFormationAggregateAllRegionsLinks, RemediationMarkdown: cloudFormationAggregateAllRegionsRemediationMarkdown, }, - Severity: severity.High, + Severity: severity.High, + Deprecated: true, }, func(s *state.State) (results scan.Results) { if s.AWS.Config.ConfigurationAggregrator.Metadata.IsUnmanaged() { diff --git a/checks/cloud/aws/config/aggregate_all_regions.rego b/checks/cloud/aws/config/aggregate_all_regions.rego new file mode 100644 index 00000000..5c5ec7c9 --- /dev/null +++ b/checks/cloud/aws/config/aggregate_all_regions.rego @@ -0,0 +1,42 @@ +# METADATA +# title: Config configuration aggregator should be using all regions for source +# description: | +# Sources that aren't covered by the aggregator are not include in the configuration. The configuration aggregator should be configured with all_regions for the source. +# This will help limit the risk of any unmonitored configuration in regions that are thought to be unused. +# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/config/latest/developerguide/aggregate-data.html +# custom: +# id: AVD-AWS-0019 +# avd_id: AVD-AWS-0019 +# provider: aws +# service: config +# severity: HIGH +# short_code: aggregate-all-regions +# recommended_action: Set the aggregator to cover all regions +# input: +# selector: +# - type: cloud +# subtypes: +# - service: config +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/config_configuration_aggregator#all_regions +# good_examples: checks/cloud/aws/config/aggregate_all_regions.tf.go +# bad_examples: checks/cloud/aws/config/aggregate_all_regions.tf.go +# cloudformation: +# good_examples: checks/cloud/aws/config/aggregate_all_regions.cf.go +# bad_examples: checks/cloud/aws/config/aggregate_all_regions.cf.go +package builtin.aws.config.aws0019 + +import rego.v1 + +deny contains res if { + cfg_aggregator := input.aws.config.configurationaggregrator + cfg_aggregator.__defsec_metadata.managed + not cfg_aggregator.sourceallregions.value + res := result.new("Configuration aggregation is not set to source from all regions.", cfg_aggregator.sourceallregions) +} diff --git a/checks/cloud/aws/config/aggregate_all_regions_test.go b/checks/cloud/aws/config/aggregate_all_regions_test.go deleted file mode 100644 index af2b6d0e..00000000 --- a/checks/cloud/aws/config/aggregate_all_regions_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package config - -import ( - "testing" - - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - - "github.com/aquasecurity/trivy/pkg/iac/state" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/config" - "github.com/aquasecurity/trivy/pkg/iac/scan" - - "github.com/stretchr/testify/assert" -) - -func TestCheckAggregateAllRegions(t *testing.T) { - tests := []struct { - name string - input config.Config - expected bool - }{ - { - name: "AWS Config aggregator source with all regions set to false", - input: config.Config{ - ConfigurationAggregrator: config.ConfigurationAggregrator{ - Metadata: trivyTypes.NewTestMetadata(), - SourceAllRegions: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), - }, - }, - expected: true, - }, - { - name: "AWS Config aggregator source with all regions set to true", - input: config.Config{ - 
ConfigurationAggregrator: config.ConfigurationAggregrator{ - Metadata: trivyTypes.NewTestMetadata(), - SourceAllRegions: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - }, - }, - expected: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.Config = test.input - results := CheckAggregateAllRegions.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckAggregateAllRegions.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/config/aggregate_all_regions_test.rego b/checks/cloud/aws/config/aggregate_all_regions_test.rego new file mode 100644 index 00000000..854ec618 --- /dev/null +++ b/checks/cloud/aws/config/aggregate_all_regions_test.rego @@ -0,0 +1,20 @@ +package builtin.aws.config.aws0019_test + +import rego.v1 + +import data.builtin.aws.config.aws0019 as check +import data.lib.test + +test_allow_all_regions if { + test.assert_empty(check.deny) with input as {"aws": {"config": {"configurationaggregrator": { + "__defsec_metadata": {"managed": true}, + "sourceallregions": {"value": true}, + }}}} +} + +test_disallow_all_regions if { + test.assert_equal_message("Configuration aggregation is not set to source from all regions.", check.deny) with input as {"aws": {"config": {"configurationaggregrator": { + "__defsec_metadata": {"managed": true}, + "sourceallregions": {"value": false}, + }}}} +} diff --git a/checks/cloud/aws/documentdb/enable_log_export.go b/checks/cloud/aws/documentdb/enable_log_export.go index 47d41f38..889d33b9 100755 --- a/checks/cloud/aws/documentdb/enable_log_export.go +++ b/checks/cloud/aws/documentdb/enable_log_export.go @@ -34,7 +34,8 @@ var CheckEnableLogExport = rules.Register( Links: cloudFormationEnableLogExportLinks, RemediationMarkdown: cloudFormationEnableLogExportRemediationMarkdown, }, - Severity: severity.Medium, + Severity: severity.Medium, + Deprecated: true, }, func(s *state.State) (results scan.Results) { for _, cluster := range s.AWS.DocumentDB.Clusters { diff --git a/checks/cloud/aws/documentdb/enable_log_export.rego b/checks/cloud/aws/documentdb/enable_log_export.rego new file mode 100644 index 00000000..e0cbd385 --- /dev/null +++ b/checks/cloud/aws/documentdb/enable_log_export.rego @@ -0,0 +1,49 @@ +# METADATA +# title: DocumentDB logs export should be enabled +# description: | +# Document DB does not have auditing by default. To ensure that you are able to accurately audit the usage of your DocumentDB cluster you should enable export logs. 
+# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/documentdb/latest/developerguide/event-auditing.html +# custom: +# id: AVD-AWS-0020 +# avd_id: AVD-AWS-0020 +# provider: aws +# service: documentdb +# severity: MEDIUM +# short_code: enable-log-export +# recommended_action: Enable export logs +# input: +# selector: +# - type: cloud +# subtypes: +# - service: documentdb +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/docdb_cluster#enabled_cloudwatch_logs_exports +# good_examples: checks/cloud/aws/documentdb/enable_log_export.tf.go +# bad_examples: checks/cloud/aws/documentdb/enable_log_export.tf.go +# cloudformation: +# good_examples: checks/cloud/aws/documentdb/enable_log_export.cf.go +# bad_examples: checks/cloud/aws/documentdb/enable_log_export.cf.go +package builtin.aws.documentdb.aws0020 + +import rego.v1 + +log_export_audit := "audit" + +log_export_profiler := "profiler" + +deny contains res if { + some cluster in input.aws.documentdb.clusters + not export_audit_or_profiler(cluster) + res := result.new("Neither CloudWatch audit nor profiler log exports are enabled.", cluster) +} + +export_audit_or_profiler(cluster) if { + some log in cluster.enabledlogexports + log.value in [log_export_audit, log_export_profiler] +} diff --git a/checks/cloud/aws/documentdb/enable_log_export_test.go b/checks/cloud/aws/documentdb/enable_log_export_test.go deleted file mode 100644 index 9fd21b5a..00000000 --- a/checks/cloud/aws/documentdb/enable_log_export_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package documentdb - -import ( - "testing" - - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - - "github.com/aquasecurity/trivy/pkg/iac/state" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/documentdb" - "github.com/aquasecurity/trivy/pkg/iac/scan" - - "github.com/stretchr/testify/assert" -) - -func TestCheckEnableLogExport(t *testing.T) { - tests := []struct { - name string - input documentdb.DocumentDB - expected bool - }{ - { - name: "DocDB Cluster not exporting logs", - input: documentdb.DocumentDB{ - Clusters: []documentdb.Cluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - EnabledLogExports: []trivyTypes.StringValue{ - trivyTypes.String("", trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: true, - }, - { - name: "DocDB Cluster exporting audit logs", - input: documentdb.DocumentDB{ - Clusters: []documentdb.Cluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - EnabledLogExports: []trivyTypes.StringValue{ - trivyTypes.String(documentdb.LogExportAudit, trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: false, - }, - { - name: "DocDB Cluster exporting profiler logs", - input: documentdb.DocumentDB{ - Clusters: []documentdb.Cluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - EnabledLogExports: []trivyTypes.StringValue{ - trivyTypes.String(documentdb.LogExportProfiler, trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.DocumentDB = test.input - results := CheckEnableLogExport.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableLogExport.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, 
"Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/documentdb/enable_log_export_test.rego b/checks/cloud/aws/documentdb/enable_log_export_test.rego new file mode 100644 index 00000000..95cb64a1 --- /dev/null +++ b/checks/cloud/aws/documentdb/enable_log_export_test.rego @@ -0,0 +1,26 @@ +package builtin.aws.documentdb.aws0020_test + +import rego.v1 + +import data.builtin.aws.documentdb.aws0020 as check +import data.lib.test + +test_disallow_no_export_log if { + inp := {"aws": {"documentdb": {"clusters": [{"enabledlogexports": []}]}}} + test.assert_equal_message("Neither CloudWatch audit nor profiler log exports are enabled.", check.deny) with input as inp +} + +test_allow_export_audit if { + inp := {"aws": {"documentdb": {"clusters": [{"enabledlogexports": [{"value": "audit"}]}]}}} + test.assert_empty(check.deny) with input as inp +} + +test_allow_export_profiler if { + inp := {"aws": {"documentdb": {"clusters": [{"enabledlogexports": [{"value": "profiler"}]}]}}} + test.assert_empty(check.deny) with input as inp +} + +test_allow_export_mixed if { + inp := {"aws": {"documentdb": {"clusters": [{"enabledlogexports": [{"value": "audit"}, {"value": "profiler"}]}]}}} + test.assert_empty(check.deny) with input as inp +} diff --git a/checks/cloud/aws/documentdb/enable_storage_encryption.go b/checks/cloud/aws/documentdb/enable_storage_encryption.go index ba8eb653..c34c98d5 100755 --- a/checks/cloud/aws/documentdb/enable_storage_encryption.go +++ b/checks/cloud/aws/documentdb/enable_storage_encryption.go @@ -31,7 +31,8 @@ var CheckEnableStorageEncryption = rules.Register( Links: cloudFormationEnableStorageEncryptionLinks, RemediationMarkdown: cloudFormationEnableStorageEncryptionRemediationMarkdown, }, - Severity: severity.High, + Severity: severity.High, + Deprecated: true, }, func(s *state.State) (results scan.Results) { for _, cluster := range s.AWS.DocumentDB.Clusters { diff --git a/checks/cloud/aws/documentdb/enable_storage_encryption.rego b/checks/cloud/aws/documentdb/enable_storage_encryption.rego new file mode 100644 index 00000000..a7810613 --- /dev/null +++ b/checks/cloud/aws/documentdb/enable_storage_encryption.rego @@ -0,0 +1,40 @@ +# METADATA +# title: DocumentDB storage must be encrypted +# description: | +# Unencrypted sensitive data is vulnerable to compromise. Encryption of the underlying storage used by DocumentDB ensures that if their is compromise of the disks, the data is still protected. 
+# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/documentdb/latest/developerguide/encryption-at-rest.html +# custom: +# id: AVD-AWS-0021 +# avd_id: AVD-AWS-0021 +# provider: aws +# service: documentdb +# severity: HIGH +# short_code: enable-storage-encryption +# recommended_action: Enable storage encryption +# input: +# selector: +# - type: cloud +# subtypes: +# - service: documentdb +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/docdb_cluster#storage_encrypted +# good_examples: checks/cloud/aws/documentdb/enable_storage_encryption.tf.go +# bad_examples: checks/cloud/aws/documentdb/enable_storage_encryption.tf.go +# cloudformation: +# good_examples: checks/cloud/aws/documentdb/enable_storage_encryption.cf.go +# bad_examples: checks/cloud/aws/documentdb/enable_storage_encryption.cf.go +package builtin.aws.documentdb.aws0021 + +import rego.v1 + +deny contains res if { + some cluster in input.aws.documentdb.clusters + not cluster.storageencrypted.value + res := result.new("Cluster storage does not have encryption enabled.", cluster.storageencrypted) +} diff --git a/checks/cloud/aws/documentdb/enable_storage_encryption_test.go b/checks/cloud/aws/documentdb/enable_storage_encryption_test.go deleted file mode 100644 index 7b289cd7..00000000 --- a/checks/cloud/aws/documentdb/enable_storage_encryption_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package documentdb - -import ( - "testing" - - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - - "github.com/aquasecurity/trivy/pkg/iac/state" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/documentdb" - "github.com/aquasecurity/trivy/pkg/iac/scan" - - "github.com/stretchr/testify/assert" -) - -func TestCheckEnableStorageEncryption(t *testing.T) { - tests := []struct { - name string - input documentdb.DocumentDB - expected bool - }{ - { - name: "DocDB unencrypted storage", - input: documentdb.DocumentDB{ - Clusters: []documentdb.Cluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - StorageEncrypted: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: true, - }, - { - name: "DocDB encrypted storage", - input: documentdb.DocumentDB{ - Clusters: []documentdb.Cluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - StorageEncrypted: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.DocumentDB = test.input - results := CheckEnableStorageEncryption.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableStorageEncryption.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/documentdb/enable_storage_encryption_test.rego b/checks/cloud/aws/documentdb/enable_storage_encryption_test.rego new file mode 100644 index 00000000..399a2796 --- /dev/null +++ b/checks/cloud/aws/documentdb/enable_storage_encryption_test.rego @@ -0,0 +1,17 @@ +package builtin.aws.documentdb.aws0021_test + +import rego.v1 + +import data.builtin.aws.documentdb.aws0021 as check +import data.lib.test + +test_allow_with_encryption if { + inp := {"aws": {"documentdb": {"clusters": 
[{"storageencrypted": {"value": true}}]}}} + test.assert_empty(check.deny) with input as inp +} + +test_disallow_without_encryption if { + inp := {"aws": {"documentdb": {"clusters": [{"storageencrypted": {"value": false}}]}}} + + test.assert_equal_message("Cluster storage does not have encryption enabled.", check.deny) with input as inp +} diff --git a/checks/cloud/aws/documentdb/encryption_customer_key.go b/checks/cloud/aws/documentdb/encryption_customer_key.go index 4ba0ebd5..c23f6376 100755 --- a/checks/cloud/aws/documentdb/encryption_customer_key.go +++ b/checks/cloud/aws/documentdb/encryption_customer_key.go @@ -31,7 +31,8 @@ var CheckEncryptionCustomerKey = rules.Register( Links: cloudFormationEncryptionCustomerKeyLinks, RemediationMarkdown: cloudFormationEncryptionCustomerKeyRemediationMarkdown, }, - Severity: severity.Low, + Severity: severity.Low, + Deprecated: true, }, func(s *state.State) (results scan.Results) { for _, cluster := range s.AWS.DocumentDB.Clusters { diff --git a/checks/cloud/aws/documentdb/encryption_customer_key.rego b/checks/cloud/aws/documentdb/encryption_customer_key.rego new file mode 100644 index 00000000..4f95d554 --- /dev/null +++ b/checks/cloud/aws/documentdb/encryption_customer_key.rego @@ -0,0 +1,49 @@ +# METADATA +# title: DocumentDB encryption should use Customer Managed Keys +# description: | +# Using AWS managed keys does not allow for fine grained control. Encryption using AWS keys provides protection for your DocumentDB underlying storage. To increase control of the encryption and manage factors like rotation use customer managed keys. +# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/documentdb/latest/developerguide/security.encryption.ssl.public-key.html +# custom: +# id: AVD-AWS-0022 +# avd_id: AVD-AWS-0022 +# provider: aws +# service: documentdb +# severity: LOW +# short_code: encryption-customer-key +# recommended_action: Enable encryption using customer managed keys +# input: +# selector: +# - type: cloud +# subtypes: +# - service: documentdb +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/docdb_cluster#kms_key_id +# good_examples: checks/cloud/aws/documentdb/encryption_customer_key.tf.go +# bad_examples: checks/cloud/aws/documentdb/encryption_customer_key.tf.go +# cloudformation: +# good_examples: checks/cloud/aws/documentdb/encryption_customer_key.cf.go +# bad_examples: checks/cloud/aws/documentdb/encryption_customer_key.cf.go +package builtin.aws.documentdb.aws0022 + +import rego.v1 + +deny contains res if { + some cluster in input.aws.documentdb.clusters + cluster.kmskeyid.value == "" + + res := result.new("Cluster encryption does not use a customer-managed KMS key.", cluster) +} + +deny contains res if { + some cluster in input.aws.documentdb.clusters + some instance in cluster.instances + instance.kmskeyid.value == "" + + res := result.new("Instance encryption does not use a customer-managed KMS key.", cluster) +} diff --git a/checks/cloud/aws/documentdb/encryption_customer_key_test.go b/checks/cloud/aws/documentdb/encryption_customer_key_test.go deleted file mode 100644 index 86f1c1f2..00000000 --- a/checks/cloud/aws/documentdb/encryption_customer_key_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package documentdb - -import ( - "testing" - - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - - "github.com/aquasecurity/trivy/pkg/iac/state" - -
"github.com/aquasecurity/trivy/pkg/iac/providers/aws/documentdb" - "github.com/aquasecurity/trivy/pkg/iac/scan" - - "github.com/stretchr/testify/assert" -) - -func TestCheckEncryptionCustomerKey(t *testing.T) { - tests := []struct { - name string - input documentdb.DocumentDB - expected bool - }{ - { - name: "DocDB Cluster encryption missing KMS key", - input: documentdb.DocumentDB{ - Clusters: []documentdb.Cluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - KMSKeyID: trivyTypes.String("", trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: true, - }, - { - name: "DocDB Instance encryption missing KMS key", - input: documentdb.DocumentDB{ - Clusters: []documentdb.Cluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - KMSKeyID: trivyTypes.String("kms-key", trivyTypes.NewTestMetadata()), - Instances: []documentdb.Instance{ - { - Metadata: trivyTypes.NewTestMetadata(), - KMSKeyID: trivyTypes.String("", trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - }, - expected: true, - }, - { - name: "DocDB Cluster and Instance encrypted with proper KMS keys", - input: documentdb.DocumentDB{ - Clusters: []documentdb.Cluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - KMSKeyID: trivyTypes.String("kms-key", trivyTypes.NewTestMetadata()), - Instances: []documentdb.Instance{ - { - Metadata: trivyTypes.NewTestMetadata(), - KMSKeyID: trivyTypes.String("kms-key", trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - }, - expected: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.DocumentDB = test.input - results := CheckEncryptionCustomerKey.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEncryptionCustomerKey.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/documentdb/encryption_customer_key_test.rego b/checks/cloud/aws/documentdb/encryption_customer_key_test.rego new file mode 100644 index 00000000..107a4113 --- /dev/null +++ b/checks/cloud/aws/documentdb/encryption_customer_key_test.rego @@ -0,0 +1,30 @@ +package builtin.aws.documentdb.aws0022_test + +import rego.v1 + +import data.builtin.aws.documentdb.aws0022 as check +import data.lib.test + +test_allow_cluster_with_kms_key if { + inp := {"aws": {"documentdb": {"clusters": [{"kmskeyid": {"value": "test"}}]}}} + + test.assert_empty(check.deny) with input as inp +} + +test_allow_instance_with_kms_key if { + inp := {"aws": {"documentdb": {"clusters": [{"instances": [{"kmskeyid": {"value": "test"}}]}]}}} + + test.assert_empty(check.deny) with input as inp +} + +test_disallow_cluster_without_kms_key if { + inp := {"aws": {"documentdb": {"clusters": [{"kmskeyid": {"value": ""}}]}}} + + test.assert_equal_message("Cluster encryption does not use a customer-managed KMS key.", check.deny) with input as inp +} + +test_disallow_instance_without_kms_key if { + inp := {"aws": {"documentdb": {"clusters": [{"instances": [{"kmskeyid": {"value": ""}}]}]}}} + + test.assert_equal_message("Instance encryption does not use a customer-managed KMS key.", check.deny) with input as inp +} diff --git a/checks/cloud/aws/dynamodb/enable_at_rest_encryption.go b/checks/cloud/aws/dynamodb/enable_at_rest_encryption.go index b0cac3be..444b0d47 100755 --- a/checks/cloud/aws/dynamodb/enable_at_rest_encryption.go +++ 
b/checks/cloud/aws/dynamodb/enable_at_rest_encryption.go @@ -34,7 +34,8 @@ var CheckEnableAtRestEncryption = rules.Register( Links: cloudFormationEnableAtRestEncryptionLinks, RemediationMarkdown: cloudFormationEnableAtRestEncryptionRemediationMarkdown, }, - Severity: severity.High, + Severity: severity.High, + Deprecated: true, }, func(s *state.State) (results scan.Results) { for _, cluster := range s.AWS.DynamoDB.DAXClusters { diff --git a/checks/cloud/aws/dynamodb/enable_at_rest_encryption.rego b/checks/cloud/aws/dynamodb/enable_at_rest_encryption.rego new file mode 100644 index 00000000..2ce4ea7e --- /dev/null +++ b/checks/cloud/aws/dynamodb/enable_at_rest_encryption.rego @@ -0,0 +1,41 @@ +# METADATA +# title: DAX Cluster should always encrypt data at rest +# description: | +# Data can be freely read if compromised. Amazon DynamoDB Accelerator (DAX) encryption at rest provides an additional layer of data protection by helping secure your data from unauthorized access to the underlying storage. +# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DAXEncryptionAtRest.html +# - https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dax-cluster.html +# custom: +# id: AVD-AWS-0023 +# avd_id: AVD-AWS-0023 +# provider: aws +# service: dynamodb +# severity: HIGH +# short_code: enable-at-rest-encryption +# recommended_action: Enable encryption at rest for DAX Cluster +# input: +# selector: +# - type: cloud +# subtypes: +# - service: dynamodb +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/dax_cluster#server_side_encryption +# good_examples: checks/cloud/aws/dynamodb/enable_at_rest_encryption.tf.go +# bad_examples: checks/cloud/aws/dynamodb/enable_at_rest_encryption.tf.go +# cloudformation: +# good_examples: checks/cloud/aws/dynamodb/enable_at_rest_encryption.cf.go +# bad_examples: checks/cloud/aws/dynamodb/enable_at_rest_encryption.cf.go +package builtin.aws.dynamodb.aws0023 + +import rego.v1 + +deny contains res if { + some cluster in input.aws.dynamodb.daxclusters + cluster.serversideencryption.enabled.value == false + res := result.new("DAX encryption is not enabled.", cluster.serversideencryption.enabled) +} diff --git a/checks/cloud/aws/dynamodb/enable_at_rest_encryption_test.go b/checks/cloud/aws/dynamodb/enable_at_rest_encryption_test.go deleted file mode 100644 index 66c02a1b..00000000 --- a/checks/cloud/aws/dynamodb/enable_at_rest_encryption_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package dynamodb - -import ( - "testing" - - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - - "github.com/aquasecurity/trivy/pkg/iac/state" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/dynamodb" - "github.com/aquasecurity/trivy/pkg/iac/scan" - - "github.com/stretchr/testify/assert" -) - -func TestCheckEnableAtRestEncryption(t *testing.T) { - tests := []struct { - name string - input dynamodb.DynamoDB - expected bool - }{ - { - name: "Cluster with SSE disabled", - input: dynamodb.DynamoDB{ - DAXClusters: []dynamodb.DAXCluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - ServerSideEncryption: dynamodb.ServerSideEncryption{ - Metadata: trivyTypes.NewTestMetadata(), - Enabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: true, - }, - { - name: "Cluster with SSE enabled", - input: dynamodb.DynamoDB{ - DAXClusters: []dynamodb.DAXCluster{ - { - Metadata: 
trivyTypes.NewTestMetadata(), - ServerSideEncryption: dynamodb.ServerSideEncryption{ - Metadata: trivyTypes.NewTestMetadata(), - Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.DynamoDB = test.input - results := CheckEnableAtRestEncryption.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableAtRestEncryption.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/dynamodb/enable_at_rest_encryption_test.rego b/checks/cloud/aws/dynamodb/enable_at_rest_encryption_test.rego new file mode 100644 index 00000000..237e2f20 --- /dev/null +++ b/checks/cloud/aws/dynamodb/enable_at_rest_encryption_test.rego @@ -0,0 +1,18 @@ +package builtin.aws.dynamodb.aws0023_test + +import rego.v1 + +import data.builtin.aws.dynamodb.aws0023 as check +import data.lib.test + +test_allow_with_encryption if { + inp := {"aws": {"dynamodb": {"daxclusters": [{"serversideencryption": {"enabled": {"value": true}}}]}}} + + test.assert_empty(check.deny) with input as inp +} + +test_disallow_without_encryption if { + inp := {"aws": {"dynamodb": {"daxclusters": [{"serversideencryption": {"enabled": {"value": false}}}]}}} + + test.assert_equal_message("DAX encryption is not enabled.", check.deny) with input as inp +} diff --git a/checks/cloud/aws/dynamodb/enable_recovery.go b/checks/cloud/aws/dynamodb/enable_recovery.go index 8fa5e687..0cf2b6dc 100755 --- a/checks/cloud/aws/dynamodb/enable_recovery.go +++ b/checks/cloud/aws/dynamodb/enable_recovery.go @@ -29,7 +29,8 @@ By enabling point-in-time-recovery you can restore to a known point in the event Links: terraformEnableRecoveryLinks, RemediationMarkdown: terraformEnableRecoveryRemediationMarkdown, }, - Severity: severity.Medium, + Severity: severity.Medium, + Deprecated: true, }, func(s *state.State) (results scan.Results) { for _, cluster := range s.AWS.DynamoDB.DAXClusters { diff --git a/checks/cloud/aws/dynamodb/enable_recovery.rego b/checks/cloud/aws/dynamodb/enable_recovery.rego new file mode 100644 index 00000000..3e19dca2 --- /dev/null +++ b/checks/cloud/aws/dynamodb/enable_recovery.rego @@ -0,0 +1,46 @@ +# METADATA +# title: Point in time recovery should be enabled to protect DynamoDB table +# description: | +# DynamoDB tables should be protected against accidentally or malicious write/delete actions by ensuring that there is adequate protection. +# By enabling point-in-time-recovery you can restore to a known point in the event of loss of data. 
+# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/PointInTimeRecovery.html +# custom: +# id: AVD-AWS-0024 +# avd_id: AVD-AWS-0024 +# provider: aws +# service: dynamodb +# severity: MEDIUM +# short_code: enable-recovery +# recommended_action: Enable point in time recovery +# input: +# selector: +# - type: cloud +# subtypes: +# - service: dynamodb +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/dynamodb_table#point_in_time_recovery +# good_examples: checks/cloud/aws/dynamodb/enable_recovery.tf.go +# bad_examples: checks/cloud/aws/dynamodb/enable_recovery.tf.go +package builtin.aws.dynamodb.aws0024 + +import rego.v1 + +deny contains res if { + some cluster in input.aws.dynamodb.daxclusters + cluster.pointintimerecovery.value == false + + res := result.new("Point-in-time recovery is not enabled.", cluster.pointintimerecovery) +} + +deny contains res if { + some table in input.aws.dynamodb.tables + table.pointintimerecovery.value == false + + res := result.new("Point-in-time recovery is not enabled.", table.pointintimerecovery) +} diff --git a/checks/cloud/aws/dynamodb/enable_recovery_test.go b/checks/cloud/aws/dynamodb/enable_recovery_test.go deleted file mode 100644 index 9df6d104..00000000 --- a/checks/cloud/aws/dynamodb/enable_recovery_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package dynamodb - -import ( - "testing" - - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - - "github.com/aquasecurity/trivy/pkg/iac/state" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/dynamodb" - "github.com/aquasecurity/trivy/pkg/iac/scan" - - "github.com/stretchr/testify/assert" -) - -func TestCheckEnableRecovery(t *testing.T) { - tests := []struct { - name string - input dynamodb.DynamoDB - expected bool - }{ - { - name: "Cluster with point in time recovery disabled", - input: dynamodb.DynamoDB{ - DAXClusters: []dynamodb.DAXCluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - PointInTimeRecovery: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: true, - }, - { - name: "Cluster with point in time recovery enabled", - input: dynamodb.DynamoDB{ - DAXClusters: []dynamodb.DAXCluster{ - { - Metadata: trivyTypes.NewTestMetadata(), - PointInTimeRecovery: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - }, - }, - }, - expected: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.DynamoDB = test.input - results := CheckEnableRecovery.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableRecovery.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/dynamodb/enable_recovery_test.rego b/checks/cloud/aws/dynamodb/enable_recovery_test.rego new file mode 100644 index 00000000..e73a8d09 --- /dev/null +++ b/checks/cloud/aws/dynamodb/enable_recovery_test.rego @@ -0,0 +1,30 @@ +package builtin.aws.dynamodb.aws0024_test + +import rego.v1 + +import data.builtin.aws.dynamodb.aws0024 as check +import data.lib.test + +test_allow_cluster_with_recovery if { + inp := {"aws": {"dynamodb": {"tables": [{"pointintimerecovery": {"value": true}}]}}} + + 
test.assert_empty(check.deny) with input as inp +} + +test_deny_cluster_without_recovery if { + inp := {"aws": {"dynamodb": {"tables": [{"pointintimerecovery": {"value": false}}]}}} + + test.assert_equal_message("Point-in-time recovery is not enabled.", check.deny) with input as inp +} + +test_allow_table_with_recovery if { + inp := {"aws": {"dynamodb": {"tables": [{"pointintimerecovery": {"value": true}}]}}} + + test.assert_empty(check.deny) with input as inp +} + +test_deny_table_without_recovery if { + inp := {"aws": {"dynamodb": {"tables": [{"pointintimerecovery": {"value": false}}]}}} + + test.assert_equal_message("Point-in-time recovery is not enabled.", check.deny) with input as inp +} diff --git a/checks/cloud/aws/dynamodb/table_customer_key.go b/checks/cloud/aws/dynamodb/table_customer_key.go index 643e3bdd..e0dbca69 100755 --- a/checks/cloud/aws/dynamodb/table_customer_key.go +++ b/checks/cloud/aws/dynamodb/table_customer_key.go @@ -28,7 +28,8 @@ var CheckTableCustomerKey = rules.Register( Links: terraformTableCustomerKeyLinks, RemediationMarkdown: terraformTableCustomerKeyRemediationMarkdown, }, - Severity: severity.Low, + Severity: severity.Low, + Deprecated: true, }, func(s *state.State) (results scan.Results) { for _, table := range s.AWS.DynamoDB.Tables { diff --git a/checks/cloud/aws/dynamodb/table_customer_key.rego b/checks/cloud/aws/dynamodb/table_customer_key.rego new file mode 100644 index 00000000..efd1f542 --- /dev/null +++ b/checks/cloud/aws/dynamodb/table_customer_key.rego @@ -0,0 +1,44 @@ +# METADATA +# title: DynamoDB tables should use at rest encryption with a Customer Managed Key +# description: | +# Using AWS managed keys does not allow for fine grained control. DynamoDB tables are encrypted by default using AWS managed encryption keys. To increase control of the encryption and control the management of factors like key rotation, use a Customer Managed Key. 
+# scope: package +# schemas: +# - input: schema["cloud"] +# related_resources: +# - https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/EncryptionAtRest.html +# custom: +# id: AVD-AWS-0025 +# avd_id: AVD-AWS-0025 +# provider: aws +# service: dynamodb +# severity: LOW +# short_code: table-customer-key +# recommended_action: Enable server side encryption with a customer managed key +# input: +# selector: +# - type: cloud +# subtypes: +# - service: dynamodb +# provider: aws +# terraform: +# links: +# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/dynamodb_table#server_side_encryption +# good_examples: checks/cloud/aws/dynamodb/table_customer_key.tf.go +# bad_examples: checks/cloud/aws/dynamodb/table_customer_key.tf.go +package builtin.aws.dynamodb.aws0025 + +import rego.v1 + +deny contains res if { + some table in input.aws.dynamodb.tables + table.serversideencryption.enabled.value == false + res := result.new("Table encryption does not use a customer-managed KMS key.", table.serversideencryption.enabled) +} + +deny contains res if { + some table in input.aws.dynamodb.tables + table.serversideencryption.enabled.value + table.serversideencryption.kmskeyid.value == "" + res := result.new("Table encryption explicitly uses the default KMS key.", table.serversideencryption.kmskeyid) +} diff --git a/checks/cloud/aws/dynamodb/table_customer_key_test.go b/checks/cloud/aws/dynamodb/table_customer_key_test.go deleted file mode 100644 index 56daa731..00000000 --- a/checks/cloud/aws/dynamodb/table_customer_key_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package dynamodb - -import ( - "testing" - - "github.com/aquasecurity/trivy/pkg/iac/providers/aws/dynamodb" - "github.com/aquasecurity/trivy/pkg/iac/scan" - "github.com/aquasecurity/trivy/pkg/iac/state" - trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types" - "github.com/stretchr/testify/assert" -) - -func TestCheckTableCustomerKey(t *testing.T) { - tests := []struct { - name string - input dynamodb.DynamoDB - expected bool - }{ - { - name: "Cluster encryption missing KMS key", - input: dynamodb.DynamoDB{ - Tables: []dynamodb.Table{ - { - Metadata: trivyTypes.NewTestMetadata(), - ServerSideEncryption: dynamodb.ServerSideEncryption{ - Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - Metadata: trivyTypes.NewTestMetadata(), - KMSKeyID: trivyTypes.String("", trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: true, - }, - { - name: "Cluster encryption using default KMS key", - input: dynamodb.DynamoDB{ - Tables: []dynamodb.Table{ - { - Metadata: trivyTypes.NewTestMetadata(), - ServerSideEncryption: dynamodb.ServerSideEncryption{ - Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - Metadata: trivyTypes.NewTestMetadata(), - KMSKeyID: trivyTypes.String(dynamodb.DefaultKMSKeyID, trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: true, - }, - { - name: "Cluster encryption using proper KMS key", - input: dynamodb.DynamoDB{ - Tables: []dynamodb.Table{ - { - Metadata: trivyTypes.NewTestMetadata(), - ServerSideEncryption: dynamodb.ServerSideEncryption{ - Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()), - Metadata: trivyTypes.NewTestMetadata(), - KMSKeyID: trivyTypes.String("some-ok-key", trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: false, - }, - { - name: "KMS key exist, but SSE is not enabled", - input: dynamodb.DynamoDB{ - Tables: []dynamodb.Table{ - { - Metadata: trivyTypes.NewTestMetadata(), - ServerSideEncryption: 
dynamodb.ServerSideEncryption{ - Enabled: trivyTypes.BoolDefault(false, trivyTypes.NewTestMetadata()), - Metadata: trivyTypes.NewTestMetadata(), - KMSKeyID: trivyTypes.String("some-ok-key", trivyTypes.NewTestMetadata()), - }, - }, - }, - }, - expected: true, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var testState state.State - testState.AWS.DynamoDB = test.input - results := CheckTableCustomerKey.Evaluate(&testState) - var found bool - for _, result := range results { - if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckTableCustomerKey.LongID() { - found = true - } - } - if test.expected { - assert.True(t, found, "Rule should have been found") - } else { - assert.False(t, found, "Rule should not have been found") - } - }) - } -} diff --git a/checks/cloud/aws/dynamodb/table_customer_key_test.rego b/checks/cloud/aws/dynamodb/table_customer_key_test.rego new file mode 100644 index 00000000..74b24f7c --- /dev/null +++ b/checks/cloud/aws/dynamodb/table_customer_key_test.rego @@ -0,0 +1,42 @@ +package builtin.aws.dynamodb.aws0025_test + +import rego.v1 + +import data.builtin.aws.dynamodb.aws0025 as check +import data.lib.test + +test_allow_table_with_cmk if { + inp := {"aws": {"dynamodb": {"tables": [{ + "name": "test", + "serversideencryption": { + "enabled": {"value": true}, + "kmskeyid": {"value": "alias/test"}, + }, + }]}}} + + test.assert_empty(check.deny) with input as inp +} + +test_deny_table_without_cmk if { + inp := {"aws": {"dynamodb": {"tables": [{ + "name": "test", + "serversideencryption": { + "enabled": {"value": true}, + "kmskeyid": {"value": ""}, + }, + }]}}} + + test.assert_equal_message("Table encryption explicitly uses the default KMS key.", check.deny) with input as inp +} + +test_deny_table_sse_disabled if { + inp := {"aws": {"dynamodb": {"tables": [{ + "name": "test", + "serversideencryption": { + "enabled": {"value": false}, + "kmskeyid": {"value": ""}, + }, + }]}}} + + test.assert_equal_message("Table encryption does not use a customer-managed KMS key.", check.deny) with input as inp +}
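
For reviewers who want to exercise the migrated checks directly, the sketch below runs one of the new Rego packages against inline inputs without going through the data.lib.test helpers. It is a minimal sketch, assuming the check bundle (including the result helper library that defines result.new) is loaded alongside it; the package path and input shape are taken from enable_encryption.rego in this patch, while the smoke-test package and rule names are hypothetical.

package builtin.aws.codebuild.aws0018_smoke_test

import rego.v1

import data.builtin.aws.codebuild.aws0018 as check

# A project whose primary artifacts have encryption disabled should yield
# exactly one deny result; an encrypted project should yield none.
test_unencrypted_artifacts_flagged if {
	failing := {"aws": {"codebuild": {"projects": [{"artifactsettings": {"encryptionenabled": {"value": false}}}]}}}
	passing := {"aws": {"codebuild": {"projects": [{"artifactsettings": {"encryptionenabled": {"value": true}}}]}}}

	count(check.deny) == 1 with input as failing
	count(check.deny) == 0 with input as passing
}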
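The new tests lean on two helpers from data.lib.test, assert_empty and assert_equal_message, whose definitions are not part of this diff. The sketch below is only an assumed reading of their semantics, written to make the calling convention explicit: both take a set of results such as check.deny, not the check package itself. The real helpers in trivy-checks may differ; the package name lib.test_sketch and the rule bodies here are illustrative assumptions.

package lib.test_sketch

import rego.v1

# Assumed behaviour: passes when the check produced no results.
assert_empty(results) if {
	count(results) == 0
}

# Assumed behaviour: passes when the check produced a single result
# carrying the expected message.
assert_equal_message(expected, results) if {
	count(results) == 1
	some res in results
	res.msg == expected
}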