From 7027b6b2ec63686bd1fb4270485cd9d1e651a6d3 Mon Sep 17 00:00:00 2001 From: Andy Pitcher Date: Wed, 26 Jun 2024 08:53:57 -0400 Subject: [PATCH] Add CIS kubernetes CIS-1.9 for k8s v1.27 - v1.29 (#1617) * Create cis-1.9 yamls and Update info - policies.yaml - 5.1.1 to 5.1.6 were adapted from Manual to Automated - 5.1.3 got broken down into 5.1.3.1 and 5.1.3.2 - 5.1.6 got broken down into 5.1.6.1 and 5.1.6.2 - version was set to cis-1.9 - node.yaml master.yaml controlplane.yaml etcd.yaml - version was set to cis-1.9 * Adapt master.yaml - Expand 1.1.13/1.1.14 checks by adding super-admin.conf to the permission and ownership verification - Remove 1.2.12 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual) - Adjust numbering from 1.2.12 to 1.2.29 * Adjust policies.yaml - Check 5.2.3 to 5.2.9 Title Automated to Manual * Append node.yaml - Create 4.3 kube-config group - Create 4.3.1 Ensure that the kube-proxy metrics service is bound to localhost (Automated) * Adjust policies 5.1.3 and 5.1.6 - Merge 5.1.3.1 and 5.1.3.2 into 5.1.3 (use role_is_compliant and clusterrole_is_compliant) - Remove 5.1.6.1 and promote 5.1.6.2 to 5.1.6 since it natively covered 5.1.6.1 artifacts * Add kubectl dependency and update publish - Download kubectl (build stage) based on version and architecture - Add binary checksum verification - Use go env GOARCH for ARCH --- .github/workflows/publish.yml | 9 +- Dockerfile | 9 + Dockerfile.fips.ubi | 9 + Dockerfile.ubi | 9 + cfg/cis-1.9/config.yaml | 2 + cfg/cis-1.9/controlplane.yaml | 60 +++ cfg/cis-1.9/etcd.yaml | 135 +++++ cfg/cis-1.9/master.yaml | 919 ++++++++++++++++++++++++++++++++++ cfg/cis-1.9/node.yaml | 478 ++++++++++++++++++ cfg/cis-1.9/policies.yaml | 372 ++++++++++++++ cfg/config.yaml | 9 + cmd/common_test.go | 3 + docs/architecture.md | 1 + docs/platforms.md | 1 + makefile | 18 +- 15 files changed, 2027 insertions(+), 7 deletions(-) create mode 100644 cfg/cis-1.9/config.yaml create mode 100644 cfg/cis-1.9/controlplane.yaml create mode 100644 cfg/cis-1.9/etcd.yaml create mode 100644 cfg/cis-1.9/master.yaml create mode 100644 cfg/cis-1.9/node.yaml create mode 100644 cfg/cis-1.9/policies.yaml diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 681ae0a02..027941c5e 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -9,6 +9,7 @@ env: ALIAS: aquasecurity DOCKERHUB_ALIAS: aquasec REP: kube-bench + jobs: publish: name: Publish @@ -46,7 +47,10 @@ jobs: images: ${{ env.REP }} tag-semver: | {{version}} - + - name: Extract variables from makefile (kubectl) + id: extract_vars + run: | + echo "KUBECTL_VERSION=$(grep -oP '^KUBECTL_VERSION\s*\?=\s*\K.*' makefile)" >> $GITHUB_ENV - name: Build and push - Docker/ECR id: docker_build uses: docker/build-push-action@v5 @@ -57,6 +61,7 @@ jobs: push: true build-args: | KUBEBENCH_VERSION=${{ steps.get_version.outputs.version }} + KUBECTL_VERSION=${{ env.KUBECTL_VERSION }} tags: | ${{ env.DOCKERHUB_ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }} public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }} @@ -76,6 +81,7 @@ jobs: file: Dockerfile.ubi build-args: | KUBEBENCH_VERSION=${{ steps.get_version.outputs.version }} + KUBECTL_VERSION=${{ env.KUBECTL_VERSION }} tags: | ${{ env.DOCKERHUB_ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}-ubi public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}-ubi @@ -95,6 +101,7 @@ jobs: file: 
Dockerfile.fips.ubi build-args: | KUBEBENCH_VERSION=${{ steps.get_version.outputs.version }} + KUBECTL_VERSION=${{ env.KUBECTL_VERSION }} tags: | ${{ env.DOCKERHUB_ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}-ubi-fips public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}-ubi-fips diff --git a/Dockerfile b/Dockerfile index fb77e0493..22032822b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,6 +9,14 @@ COPY internal/ internal/ ARG KUBEBENCH_VERSION RUN make build && cp kube-bench /go/bin/kube-bench +# Add kubectl to run policies checks +ARG KUBECTL_VERSION TARGETARCH +RUN wget -O /usr/local/bin/kubectl "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/linux/${TARGETARCH}/kubectl" +RUN wget -O kubectl.sha256 "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/linux/${TARGETARCH}/kubectl.sha256" +# Verify kubectl sha256sum +RUN /bin/bash -c 'echo "$( + --key-file= + scored: true + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are + set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + env: "ETCD_PEER_CERT_FILE" + - flag: "--peer-key-file" + env: "ETCD_PEER_KEY_FILE" + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. + --peer-cert-file= + --peer-key-file= + scored: true + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--peer-client-cert-auth" + env: "ETCD_PEER_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --peer-client-cert-auth=true + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false.
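
As a cross-check for 2.2-2.6, the same settings can be read by hand, either as flags on the process or as etcd's ETCD_* environment variables. A sketch, assuming the process is named etcd and that you have root access for /proc:

# Flags, as the audits above parse them from the process table
ps -ef | grep etcd | grep -v grep | grep -oE -- '--(client-cert-auth|auto-tls|peer-client-cert-auth|peer-auto-tls)(=[^ ]*)?'
# Environment-variable form of the same settings
tr '\0' '\n' < /proc/"$(pgrep -x etcd | head -n1)"/environ | grep -E '^ETCD_(CLIENT_CERT_AUTH|AUTO_TLS|PEER_CLIENT_CERT_AUTH|PEER_AUTO_TLS)='
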
+ --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: false diff --git a/cfg/cis-1.9/master.yaml b/cfg/cis-1.9/master.yaml new file mode 100644 index 000000000..ad1423eb5 --- /dev/null +++ b/cfg/cis-1.9/master.yaml @@ -0,0 +1,919 @@ +--- +controls: +version: "cis-1.9" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the + control plane node. + For example, chmod 600 $apiserverconf + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $apiserverconf + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chmod 600 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c permissions=%a "$DATA_DIR" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). 
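
Locating the data directory by hand mirrors the audit logic above; a sketch, using the kubeadm default /var/lib/etcd as the fallback:

# Extract --data-dir from the running etcd process, then inspect it
DATA_DIR=$(ps -ef | grep etcd | grep -v grep | grep -o -- '--data-dir=[^ ]*' | cut -d= -f2)
stat -c 'permissions=%a ownership=%U:%G' "${DATA_DIR:-/var/lib/etcd}"
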
For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c %U:%G "$DATA_DIR" + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the default administrative credential file permissions are set to 600 (Automated)" + audit: | + for adminconf in /etc/kubernetes/{admin.conf,super-admin.conf}; do if test -e $adminconf; then stat -c "permissions=%a %n" $adminconf; fi; done + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /etc/kubernetes/admin.conf + On Kubernetes 1.29+ the super-admin.conf file should also be modified, if present. + For example, chmod 600 /etc/kubernetes/super-admin.conf + scored: true + + - id: 1.1.14 + text: "Ensure that the default administrative credential file ownership is set to root:root (Automated)" + audit: | + for adminconf in /etc/kubernetes/{admin.conf,super-admin.conf}; do if test -e $adminconf; then stat -c "ownership=%U:%G %n" $adminconf; fi; done + use_multiple_values: true + tests: + test_items: + - flag: "ownership" + compare: + op: eq + value: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /etc/kubernetes/admin.conf + On Kubernetes 1.29+ the super-admin.conf file should also be modified, if present. + For example, chown root:root /etc/kubernetes/super-admin.conf + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node.
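
For 1.1.13/1.1.14, permissions and ownership can be inspected in one pass; a sketch (super-admin.conf is only present on kubeadm control planes running Kubernetes 1.29+):

# Prints mode, owner and path for whichever of the two files exist
stat -c 'permissions=%a ownership=%U:%G %n' /etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf 2>/dev/null
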
+ For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "find /etc/kubernetes/pki/ | xargs stat -c %U:%G" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --anonymous-auth=false + scored: false + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. 
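
The 1.2.x audits read flags off the kube-apiserver process table; the same check works by hand. A sketch for 1.2.1/1.2.2, assuming the default binary name:

# Split the command line into tokens and keep only the two flags of interest
ps -ef | grep kube-apiserver | grep -v grep | tr ' ' '\n' | grep -E '^--(anonymous-auth|token-auth-file)=' || echo 'neither flag is present'
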
+ scored: true + + - id: 1.2.3 + text: "Ensure that the DenyServiceExternalIPs admission controller is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: have + value: "DenyServiceExternalIPs" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and add `DenyServiceExternalIPs` to the enabled + admission plugins, for example --enable-admission-plugins=...,DenyServiceExternalIPs,... + scored: false + + - id: 1.2.4 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + scored: true + + - id: 1.2.6 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. + --authorization-mode=RBAC + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`.
+ scored: true + + - id: 1.2.9 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.12 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. + scored: true + + - id: 1.2.13 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + scored: true + + - id: 1.2.14 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... 
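
A one-line view of the admission-plugin state that 1.2.9-1.2.14 reason about; a sketch (either flag may be absent, which several of the tests above explicitly treat as compliant):

ps -ef | grep kube-apiserver | grep -v grep | grep -oE -- '--(enable|disable)-admission-plugins=[^ ]*'
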
+ scored: true + + - id: 1.2.15 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.2.16 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.17 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.18 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.20 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: false + + - id: 1.2.21 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. 
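
The four audit-log settings from 1.2.16-1.2.19 can be spot-checked together; a sketch over the running process:

for f in audit-log-path audit-log-maxage audit-log-maxbackup audit-log-maxsize; do
  ps -ef | grep kube-apiserver | grep -v grep | grep -o -- "--${f}=[^ ]*" || echo "--${f} is not set"
done
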
+ scored: true + + - id: 1.2.22 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. For example, + --service-account-key-file= + scored: true + + - id: 1.2.23 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + - flag: "--etcd-keyfile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.2.24 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + - flag: "--tls-private-key-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. + --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.2.25 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--client-ca-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the client certificate authority file. + --client-ca-file= + scored: true + + - id: 1.2.26 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--etcd-cafile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate authority file parameter. + --etcd-cafile= + scored: true + + - id: 1.2.27 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--encryption-provider-config" + remediation: | + Follow the Kubernetes documentation and configure an EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --encryption-provider-config parameter to the path of that file.
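
The referenced file has to exist before the flag can point at it. A minimal sketch of an EncryptionConfiguration using the aescbc provider; the path and the key value are placeholders, not taken from this patch:

mkdir -p /etc/kubernetes/enc
cat <<'EOF' > /etc/kubernetes/enc/encryption-config.yaml
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources: ["secrets"]
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: REPLACE_WITH_A_BASE64_ENCODED_32_BYTE_KEY
      - identity: {}
EOF
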
+ For example, --encryption-provider-config= + scored: false + + - id: 1.2.28 + text: "Ensure that encryption providers are appropriately configured (Manual)" + audit: | + ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + tests: + test_items: + - flag: "provider" + compare: + op: valid_elements + value: "aescbc,kms,secretbox" + remediation: | + Follow the Kubernetes documentation and configure an EncryptionConfig file. + In this file, choose aescbc, kms or secretbox as the encryption provider. + scored: false + + - id: 1.2.29 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter.
+ --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. + --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-private-key-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--root-ca-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --root-ca-file parameter to the certificate bundle file.
+ --root-ca-file= + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. + --feature-gates=RotateKubeletServerCertificate=true + scored: true + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and ensure the correct value for the --bind-address parameter. + scored: true + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and ensure the correct value for the --bind-address parameter. + scored: true diff --git a/cfg/cis-1.9/node.yaml b/cfg/cis-1.9/node.yaml new file mode 100644 index 000000000..bd7cc1f68 --- /dev/null +++ b/cfg/cis-1.9/node.yaml @@ -0,0 +1,478 @@ +--- +controls: +version: "cis-1.9" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, chmod 600 $kubeletsvc + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"' + tests: + bin_op: or + test_items: + - flag: root:root + - flag: "File not found" + remediation: | + Run the below command (based on the file location on your system) on each worker node.
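
4.1.1 and 4.1.2 can be verified together; a sketch against kubeadm's default kubelet unit drop-in (adjust the path to your $kubeletsvc):

stat -c 'permissions=%a ownership=%U:%G %n' /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
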
+ For example, + chown root:root $kubeletsvc + scored: true + + - id: 4.1.3 + text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: "permissions" + set: true + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chmod 600 $proxykubeconfig + scored: false + + - id: 4.1.4 + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, chown root:root $proxykubeconfig + scored: false + + - id: 4.1.5 + text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chmod 600 $kubeletkubeconfig + scored: true + + - id: 4.1.6 + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: true + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)" + audit: | + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) + if test -z $CAFILE; then CAFILE=$kubeletcafile; fi + if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command to modify the file permissions of the + --client-ca-file: chmod 600 + scored: false + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" + audit: | + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) + if test -z $CAFILE; then CAFILE=$kubeletcafile; fi + if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi + tests: + test_items: + - flag: root:root + compare: + op: eq + value: root:root + remediation: | + Run the following command to modify the ownership of the --client-ca-file.
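
Resolving the CA path first makes 4.1.7/4.1.8 a two-step job; a sketch, falling back to the kubeadm default when the flag is absent:

CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -v grep | grep -o -- '--client-ca-file=[^ ]*' | cut -d= -f2)
stat -c 'permissions=%a ownership=%U:%G %n' "${CAFILE:-/etc/kubernetes/pki/ca.crt}"
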
+ chown root:root + scored: false + + - id: 4.1.9 + text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 600 $kubeletconf + scored: true + + - id: 4.1.10 + text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. 
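
A quick config-file pass over 4.2.1-4.2.3; a sketch against the kubeadm default path (adjust to your $kubeletconf):

grep -A2 'anonymous:' /var/lib/kubelet/config.yaml   # expect enabled: false
grep -E 'mode:|clientCAFile:' /var/lib/kubelet/config.yaml   # expect mode: Webhook and a CA path
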
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Verify that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead of reading the Kubelet Configuration file. + audit: "/bin/ps -fC $kubeletbin" + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service.
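
The file-based audits above also have a live counterpart: the kubelet exposes its effective configuration through the API server proxy. A sketch — the node name is illustrative, and kubectl plus jq access is assumed:

kubectl get --raw "/api/v1/nodes/node-1/proxy/configz" | jq '.kubeletconfig | {readOnlyPort, streamingConnectionIdleTimeout, makeIPTablesUtilChains}'
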
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.8 + text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: gte + value: 0 + - flag: --event-qps + path: '{.eventRecordQPS}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.9 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cert-file + path: '{.tlsCertFile}' + - flag: --tls-private-key-file + path: '{.tlsPrivateKeyFile}' + remediation: | + If using a Kubelet config file, edit the file to set `tlsCertFile` to the location + of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.10 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `rotateCertificates` to `true`, or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.11 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service.
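
Two quick rotation signals for 4.2.10/4.2.11; a sketch using kubeadm default paths (kubelet-client-current.pem is a symlink into the rotated certificate pairs when client rotation is active):

grep -i rotate /var/lib/kubelet/config.yaml
ls -l /var/lib/kubelet/pki/kubelet-client-current.pem 2>/dev/null
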
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.12 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. + --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.13 + text: "Ensure that a limit is set on pod PIDs (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --pod-max-pids + path: '{.podPidsLimit}' + remediation: | + Decide on an appropriate level for this parameter and set it, + either via the --pod-max-pids command line parameter or the PodPidsLimit configuration file setting. + scored: false + + - id: 4.3 + text: "kube-proxy" + checks: + - id: 4.3.1 + text: "Ensure that the kube-proxy metrics service is bound to localhost (Automated)" + audit: "/bin/ps -fC $proxybin" + audit_config: "/bin/sh -c 'if test -e $proxykubeconfig; then cat $proxykubeconfig; fi'" + tests: + bin_op: or + test_items: + - flag: "--metrics-bind-address" + path: '{.metricsBindAddress}' + compare: + op: has + value: "127.0.0.1" + - flag: "--metrics-bind-address" + path: '{.metricsBindAddress}' + set: false + remediation: | + Modify or remove any values which bind the metrics service to a non-localhost address. + The default value is 127.0.0.1:10249. 
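
4.3.1 can be confirmed by hand from either side; a sketch (the listening-socket check assumes the ss utility is available on the node):

ps -ef | grep kube-proxy | grep -v grep | grep -o -- '--metrics-bind-address=[^ ]*'
ss -tlnp 2>/dev/null | grep 10249   # expect 127.0.0.1:10249, not 0.0.0.0:10249
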
+ scored: true diff --git a/cfg/cis-1.9/policies.yaml b/cfg/cis-1.9/policies.yaml new file mode 100644 index 000000000..e99c42c98 --- /dev/null +++ b/cfg/cis-1.9/policies.yaml @@ -0,0 +1,372 @@ +--- +controls: +version: "cis-1.9" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Automated)" + audit: | + kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].name --no-headers | while read -r role_name role_binding subject + do + if [[ "${role_name}" != "cluster-admin" && "${role_binding}" == "cluster-admin" ]]; then + is_compliant="false" + else + is_compliant="true" + fi; + echo "**role_name: ${role_name} role_binding: ${role_binding} subject: ${subject} is_compliant: ${is_compliant}" + done + use_multiple_values: true + tests: + test_items: + - flag: "is_compliant" + compare: + op: eq + value: true + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role: kubectl delete clusterrolebinding [name] + Condition: is_compliant is false if role_name is not cluster-admin and role_binding is cluster-admin. + scored: true + - id: 5.1.2 + text: "Minimize access to secrets (Automated)" + audit: "echo \"canGetListWatchSecretsAsSystemAuthenticated: $(kubectl auth can-i get,list,watch secrets --all-namespaces --as=system:authenticated)\"" + tests: + test_items: + - flag: "canGetListWatchSecretsAsSystemAuthenticated" + compare: + op: eq + value: no + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: true + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Automated)" + audit: | + # Check Roles + kubectl get roles --all-namespaces -o custom-columns=ROLE_NAMESPACE:.metadata.namespace,ROLE_NAME:.metadata.name --no-headers | while read -r role_namespace role_name + do + role_rules=$(kubectl get role -n "${role_namespace}" "${role_name}" -o=json | jq -c '.rules') + if echo "${role_rules}" | grep -q "\[\"\*\"\]"; then + role_is_compliant="false" + else + role_is_compliant="true" + fi; + echo "**role_name: ${role_name} role_namespace: ${role_namespace} role_rules: ${role_rules} role_is_compliant: ${role_is_compliant}" + done + + # Check ClusterRoles + kubectl get clusterroles -o custom-columns=CLUSTERROLE_NAME:.metadata.name --no-headers | while read -r clusterrole_name + do + clusterrole_rules=$(kubectl get clusterrole "${clusterrole_name}" -o=json | jq -c '.rules') + if echo "${clusterrole_rules}" | grep -q "\[\"\*\"\]"; then + clusterrole_is_compliant="false" + else + clusterrole_is_compliant="true" + fi; + echo "**clusterrole_name: ${clusterrole_name} clusterrole_rules: ${clusterrole_rules} clusterrole_is_compliant: ${clusterrole_is_compliant}" + done + use_multiple_values: true + tests: + bin_op: or + test_items: + - flag: "role_is_compliant" + compare: + op: eq + value: true + set: true + - flag: "clusterrole_is_compliant" + compare: + op: eq + value: true + set: true + remediation: | + Where possible replace any use of wildcards ["*"] in roles and clusterroles with specific + objects or actions. + Condition: role_is_compliant is false if ["*"] is found in rules. + Condition: clusterrole_is_compliant is false if ["*"] is found in rules.
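+ As an illustrative sketch (role name and namespace are hypothetical), a compliant Role lists specific resources and verbs instead of ["*"]: + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + namespace: default # hypothetical + name: pod-reader # hypothetical + rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"]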
+ scored: true + - id: 5.1.4 + text: "Minimize access to create pods (Automated)" + audit: | + echo "canCreatePodsAsSystemAuthenticated: $(kubectl auth can-i create pods --all-namespaces --as=system:authenticated)" + tests: + test_items: + - flag: "canCreatePodsAsSystemAuthenticated" + compare: + op: eq + value: no + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: true + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Automated)" + audit: | + kubectl get serviceaccount --all-namespaces --field-selector metadata.name=default -o=json | jq -r '.items[] | " namespace: \(.metadata.namespace), kind: \(.kind), name: \(.metadata.name), automountServiceAccountToken: \(.automountServiceAccountToken | if . == null then "notset" else . end )"' | xargs -L 1 + use_multiple_values: true + tests: + test_items: + - flag: "automountServiceAccountToken" + compare: + op: eq + value: false + set: true + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + `automountServiceAccountToken: false`. + scored: true + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Automated)" + audit: | + kubectl get pods --all-namespaces -o custom-columns=POD_NAMESPACE:.metadata.namespace,POD_NAME:.metadata.name,POD_SERVICE_ACCOUNT:.spec.serviceAccount,POD_IS_AUTOMOUNTSERVICEACCOUNTTOKEN:.spec.automountServiceAccountToken --no-headers | while read -r pod_namespace pod_name pod_service_account pod_is_automountserviceaccounttoken + do + # Retrieve automountServiceAccountToken's value for ServiceAccount and Pod, set to notset if null or <none>. + svacc_is_automountserviceaccounttoken=$(kubectl get serviceaccount -n ${pod_namespace} ${pod_service_account} -o json | jq -r '.automountServiceAccountToken' | sed -e 's/<none>/notset/g' -e 's/null/notset/g') + pod_is_automountserviceaccounttoken=$(echo ${pod_is_automountserviceaccounttoken} | sed -e 's/<none>/notset/g' -e 's/null/notset/g') + if [[ "${svacc_is_automountserviceaccounttoken}" == "false" && ( "${pod_is_automountserviceaccounttoken}" == "false" || "${pod_is_automountserviceaccounttoken}" == "notset" ) ]]; then + is_compliant="true" + elif [[ ( "${svacc_is_automountserviceaccounttoken}" == "true" || "${svacc_is_automountserviceaccounttoken}" == "notset" ) && "${pod_is_automountserviceaccounttoken}" == "false" ]]; then + is_compliant="true" + else + is_compliant="false" + fi + echo "**namespace: ${pod_namespace} pod_name: ${pod_name} service_account: ${pod_service_account} pod_is_automountserviceaccounttoken: ${pod_is_automountserviceaccounttoken} svacc_is_automountserviceaccounttoken: ${svacc_is_automountserviceaccounttoken} is_compliant: ${is_compliant}" + done + use_multiple_values: true + tests: + test_items: + - flag: "is_compliant" + compare: + op: eq + value: true + remediation: | + Modify the definition of ServiceAccounts and Pods which do not need to mount service + account tokens to disable it, with `automountServiceAccountToken: false`. + If both the ServiceAccount and the Pod's .spec specify a value for automountServiceAccountToken, the Pod spec takes precedence. + Condition: Pod is_compliant is true when + - ServiceAccount has automountServiceAccountToken: false and Pod has automountServiceAccountToken: false or notset + - ServiceAccount has automountServiceAccountToken: true or notset and Pod has automountServiceAccountToken: false
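+ For illustration only (object names hypothetical), the ServiceAccount-level opt-out looks like: + apiVersion: v1 + kind: ServiceAccount + metadata: + name: build-robot # hypothetical name + namespace: default + automountServiceAccountToken: false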
+ scored: true + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + - id: 5.1.9 + text: "Minimize access to create persistent volumes (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to PersistentVolume objects in the cluster. + scored: false + - id: 5.1.10 + text: "Minimize access to the proxy sub-resource of nodes (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the proxy sub-resource of node objects. + scored: false + - id: 5.1.11 + text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the approval sub-resource of certificatesigningrequest objects. + scored: false + - id: 5.1.12 + text: "Minimize access to webhook configuration objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects. + scored: false + - id: 5.1.13 + text: "Minimize access to the service account token creation (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the token sub-resource of serviceaccount objects. + scored: false + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + scored: false + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + scored: false + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers.
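+ As one hedged illustration of such a policy (namespace name hypothetical), Pod Security Admission labels can enforce the restricted profile, which among other things disallows hostPID, hostIPC and hostNetwork: + apiVersion: v1 + kind: Namespace + metadata: + name: my-app # hypothetical + labels: + pod-security.kubernetes.io/enforce: restricted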
+ scored: false + - id: 5.2.6 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + scored: false + - id: 5.2.7 + text: "Minimize the admission of root containers (Manual)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` + or `MustRunAs` with the range of UIDs not including 0, is set. + scored: false + - id: 5.2.8 + text: "Minimize the admission of containers with the NET_RAW capability (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with the `NET_RAW` capability. + scored: false + - id: 5.2.9 + text: "Minimize the admission of containers with added capabilities (Manual)" + type: "manual" + remediation: | + Ensure that `allowedCapabilities` is not present in policies for the cluster unless + it is set to an empty array. + scored: false + - id: 5.2.10 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabilities to operate, consider + adding a policy which forbids the admission of containers which do not drop all capabilities. + scored: false + - id: 5.2.11 + text: "Minimize the admission of Windows HostProcess containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. + scored: false + - id: 5.2.12 + text: "Minimize the admission of HostPath volumes (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `hostPath` volumes. + scored: false + - id: 5.2.13 + text: "Minimize the admission of containers which use HostPorts (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers which use `hostPort` sections. + scored: false + - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" + type: "manual" + remediation: | + If the CNI plugin in use does not support network policies, consideration should be given to + making use of a different plugin, or finding an alternate mechanism for restricting traffic + in the Kubernetes cluster. + scored: false + - id: 5.3.2 + text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read Secrets from mounted secret files, rather than + from environment variables.
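+ A minimal sketch of the mounted-file pattern (all names hypothetical): + apiVersion: v1 + kind: Pod + metadata: + name: secret-consumer + spec: + containers: + - name: app + image: registry.example.com/app:1.0 # hypothetical image + volumeMounts: + - name: creds + mountPath: "/etc/creds" + readOnly: true + volumes: + - name: creds + secret: + secretName: app-credentials # hypothetical Secret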
+ scored: false + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + scored: false + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. + An example is shown below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/config.yaml b/cfg/config.yaml index b26115f37..9ccd9e0c0 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -281,6 +281,9 @@ version_mapping: "1.24": "cis-1.24" "1.25": "cis-1.7" "1.26": "cis-1.8" + "1.27": "cis-1.9" + "1.28": "cis-1.9" + "1.29": "cis-1.9" "eks-1.0.1": "eks-1.0.1" "eks-1.1.0": "eks-1.1.0" "eks-1.2.0": "eks-1.2.0" @@ -359,6 +362,12 @@ target_mapping: - "controlplane" - "etcd" - "policies" + "cis-1.9": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" "gke-1.0": - "master" - "node" diff --git a/cmd/common_test.go b/cmd/common_test.go index 3627c9865..53793a000 100644 --- a/cmd/common_test.go +++ b/cmd/common_test.go @@ -242,6 +242,9 @@ func TestMapToCISVersion(t *testing.T) { {kubeVersion: "1.24", succeed: true, exp: "cis-1.24"}, {kubeVersion: "1.25", succeed: true, exp: "cis-1.7"}, {kubeVersion: "1.26", succeed: true, exp: "cis-1.8"}, + {kubeVersion: "1.27", succeed: true, exp: "cis-1.9"}, + {kubeVersion: "1.28", succeed: true, exp: "cis-1.9"}, + {kubeVersion: "1.29", succeed: true, exp: "cis-1.9"}, {kubeVersion: "gke-1.2.0", succeed: true, exp: "gke-1.2.0"}, {kubeVersion: "ocp-3.10", succeed: true, exp: "rh-0.7"}, {kubeVersion: "ocp-3.11", succeed: true, exp: "rh-0.7"}, diff --git a/docs/architecture.md b/docs/architecture.md index 09d081e30..ef17ca6f6 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -22,6 +22,7 @@ The following table shows the valid targets based on the CIS Benchmark version.
| cis-1.24 | master, controlplane, node, etcd, policies | | cis-1.7 | master, controlplane, node, etcd, policies | | cis-1.8 | master, controlplane, node, etcd, policies | +| cis-1.9 | master, controlplane, node, etcd, policies | | gke-1.0 | master, controlplane, node, etcd, policies, managedservices | | gke-1.2.0 | controlplane, node, policies, managedservices | | eks-1.0.1 | controlplane, node, policies, managedservices | diff --git a/docs/platforms.md b/docs/platforms.md index 92b6c7d10..e907609ad 100644 --- a/docs/platforms.md +++ b/docs/platforms.md @@ -17,6 +17,7 @@ Some are defined by other hardening guides. | CIS | [1.24](https://workbench.cisecurity.org/benchmarks/10873) | cis-1.24 | 1.24 | | CIS | [1.7](https://workbench.cisecurity.org/benchmarks/11107) | cis-1.7 | 1.25 | | CIS | [1.8](https://workbench.cisecurity.org/benchmarks/12958) | cis-1.8 | 1.26 | +| CIS | [1.9](https://workbench.cisecurity.org/benchmarks/16828) | cis-1.9 | 1.27-1.29 | | CIS | [GKE 1.0.0](https://workbench.cisecurity.org/benchmarks/4536) | gke-1.0 | GKE | | CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE | | CIS | [EKS 1.0.1](https://workbench.cisecurity.org/benchmarks/6041) | eks-1.0.1 | EKS | diff --git a/makefile b/makefile index f9c91fc01..a0e01fb51 100644 --- a/makefile +++ b/makefile @@ -11,6 +11,8 @@ uname := $(shell uname -s) BUILDX_PLATFORM ?= linux/amd64,linux/arm64,linux/arm,linux/ppc64le,linux/s390x DOCKER_ORGS ?= aquasec public.ecr.aws/aquasecurity GOARCH ?= $@ +KUBECTL_VERSION ?= 1.28.7 +ARCH ?= $(shell go env GOARCH) ifneq ($(findstring Microsoft,$(shell uname -r)),) BUILD_OS := windows @@ -45,15 +47,19 @@ build-fips: # builds the current dev docker version build-docker: docker build --build-arg BUILD_DATE=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ") \ --build-arg VCS_REF=$(VERSION) \ --build-arg KUBEBENCH_VERSION=$(KUBEBENCH_VERSION) \ --build-arg KUBECTL_VERSION=$(KUBECTL_VERSION) \ --build-arg TARGETARCH=$(ARCH) \ -t $(IMAGE_NAME) . build-docker-ubi: docker build -f Dockerfile.ubi --build-arg BUILD_DATE=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ") \ --build-arg VCS_REF=$(VERSION) \ --build-arg KUBEBENCH_VERSION=$(KUBEBENCH_VERSION) \ --build-arg KUBECTL_VERSION=$(KUBECTL_VERSION) \ --build-arg TARGETARCH=$(ARCH) \ -t $(IMAGE_NAME_UBI) . # unit tests tests:
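With the kubectl version wired through the makefile, a local smoke test of this change might look like `make build-docker KUBECTL_VERSION=1.28.7` followed by running the resulting image with `kube-bench run --benchmark cis-1.9` (a hedged sketch: the version value is just the makefile default shown above, and the image name depends on the makefile's IMAGE_NAME).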