From 2ad0aa13805e3e2f0f2c91fe04c44126d31f676c Mon Sep 17 00:00:00 2001 From: rootxrishabh Date: Fri, 10 May 2024 16:39:31 +0530 Subject: [PATCH 01/13] Added yamls for OKE Signed-off-by: rootxrishabh --- cfg/config.yaml | 3 + cfg/oke-1.26/config.yaml | 2 + cfg/oke-1.26/node.yaml | 321 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 326 insertions(+) create mode 100644 cfg/oke-1.26/config.yaml create mode 100644 cfg/oke-1.26/node.yaml diff --git a/cfg/config.yaml b/cfg/config.yaml index 05aeeb477..aa6a4a623 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -278,6 +278,7 @@ version_mapping: "1.24": "cis-1.24" "1.25": "cis-1.7" "1.26": "cis-1.8" + "oke-1.26": "oke-1.26" "eks-1.0.1": "eks-1.0.1" "eks-1.1.0": "eks-1.1.0" "eks-1.2.0": "eks-1.2.0" @@ -474,3 +475,5 @@ target_mapping: - "controlplane" - "node" - "policies" + "oke-1.26": + - "node" diff --git a/cfg/oke-1.26/config.yaml b/cfg/oke-1.26/config.yaml new file mode 100644 index 000000000..4cbf4cf00 --- /dev/null +++ b/cfg/oke-1.26/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml \ No newline at end of file diff --git a/cfg/oke-1.26/node.yaml b/cfg/oke-1.26/node.yaml new file mode 100644 index 000000000..983a129d7 --- /dev/null +++ b/cfg/oke-1.26/node.yaml @@ -0,0 +1,321 @@ +--- +controls: +version: "oke-1.26" +id: 3.1 +text: "Worker Nodes" +type: "node" +groups: + - id: 3.1 + text: "Worker Node Configuration Files" + checks: + - id: 3.1.1 + text: "Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Manual)" + audit: "stat -c %a /etc/kubernetes/kubelet.conf; stat -c %a /etc/kubernetes/bootstrap-kubelet.conf" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 644 $kubeletkubeconfig + scored: false + - id: 3.1.2 + text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)" + audit: "stat -c %U:%G etc/kubernetes/kubelet.conf; stat -c %U:%G etc/kubernetes/bootstrap-kubelet.conf" + tests: + bin_op: or + test_items: + - flag: root:root + - flag: "$proxykubeconfig" + set: false + remediation: | + Run the below command (based on the file location on your system) on the each worker node. 
+          For example, chown root:root $proxykubeconfig
+        scored: false
+      - id: 3.1.3
+        text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Manual)"
+        audit: "stat -c %a etc/kubernetes/kubelet.conf; stat -c %a etc/kubernetes/bootstrap-kubelet.conf"
+        tests:
+          test_items:
+            - flag: "permissions"
+              set: true
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chmod 644 $kubeletconf
+        scored: true
+      - id: 3.1.4
+        text: "Ensure that the kubelet configuration file ownership is set to root:root (Manual)"
+        audit: "stat -c %U:%G etc/kubernetes/kubelet.conf; stat -c %U:%G etc/kubernetes/bootstrap-kubelet.conf"
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chown root:root /etc/kubernetes/kubelet.conf
+        scored: false
+  - id: 3.2
+    text: "Kubelet"
+    checks:
+      - id: 3.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: "cat /etc/systemd/system/kubelet.service"
+        # audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              path: '{.authentication.anonymous.enabled}'
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          If modifying the Kubelet service config file, edit the kubelet.service file
+          /etc/systemd/system/kubelet.service and set the below parameter
+          --anonymous-auth=false
+          Based on your system, restart the kubelet service and check status
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: true
+      - id: 3.2.2
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        audit: "cat /etc/systemd/system/kubelet.service"
+        # audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --authorization-mode
+              path: '{.authorization.mode}'
+              compare:
+                op: nothave
+                value: AlwaysAllow
+        remediation: |
+          If modifying the Kubelet service config file, edit the kubelet.service file
+          /etc/systemd/system/kubelet.service and set the below parameter
+          --authorization-mode=Webhook
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: true
+      - id: 3.2.3
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: "cat /etc/systemd/system/kubelet.service"
+        # audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --client-ca-file
+              path: '{.authentication.x509.clientCAFile}'
+        remediation: |
+          If modifying the Kubelet service config file, edit the kubelet.service file
+          /etc/systemd/system/kubelet.service and set the below parameter
+          --client-ca-file=/etc/kubernetes/ca.crt \
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: false
+      - id: 3.2.4
+        text: "Ensure that the --read-only-port argument is set to 0 (Manual)"
+        audit: "cat /etc/systemd/system/kubelet.service"
+        # audit_config: "/bin/cat $kubeletconf"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--read-only-port"
+              path: '{.readOnlyPort}'
+              compare:
+                op: eq
+                value: 0
+            - flag: "--read-only-port"
+              path: '{.readOnlyPort}'
+              set: false
+        remediation: |
+          If modifying the Kubelet config file, edit the kubelet.service file
+          /etc/systemd/system/kubelet.service and set the below parameter
+          --read-only-port=0
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: false
+      - id: 3.2.5
+        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
+        audit: "cat /etc/systemd/system/kubelet.service"
+        # audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              compare:
+                op: noteq
+                value: 0
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              set: false
+          bin_op: or
+        remediation: |
+          If modifying the Kubelet service config file, edit the kubelet.service file
+          /etc/systemd/system/kubelet.service and set the below parameter
+          --streaming-connection-idle-timeout
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: false
+      - id: 3.2.6
+        text: "Ensure that the --protect-kernel-defaults argument is set to true (Manual)"
+        audit: "cat /etc/systemd/system/kubelet.service"
+        # audit_config: "cat /etc/systemd/system/kubelet.service"
+        tests:
+          test_items:
+            - flag: --protect-kernel-defaults
+              path: '{.protectKernelDefaults}'
+              set: true
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          If modifying the Kubelet service config file, edit the kubelet.service file
+          /etc/systemd/system/kubelet.service and set the below parameter
+          --protect-kernel-defaults=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: false
+      - id: 3.2.7
+        text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
+        audit: "cat /etc/systemd/system/kubelet.service"
+        # audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              compare:
+                op: eq
+                value: true
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              set: false
+          bin_op: or
+        remediation: |
+          If modifying the Kubelet service config file, edit the kubelet.service file
+          /etc/systemd/system/kubelet.service and set the below parameter
+          --make-iptables-util-chains=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: true
+      - id: 3.2.8
+        text: "Ensure that the --hostname-override argument is not set (Manual)"
+        # This is one of those properties that can only be set as a command line argument.
+        # To check if the property is set as expected, we need to parse the kubelet command
+        # instead of reading the Kubelet Configuration file.
+ audit: "cat /etc/systemd/system/kubelet.service" + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + If modifying the Kubelet config file, edit the kubelet.service file + /etc/systemd/system/kubelet-.service and set the below parameter + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: false + - id: 3.2.9 + text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Automated)" + audit: "cat /etc/systemd/system/kubelet.service" + # audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + set: true + compare: + op: eq + value: 0 + remediation: | + If modifying the Kubelet service config file, edit the kubelet.service file + /etc/systemd/system/kubelet.service and set the below parameter + --event-qps=0 + If using command line arguments, edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + - id: 3.2.10 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "cat /etc/systemd/system/kubelet.service" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + - flag: "--tls-private-key-file" + remediation: | + If modifying the Kubelet service config file, edit the kubelet.service file + /etc/systemd/system/kubelet.service and set the below parameter + Verify that the `tls-cert-file=/var/lib/kubelet/pki/tls.pem`. + Verify that the `tls-private-key-file=/var/lib/kubelet/pki/tls.key`. + Based on your system, restart the kubelet service and check status + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + - id: 3.2.11 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "cat /etc/systemd/system/kubelet.service" + # audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If modifying the Kubelet service config file, edit the kubelet.service file + /etc/systemd/system/kubelet.service and set the below parameter + Verify that the `--rotate-certificates` is present. + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + - id: 3.2.12 + text: "Ensure that the --rotate-server-certificates argument is set to true (Manual)" + audit: "cat /etc/systemd/system/kubelet.service" + # audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If modifying the Kubelet service config file, edit the kubelet.service file + /etc/systemd/system/kubelet.service and set the below parameter + --rotate-server-certificates=true + Based on your system, restart the kubelet service and check status + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true \ No newline at end of file From 21484093ec2fdaff96251cfd988600732e11f018 Mon Sep 17 00:00:00 2001 From: rootxrishabh Date: Mon, 10 Jun 2024 18:17:39 +0530 Subject: [PATCH 02/13] Crawl paths from ENVs Signed-off-by: rootxrishabh --- cfg/oke-1.26/node.yaml | 54 ++++++++++++++++++++++-------------------- 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/cfg/oke-1.26/node.yaml b/cfg/oke-1.26/node.yaml index 983a129d7..20ff4afd0 100644 --- a/cfg/oke-1.26/node.yaml +++ b/cfg/oke-1.26/node.yaml @@ -10,7 +10,7 @@ groups: checks: - id: 3.1.1 text: "Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Manual)" - audit: "stat -c %a /etc/kubernetes/kubelet.conf; stat -c %a /etc/kubernetes/bootstrap-kubelet.conf" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' tests: test_items: - flag: "permissions" @@ -22,9 +22,10 @@ groups: For example, chmod 644 $kubeletkubeconfig scored: false + - id: 3.1.2 text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)" - audit: "stat -c %U:%G etc/kubernetes/kubelet.conf; stat -c %U:%G etc/kubernetes/bootstrap-kubelet.conf" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' tests: bin_op: or test_items: @@ -37,7 +38,7 @@ groups: scored: false - id: 3.1.3 text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Manual)" - audit: "stat -c %a etc/kubernetes/kubelet.conf; stat -c %a etc/kubernetes/bootstrap-kubelet.conf" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' tests: test_items: - flag: "permissions" @@ -51,7 +52,7 @@ groups: scored: true - id: 3.1.4 text: "Ensure that the kubelet configuration file ownership is set to root:root (Manual)" - audit: "stat -c %U:%G etc/kubernetes/kubelet.conf; stat -c %U:%G etc/kubernetes/bootstrap-kubelet.conf" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' tests: test_items: - flag: root:root @@ -64,8 +65,8 @@ groups: checks: - id: 3.2.1 text: "Ensure that the --anonymous-auth argument is set to false (Automated)" - audit: "cat /etc/systemd/system/kubelet.service" - # audit_config: "/bin/cat $kubeletconf" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" tests: test_items: - flag: "--anonymous-auth" @@ -84,8 +85,8 @@ groups: scored: true - id: 3.2.2 text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: "cat /etc/systemd/system/kubelet.service" - # audit_config: "/bin/cat $kubeletconf" 
+ audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" tests: test_items: - flag: --authorization-mode @@ -104,8 +105,8 @@ groups: scored: true - id: 3.2.3 text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: "cat /etc/systemd/system/kubelet.service" - # audit_config: "/bin/cat $kubeletconf" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" tests: test_items: - flag: --client-ca-file @@ -121,8 +122,8 @@ groups: scored: false - id: 3.2.4 text: "Ensure that the --read-only-port argument is set to 0 (Manual)" - audit: "cat /etc/systemd/system/kubelet.service" - # audit_config: "/bin/cat $kubeletconf" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" tests: bin_op: or test_items: @@ -145,8 +146,8 @@ groups: scored: false - id: 3.2.5 text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" - audit: "cat /etc/systemd/system/kubelet.service" - # audit_config: "/bin/cat $kubeletconf" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" tests: test_items: - flag: --streaming-connection-idle-timeout @@ -169,8 +170,8 @@ groups: scored: false - id: 3.2.6 text: "Ensure that the --protect-kernel-defaults argument is set to true (Manual)" - audit: "cat /etc/systemd/system/kubelet.service" - # audit_config: "cat /etc/systemd/system/kubelet.service" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" tests: test_items: - flag: --protect-kernel-defaults @@ -190,8 +191,8 @@ groups: scored: false - id: 3.2.7 text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" - audit: "cat /etc/systemd/system/kubelet.service" - # audit_config: "/bin/cat $kubeletconf" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" tests: test_items: - flag: --make-iptables-util-chains @@ -217,7 +218,7 @@ groups: # This is one of those properties that can only be set as a command line argument. # To check if the property is set as expected, we need to parse the kubelet command # instead reading the Kubelet Configuration file. 
- audit: "cat /etc/systemd/system/kubelet.service" + audit: "/bin/ps -fC $kubeletbin " tests: test_items: - flag: --hostname-override @@ -232,8 +233,8 @@ groups: scored: false - id: 3.2.9 text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Automated)" - audit: "cat /etc/systemd/system/kubelet.service" - # audit_config: "/bin/cat $kubeletconf" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" tests: test_items: - flag: --event-qps @@ -255,7 +256,8 @@ groups: scored: true - id: 3.2.10 text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" - audit: "cat /etc/systemd/system/kubelet.service" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" tests: bin_op: and test_items: @@ -273,8 +275,8 @@ groups: scored: true - id: 3.2.11 text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" - audit: "cat /etc/systemd/system/kubelet.service" - # audit_config: "/bin/cat $kubeletconf" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" tests: test_items: - flag: --rotate-certificates @@ -297,8 +299,8 @@ groups: scored: true - id: 3.2.12 text: "Ensure that the --rotate-server-certificates argument is set to true (Manual)" - audit: "cat /etc/systemd/system/kubelet.service" - # audit_config: "/bin/cat $kubeletconf" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" tests: test_items: - flag: --rotate-certificates From a70243f7a366b0e1e234b19581782abb8c3dc887 Mon Sep 17 00:00:00 2001 From: Rishabh Soni Date: Mon, 10 Jun 2024 20:19:34 +0530 Subject: [PATCH 03/13] Fixed typos --- cfg/oke-1.26/node.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cfg/oke-1.26/node.yaml b/cfg/oke-1.26/node.yaml index 20ff4afd0..79d6d4dca 100644 --- a/cfg/oke-1.26/node.yaml +++ b/cfg/oke-1.26/node.yaml @@ -1,8 +1,8 @@ --- controls: version: "oke-1.26" -id: 3.1 -text: "Worker Nodes" +id: 3 +text: "Worker Node Security Configuration" type: "node" groups: - id: 3.1 From 5414877908da97526ea0b94544b60114da4f0e48 Mon Sep 17 00:00:00 2001 From: Rishabh Soni Date: Tue, 11 Jun 2024 16:10:06 +0530 Subject: [PATCH 04/13] Update config.yaml --- cfg/config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/cfg/config.yaml b/cfg/config.yaml index aa6a4a623..105d3c630 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -165,6 +165,7 @@ node: - "/var/lib/rancher/k3s/server/cred/admin.kubeconfig" - "/var/lib/rancher/k3s/agent/kubelet.kubeconfig" confs: + - "/etc/kubernetes/kubelet-config.json" - "/etc/kubernetes/kubelet-config.yaml" - "/var/lib/kubelet/config.yaml" - "/var/lib/kubelet/config.yml" From 6ed1721538115cdfba4d3126dc47f0c65068e235 Mon Sep 17 00:00:00 2001 From: Rishabh Soni Date: Tue, 11 Jun 2024 16:55:08 +0530 Subject: [PATCH 05/13] changes made --- cfg/oke-1.26/node.yaml | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/cfg/oke-1.26/node.yaml b/cfg/oke-1.26/node.yaml index 79d6d4dca..2e5b005b5 100644 --- a/cfg/oke-1.26/node.yaml +++ b/cfg/oke-1.26/node.yaml @@ -76,7 +76,7 @@ groups: value: false remediation: | If modifying the Kubelet service config file, edit the kubelet.service file - /etc/systemd/system/kubelet.service and set the below parameter + /etc/kubernetes/kubelet-config.json and set the below parameter --anonymous-auth=false Based on your system, restart the kubelet service and check status systemctl 
daemon-reload @@ -96,7 +96,7 @@ groups: value: AlwaysAllow remediation: | iff modifying the Kubelet service config file, edit the kubelet.service file - /etc/systemd/system/kubelet.service and set the below parameter + /etc/kubernetes/kubelet-config.json and set the below parameter --authorization-mode=Webhook Based on your system, restart the kubelet service. For example, systemctl daemon-reload @@ -113,7 +113,7 @@ groups: path: '{.authentication.x509.clientCAFile}' remediation: | If modifying the Kubelet service config file, edit the kubelet.service file - /etc/systemd/system/kubelet.service and set the below parameter + /etc/kubernetes/kubelet-config.json and set the below parameter --client-ca-file=/etc/kubernetes/ca.crt \ Based on your system, restart the kubelet service. For example, systemctl daemon-reload @@ -161,7 +161,7 @@ groups: bin_op: or remediation: | If modifying the Kubelet service config file, edit the kubelet.service file - /etc/systemd/system/kubelet.service and set the below parameter + /etc/kubernetes/kubelet-config.json and set the below parameter --streaming-connection-idle-timeout Based on your system, restart the kubelet service. For example: systemctl daemon-reload @@ -182,7 +182,7 @@ groups: value: true remediation: | If modifying the Kubelet service config file, edit the kubelet.service file - /etc/systemd/system/kubelet.service and set the below parameter + /etc/kubernetes/kubelet-config.json and set the below parameter --protect-kernel-defaults=true Based on your system, restart the kubelet service. For example: systemctl daemon-reload @@ -206,7 +206,7 @@ groups: bin_op: or remediation: | If modifying the Kubelet service config file, edit the kubelet.service file - /etc/systemd/system/kubelet.service and set the below parameter + /etc/kubernetes/kubelet-config.json and set the below parameter --make-iptables-util-chains:true Based on your system, restart the kubelet service. For example: systemctl daemon-reload @@ -226,6 +226,8 @@ groups: remediation: | If modifying the Kubelet config file, edit the kubelet.service file /etc/systemd/system/kubelet-.service and set the below parameter + --hostname-override=NODE NAME (where NODE NAME is the internal IP ex. + 10.0.10.4, as assigned my OKE on build) Based on your system, restart the kubelet service. For example: systemctl daemon-reload systemctl restart kubelet.service @@ -245,9 +247,9 @@ groups: value: 0 remediation: | If modifying the Kubelet service config file, edit the kubelet.service file - /etc/systemd/system/kubelet.service and set the below parameter + /etc/kubernetes/kubelet-config.json and set the below parameter --event-qps=0 - If using command line arguments, edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + If using command line arguments, edit the kubelet service file /etc/kubernetes/kubelet-config.json.d/10-kubeadm.conf on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. Based on your system, restart the kubelet service. 
For example: systemctl daemon-reload @@ -261,11 +263,13 @@ groups: tests: bin_op: and test_items: - - flag: "--tls-cert-file" - - flag: "--tls-private-key-file" + - flag: --tls-cert-file + path: '{.tlsCertFile}' + - flag: --tls-private-key-file + path: '{.tlsPrivateKeyFile}' remediation: | - If modifying the Kubelet service config file, edit the kubelet.service file - /etc/systemd/system/kubelet.service and set the below parameter + If modifying the Kubelet service config file, edit the kubelet.service file + /etc/kubernetes/kubelet-config.json and set the below parameter Verify that the `tls-cert-file=/var/lib/kubelet/pki/tls.pem`. Verify that the `tls-private-key-file=/var/lib/kubelet/pki/tls.key`. Based on your system, restart the kubelet service and check status @@ -290,7 +294,7 @@ groups: bin_op: or remediation: | If modifying the Kubelet service config file, edit the kubelet.service file - /etc/systemd/system/kubelet.service and set the below parameter + /etc/kubernetes/kubelet-config.json and set the below parameter Verify that the `--rotate-certificates` is present. Based on your system, restart the kubelet service. For example, systemctl daemon-reload @@ -314,7 +318,7 @@ groups: bin_op: or remediation: | If modifying the Kubelet service config file, edit the kubelet.service file - /etc/systemd/system/kubelet.service and set the below parameter + /etc/kubernetes/kubelet-config.json and set the below parameter --rotate-server-certificates=true Based on your system, restart the kubelet service and check status systemctl daemon-reload From 75ead5448091d9ec9c088f5dfb71b1d140b1a90e Mon Sep 17 00:00:00 2001 From: deboshree-b Date: Fri, 16 Aug 2024 05:41:31 +0530 Subject: [PATCH 06/13] NDEV-20011 : adding CIS GKE-1.6.0 benchmarks --- cfg/gke-1.6.0/config.yaml | 2 + cfg/gke-1.6.0/controlplane.yaml | 35 ++ cfg/gke-1.6.0/managedservices.yaml | 706 +++++++++++++++++++++++++++++ cfg/gke-1.6.0/master.yaml | 6 + cfg/gke-1.6.0/node.yaml | 335 ++++++++++++++ cfg/gke-1.6.0/policies.yaml | 239 ++++++++++ 6 files changed, 1323 insertions(+) create mode 100644 cfg/gke-1.6.0/config.yaml create mode 100644 cfg/gke-1.6.0/controlplane.yaml create mode 100644 cfg/gke-1.6.0/managedservices.yaml create mode 100644 cfg/gke-1.6.0/master.yaml create mode 100644 cfg/gke-1.6.0/node.yaml create mode 100644 cfg/gke-1.6.0/policies.yaml diff --git a/cfg/gke-1.6.0/config.yaml b/cfg/gke-1.6.0/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/gke-1.6.0/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/gke-1.6.0/controlplane.yaml b/cfg/gke-1.6.0/controlplane.yaml new file mode 100644 index 000000000..515a24752 --- /dev/null +++ b/cfg/gke-1.6.0/controlplane.yaml @@ -0,0 +1,35 @@ +--- +controls: +version: "gke-1.6.0" +id: 2 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 2.1 + text: "Authentication and Authorization" + checks: + - id: 2.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + You can remediate the availability of client certificates in your GKE cluster. See + Recommendation 5.8.1. 
+ scored: false + + - id: 2.2 + text: "Logging" + type: skip + checks: + - id: 2.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + type: "manual" + remediation: "This control cannot be modified in GKE." + scored: false + + - id: 2.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: "This control cannot be modified in GKE." + scored: false diff --git a/cfg/gke-1.6.0/managedservices.yaml b/cfg/gke-1.6.0/managedservices.yaml new file mode 100644 index 000000000..a15f49c9c --- /dev/null +++ b/cfg/gke-1.6.0/managedservices.yaml @@ -0,0 +1,706 @@ +--- +controls: +version: "gke-1.6.0" +id: 5 +text: "Managed Services" +type: "managedservices" +groups: + - id: 5.1 + text: "Image Registry and Image Scanning" + checks: + - id: 5.1.1 + text: "Ensure Image Vulnerability Scanning using GCR Container Analysis + or a third-party provider (Manual)" + type: "manual" + remediation: | + Using Command Line: + + gcloud services enable containerscanning.googleapis.com + scored: false + + - id: 5.1.2 + text: "Minimize user access to GCR (Manual)" + type: "manual" + remediation: | + Using Command Line: + To change roles at the GCR bucket level: + Firstly, run the following if read permissions are required: + + gsutil iam ch [TYPE]:[EMAIL-ADDRESS]:objectViewer + gs://artifacts.[PROJECT_ID].appspot.com + + Then remove the excessively privileged role (Storage Admin / Storage Object Admin / + Storage Object Creator) using: + + gsutil iam ch -d [TYPE]:[EMAIL-ADDRESS]:[ROLE] + gs://artifacts.[PROJECT_ID].appspot.com + + where: + [TYPE] can be one of the following: + o user, if the [EMAIL-ADDRESS] is a Google account + o serviceAccount, if [EMAIL-ADDRESS] specifies a Service account + [EMAIL-ADDRESS] can be one of the following: + o a Google account (for example, someone@example.com) + o a Cloud IAM service account + To modify roles defined at the project level and subsequently inherited within the GCR + bucket, or the Service Account User role, extract the IAM policy file, modify it accordingly + and apply it using: + + gcloud projects set-iam-policy [PROJECT_ID] [POLICY_FILE] + scored: false + + - id: 5.1.3 + text: "Minimize cluster access to read-only for GCR (Manual)" + type: "manual" + remediation: | + Using Command Line: + For an account explicitly granted to the bucket. 
First, add read access to the Kubernetes + Service Account + + gsutil iam ch [TYPE]:[EMAIL-ADDRESS]:objectViewer + gs://artifacts.[PROJECT_ID].appspot.com + + where: + [TYPE] can be one of the following: + o user, if the [EMAIL-ADDRESS] is a Google account + o serviceAccount, if [EMAIL-ADDRESS] specifies a Service account + [EMAIL-ADDRESS] can be one of the following: + o a Google account (for example, someone@example.com) + o a Cloud IAM service account + + Then remove the excessively privileged role (Storage Admin / Storage Object Admin / + Storage Object Creator) using: + + gsutil iam ch -d [TYPE]:[EMAIL-ADDRESS]:[ROLE] + gs://artifacts.[PROJECT_ID].appspot.com + + For an account that inherits access to the GCR Bucket through Project level permissions, + modify the Projects IAM policy file accordingly, then upload it using: + + gcloud projects set-iam-policy [PROJECT_ID] [POLICY_FILE] + scored: false + + - id: 5.1.4 + text: "Minimize Container Registries to only those approved (Manual)" + type: "manual" + remediation: | + Using Command Line: + First, update the cluster to enable Binary Authorization: + + gcloud container cluster update [CLUSTER_NAME] \ + --enable-binauthz + + Create a Binary Authorization Policy using the Binary Authorization Policy Reference + (https://cloud.google.com/binary-authorization/docs/policy-yaml-reference) for guidance. + Import the policy file into Binary Authorization: + + gcloud container binauthz policy import [YAML_POLICY] + scored: false + + - id: 5.2 + text: "Identity and Access Management (IAM)" + checks: + - id: 5.2.1 + text: "Ensure GKE clusters are not running using the Compute Engine + default service account (Manual)" + type: "manual" + remediation: | + Using Command Line: + Firstly, create a minimally privileged service account: + + gcloud iam service-accounts create [SA_NAME] \ + --display-name "GKE Node Service Account" + export NODE_SA_EMAIL=`gcloud iam service-accounts list \ + --format='value(email)' \ + --filter='displayName:GKE Node Service Account'` + + Grant the following roles to the service account: + + export PROJECT_ID=`gcloud config get-value project` + gcloud projects add-iam-policy-binding $PROJECT_ID \ + --member serviceAccount:$NODE_SA_EMAIL \ + --role roles/monitoring.metricWriter + gcloud projects add-iam-policy-binding $PROJECT_ID \ + --member serviceAccount:$NODE_SA_EMAIL \ + --role roles/monitoring.viewer + gcloud projects add-iam-policy-binding $PROJECT_ID \ + --member serviceAccount:$NODE_SA_EMAIL \ + --role roles/logging.logWriter + + To create a new Node pool using the Service account, run the following command: + + gcloud container node-pools create [NODE_POOL] \ + --service-account=[SA_NAME]@[PROJECT_ID].iam.gserviceaccount.com \ + --cluster=[CLUSTER_NAME] --zone [COMPUTE_ZONE] + + You will need to migrate your workloads to the new Node pool, and delete Node pools that + use the default service account to complete the remediation. + scored: false + + - id: 5.2.2 + text: "Prefer using dedicated GCP Service Accounts and Workload Identity (Manual)" + type: "manual" + remediation: | + Using Command Line: + + gcloud beta container clusters update [CLUSTER_NAME] --zone [CLUSTER_ZONE] \ + --identity-namespace=[PROJECT_ID].svc.id.goog + + Note that existing Node pools are unaffected. New Node pools default to --workload- + metadata-from-node=GKE_METADATA_SERVER . 
+ + Then, modify existing Node pools to enable GKE_METADATA_SERVER: + + gcloud beta container node-pools update [NODEPOOL_NAME] \ + --cluster=[CLUSTER_NAME] --zone [CLUSTER_ZONE] \ + --workload-metadata-from-node=GKE_METADATA_SERVER + + You may also need to modify workloads in order for them to use Workload Identity as + described within https://cloud.google.com/kubernetes-engine/docs/how-to/workload- + identity. Also consider the effects on the availability of your hosted workloads as Node + pools are updated, it may be more appropriate to create new Node Pools. + scored: false + + - id: 5.3 + text: "Cloud Key Management Service (Cloud KMS)" + checks: + - id: 5.3.1 + text: "Ensure Kubernetes Secrets are encrypted using keys managed in Cloud KMS (Manual)" + type: "manual" + remediation: | + Using Command Line: + To create a key + + Create a key ring: + + gcloud kms keyrings create [RING_NAME] \ + --location [LOCATION] \ + --project [KEY_PROJECT_ID] + + Create a key: + + gcloud kms keys create [KEY_NAME] \ + --location [LOCATION] \ + --keyring [RING_NAME] \ + --purpose encryption \ + --project [KEY_PROJECT_ID] + + Grant the Kubernetes Engine Service Agent service account the Cloud KMS CryptoKey + Encrypter/Decrypter role: + + gcloud kms keys add-iam-policy-binding [KEY_NAME] \ + --location [LOCATION] \ + --keyring [RING_NAME] \ + --member serviceAccount:[SERVICE_ACCOUNT_NAME] \ + --role roles/cloudkms.cryptoKeyEncrypterDecrypter \ + --project [KEY_PROJECT_ID] + + To create a new cluster with Application-layer Secrets Encryption: + + gcloud container clusters create [CLUSTER_NAME] \ + --cluster-version=latest \ + --zone [ZONE] \ + --database-encryption-key projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKey s/[KEY_NAME] \ + --project [CLUSTER_PROJECT_ID] + + To enable on an existing cluster: + + gcloud container clusters update [CLUSTER_NAME] \ + --zone [ZONE] \ + --database-encryption-key projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKey s/[KEY_NAME] \ + --project [CLUSTER_PROJECT_ID] + scored: false + + - id: 5.4 + text: "Node Metadata" + checks: + - id: 5.4.1 + text: "Ensure legacy Compute Engine instance metadata APIs are Disabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To update an existing cluster, create a new Node pool with the legacy GCE metadata + endpoint disabled: + + gcloud container node-pools create [POOL_NAME] \ + --metadata disable-legacy-endpoints=true \ + --cluster [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] + + You will need to migrate workloads from any existing non-conforming Node pools, to the + new Node pool, then delete non-conforming Node pools to complete the remediation. + scored: false + + - id: 5.4.2 + text: "Ensure the GKE Metadata Server is Enabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + gcloud beta container clusters update [CLUSTER_NAME] \ + --identity-namespace=[PROJECT_ID].svc.id.goog + Note that existing Node pools are unaffected. New Node pools default to --workload- + metadata-from-node=GKE_METADATA_SERVER . + + To modify an existing Node pool to enable GKE Metadata Server: + + gcloud beta container node-pools update [NODEPOOL_NAME] \ + --cluster=[CLUSTER_NAME] \ + --workload-metadata-from-node=GKE_METADATA_SERVER + + You may also need to modify workloads in order for them to use Workload Identity as + described within https://cloud.google.com/kubernetes-engine/docs/how-to/workload- + identity. 
+ scored: false + + - id: 5.5 + text: "Node Configuration and Maintenance" + checks: + - id: 5.5.1 + text: "Ensure Container-Optimized OS (COS) is used for GKE node images (Automated)" + type: "manual" + remediation: | + Using Command Line: + To set the node image to cos for an existing cluster's Node pool: + + gcloud container clusters upgrade [CLUSTER_NAME]\ + --image-type cos \ + --zone [COMPUTE_ZONE] --node-pool [POOL_NAME] + scored: false + + - id: 5.5.2 + text: "Ensure Node Auto-Repair is enabled for GKE nodes (Automated)" + type: "manual" + remediation: | + Using Command Line: + To enable node auto-repair for an existing cluster with Node pool, run the following + command: + + gcloud container node-pools update [POOL_NAME] \ + --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ + --enable-autorepair + scored: false + + - id: 5.5.3 + text: "Ensure Node Auto-Upgrade is enabled for GKE nodes (Automated)" + type: "manual" + remediation: | + Using Command Line: + To enable node auto-upgrade for an existing cluster's Node pool, run the following + command: + + gcloud container node-pools update [NODE_POOL] \ + --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ + --enable-autoupgrade + scored: false + + - id: 5.5.4 + text: "Automate GKE version management using Release Channels (Manual)" + type: "manual" + remediation: | + Using Command Line: + Create a new cluster by running the following command: + + gcloud beta container clusters create [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --release-channel [RELEASE_CHANNEL] + + where [RELEASE_CHANNEL] is stable or regular according to your needs. + scored: false + + - id: 5.5.5 + text: "Ensure Shielded GKE Nodes are Enabled (Manual)" + type: "manual" + remediation: | + Using Command Line: + To create a Node pool within the cluster with Integrity Monitoring enabled, run the + following command: + + gcloud beta container node-pools create [NODEPOOL_NAME] \ + --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ + --shielded-integrity-monitoring + + You will also need to migrate workloads from existing non-conforming Node pools to the + newly created Node pool, then delete the non-conforming pools. + scored: false + + - id: 5.5.6 + text: "Ensure Integrity Monitoring for Shielded GKE Nodes is Enabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To create a Node pool within the cluster with Integrity Monitoring enabled, run the + following command: + + gcloud beta container node-pools create [NODEPOOL_NAME] \ + --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ + --shielded-integrity-monitoring + + You will also need to migrate workloads from existing non-conforming Node pools to the newly created Node pool, + then delete the non-conforming pools. + scored: false + + - id: 5.5.7 + text: "Ensure Secure Boot for Shielded GKE Nodes is Enabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To create a Node pool within the cluster with Secure Boot enabled, run the following + command: + + gcloud beta container node-pools create [NODEPOOL_NAME] \ + --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ + --shielded-secure-boot + + You will also need to migrate workloads from existing non-conforming Node pools to the + newly created Node pool, then delete the non-conforming pools. 
+ scored: false + + - id: 5.6 + text: "Cluster Networking" + checks: + - id: 5.6.1 + text: "Enable VPC Flow Logs and Intranode Visibility (Automated)" + type: "manual" + remediation: | + Using Command Line: + To enable intranode visibility on an existing cluster, run the following command: + + gcloud beta container clusters update [CLUSTER_NAME] \ + --enable-intra-node-visibility + scored: false + + - id: 5.6.2 + text: "Ensure use of VPC-native clusters (Automated)" + type: "manual" + remediation: | + Using Command Line: + To enable Alias IP on a new cluster, run the following command: + + gcloud container clusters create [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --enable-ip-alias + scored: false + + - id: 5.6.3 + text: "Ensure Master Authorized Networks is Enabled (Manual)" + type: "manual" + remediation: | + Using Command Line: + To check Master Authorized Networks status for an existing cluster, run the following + command; + + gcloud container clusters describe [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --format json | jq '.masterAuthorizedNetworksConfig' + + The output should return + + { + "enabled": true + } + + if Master Authorized Networks is enabled. + + If Master Authorized Networks is disabled, the + above command will return null ( { } ). + scored: false + + - id: 5.6.4 + text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Manual)" + type: "manual" + remediation: | + Using Command Line: + Create a cluster with a Private Endpoint enabled and Public Access disabled by including + the --enable-private-endpoint flag within the cluster create command: + + gcloud container clusters create [CLUSTER_NAME] \ + --enable-private-endpoint + + Setting this flag also requires the setting of --enable-private-nodes , --enable-ip-alias + and --master-ipv4-cidr=[MASTER_CIDR_RANGE] . + scored: false + + - id: 5.6.5 + text: "Ensure clusters are created with Private Nodes (Manual)" + type: "manual" + remediation: | + Using Command Line: + To create a cluster with Private Nodes enabled, include the --enable-private-nodes flag + within the cluster create command: + + gcloud container clusters create [CLUSTER_NAME] \ + --enable-private-nodes + + Setting this flag also requires the setting of --enable-ip-alias and --master-ipv4- + cidr=[MASTER_CIDR_RANGE] . + scored: false + + - id: 5.6.6 + text: "Consider firewalling GKE worker nodes (Manual)" + type: "manual" + remediation: | + Using Command Line: + Use the following command to generate firewall rules, setting the variables as appropriate. + You may want to use the target [TAG] and [SERVICE_ACCOUNT] previously identified. 
+ + gcloud compute firewall-rules create FIREWALL_RULE_NAME \ + --network [NETWORK] \ + --priority [PRIORITY] \ + --direction [DIRECTION] \ + --action [ACTION] \ + --target-tags [TAG] \ + --target-service-accounts [SERVICE_ACCOUNT] \ + --source-ranges [SOURCE_CIDR-RANGE] \ + --source-tags [SOURCE_TAGS] \ + --source-service-accounts=[SOURCE_SERVICE_ACCOUNT] \ + --destination-ranges [DESTINATION_CIDR_RANGE] \ + --rules [RULES] + scored: false + + - id: 5.6.7 + text: "Ensure Network Policy is Enabled and set as appropriate (Manual)" + type: "manual" + remediation: | + Using Command Line: + To enable Network Policy for an existing cluster, firstly enable the Network Policy add-on: + + gcloud container clusters update [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --update-addons NetworkPolicy=ENABLED + + Then, enable Network Policy: + + gcloud container clusters update [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --enable-network-policy + scored: false + + - id: 5.6.8 + text: "Ensure use of Google-managed SSL Certificates (Manual)" + type: "manual" + remediation: | + If services of type:LoadBalancer are discovered, consider replacing the Service with an + Ingress. + + To configure the Ingress and use Google-managed SSL certificates, follow the instructions + as listed at https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs. + scored: false + + - id: 5.7 + text: "Logging" + checks: + - id: 5.7.1 + text: "Ensure Stackdriver Kubernetes Logging and Monitoring is Enabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + + STACKDRIVER KUBERNETES ENGINE MONITORING SUPPORT (PREFERRED): + To enable Stackdriver Kubernetes Engine Monitoring for an existing cluster, run the + following command: + + gcloud container clusters update [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --enable-stackdriver-kubernetes + + LEGACY STACKDRIVER SUPPORT: + Both Logging and Monitoring support must be enabled. + To enable Legacy Stackdriver Logging for an existing cluster, run the following command: + + gcloud container clusters update [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ + --logging-service logging.googleapis.com + + To enable Legacy Stackdriver Monitoring for an existing cluster, run the following + command: + + gcloud container clusters update [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ + --monitoring-service monitoring.googleapis.com + scored: false + + - id: 5.7.2 + text: "Enable Linux auditd logging (Manual)" + type: "manual" + remediation: | + Using Command Line: + Download the example manifests: + + curl https://raw.githubusercontent.com/GoogleCloudPlatform/k8s-node-tools/master/os-audit/cos-auditd-logging.yaml \ + > cos-auditd-logging.yaml + + Edit the example manifests if needed. Then, deploy them: + + kubectl apply -f cos-auditd-logging.yaml + + Verify that the logging Pods have started. 
If you defined a different Namespace in your + manifests, replace cos-auditd with the name of the namespace you're using: + + kubectl get pods --namespace=cos-auditd + scored: false + + - id: 5.8 + text: "Authentication and Authorization" + checks: + - id: 5.8.1 + text: "Ensure Basic Authentication using static passwords is Disabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To update an existing cluster and disable Basic Authentication by removing the static + password: + + gcloud container clusters update [CLUSTER_NAME] \ + --no-enable-basic-auth + scored: false + + - id: 5.8.2 + text: "Ensure authentication using Client Certificates is Disabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + Create a new cluster without a Client Certificate: + + gcloud container clusters create [CLUSTER_NAME] \ + --no-issue-client-certificate + scored: false + + - id: 5.8.3 + text: "Manage Kubernetes RBAC users with Google Groups for GKE (Manual)" + type: "manual" + remediation: | + Using Command Line: + Follow the G Suite Groups instructions at https://cloud.google.com/kubernetes- + engine/docs/how-to/role-based-access-control#google-groups-for-gke. + + Then, create a cluster with + + gcloud beta container clusters create my-cluster \ + --security-group="gke-security-groups@[yourdomain.com]" + + Finally create Roles, ClusterRoles, RoleBindings, and ClusterRoleBindings that + reference your G Suite Groups. + scored: false + + - id: 5.8.4 + text: "Ensure Legacy Authorization (ABAC) is Disabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To disable Legacy Authorization for an existing cluster, run the following command: + + gcloud container clusters update [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --no-enable-legacy-authorization + scored: false + + - id: 5.9 + text: "Storage" + checks: + - id: 5.9.1 + text: "Enable Customer-Managed Encryption Keys (CMEK) for GKE Persistent Disks (PD) (Manual)" + type: "manual" + remediation: | + Using Command Line: + FOR NODE BOOT DISKS: + Create a new node pool using customer-managed encryption keys for the node boot disk, of + [DISK_TYPE] either pd-standard or pd-ssd : + + gcloud beta container node-pools create [CLUSTER_NAME] \ + --disk-type [DISK_TYPE] \ + --boot-disk-kms-key \ + projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME] + + Create a cluster using customer-managed encryption keys for the node boot disk, of + [DISK_TYPE] either pd-standard or pd-ssd : + + gcloud beta container clusters create [CLUSTER_NAME] \ + --disk-type [DISK_TYPE] \ + --boot-disk-kms-key \ + projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME] + + FOR ATTACHED DISKS: + Follow the instructions detailed at https://cloud.google.com/kubernetes- + engine/docs/how-to/using-cmek. 
+ scored: false + + - id: 5.10 + text: "Other Cluster Configurations" + checks: + - id: 5.10.1 + text: "Ensure Kubernetes Web UI is Disabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To disable the Kubernetes Dashboard on an existing cluster, run the following command: + + gcloud container clusters update [CLUSTER_NAME] \ + --zone [ZONE] \ + --update-addons=KubernetesDashboard=DISABLED + scored: false + + - id: 5.10.2 + text: "Ensure that Alpha clusters are not used for production workloads (Automated)" + type: "manual" + remediation: | + Using Command Line: + Upon creating a new cluster + + gcloud container clusters create [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] + + Do not use the --enable-kubernetes-alpha argument. + scored: false + + - id: 5.10.3 + text: "Ensure Pod Security Policy is Enabled and set as appropriate (Manual)" + type: "manual" + remediation: | + Using Command Line: + To enable Pod Security Policy for an existing cluster, run the following command: + + gcloud beta container clusters update [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --enable-pod-security-policy + scored: false + + - id: 5.10.4 + text: "Consider GKE Sandbox for running untrusted workloads (Manual)" + type: "manual" + remediation: | + Using Command Line: + To enable GKE Sandbox on an existing cluster, a new Node pool must be created. + + gcloud container node-pools create [NODE_POOL_NAME] \ + --zone=[COMPUTE-ZONE] \ + --cluster=[CLUSTER_NAME] \ + --image-type=cos_containerd \ + --sandbox type=gvisor + scored: false + + - id: 5.10.5 + text: "Ensure use of Binary Authorization (Automated)" + type: "manual" + remediation: | + Using Command Line: + Firstly, update the cluster to enable Binary Authorization: + + gcloud container cluster update [CLUSTER_NAME] \ + --zone [COMPUTE-ZONE] \ + --enable-binauthz + + Create a Binary Authorization Policy using the Binary Authorization Policy Reference + (https://cloud.google.com/binary-authorization/docs/policy-yaml-reference) for + guidance. + + Import the policy file into Binary Authorization: + + gcloud container binauthz policy import [YAML_POLICY] + scored: false + + - id: 5.10.6 + text: "Enable Cloud Security Command Center (Cloud SCC) (Manual)" + type: "manual" + remediation: | + Using Command Line: + Follow the instructions at https://cloud.google.com/security-command- + center/docs/quickstart-scc-setup. + scored: false diff --git a/cfg/gke-1.6.0/master.yaml b/cfg/gke-1.6.0/master.yaml new file mode 100644 index 000000000..9686bf2f8 --- /dev/null +++ b/cfg/gke-1.6.0/master.yaml @@ -0,0 +1,6 @@ +--- +controls: +version: "gke-1.6.0" +id: 1 +text: "Control Plane Components" +type: "master" diff --git a/cfg/gke-1.6.0/node.yaml b/cfg/gke-1.6.0/node.yaml new file mode 100644 index 000000000..30e9aa8c6 --- /dev/null +++ b/cfg/gke-1.6.0/node.yaml @@ -0,0 +1,335 @@ +--- +controls: +version: "gke-1.6.0" +id: 3 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 3.1 + text: "Worker Node Configuration Files" + checks: + - id: 3.1.1 + text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on each worker node. 
+          For example,
+          chmod 644 $proxykubeconfig
+        scored: false
+
+      - id: 3.1.2
+        text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chown root:root $proxykubeconfig
+        scored: false
+
+      - id: 3.1.3
+        text: "Ensure that the kubelet configuration file has permissions set to 600 (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chmod 644 /var/lib/kubelet/config.yaml
+        scored: false
+
+      - id: 3.1.4
+        text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chown root:root /etc/kubernetes/kubelet.conf
+        scored: false
+
+  - id: 3.2
+    text: "Kubelet"
+    checks:
+      - id: 3.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              path: '{.authentication.anonymous.enabled}'
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
+          false.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --anonymous-auth=false
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: true
+
+      - id: 3.2.2
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --authorization-mode
+              path: '{.authorization.mode}'
+              compare:
+                op: nothave
+                value: AlwaysAllow
+        remediation: |
+          If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If
+          using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_AUTHZ_ARGS variable.
+          --authorization-mode=Webhook
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 3.2.3
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --client-ca-file
+              path: '{.authentication.x509.clientCAFile}'
+              set: true
+        remediation: |
+          If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to
+          the location of the client CA file.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_AUTHZ_ARGS variable.
+ --client-ca-file= + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.4 + text: "Ensure that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: true + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set readOnlyPort to 0. + If using command line arguments, edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + set: true + compare: + op: eq + value: true + remediation: | + If using a Kubelet config file, edit the file to set protectKernelDefaults: true. + If using command line arguments, edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated) " + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.8 + text: "Ensure that the --hostname-override argument is not set (Manual)" + audit: "/bin/ps -fC $kubeletbin " + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.9 + text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + set: true + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. + If using command line arguments, edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.10 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cert-file + path: '{.tlsCertFile}' + - flag: --tls-private-key-file + path: '{.tlsPrivateKeyFile}' + remediation: | + If using a Kubelet config file, edit the file to set tlsCertFile to the location + of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.11 + text: "Ensure that the --rotate-certificates argument is not set to false (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line rotateCertificates: true or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.12 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: eq + value: true + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true diff --git a/cfg/gke-1.6.0/policies.yaml b/cfg/gke-1.6.0/policies.yaml new file mode 100644 index 000000000..6b3ccda78 --- /dev/null +++ b/cfg/gke-1.6.0/policies.yaml @@ -0,0 +1,239 @@ +--- +controls: +version: "gke-1.6.0" +id: 4 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 4.1 + text: "RBAC and Service Accounts" + checks: + - id: 4.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 4.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to secret objects in the cluster. + scored: false + + - id: 4.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 4.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 4.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: true + + - id: 4.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 4.2 + text: "Pod Security Policies" + checks: + - id: 4.2.1 + text: "Minimize the admission of privileged containers (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that + the .spec.privileged field is omitted or set to false. + scored: false + + - id: 4.2.2 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.hostPID field is omitted or set to false. 
+        scored: false
+
+      - id: 4.2.3
+        text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.hostIPC field is omitted or set to false.
+        scored: false
+
+      - id: 4.2.4
+        text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.hostNetwork field is omitted or set to false.
+        scored: false
+
+      - id: 4.2.5
+        text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.allowPrivilegeEscalation field is omitted or set to false.
+        scored: false
+
+      - id: 4.2.6
+        text: "Minimize the admission of root containers (Automated)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of
+          UIDs not including 0.
+        scored: false
+
+      - id: 4.2.7
+        text: "Minimize the admission of containers with the NET_RAW capability (Automated)"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.requiredDropCapabilities is set to include either NET_RAW or ALL.
+        scored: false
+
+      - id: 4.2.8
+        text: "Minimize the admission of containers with added capabilities (Automated)"
+        type: "manual"
+        remediation: |
+          Ensure that allowedCapabilities is not present in PSPs for the cluster unless
+          it is set to an empty array.
+        scored: false
+
+      - id: 4.2.9
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider adding
+          a PSP which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+  - id: 4.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 4.3.1
+        text: "Ensure that the CNI in use supports Network Policies (Manual)"
+        type: "manual"
+        remediation: |
+          To use a CNI plugin with Network Policy, enable Network Policy in GKE, and the CNI plugin
+          will be updated. See Recommendation 6.6.7.
+        scored: false
+
+      - id: 4.3.2
+        text: "Ensure that all Namespaces have Network Policies defined (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+        scored: false
+
+  - id: 4.4
+    text: "Secrets Management"
+    checks:
+      - id: 4.4.1
+        text: "Prefer using secrets as files over secrets as environment variables (Manual)"
+        type: "manual"
+        remediation: |
+          If possible, rewrite application code to read secrets from mounted secret files, rather than
+          from environment variables.
+        scored: false
+
+      - id: 4.4.2
+        text: "Consider external secret storage (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
+ scored: false + + - id: 4.5 + text: "Extensible Admission Control" + checks: + - id: 4.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + See also Recommendation 6.10.5 for GKE specifically. + scored: false + + - id: 4.6 + text: "General Policies" + checks: + - id: 4.6.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 4.6.2 + text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)" + type: "manual" + remediation: | + Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you + would need to enable alpha features in the apiserver by passing "--feature- + gates=AllAlpha=true" argument. + Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS + parameter to "--feature-gates=AllAlpha=true" + KUBE_API_ARGS="--feature-gates=AllAlpha=true" + Based on your system, restart the kube-apiserver service. For example: + systemctl restart kube-apiserver.service + Use annotations to enable the docker/default seccomp profile in your pod definitions. An + example is as below: + apiVersion: v1 + kind: Pod + metadata: + name: trustworthy-pod + annotations: + seccomp.security.alpha.kubernetes.io/pod: docker/default + spec: + containers: + - name: trustworthy-container + image: sotrustworthy:latest + scored: false + + - id: 4.6.3 + text: "Apply Security Context to Your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply security contexts to your pods. For a + suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 4.6.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. 
+ scored: false From 96a8081e8d2e3f7e73aea1a5223f1530c7f49fe1 Mon Sep 17 00:00:00 2001 From: deboshree-b Date: Mon, 26 Aug 2024 05:57:53 +0530 Subject: [PATCH 07/13] NDEV-20011 : initial commit for other benchmarks --- cfg/aks-1.5.0/config.yaml | 2 + cfg/aks-1.5.0/controlplane.yaml | 31 + cfg/aks-1.5.0/managedservices.yaml | 144 +++ cfg/aks-1.5.0/master.yaml | 6 + cfg/aks-1.5.0/node.yaml | 321 ++++++ cfg/aks-1.5.0/policies.yaml | 214 ++++ cfg/eks-1.5.0/config.yaml | 9 + cfg/eks-1.5.0/controlplane.yaml | 14 + cfg/eks-1.5.0/managedservices.yaml | 154 +++ cfg/eks-1.5.0/master.yaml | 6 + cfg/eks-1.5.0/node.yaml | 330 +++++++ cfg/eks-1.5.0/policies.yaml | 213 ++++ cfg/oke-1.5.0/config.yaml | 2 + cfg/oke-1.5.0/controlplane.yaml | 62 ++ cfg/oke-1.5.0/node.yaml | 327 +++++++ cfg/oke-1.5.0/policies.yaml | 287 ++++++ cfg/rh-1.4.0/config.yaml | 2 + cfg/rh-1.4.0/controlplane.yaml | 62 ++ cfg/rh-1.4.0/etcd.yaml | 183 ++++ cfg/rh-1.4.0/master.yaml | 1445 ++++++++++++++++++++++++++++ cfg/rh-1.4.0/node.yaml | 429 +++++++++ cfg/rh-1.4.0/policies.yaml | 287 ++++++ cfg/rh-1.6.0/config.yaml | 2 + cfg/rh-1.6.0/controlplane.yaml | 62 ++ cfg/rh-1.6.0/etcd.yaml | 183 ++++ cfg/rh-1.6.0/master.yaml | 1445 ++++++++++++++++++++++++++++ cfg/rh-1.6.0/node.yaml | 429 +++++++++ cfg/rh-1.6.0/policies.yaml | 287 ++++++ 28 files changed, 6938 insertions(+) create mode 100644 cfg/aks-1.5.0/config.yaml create mode 100644 cfg/aks-1.5.0/controlplane.yaml create mode 100644 cfg/aks-1.5.0/managedservices.yaml create mode 100644 cfg/aks-1.5.0/master.yaml create mode 100644 cfg/aks-1.5.0/node.yaml create mode 100644 cfg/aks-1.5.0/policies.yaml create mode 100644 cfg/eks-1.5.0/config.yaml create mode 100644 cfg/eks-1.5.0/controlplane.yaml create mode 100644 cfg/eks-1.5.0/managedservices.yaml create mode 100644 cfg/eks-1.5.0/master.yaml create mode 100644 cfg/eks-1.5.0/node.yaml create mode 100644 cfg/eks-1.5.0/policies.yaml create mode 100644 cfg/oke-1.5.0/config.yaml create mode 100644 cfg/oke-1.5.0/controlplane.yaml create mode 100644 cfg/oke-1.5.0/node.yaml create mode 100644 cfg/oke-1.5.0/policies.yaml create mode 100644 cfg/rh-1.4.0/config.yaml create mode 100644 cfg/rh-1.4.0/controlplane.yaml create mode 100644 cfg/rh-1.4.0/etcd.yaml create mode 100644 cfg/rh-1.4.0/master.yaml create mode 100644 cfg/rh-1.4.0/node.yaml create mode 100644 cfg/rh-1.4.0/policies.yaml create mode 100644 cfg/rh-1.6.0/config.yaml create mode 100644 cfg/rh-1.6.0/controlplane.yaml create mode 100644 cfg/rh-1.6.0/etcd.yaml create mode 100644 cfg/rh-1.6.0/master.yaml create mode 100644 cfg/rh-1.6.0/node.yaml create mode 100644 cfg/rh-1.6.0/policies.yaml diff --git a/cfg/aks-1.5.0/config.yaml b/cfg/aks-1.5.0/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/aks-1.5.0/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/aks-1.5.0/controlplane.yaml b/cfg/aks-1.5.0/controlplane.yaml new file mode 100644 index 000000000..44d1f8907 --- /dev/null +++ b/cfg/aks-1.5.0/controlplane.yaml @@ -0,0 +1,31 @@ +--- +controls: +version: "aks-1.0" +id: 2 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 2.1 + text: "Logging" + checks: + - id: 2.1.1 + text: "Enable audit Logs" + type: "manual" + remediation: | + Azure audit logs are enabled and managed in the Azure portal. To enable log collection for + the Kubernetes master components in your AKS cluster, open the Azure portal in a web + browser and complete the following steps: + 1. 
Select the resource group for your AKS cluster, such as myResourceGroup. Don't + select the resource group that contains your individual AKS cluster resources, such + as MC_myResourceGroup_myAKSCluster_eastus. + 2. On the left-hand side, choose Diagnostic settings. + 3. Select your AKS cluster, such as myAKSCluster, then choose to Add diagnostic setting. + 4. Enter a name, such as myAKSClusterLogs, then select the option to Send to Log Analytics. + 5. Select an existing workspace or create a new one. If you create a workspace, provide + a workspace name, a resource group, and a location. + 6. In the list of available logs, select the logs you wish to enable. For this example, + enable the kube-audit and kube-audit-admin logs. Common logs include the kube- + apiserver, kube-controller-manager, and kube-scheduler. You can return and change + the collected logs once Log Analytics workspaces are enabled. + 7. When ready, select Save to enable collection of the selected logs. + scored: false diff --git a/cfg/aks-1.5.0/managedservices.yaml b/cfg/aks-1.5.0/managedservices.yaml new file mode 100644 index 000000000..7e5646aac --- /dev/null +++ b/cfg/aks-1.5.0/managedservices.yaml @@ -0,0 +1,144 @@ +--- +controls: +version: "aks-1.0" +id: 5 +text: "Managed Services" +type: "managedservices" +groups: + - id: 5.1 + text: "Image Registry and Image Scanning" + checks: + - id: 5.1.1 + text: "Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.1.2 + text: "Minimize user access to Azure Container Registry (ACR) (Manual)" + type: "manual" + remediation: | + Azure Container Registry + If you use Azure Container Registry (ACR) as your container image store, you need to grant + permissions to the service principal for your AKS cluster to read and pull images. Currently, + the recommended configuration is to use the az aks create or az aks update command to + integrate with a registry and assign the appropriate role for the service principal. For + detailed steps, see Authenticate with Azure Container Registry from Azure Kubernetes + Service. + To avoid needing an Owner or Azure account administrator role, you can configure a + service principal manually or use an existing service principal to authenticate ACR from + AKS. For more information, see ACR authentication with service principals or Authenticate + from Kubernetes with a pull secret. + scored: false + + - id: 5.1.3 + text: "Minimize cluster access to read-only for Azure Container Registry (ACR) (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.1.4 + text: "Minimize Container Registries to only those approved (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.2 + text: "Access and identity options for Azure Kubernetes Service (AKS)" + checks: + - id: 5.2.1 + text: "Prefer using dedicated AKS Service Accounts (Manual)" + type: "manual" + remediation: | + Azure Active Directory integration + The security of AKS clusters can be enhanced with the integration of Azure Active Directory + (AD). Built on decades of enterprise identity management, Azure AD is a multi-tenant, + cloud-based directory, and identity management service that combines core directory + services, application access management, and identity protection. 
With Azure AD, you can + integrate on-premises identities into AKS clusters to provide a single source for account + management and security. + Azure Active Directory integration with AKS clusters + With Azure AD-integrated AKS clusters, you can grant users or groups access to Kubernetes + resources within a namespace or across the cluster. To obtain a kubectl configuration + context, a user can run the az aks get-credentials command. When a user then interacts + with the AKS cluster with kubectl, they're prompted to sign in with their Azure AD + credentials. This approach provides a single source for user account management and + password credentials. The user can only access the resources as defined by the cluster + administrator. + Azure AD authentication is provided to AKS clusters with OpenID Connect. OpenID Connect + is an identity layer built on top of the OAuth 2.0 protocol. For more information on OpenID + Connect, see the Open ID connect documentation. From inside of the Kubernetes cluster, + Webhook Token Authentication is used to verify authentication tokens. Webhook token + authentication is configured and managed as part of the AKS cluster. + scored: false + + - id: 5.3 + text: "Key Management Service (KMS)" + checks: + - id: 5.3.1 + text: "Ensure Kubernetes Secrets are encrypted (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4 + text: "Cluster Networking" + checks: + - id: 5.4.1 + text: "Restrict Access to the Control Plane Endpoint (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.2 + text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.3 + text: "Ensure clusters are created with Private Nodes (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.4 + text: "Ensure Network Policy is Enabled and set as appropriate (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.5 + text: "Encrypt traffic to HTTPS load balancers with TLS certificates (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + + - id: 5.5 + text: "Authentication and Authorization" + checks: + - id: 5.5.1 + text: "Manage Kubernetes RBAC users with Azure AD (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + - id: 5.5.2 + text: "Use Azure RBAC for Kubernetes Authorization (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.6 + text: "Other Cluster Configurations" + checks: + - id: 5.6.1 + text: "Restrict untrusted workloads (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + - id: 5.6.2 + text: "Hostile multi-tenant workloads (Manual)" + type: "manual" + remediation: "No remediation" + scored: false diff --git a/cfg/aks-1.5.0/master.yaml b/cfg/aks-1.5.0/master.yaml new file mode 100644 index 000000000..7ec9eae88 --- /dev/null +++ b/cfg/aks-1.5.0/master.yaml @@ -0,0 +1,6 @@ +--- +controls: +version: "aks-1.0" +id: 1 +text: "Control Plane Components" +type: "master" diff --git a/cfg/aks-1.5.0/node.yaml b/cfg/aks-1.5.0/node.yaml new file mode 100644 index 000000000..78fda584d --- /dev/null +++ b/cfg/aks-1.5.0/node.yaml @@ -0,0 +1,321 @@ +--- +controls: +version: "aks-1.0" +id: 3 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 3.1 + text: "Worker Node Configuration Files" + checks: + - id: 3.1.1 + text: "Ensure 
that the kubeconfig file permissions are set to 644 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 644 $kubeletkubeconfig + scored: false + + - id: 3.1.2 + text: "Ensure that the kubelet kubeconfig file ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: false + + - id: 3.1.3 + text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 644 $kubeletconf + scored: false + + - id: 3.1.4 + text: "Ensure that the kubelet configuration file ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: false + + - id: 3.2 + text: "Kubelet" + checks: + - id: 3.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to + false. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --anonymous-auth=false + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + set: true + remediation: | + If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.4 + text: "Ensure that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: true + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set readOnlyPort to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: true + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + set: true + compare: + op: eq + value: true + remediation: | + If using a Kubelet config file, edit the file to set protectKernelDefaults: true. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Manual) " + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: true + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.8 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead reading the Kubelet Configuration file. + audit: "/bin/ps -fC $kubeletbin " + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.9 + text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + set: true + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.10 + text: "Ensure that the --rotate-certificates argument is not set to false (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: true + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line rotateCertificates: true or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.11 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: true + compare: + op: eq + value: true + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false diff --git a/cfg/aks-1.5.0/policies.yaml b/cfg/aks-1.5.0/policies.yaml new file mode 100644 index 000000000..9cfde1e3f --- /dev/null +++ b/cfg/aks-1.5.0/policies.yaml @@ -0,0 +1,214 @@ +--- +controls: +version: "aks-1.0" +id: 4 +text: "Policies" +type: "policies" +groups: + - id: 4.1 + text: "RBAC and Service Accounts" + checks: + - id: 4.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 4.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to secret objects in the cluster. + scored: false + + - id: 4.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 4.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 4.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 4.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 4.2 + text: "Pod Security Policies" + checks: + - id: 4.2.1 + text: "Minimize the admission of privileged containers (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that + the .spec.privileged field is omitted or set to false. + scored: false + + - id: 4.2.2 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.hostPID field is omitted or set to false. 
+ scored: false + + - id: 4.2.3 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.hostIPC field is omitted or set to false. + scored: false + + - id: 4.2.4 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.hostNetwork field is omitted or set to false. + scored: false + + - id: 4.2.5 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.allowPrivilegeEscalation field is omitted or set to false. + scored: false + + - id: 4.2.6 + text: "Minimize the admission of root containers (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of + UIDs not including 0. + scored: false + + - id: 4.2.7 + text: "Minimize the admission of containers with the NET_RAW capability (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.requiredDropCapabilities is set to include either NET_RAW or ALL. + scored: false + + - id: 4.2.8 + text: "Minimize the admission of containers with added capabilities (Automated)" + type: "manual" + remediation: | + Ensure that allowedCapabilities is not present in PSPs for the cluster unless + it is set to an empty array. + scored: false + + - id: 4.2.9 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabities to operate consider adding + a PSP which forbids the admission of containers which do not drop all capabilities. + scored: false + + - id: 4.3 + text: "Azure Policy / OPA" + checks: [] + + - id: 4.4 + text: "CNI Plugin" + checks: + - id: 4.4.1 + text: "Ensure that the latest CNI version is used (Manual)" + type: "manual" + remediation: | + Review the documentation of AWS CNI plugin, and ensure latest CNI version is used. + scored: false + + - id: 4.4.2 + text: "Ensure that all Namespaces have Network Policies defined (Manual)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 4.5 + text: "Secrets Management" + checks: + - id: 4.5.1 + text: "Prefer using secrets as files over secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 4.5.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the secrets management options offered by your cloud provider or a third-party + secrets management solution. 
+ scored: false + + - id: 4.6 + text: "Extensible Admission Control" + checks: + - id: 4.6.1 + text: "Verify that admission controllers are working as expected (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 4.7 + text: "General Policies" + checks: + - id: 4.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 4.7.2 + text: "Apply Security Context to Your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply security contexts to your pods. For a + suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 4.7.3 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/eks-1.5.0/config.yaml b/cfg/eks-1.5.0/config.yaml new file mode 100644 index 000000000..17301a751 --- /dev/null +++ b/cfg/eks-1.5.0/config.yaml @@ -0,0 +1,9 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml +## These settings are required if you are using the --asff option to report findings to AWS Security Hub +## AWS account number is required. +AWS_ACCOUNT: "" +## AWS region is required. +AWS_REGION: "" +## EKS Cluster ARN is required. +CLUSTER_ARN: "" diff --git a/cfg/eks-1.5.0/controlplane.yaml b/cfg/eks-1.5.0/controlplane.yaml new file mode 100644 index 000000000..687ee6df1 --- /dev/null +++ b/cfg/eks-1.5.0/controlplane.yaml @@ -0,0 +1,14 @@ +--- +controls: +version: "eks-1.2.0" +id: 2 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 2.1 + text: "Logging" + checks: + - id: 2.1.1 + text: "Enable audit logs (Automated)" + remediation: "Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler." + scored: false diff --git a/cfg/eks-1.5.0/managedservices.yaml b/cfg/eks-1.5.0/managedservices.yaml new file mode 100644 index 000000000..c9ae5ff3f --- /dev/null +++ b/cfg/eks-1.5.0/managedservices.yaml @@ -0,0 +1,154 @@ +--- +controls: +version: "eks-1.2.0" +id: 5 +text: "Managed Services" +type: "managedservices" +groups: + - id: 5.1 + text: "Image Registry and Image Scanning" + checks: + - id: 5.1.1 + text: "Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third-party provider (Manual)" + type: "manual" + remediation: | + To utilize AWS ECR for Image scanning please follow the steps below: + + To create a repository configured for scan on push (AWS CLI): + aws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE + + To edit the settings of an existing repository (AWS CLI): + aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE + + Use the following steps to start a manual image scan using the AWS Management Console. + Open the Amazon ECR console at https://console.aws.amazon.com/ecr/repositories. + From the navigation bar, choose the Region to create your repository in. + In the navigation pane, choose Repositories. 
+ On the Repositories page, choose the repository that contains the image to scan. + On the Images page, select the image to scan and then choose Scan. + scored: false + + - id: 5.1.2 + text: "Minimize user access to Amazon ECR (Manual)" + type: "manual" + remediation: | + Before you use IAM to manage access to Amazon ECR, you should understand what IAM features + are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other + AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide. + scored: false + + - id: 5.1.3 + text: "Minimize cluster access to read-only for Amazon ECR (Manual)" + type: "manual" + remediation: | + You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites. + + The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess + the following IAM policy permissions for Amazon ECR. + + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:GetDownloadUrlForLayer", + "ecr:GetAuthorizationToken" + ], + "Resource": "*" + } + ] + } + scored: false + + - id: 5.1.4 + text: "Minimize Container Registries to only those approved (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.2 + text: "Identity and Access Management (IAM)" + checks: + - id: 5.2.1 + text: "Prefer using dedicated Amazon EKS Service Accounts (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.3 + text: "AWS Key Management Service (KMS)" + checks: + - id: 5.3.1 + text: "Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS (Manual)" + type: "manual" + remediation: | + This process can only be performed during Cluster Creation. + + Enable 'Secrets Encryption' during Amazon EKS cluster creation as described + in the links within the 'References' section. + scored: false + + - id: 5.4 + text: "Cluster Networking" + checks: + - id: 5.4.1 + text: "Restrict Access to the Control Plane Endpoint (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.2 + text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.3 + text: "Ensure clusters are created with Private Nodes (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.4 + text: "Ensure Network Policy is Enabled and set as appropriate (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.5 + text: "Encrypt traffic to HTTPS load balancers with TLS certificates (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + + - id: 5.5 + text: "Authentication and Authorization" + checks: + - id: 5.5.1 + text: "Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes (Manual)" + type: "manual" + remediation: | + Refer to the 'Managing users or IAM roles for your cluster' in Amazon EKS documentation. 
+        scored: false
+
+
+  - id: 5.6
+    text: "Other Cluster Configurations"
+    checks:
+      - id: 5.6.1
+        text: "Consider Fargate for running untrusted workloads (Manual)"
+        type: "manual"
+        remediation: |
+          Create a Fargate profile for your cluster. Before you can schedule pods running on Fargate
+          in your cluster, you must define a Fargate profile that specifies which pods should use
+          Fargate when they are launched. For more information, see AWS Fargate profile.
+
+          Note: If you created your cluster with eksctl using the --fargate option, then a Fargate profile has
+          already been created for your cluster with selectors for all pods in the kube-system
+          and default namespaces. Use the following procedure to create Fargate profiles for
+          any other namespaces you would like to use with Fargate.
+        scored: false
diff --git a/cfg/eks-1.5.0/master.yaml b/cfg/eks-1.5.0/master.yaml
new file mode 100644
index 000000000..8da0179b3
--- /dev/null
+++ b/cfg/eks-1.5.0/master.yaml
@@ -0,0 +1,6 @@
+---
+controls:
+version: "eks-1.2.0"
+id: 1
+text: "Control Plane Components"
+type: "master"
diff --git a/cfg/eks-1.5.0/node.yaml b/cfg/eks-1.5.0/node.yaml
new file mode 100644
index 000000000..c7f47c04d
--- /dev/null
+++ b/cfg/eks-1.5.0/node.yaml
@@ -0,0 +1,330 @@
+---
+controls:
+version: "eks-1.2.0"
+id: 3
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 3.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 3.1.1
+        text: "Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 644 $kubeletkubeconfig
+        scored: false
+
+      - id: 3.1.2
+        text: "Ensure that the kubelet kubeconfig file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletkubeconfig
+        scored: false
+
+      - id: 3.1.3
+        text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chmod 644 $kubeletconf
+        scored: false
+
+      - id: 3.1.4
+        text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chown root:root $kubeletconf
+        scored: false
+
+  - id: 3.2
+    text: "Kubelet"
+    checks:
+      - id: 3.2.1
+        text: "Ensure that the Anonymous Auth is Not Enabled (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              path: '{.authentication.anonymous.enabled}'
+              set: true
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
+          false.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --anonymous-auth=false
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 3.2.2
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --authorization-mode
+              path: '{.authorization.mode}'
+              set: true
+              compare:
+                op: nothave
+                value: AlwaysAllow
+        remediation: |
+          If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If
+          using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_AUTHZ_ARGS variable.
+          --authorization-mode=Webhook
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 3.2.3
+        text: "Ensure that a Client CA File is Configured (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --client-ca-file
+              path: '{.authentication.x509.clientCAFile}'
+              set: true
+        remediation: |
+          If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to
+          the location of the client CA file.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_AUTHZ_ARGS variable.
+          --client-ca-file=
+          Based on your system, restart the kubelet service.
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.4 + text: "Ensure that the --read-only-port is disabled (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: true + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set readOnlyPort to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: true + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + set: true + compare: + op: eq + value: true + remediation: | + If using a Kubelet config file, edit the file to set protectKernelDefaults: true. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated) " + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: true + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.8 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. 
+ # To check if the property is set as expected, we need to parse the kubelet command + # instead reading the Kubelet Configuration file. + audit: "/bin/ps -fC $kubeletbin " + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.9 + text: "Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + set: true + compare: + op: gte + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.10 + text: "Ensure that the --rotate-certificates argument is not present or is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: true + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line rotateCertificates: true or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.11 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: true + compare: + op: eq + value: true + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + - id: 3.3 + text: "Container Optimized OS" + checks: + - id: 3.3.1 + text: "Prefer using a container-optimized OS when possible (Manual)" + remediation: "No remediation" + scored: false diff --git a/cfg/eks-1.5.0/policies.yaml b/cfg/eks-1.5.0/policies.yaml new file mode 100644 index 000000000..7f5db95d9 --- /dev/null +++ b/cfg/eks-1.5.0/policies.yaml @@ -0,0 +1,213 @@ +--- +controls: +version: "eks-1.2.0" +id: 4 +text: "Policies" +type: "policies" +groups: + - id: 4.1 + text: "RBAC and Service Accounts" + checks: + - id: 4.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 4.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to secret objects in the cluster. + scored: false + + - id: 4.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 4.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 4.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 4.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 4.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 4.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 4.2 + text: "Pod Security Policies" + checks: + - id: 4.2.1 + text: "Minimize the admission of privileged containers (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that + the .spec.privileged field is omitted or set to false. + scored: false + + - id: 4.2.2 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.hostPID field is omitted or set to false. 
+ scored: false + + - id: 4.2.3 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.hostIPC field is omitted or set to false. + scored: false + + - id: 4.2.4 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.hostNetwork field is omitted or set to false. + scored: false + + - id: 4.2.5 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.allowPrivilegeEscalation field is omitted or set to false. + scored: false + + - id: 4.2.6 + text: "Minimize the admission of root containers (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of + UIDs not including 0. + scored: false + + - id: 4.2.7 + text: "Minimize the admission of containers with added capabilities (Manual)" + type: "manual" + remediation: | + Ensure that allowedCapabilities is not present in PSPs for the cluster unless + it is set to an empty array. + scored: false + + - id: 4.2.8 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabities to operate consider adding + a PSP which forbids the admission of containers which do not drop all capabilities. + scored: false + + - id: 4.3 + text: "CNI Plugin" + checks: + - id: 4.3.1 + text: "Ensure CNI plugin supports network policies (Manual)" + type: "manual" + remediation: | + As with RBAC policies, network policies should adhere to the policy of least privileged + access. Start by creating a deny all policy that restricts all inbound and outbound traffic + from a namespace or create a global policy using Calico. + scored: false + + - id: 4.3.2 + text: "Ensure that all Namespaces have Network Policies defined (Manual)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 4.4 + text: "Secrets Management" + checks: + - id: 4.4.1 + text: "Prefer using secrets as files over secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 4.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 4.5 + text: "Extensible Admission Control" + checks: [] + + - id: 4.6 + text: "General Policies" + checks: + - id: 4.6.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. 
+        scored: false
+
+      - id: 4.6.2
+        text: "Apply Security Context to Your Pods and Containers (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and apply security contexts to your pods. For a
+          suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
+          Containers.
+        scored: false
+
+      - id: 4.6.3
+        text: "The default namespace should not be used (Manual)"
+        type: "manual"
+        remediation: |
+          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+          resources and that all new resources are created in a specific namespace.
+        scored: false
diff --git a/cfg/oke-1.5.0/config.yaml b/cfg/oke-1.5.0/config.yaml
new file mode 100644
index 000000000..4cbf4cf00
--- /dev/null
+++ b/cfg/oke-1.5.0/config.yaml
@@ -0,0 +1,2 @@
+---
+## Version-specific settings that override the values in cfg/config.yaml
\ No newline at end of file
diff --git a/cfg/oke-1.5.0/controlplane.yaml b/cfg/oke-1.5.0/controlplane.yaml
new file mode 100644
index 000000000..5c227aab0
--- /dev/null
+++ b/cfg/oke-1.5.0/controlplane.yaml
@@ -0,0 +1,62 @@
+---
+controls:
+version: oke-1.5.0
+id: 3
+text: "Control Plane Configuration"
+type: "controlplane"
+groups:
+  - id: 2.1
+    text: "Authentication and Authorization"
+    checks:
+      - id: 2.1.1
+        text: "Client certificate authentication should not be used for users (Manual)"
+        audit: |
+          # To verify user authentication is enabled
+          oc describe authentication
+          # To verify that an identity provider is configured
+          oc get identity
+          # To verify that a custom cluster-admin user exists
+          oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | grep cluster-admin | grep User
+          # To verify that kubeadmin is removed, no results should be returned
+          oc get secrets kubeadmin -n kube-system
+        type: manual
+        remediation: |
+          Configure an identity provider for the OpenShift cluster.
+          Understanding identity provider configuration | Authentication | OpenShift
+          Container Platform 4.5. Once an identity provider has been defined,
+          you can use RBAC to define and apply permissions.
+          After you define an identity provider and create a new cluster-admin user,
+          remove the kubeadmin user to improve cluster security.
+        scored: false
+
+  - id: 3.2
+    text: "Logging"
+    checks:
+      - id: 3.2.1
+        text: "Ensure that a minimal audit policy is created (Manual)"
+        audit: |
+          #To view kube apiserver log files
+          oc adm node-logs --role=master --path=kube-apiserver/
+          #To view openshift apiserver log files
+          oc adm node-logs --role=master --path=openshift-apiserver/
+          #To verify kube apiserver audit config
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?'
+          #To verify openshift apiserver audit config
+          oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?'
+        type: manual
+        remediation: |
+          No remediation required.
+        scored: false
+
+      - id: 3.2.2
+        text: "Ensure that the audit policy covers key security concerns (Manual)"
+        audit: |
+          #To verify openshift apiserver audit config
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?'
+          #To verify kube apiserver audit config
+          oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?'
+        type: manual
+        remediation: |
+          In OpenShift 4.6 and higher, if appropriate for your needs,
+          modify the audit policy.
+        scored: false
diff --git a/cfg/oke-1.5.0/node.yaml b/cfg/oke-1.5.0/node.yaml
new file mode 100644
index 000000000..2e5b005b5
--- /dev/null
+++ b/cfg/oke-1.5.0/node.yaml
@@ -0,0 +1,327 @@
+---
+controls:
+version: "oke-1.5.0"
+id: 3
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 3.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 3.1.1
+        text: "Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Manual)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 644 $kubeletkubeconfig
+        scored: false
+
+      - id: 3.1.2
+        text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+            - flag: "$proxykubeconfig"
+              set: false
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chown root:root $proxykubeconfig
+        scored: false
+      - id: 3.1.3
+        text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Manual)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              set: true
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chmod 644 $kubeletconf
+        scored: true
+      - id: 3.1.4
+        text: "Ensure that the kubelet configuration file ownership is set to root:root (Manual)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chown root:root /etc/kubernetes/kubelet.conf
+        scored: false
+  - id: 3.2
+    text: "Kubelet"
+    checks:
+      - id: 3.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              path: '{.authentication.anonymous.enabled}'
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          If modifying the Kubelet service config file, edit the kubelet.service file
+          /etc/kubernetes/kubelet-config.json and set the below parameter
+          --anonymous-auth=false
+          Based on your system, restart the kubelet service and check status
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: true
+      - id: 3.2.2
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --authorization-mode
+              path: '{.authorization.mode}'
+              compare:
+                op: nothave
+                value: AlwaysAllow
+        remediation: |
+          If modifying the Kubelet service config file, edit the kubelet.service file
+          /etc/kubernetes/kubelet-config.json and set the below parameter
+          --authorization-mode=Webhook
+          Based on your system, restart the
kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: true
+      - id: 3.2.3
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --client-ca-file
+              path: '{.authentication.x509.clientCAFile}'
+        remediation: |
+          If modifying the Kubelet service config file, edit the kubelet.service file
+          /etc/kubernetes/kubelet-config.json and set the below parameter
+          --client-ca-file=/etc/kubernetes/ca.crt \
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: false
+      - id: 3.2.4
+        text: "Ensure that the --read-only-port argument is set to 0 (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--read-only-port"
+              path: '{.readOnlyPort}'
+              compare:
+                op: eq
+                value: 0
+            - flag: "--read-only-port"
+              path: '{.readOnlyPort}'
+              set: false
+        remediation: |
+          If modifying the Kubelet config file, edit the kubelet.service file
+          /etc/systemd/system/kubelet.service and set the below parameter
+          --read-only-port=0
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: false
+      - id: 3.2.5
+        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              compare:
+                op: noteq
+                value: 0
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              set: false
+          bin_op: or
+        remediation: |
+          If modifying the Kubelet service config file, edit the kubelet.service file
+          /etc/kubernetes/kubelet-config.json and set the below parameter
+          --streaming-connection-idle-timeout=5m
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: false
+      - id: 3.2.6
+        text: "Ensure that the --protect-kernel-defaults argument is set to true (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --protect-kernel-defaults
+              path: '{.protectKernelDefaults}'
+              set: true
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          If modifying the Kubelet service config file, edit the kubelet.service file
+          /etc/kubernetes/kubelet-config.json and set the below parameter
+          --protect-kernel-defaults=true
+          Based on your system, restart the kubelet service.
For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: false
+      - id: 3.2.7
+        text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              compare:
+                op: eq
+                value: true
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              set: false
+          bin_op: or
+        remediation: |
+          If modifying the Kubelet service config file, edit the kubelet.service file
+          /etc/kubernetes/kubelet-config.json and set the below parameter
+          --make-iptables-util-chains=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: true
+      - id: 3.2.8
+        text: "Ensure that the --hostname-override argument is not set (Manual)"
+        # This is one of those properties that can only be set as a command line argument.
+        # To check if the property is set as expected, we need to parse the kubelet command
+        # instead of reading the Kubelet Configuration file.
+        audit: "/bin/ps -fC $kubeletbin "
+        tests:
+          test_items:
+            - flag: --hostname-override
+              set: false
+        remediation: |
+          If modifying the Kubelet config file, edit the kubelet.service file
+          /etc/systemd/system/kubelet.service and set the below parameter
+          --hostname-override=NODE NAME (where NODE NAME is the internal IP ex.
+          10.0.10.4, as assigned by OKE on build)
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: false
+      - id: 3.2.9
+        text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --event-qps
+              path: '{.eventRecordQPS}'
+              set: true
+              compare:
+                op: eq
+                value: 0
+        remediation: |
+          If modifying the Kubelet service config file, edit the kubelet.service file
+          /etc/kubernetes/kubelet-config.json and set the below parameter
+          --event-qps=0
+          If using command line arguments, edit the kubelet service file /etc/kubernetes/kubelet-config.json.d/10-kubeadm.conf
+          on each worker node and set --event-qps=0 in the KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: true
+      - id: 3.2.10
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: --tls-cert-file
+              path: '{.tlsCertFile}'
+            - flag: --tls-private-key-file
+              path: '{.tlsPrivateKeyFile}'
+        remediation: |
+          If modifying the Kubelet service config file, edit the kubelet.service file
+          /etc/kubernetes/kubelet-config.json and set the below parameters.
+          Verify that the `tls-cert-file=/var/lib/kubelet/pki/tls.pem`.
+          Verify that the `tls-private-key-file=/var/lib/kubelet/pki/tls.key`.
+          Based on your system, restart the kubelet service and check status
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: true
+      - id: 3.2.11
+        text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              compare:
+                op: eq
+                value: true
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: false
+          bin_op: or
+        remediation: |
+          If modifying the Kubelet service config file, edit the kubelet.service file
+          /etc/kubernetes/kubelet-config.json and set the below parameter
+          Verify that the `--rotate-certificates` is present.
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: true
+      - id: 3.2.12
+        text: "Ensure that the --rotate-server-certificates argument is set to true (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              compare:
+                op: eq
+                value: true
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: false
+          bin_op: or
+        remediation: |
+          If modifying the Kubelet service config file, edit the kubelet.service file
+          /etc/kubernetes/kubelet-config.json and set the below parameter
+          --rotate-server-certificates=true
+          Based on your system, restart the kubelet service and check status
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          systemctl status kubelet -l
+        scored: true
\ No newline at end of file
diff --git a/cfg/oke-1.5.0/policies.yaml b/cfg/oke-1.5.0/policies.yaml
new file mode 100644
index 000000000..e90cd877f
--- /dev/null
+++ b/cfg/oke-1.5.0/policies.yaml
@@ -0,0 +1,287 @@
+---
+controls:
+version: oke-1.5.0
+id: 5
+text: "Kubernetes Policies"
+type: "policies"
+groups:
+  - id: 5.1
+    text: "RBAC and Service Accounts"
+    checks:
+      - id: 5.1.1
+        text: "Ensure that the cluster-admin role is only used where required (Manual)"
+        type: "manual"
+        audit: |
+          #To get a list of users and service accounts with the cluster-admin role
+          oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind |
+          grep cluster-admin
+          #To verify that kubeadmin is removed, no results should be returned
+          oc get secrets kubeadmin -n kube-system
+        remediation: |
+          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+          if they need this role or if they could use a role with fewer privileges.
+          Where possible, first bind users to a lower privileged role and then remove the
+          clusterrolebinding to the cluster-admin role:
+          kubectl delete clusterrolebinding [name]
+        scored: false
+
+      - id: 5.1.2
+        text: "Minimize access to secrets (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove get, list and watch access to secret objects in the cluster.
+ scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + audit: | + #needs verification + oc get roles --all-namespaces -o yaml + for i in $(oc get roles -A -o jsonpath='{.items[*].metadata.name}'); do oc + describe clusterrole ${i}; done + #Retrieve the cluster roles defined in the cluster and review for wildcards + oc get clusterroles -o yaml + for i in $(oc get clusterroles -o jsonpath='{.items[*].metadata.name}'); do + oc describe clusterrole ${i}; done + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + None required. + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.2 + text: "Pod Security Policies" + checks: + - id: 5.2.1 + text: "Minimize the admission of privileged containers (Manual)" + audit: | + # needs verification + oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegedContainer:.allowPrivilegedContainer + tests: + test_items: + - flag: "false" + remediation: | + Create a SCC as described in the OpenShift documentation, ensuring that the Allow + Privileged field is set to false. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" + audit: | + oc get scc -o=custom-columns=NAME:.metadata.name,allowHostPID:.allowHostPID + tests: + test_items: + - flag: "false" + remediation: | + Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host + PID field is set to false. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" + audit: | + oc get scc -o=custom-columns=NAME:.metadata.name,allowHostIPC:.allowHostIPC + tests: + test_items: + - flag: "false" + remediation: | + Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host + IPC field is set to false. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" + audit: | + oc get scc -o=custom-columns=NAME:.metadata.name,allowHostNetwork:.allowHostNetwork + tests: + test_items: + - flag: "false" + remediation: | + Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host + Network field is omitted or set to false. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" + audit: | + oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegeEscalation:.allowPrivilegeEscalation + tests: + test_items: + - flag: "false" + remediation: | + Create a SCC as described in the OpenShift documentation, ensuring that the Allow + Privilege Escalation field is omitted or set to false. 
+        scored: false
+
+      - id: 5.2.6
+        text: "Minimize the admission of root containers (Manual)"
+        audit: |
+          # needs verification # | awk 'NR>1 {gsub("map\\[type:", "", $2); gsub("\\]$", "", $2); print $1 ":" $2}'
+          oc get scc -o=custom-columns=NAME:.metadata.name,runAsUser:.runAsUser.type
+          #For SCCs with MustRunAs verify that the range of UIDs does not include 0
+          oc get scc -o=custom-columns=NAME:.metadata.name,uidRangeMin:.runAsUser.uidRangeMin,uidRangeMax:.runAsUser.uidRangeMax
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "MustRunAsNonRoot"
+            - flag: "MustRunAs"
+              compare:
+                op: nothave
+                value: 0
+        remediation: |
+          None required. By default, OpenShift includes the non-root SCC with the Run As User
+          Strategy set to MustRunAsNonRoot. If additional SCCs are appropriate, follow the
+          OpenShift documentation to create custom SCCs.
+        scored: false
+
+      - id: 5.2.7
+        text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
+        audit: |
+          # needs verification
+          oc get scc -o=custom-columns=NAME:.metadata.name,requiredDropCapabilities:.requiredDropCapabilities
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "ALL"
+            - flag: "NET_RAW"
+        remediation: |
+          Create a SCC as described in the OpenShift documentation, ensuring that the Required
+          Drop Capabilities is set to include either NET_RAW or ALL.
+        scored: false
+
+      - id: 5.2.8
+        text: "Minimize the admission of containers with added capabilities (Manual)"
+        type: "manual"
+        remediation: |
+          Ensure that Allowed Capabilities is set to an empty array for every SCC in the cluster
+          except for the privileged SCC.
+        scored: false
+
+      - id: 5.2.9
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider
+          adding a SCC which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+  - id: 5.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 5.3.1
+        text: "Ensure that the CNI in use supports Network Policies (Manual)"
+        type: "manual"
+        remediation: |
+          None required.
+        scored: false
+
+      - id: 5.3.2
+        text: "Ensure that all Namespaces have Network Policies defined (Manual)"
+        type: "manual"
+        audit: |
+          #Run the following command and review the NetworkPolicy objects created in the cluster.
+          oc -n all get networkpolicy
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+        scored: false
+
+  - id: 5.4
+    text: "Secrets Management"
+    checks:
+      - id: 5.4.1
+        text: "Prefer using secrets as files over secrets as environment variables (Manual)"
+        type: "manual"
+        audit: |
+          #Run the following command to find references to objects which use environment variables defined from secrets.
+          oc get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind}
+          {.metadata.name} {"\n"}{end}' -A
+        remediation: |
+          If possible, rewrite application code to read secrets from mounted secret files, rather than
+          from environment variables.
+        scored: false
+
+      - id: 5.4.2
+        text: "Consider external secret storage (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
+        scored: false
+
+  - id: 5.5
+    text: "Extensible Admission Control"
+    checks:
+      - id: 5.5.1
+        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the OpenShift documentation: [Image configuration resources](https://docs.openshift.com/container-platform/4.5/openshift_images/image-configuration.html)
+        scored: false
+
+  - id: 5.7
+    text: "General Policies"
+    checks:
+      - id: 5.7.1
+        text: "Create administrative boundaries between resources using namespaces (Manual)"
+        type: "manual"
+        audit: |
+          #Run the following command and review the namespaces created in the cluster.
+          oc get namespaces
+          #Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.
+        remediation: |
+          Follow the documentation and create namespaces for objects in your deployment as you need
+          them.
+        scored: false
+
+      - id: 5.7.2
+        text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)"
+        type: "manual"
+        remediation: |
+          To enable the default seccomp profile, use the reserved value /runtime/default that will
+          make sure that the pod uses the default policy available on the host.
+        scored: false
+
+      - id: 5.7.3
+        text: "Apply Security Context to Your Pods and Containers (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and apply security contexts to your pods. For a
+          suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
+          Containers.
+        scored: false
+
+      - id: 5.7.4
+        text: "The default namespace should not be used (Manual)"
+        type: "manual"
+        audit: |
+          #Run this command to list objects in default namespace
+          oc project default
+          oc get all
+          #The only entries there should be system managed resources such as the kubernetes and openshift service
+        remediation: |
+          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+          resources and that all new resources are created in a specific namespace.
+        scored: false
diff --git a/cfg/rh-1.4.0/config.yaml b/cfg/rh-1.4.0/config.yaml
new file mode 100644
index 000000000..b7839455a
--- /dev/null
+++ b/cfg/rh-1.4.0/config.yaml
@@ -0,0 +1,2 @@
+---
+## Version-specific settings that override the values in cfg/config.yaml
diff --git a/cfg/rh-1.4.0/controlplane.yaml b/cfg/rh-1.4.0/controlplane.yaml
new file mode 100644
index 000000000..606194ddf
--- /dev/null
+++ b/cfg/rh-1.4.0/controlplane.yaml
@@ -0,0 +1,62 @@
+---
+controls:
+version: rh-1.0
+id: 3
+text: "Control Plane Configuration"
+type: "controlplane"
+groups:
+  - id: 3.1
+    text: "Authentication and Authorization"
+    checks:
+      - id: 3.1.1
+        text: "Client certificate authentication should not be used for users (Manual)"
+        audit: |
+          # To verify user authentication is enabled
+          oc describe authentication
+          # To verify that an identity provider is configured
+          oc get identity
+          # To verify that a custom cluster-admin user exists
+          oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | grep cluster-admin | grep User
+          # To verify that kubeadmin is removed, no results should be returned
+          oc get secrets kubeadmin -n kube-system
+        type: manual
+        remediation: |
+          Configure an identity provider for the OpenShift cluster.
+          Understanding identity provider configuration | Authentication | OpenShift
+          Container Platform 4.5. Once an identity provider has been defined,
+          you can use RBAC to define and apply permissions.
+ After you define an identity provider and create a new cluster-admin user, + remove the kubeadmin user to improve cluster security. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + audit: | + #To view kube apiserver log files + oc adm node-logs --role=master --path=kube-apiserver/ + #To view openshift apiserver log files + oc adm node-logs --role=master --path=openshift-apiserver/ + #To verify kube apiserver audit config + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?' + #To verify openshift apiserver audit config + oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?' + type: manual + remediation: | + No remediation required. + scored: false + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + audit: | + #To verify openshift apiserver audit config + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' + #To verify kube apiserver audit config + oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' + type: manual + remediation: | + In OpenShift 4.6 and higher, if appropriate for your needs, + modify the audit policy. + scored: false diff --git a/cfg/rh-1.4.0/etcd.yaml b/cfg/rh-1.4.0/etcd.yaml new file mode 100644 index 000000000..4398d9cc1 --- /dev/null +++ b/cfg/rh-1.4.0/etcd.yaml @@ -0,0 +1,183 @@ +--- +controls: +version: rh-1.0 +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration Files" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/' + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/' + fi + use_multiple_values: true + tests: + test_items: + - flag: "file" + compare: + op: regex + value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-serving\/etcd-serving-.*\.(?:crt|key)' + remediation: | + OpenShift does not use the etcd-certfile or etcd-keyfile flags. + Certificates for etcd are managed by the etcd cluster operator. + scored: false + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." 
+ else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/' + fi + use_multiple_values: true + tests: + test_items: + - flag: "--client-cert-auth" + compare: + op: eq + value: true + remediation: | + This setting is managed by the cluster etcd operator. No remediation required." + scored: false + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Manual)" + audit: | + # Returns 0 if found, 1 if not found + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$? + fi + use_multiple_values: true + tests: + test_items: + - flag: "exit_code" + compare: + op: eq + value: "1" + remediation: | + This setting is managed by the cluster etcd operator. No remediation required. + scored: false + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/' + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/' + fi + use_multiple_values: true + tests: + test_items: + - flag: "file" + compare: + op: regex + value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-peer\/etcd-peer-.*\.(?:crt|key)' + remediation: | + None. This configuration is managed by the etcd operator. + scored: false + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/' + fi + use_multiple_values: true + tests: + test_items: + - flag: "--peer-client-cert-auth" + compare: + op: eq + value: true + remediation: | + This setting is managed by the cluster etcd operator. No remediation required. 
+ scored: false + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Manual)" + audit: | + # Returns 0 if found, 1 if not found + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$? + fi + use_multiple_values: true + tests: + test_items: + - flag: "exit_code" + compare: + op: eq + value: "1" + remediation: | + This setting is managed by the cluster etcd operator. No remediation required. + scored: false + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/' + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/' + fi + use_multiple_values: true + tests: + test_items: + - flag: "file" + compare: + op: regex + value: '\/etc\/kubernetes\/static-pod-certs\/configmaps\/etcd-(?:serving|peer-client)-ca\/ca-bundle\.(?:crt|key)' + remediation: | + None required. Certificates for etcd are managed by the OpenShift cluster etcd operator. + scored: false diff --git a/cfg/rh-1.4.0/master.yaml b/cfg/rh-1.4.0/master.yaml new file mode 100644 index 000000000..37b50f033 --- /dev/null +++ b/cfg/rh-1.4.0/master.yaml @@ -0,0 +1,1445 @@ +--- +controls: +version: rh-1.0 +id: 1 +text: "Master Node Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Master Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-apiserver namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-apiserver "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + No remediation required; file permissions are managed by the operator. 
+ scored: false + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-apiserver namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-apiserver "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. 
+ scored: false + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-scheduler namespace + POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Manual))" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-scheduler namespace + POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Manual))" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc rsh -n openshift-etcd "$POD_NAME" stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/manifests/etcd-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
+ else + # Execute the stat command + oc rsh -n openshift-etcd "$POD_NAME" stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/manifests/etcd-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # For CNI multus + # Get the pod name in the openshift-multus namespace + POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/etc/cni/net.d/*.conf"; 2>/dev/null + oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/var/run/multus/cni/net.d/*.conf"; 2>/dev/null + fi + # For SDN pods + POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + fi + + # For OVS pods + POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # For CNI multus + # Get the pod name in the openshift-multus namespace + POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
+ else + # Execute the stat command + oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c '$i %n %U:%G' /host/etc/cni/net.d/*.conf" 2>/dev/null + oc exec -n openshift-multus $i -- /bin/bash -c "stat -c '$i %n %U:%G' /host/var/run/multus/cni/net.d/*.conf" 2>/dev/null + fi + # For SDN pods + POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null + fi + # For OVS pods in 4.5 + POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /var/lib/etcd/member + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /var/lib/etcd/member + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. 
+ scored: false + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Manual))" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubeconfig 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Manual)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubeconfig 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-scheduler namespace + POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-scheduler namespace + POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. 
+ scored: false + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Manual)" + audit: | + # Should return root:root for all files and directories + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
+ else + # echo $i static-pod-certs + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; + # echo $i static-pod-resources + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.20 + text: "Ensure that the OpenShift PKI certificate file permissions are set to 644 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-apiserver namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.crt' -exec stat -c "$POD_NAME %n permissions=%a" {} \; + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.21 + text: "Ensure that the OpenShift PKI key file permissions are set to 600 (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-apiserver namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.key' -exec stat -c "$POD_NAME %n permissions=%a" {} \; + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that anonymous requests are authorized (Manual)" + audit: | + # To verify that userGroups include system:unauthenticated + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' 
+ # To verify that userGroups include system:unauthenticated + oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?.userGroups' + # To verify RBAC is enabled + oc get clusterrolebinding + oc get clusterrole + oc get rolebinding + oc get role + tests: + test_items: + - flag: "system:unauthenticated" + remediation: | + None required. The default configuration should not be modified. + scored: false + + - id: 1.2.2 + text: "Ensure that the --basic-auth-file argument is not set (Manual)" + audit: | + oc -n openshift-kube-apiserver get cm config -o yaml | grep --color "basic-auth" + oc -n openshift-apiserver get cm config -o yaml | grep --color "basic-auth" + # Add | awk '$3 != "AVAILABLE" { if ($3){print "available=true"}else{print "available=false"} }; to create AVAILABLE = true/false form + oc get clusteroperator authentication | awk '$3 != "AVAILABLE" { if ($3){print "available=true"}else{print "available=false"} }' + tests: + bin_op: and + test_items: + - flag: "basic-auth-file" + set: false + - flag: "available" + compare: + op: eq + value: true + remediation: | + None required. --basic-auth-file cannot be configured on OpenShift. + scored: false + + - id: 1.2.3 + text: "Ensure that the --token-auth-file parameter is not set (Manual)" + audit: | + # Verify that the token-auth-file flag is not present + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments' + #Verify that the authentication operator is running + oc get clusteroperator authentication | awk '$3 != "AVAILABLE" { if ($3){print "available=true"}else{print "available=false"} }' + tests: + bin_op: and + test_items: + - flag: "token-auth-file" + set: false + - flag: "available" + compare: + op: eq + value: true + remediation: | + None is required. + scored: false + + - id: 1.2.4 + text: "Use https for kubelet connections (Manual)" + audit: | + #for 4.5 + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo' + #for 4.6 + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + #for both 4.5 and 4.6 + oc -n openshift-apiserver describe secret serving-cert + tests: + bin_op: and + test_items: + - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.crt" + - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.key" + remediation: | + No remediation is required. + OpenShift platform components use X.509 certificates for authentication. + OpenShift manages the CAs and certificates for platform components. This is not configurable. 
+ scored: false + + - id: 1.2.5 + text: "Ensure that the kubelet uses certificates to authenticate (Manual)" + audit: | + #for 4.5 + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo' + #for 4.6 + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + #for both 4.5 and 4.6 + oc -n openshift-apiserver describe secret serving-cert + tests: + bin_op: and + test_items: + - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.crt" + - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.key" + remediation: | + No remediation is required. + OpenShift platform components use X.509 certificates for authentication. + OpenShift manages the CAs and certificates for platform components. + This is not configurable. + scored: false + + - id: 1.2.6 + text: "Verify that the kubelet certificate authority is set as appropriate (Manual)" + audit: | + # for 4.5 + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo' + # for 4.6 + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + tests: + test_items: + - flag: "/etc/kubernetes/static-pod-resources/configmaps/kubelet-serving-ca/ca-bundle.crt" + remediation: | + No remediation is required. + OpenShift platform components use X.509 certificates for authentication. + OpenShift manages the CAs and certificates for platform components. + This is not configurable. + scored: false + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Manual)" + audit: | + # To verify that the authorization-mode argument is not used + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + #Check that no overrides are configured + oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' + # To verify RBAC is configured: + oc get clusterrolebinding + oc get clusterrole + oc get rolebinding + oc get role + audit_config: | + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + tests: + bin_op: or + test_items: + - path: "{.authorization-mode}" + compare: + op: nothave + value: "AlwaysAllow" + - path: "{.authorization-mode}" + flag: "authorization-mode" + set: false + remediation: | + None. RBAC is always on and the OpenShift API server does not use the values assigned to the flag authorization-mode. 
+ scored: false + + - id: 1.2.8 + text: "Verify that the Node authorizer is enabled (Manual)" + audit: | + # For OCP 4.5 and earlier verify that authorization-mode is not used + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments' + # For OCP 4.5 and earlier verify that authorization-mode is not used + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host cat /etc/kubernetes/kubelet.conf | grep authorization-mode 2> /dev/null + oc debug node/$NODE_NAME -- chroot /host ps -aux | grep kubelet | grep authorization-mode 2> /dev/null + #Check that no overrides are configured + oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' + audit_config: | + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + tests: + bin_op: or + test_items: + - path: "{.authorization-mode}" + compare: + op: has + value: "Node" + - path: "{.authorization-mode}" + flag: "authorization-mode" + set: false + remediation: | + No remediation is required. + scored: false + + - id: 1.2.9 + text: "Verify that RBAC is enabled (Manual)" + audit: | + # For 4.5 To verify that the authorization-mode argument is not used + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + #Check that no overrides are configured + oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' + # To verify RBAC is used + oc get clusterrolebinding + oc get clusterrole + oc get rolebinding + oc get role + # For 4.6, verify that the authorization-mode argument includes RBAC + audit_config: | + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + tests: + bin_op: or + test_items: + - path: "{.authorization-mode}" + compare: + op: has + value: "RBAC" + - path: "{.authorization-mode}" + flag: "authorization-mode" + set: false + remediation: | + None. It is not possible to disable RBAC. 
+ scored: false + + - id: 1.2.10 + text: "Ensure that the APIPriorityAndFairness feature gate is enabled (Manual)" + audit: | + #Verify the APIPriorityAndFairness feature-gate + oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments' + #Verify the set of admission-plugins for OCP 4.6 and higher + oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' + #Check that no overrides are configured + oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' + tests: + bin_op: and + test_items: + - flag: "APIPriorityAndFairness=true" + - flag: "EventRateLimit" + set: false + remediation: | + No remediation is required + scored: false + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Manual)" + audit: | + #Verify the set of admission-plugins for OCP 4.6 and higher + oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' + #Check that no overrides are configured + oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' + tests: + test_items: + - flag: "AlwaysAdmit" + set: false + remediation: | + No remediation is required. The AlwaysAdmit admission controller cannot be enabled in OpenShift. + scored: false + + - id: 1.2.12 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: | + #Verify the set of admissi on-plugins for OCP 4.6 and higher + oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' + #Check that no overrides are configured + oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' + tests: + test_items: + - flag: "AlwaysPullImages" + set: false + remediation: | + None required. + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin SecurityContextDeny is not set (Manual)" + audit: | + #Verify the set of admission-plugins for OCP 4.6 and higher + oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' + output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') + [ "$output" == "null" ] && echo "ocp 4.5 has SecurityContextDeny and SecurityContextConstraint compiled" || echo $output + #Check that no overrides are configured + oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' + #Verify that SecurityContextConstraints are deployed + oc get scc + oc describe scc restricted + tests: + bin_op: and + test_items: + - flag: "SecurityContextConstraint" + set: true + - flag: "anyuid" + - flag: "hostaccess" + - flag: "hostmount-anyuid" + - flag: "hostnetwork" + - flag: "node-exporter" + - flag: "nonroot" + - flag: "privileged" + - flag: "restricted" + remediation: | + None required. The Security Context Constraint admission controller cannot be disabled in OpenShift 4. 
+ scored: false + + - id: 1.2.14 + text: "Ensure that the admission control plugin ServiceAccount is set (Manual)" + audit: | + #Verify the list of admission controllers for 4.6 and higher + oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' + output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') + [ "$output" == "null" ] && echo "ocp 4.5 has ServiceAccount compiled" || echo $output + #Check that no overrides are configured + oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' + #Verify that Service Accounts are present + oc get sa -A + tests: + test_items: + - flag: "ServiceAccount" + set: true + remediation: | + None required. OpenShift is configured to use service accounts by default. + scored: false + + - id: 1.2.15 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Manual)" + audit: | + #Verify the list of admission controllers for 4.6 and higher + oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' + output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') + [ "$output" == "null" ] && echo "ocp 4.5 has NamespaceLifecycle compiled" || echo $output + #Check that no overrides are configured + oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' + tests: + test_items: + - flag: "NamespaceLifecycle" + remediation: | + Ensure that the --disable-admission-plugins parameter does not include NamespaceLifecycle. + scored: false + + - id: 1.2.16 + text: "Ensure that the admission control plugin SecurityContextConstraint is set (Manual)" + audit: | + #Verify the set of admission-plugins for OCP 4.6 and higher + oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' + output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') + [ "$output" == "null" ] && echo "ocp 4.5 has SecurityContextConstraint compiled" || echo $output + #Check that no overrides are configured + oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' + #Verify that SecurityContextConstraints are deployed + oc get scc + oc describe scc restricted + tests: + bin_op: and + test_items: + - flag: "SecurityContextConstraint" + - flag: "anyuid" + - flag: "hostaccess" + - flag: "hostmount-anyuid" + - flag: "hostnetwork" + - flag: "node-exporter" + - flag: "nonroot" + - flag: "privileged" + - flag: "restricted" + remediation: | + None required. Security Context Constraints are enabled by default in OpenShift and cannot be disabled. 
+ scored: false + + - id: 1.2.17 + text: "Ensure that the admission control plugin NodeRestriction is set (Manual)" + audit: | + # For 4.5, review the control plane manifest https://github.com/openshift/origin/blob/release-4.5/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane/manifests.go#L132 + #Verify the set of admission-plugins for OCP 4.6 and higher + oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' + output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') + [ "$output" == "null" ] && echo "ocp 4.5 has NodeRestriction compiled" || echo $output + #Check that no overrides are configured + oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' + tests: + test_items: + - flag: "NodeRestriction" + remediation: | + The NodeRestriction plugin cannot be disabled. + scored: false + + - id: 1.2.18 + text: "Ensure that the --insecure-bind-address argument is not set (Manual)" + audit: | + # InsecureBindAddress=true should not be in the results + oc get kubeapiservers.operator.openshift.io cluster -o jsonpath='{range .spec.observedConfig.apiServerArguments.feature-gates[*]}{@}{"\n"}{end}' + # Result should be only 6443 + oc -n openshift-kube-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}' + # Result should be only 8443 + oc -n openshift-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}' + tests: + bin_op: and + test_items: + - flag: "insecure-bind-address" + set: false + - flag: 6443 + - flag: 8443 + remediation: | + None required. + scored: false + + - id: 1.2.19 + text: "Ensure that the --insecure-port argument is set to 0 (Manual)" + audit: | + # Should return 6443 + oc -n openshift-kube-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}' + # For OCP 4.6 and above + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments["insecure-port"]' + output=$(oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments["insecure-port"]') + [ "$output" == "null" ] && echo "ocp 4.5 has insecure-port set to \"0\" compiled" || echo $output + tests: + bin_op: and + test_items: + - flag: "\"0\"" + - flag: "6443" + remediation: | + None required. The configuration is managed by the API server operator. + scored: false + + - id: 1.2.20 + text: "Ensure that the --secure-port argument is not set to 0 (Manual)" + audit: | + oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig' + # Should return only 6443 + echo ports=`oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[*].spec.containers[?(@.name=="kube-apiserver")].ports[*].containerPort}'` + tests: + bin_op: and + test_items: + - flag: '"bindAddress": "0.0.0.0:6443"' + - flag: "ports" + compare: + op: regex + value: '\s*(?:6443\s*){1,}$' + remediation: | + None required. 
+        scored: false
+
+      - id: 1.2.21
+        text: "Ensure that the healthz endpoint is protected by RBAC (Manual)"
+        type: manual
+        audit: |
+          # Verify endpoints
+          oc -n openshift-kube-apiserver describe endpoints
+          # Check config for ports, livenessProbe, readinessProbe, healthz
+          oc -n openshift-kube-apiserver get cm kube-apiserver-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers'
+          # Test to validate RBAC enabled on the apiserver endpoint; check with non-admin role
+          oc project openshift-kube-apiserver
+          POD=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}')
+          PORT=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].spec.containers[0].ports[0].hostPort}')
+          # Following should return 403 Forbidden
+          oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -k
+          # Create a service account to test RBAC
+          oc create -n openshift-kube-apiserver sa permission-test-sa
+          # Should return 403 Forbidden
+          SA_TOKEN=$(oc sa -n openshift-kube-apiserver get-token permission-test-sa)
+          oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k
+          # Cleanup
+          oc delete -n openshift-kube-apiserver sa permission-test-sa
+          # As cluster admin, should succeed
+          CLUSTER_ADMIN_TOKEN=$(oc whoami -t)
+          oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k
+        remediation: |
+          None required as profiling data is protected by RBAC.
+        scored: false
+
+      - id: 1.2.22
+        text: "Ensure that the --audit-log-path argument is set (Manual)"
+        audit: |
+          # Should return "/var/log/kube-apiserver/audit.log"
+          output=$(oc get configmap config -n openshift-kube-apiserver -o jsonpath="{['.data.config\.yaml']}" | jq '.auditConfig.auditFilePath')
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true
+          output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-path"][]?')
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true
+          POD=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}')
+          oc rsh -n openshift-kube-apiserver -c kube-apiserver $POD ls /var/log/kube-apiserver/audit.log 2>/dev/null
+          # Should return 0
+          echo exit_code=$?
+          # Should return "/var/log/openshift-apiserver/audit.log"
+          output=$(oc get configmap config -n openshift-apiserver -o jsonpath="{['.data.config\.yaml']}" | jq '.auditConfig.auditFilePath')
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true
+          output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-path"][]?')
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true
+          POD=$(oc get pods -n openshift-apiserver -l apiserver=true -o jsonpath='{.items[0].metadata.name}')
+          oc rsh -n openshift-apiserver $POD ls /var/log/openshift-apiserver/audit.log 2>/dev/null
+          # Should return 0
+          echo exit_code=$?
+        use_multiple_values: true
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "/var/log/kube-apiserver/audit.log"
+            - flag: "/var/log/openshift-apiserver/audit.log"
+            - flag: "exit_code=0"
+            - flag: "null"
+        remediation: |
+          None required. This is managed by the cluster apiserver operator.
+ scored: false + + - id: 1.2.23 + text: "Ensure that the audit logs are forwarded off the cluster for retention (Manual)" + type: "manual" + remediation: | + Follow the documentation for log forwarding. Forwarding logs to third party systems + https://docs.openshift.com/container-platform/4.5/logging/cluster-logging-external.html + scored: false + + - id: 1.2.24 + text: "Ensure that the maximumRetainedFiles argument is set to 10 or as appropriate (Manual)" + audit: | + #NOTICE + output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumRetainedFiles) + [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumRetainedFiles=$output" || true + output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumRetainedFiles) + [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumRetainedFiles=$output" || true + output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxbackup"][]?') + [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxbackup=$output" || true + output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxbackup"][]?') + [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxbackup=$output" || true + use_multiple_values: true + tests: + bin_op: or + test_items: + - flag: "maximumRetainedFiles" + compare: + op: gte + value: 10 + - flag: "audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Set the maximumRetainedFiles parameter to 10 or as an appropriate number of files. maximumRetainedFiles: 10 + scored: false + + - id: 1.2.25 + text: "Ensure that the maximumFileSizeMegabytes argument is set to 100 or as appropriate (Manual)" + audit: | + #NOTICE + output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumFileSizeMegabytes) + [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumFileSizeMegabytes=$output" || true + output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumFileSizeMegabytes) + [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumFileSizeMegabytes=$output" || true + output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxsize"][]?') + [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxsize=$output" || true + output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxsize"][]?') + [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxsize=$output" || true + use_multiple_values: true + tests: + bin_op: or + test_items: + - flag: "maximumFileSizeMegabytes" + compare: + op: gte + value: 100 + - flag: "audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Set the audit-log-maxsize parameter to 100 or as an appropriate number. 
+ maximumFileSizeMegabytes: 100 + scored: false + + - id: 1.2.26 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: | + echo requestTimeoutSeconds=`oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.requestTimeoutSeconds` + tests: + test_items: + - flag: "requestTimeoutSeconds" + remediation: | + TBD + scored: false + + - id: 1.2.27 + text: "Ensure that the --service-account-lookup argument is set to true (Manual)" + audit: | + # For OCP 4.5 + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' | grep service-account-lookup + # For OCP 4.6 and above + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["service-account-lookup"]' + output=$(oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["service-account-lookup"][0]') + [ "$output" == "null" ] && echo "ocp 4.5 has service-account-lookup=true compiled" || echo service-account-lookup=$output + tests: + test_items: + - flag: "service-account-lookup=true" + remediation: | + TBD + scored: false + + - id: 1.2.28 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Manual)" + audit: | + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .serviceAccountPublicKeyFiles[] + tests: + bin_op: and + test_items: + - flag: "/etc/kubernetes/static-pod-resources/configmaps/sa-token-signing-certs" + - flag: "/etc/kubernetes/static-pod-resources/configmaps/bound-sa-token-signing-certs" + remediation: | + The OpenShift API server does not use the service-account-key-file argument. + The ServiceAccount token authenticator is configured with serviceAccountConfig.publicKeyFiles. + OpenShift does not reuse the apiserver TLS key. This is not configurable. + scored: false + + - id: 1.2.29 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Manual)" + audit: | + # etcd Certificate File + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.certFile + # etcd Key File + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.keyFile + # NOTICE 4.6 extention + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-certfile"]' + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-keyfile"]' + tests: + bin_op: and + test_items: + - flag: "/etc/kubernetes/static-pod-resources/secrets/etcd-client/tls.crt" + - flag: "/etc/kubernetes/static-pod-resources/secrets/etcd-client/tls.key" + remediation: | + OpenShift automatically manages TLS and client certificate authentication for etcd. + This is not configurable. 
+        scored: false
+
+      - id: 1.2.30
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
+        audit: |
+          # TLS Cert File - openshift-kube-apiserver
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.certFile
+          # TLS Key File
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.servingInfo.keyFile'
+          # NOTICE 4.6 extension
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["tls-cert-file"]'
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["tls-private-key-file"]'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.crt"
+            - flag: "/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.key"
+        remediation: |
+          OpenShift automatically manages TLS authentication for the API server communication with the node/kubelet.
+          This is not configurable. You may optionally set a custom default certificate to be used by the API server
+          when serving content in order to enable clients to access the API server at a different host name or without
+          the need to distribute the cluster-managed certificate authority (CA) certificates to the clients.
+          Follow the directions in the OpenShift documentation User-provided certificates for the API server
+        scored: false
+
+      - id: 1.2.31
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Manual)"
+        audit: |
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.clientCA
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["client-ca-file"]'
+        tests:
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt"
+        remediation: |
+          OpenShift automatically manages TLS authentication for the API server communication with the node/kubelet.
+          This is not configurable. You may optionally set a custom default certificate to be used by the API
+          server when serving content in order to enable clients to access the API server at a different host name
+          or without the need to distribute the cluster-managed certificate authority (CA) certificates to the clients.
+
+          User-provided certificates must be provided in a kubernetes.io/tls type Secret in the openshift-config namespace.
+          Update the API server cluster configuration,
+          the apiserver/cluster resource, to enable the use of the user-provided certificate.
+        scored: false
+
+      - id: 1.2.32
+        text: "Ensure that the --etcd-cafile argument is set as appropriate (Manual)"
+        audit: |
+          #etcd CA File
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.ca
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-cafile"]'
+        tests:
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-resources/configmaps/etcd-serving-ca/ca-bundle.crt"
+        remediation: |
+          None required. OpenShift generates the etcd-cafile and sets the arguments appropriately in the API server. Communication with etcd is secured by the etcd serving CA.
+        scored: false
+
+      - id: 1.2.33
+        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
+        audit: |
+          # encrypt the etcd datastore
+          oc get openshiftapiserver -o=jsonpath='{range.items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}'
+        tests:
+          test_items:
+            - flag: "EncryptionCompleted"
+        remediation: |
+          Follow the OpenShift documentation for Encrypting etcd data | Authentication | OpenShift Container Platform 4.5
+          https://docs.openshift.com/container-platform/4.5/security/encrypting-etcd.html
+        scored: false
+
+      - id: 1.2.34
+        text: "Ensure that encryption providers are appropriately configured (Manual)"
+        audit: |
+          # encrypt the etcd datastore
+          oc get openshiftapiserver -o=jsonpath='{range.items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}'
+        tests:
+          test_items:
+            - flag: "EncryptionCompleted"
+        remediation: |
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
+          In this file, choose aescbc, kms or secretbox as the encryption provider.
+        scored: false
+
+      - id: 1.2.35
+        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
+        type: manual
+        audit: |
+          # verify cipher suites
+          oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq .spec.observedConfig.servingInfo
+          oc get openshiftapiservers.operator.openshift.io cluster -o json | jq .spec.observedConfig.servingInfo
+          oc describe --namespace=openshift-ingress-operator ingresscontroller/default
+        remediation: |
+          Verify that the tlsSecurityProfile is set to the value you chose.
+          Note: The HAProxy Ingress controller image does not support TLS 1.3
+          and because the Modern profile requires TLS 1.3, it is not supported.
+          The Ingress Operator converts the Modern profile to Intermediate.
+          The Ingress Operator also converts the TLS 1.0 of an Old or Custom profile to 1.1,
+          and TLS 1.3 of a Custom profile to 1.2.
+        scored: false
+
+  - id: 1.3
+    text: "Controller Manager"
+    checks:
+      - id: 1.3.1
+        text: "Ensure that garbage collection is configured as appropriate (Manual)"
+        type: manual
+        remediation: |
+          To configure, follow the directions in Configuring garbage collection for containers and images
+          https://docs.openshift.com/container-platform/4.5/nodes/nodes/nodes-nodes-garbage-collection.html#nodes-nodes-garbage-collection-configuring_nodes-nodes-configuring
+        scored: false
+
+      - id: 1.3.2
+        text: "Ensure that controller manager healthz endpoints are protected by RBAC (Manual)"
+        type: manual
+        audit: |
+          # Verify configuration for ports, livenessProbe, readinessProbe, healthz
+          oc -n openshift-kube-controller-manager get cm kube-controller-manager-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers'
+          # Verify endpoints
+          oc -n openshift-kube-controller-manager describe endpoints
+          # Test to validate RBAC enabled on the controller endpoint; check with non-admin role
+          oc project openshift-kube-controller-manager
+          POD=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].metadata.name}')
+          PORT=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].spec.containers[0].ports[0].hostPort}')
+          # Following should return 403 Forbidden
+          oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -k
+          # Create a service account to test RBAC
+          oc create -n openshift-kube-controller-manager sa permission-test-sa
+          # Should return 403 Forbidden
+          SA_TOKEN=$(oc sa -n openshift-kube-controller-manager get-token permission-test-sa)
+          oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k
+          # Cleanup
+          oc delete -n openshift-kube-controller-manager sa permission-test-sa
+          # As cluster admin, should succeed
+          CLUSTER_ADMIN_TOKEN=$(oc whoami -t)
+          oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k
+        remediation: |
+          None required; profiling is protected by RBAC.
+        scored: false
+
+      - id: 1.3.3
+        text: "Ensure that the --use-service-account-credentials argument is set to true (Manual)"
+        audit: |
+          echo use-service-account-credentials=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["use-service-account-credentials"][]'`
+        tests:
+          test_items:
+            - flag: "use-service-account-credentials"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          The OpenShift Controller Manager operator manages and updates the OpenShift Controller Manager.
+          The Kubernetes Controller Manager operator manages and updates the Kubernetes Controller Manager deployed on top of OpenShift.
+          This operator is configured via KubeControllerManager custom resource.
+        scored: false
+
+      - id: 1.3.4
+        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Manual)"
+        audit: |
+          oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["service-account-private-key-file"][]'
+        tests:
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-resources/secrets/service-account-private-key/service-account.key"
+        remediation: |
+          None required.
+          OpenShift manages the service account credentials for the scheduler automatically.
+ scored: false + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Manual)" + audit: | + oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["root-ca-file"][]' + tests: + test_items: + - flag: "/etc/kubernetes/static-pod-resources/configmaps/serviceaccount-ca/ca-bundle.crt" + remediation: | + None required. + Certificates for OpenShift platform components are automatically created and rotated by the OpenShift Container Platform. + scored: false + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: | + oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["feature-gates"][]' + tests: + test_items: + - flag: "RotateKubeletServerCertificate" + compare: + op: eq + value: "true" + remediation: | + None required. + Certificates for OpenShift platform components are automatically created and rotated by the OpenShift Container Platform. + scored: false + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Manual)" + audit: | + echo port=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq '.extendedArguments["port"][]'` + echo secure-port=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq '.extendedArguments["secure-port"][]'` + #Following should fail with a http code 403 + POD=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].metadata.name}') + oc rsh -n openshift-kube-controller-manager -c kube-controller-manager $POD curl https://localhost:10257/metrics -k + tests: + bin_op: and + test_items: + - flag: "secure-port" + compare: + op: eq + value: "\"10257\"" + - flag: "port" + compare: + op: eq + value: "\"0\"" + - flag: "\"code\": 403" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the master node and ensure the correct value for the --bind-address parameter + scored: false + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the healthz endpoints for the scheduler are protected by RBAC (Manual)" + type: manual + audit: | + # check configuration for ports, livenessProbe, readinessProbe, healthz + oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers' + # Test to verify endpoints + oc -n openshift-kube-scheduler describe endpoints + # Test to validate RBAC enabled on the scheduler endpoint; check with non-admin role + oc project openshift-kube-scheduler + POD=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}') + PORT=$(oc get pod $POD -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}') + # Should return 403 Forbidden + oc rsh ${POD} curl http://localhost:${PORT}/metrics -k + # Create a service account to test RBAC + oc create sa permission-test-sa + # Should return 403 Forbidden + SA_TOKEN=$(oc sa get-token permission-test-sa) + oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k + # Cleanup + oc delete sa permission-test-sa + # As cluster admin, should succeed + CLUSTER_ADMIN_TOKEN=$(oc whoami -t) + oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k + remediation: | + A fix to this issue: 
https://bugzilla.redhat.com/show_bug.cgi?id=1889488 None required. + Profiling is protected by RBAC and cannot be disabled. + scored: false + + - id: 1.4.2 + text: "Verify that the scheduler API service is protected by authentication and authorization (Manual)" + type: manual + audit: | + # To verify endpoints + oc -n openshift-kube-scheduler describe endpoints + # To verify that bind-adress is not used in the configuration and that port is set to 0 + oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers' + # To test for RBAC: + oc project openshift-kube-scheduler + POD=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}') + POD_IP=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].status.podIP}') + PORT=$(oc get pod $POD -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}') + # Should return a 403 + oc rsh ${POD} curl http://${POD_IP}:${PORT}/metrics + # Create a service account to test RBAC + oc create sa permission-test-sa + # Should return 403 Forbidden + SA_TOKEN=$(oc sa get-token permission-test-sa) + oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k + # Cleanup + oc delete sa permission-test-sa + # As cluster admin, should succeed + CLUSTER_ADMIN_TOKEN=$(oc whoami -t) + oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k + remediation: | + By default, the --bind-address argument is not present, + the readinessProbe and livenessProbe arguments are set to 10251 and the port argument is set to 0. + Check the status of this issue: https://bugzilla.redhat.com/show_bug.cgi?id=1889488 + scored: false diff --git a/cfg/rh-1.4.0/node.yaml b/cfg/rh-1.4.0/node.yaml new file mode 100644 index 000000000..fb982d6f3 --- /dev/null +++ b/cfg/rh-1.4.0/node.yaml @@ -0,0 +1,429 @@ +--- +controls: +version: rh-1.0 +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/systemd/system/kubelet.service 2> /dev/null + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + By default, the kubelet service file has permissions of 644. + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + audit: | + # Should return root:root for each node + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/systemd/system/kubelet.service 2> /dev/null + tests: + test_items: + - flag: root:root + remediation: | + By default, the kubelet service file has ownership of root:root. 
+        scored: true
+
+      - id: 4.1.3
+        text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # Get the pod name in the openshift-sdn namespace
+          POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$i %n permissions=%a" /config/kube-proxy-config.yaml 2>/dev/null
+          fi
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              set: true
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          None needed.
+        scored: false
+
+      - id: 4.1.4
+        text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # Get the pod name in the openshift-sdn namespace
+          POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$i %n %U:%G" /config/kube-proxy-config.yaml 2>/dev/null
+          fi
+        use_multiple_values: true
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+        remediation: |
+          None required. The configuration is managed by OpenShift operators.
+        scored: false
+
+      - id: 4.1.5
+        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          # Check permissions
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet.conf 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          None required.
+        scored: false
+
+      - id: 4.1.6
+        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Manual)"
+        audit: |
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet.conf 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          None required.
+        scored: false
+
+      - id: 4.1.7
+        text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated)"
+        audit: |
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet-ca.crt 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          None required.
+ scored: true + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet-ca.crt 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: root:root + remediation: | + None required. + scored: true + + - id: 4.1.9 + text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/lib/kubelet/kubeconfig 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + None required. + scored: true + + - id: 4.1.10 + text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /var/lib/kubelet/kubeconfig 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: root:root + remediation: | + None required. + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host grep -B4 -A1 anonymous /etc/kubernetes/kubelet.conf 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: "enabled: true" + set: false + remediation: | + Follow the instructions in the documentation to create a Kubelet config CRD + and set the anonymous-auth is set to false. + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Manual)" + type: manual + # Takes a lot of time for connection to fail and + audit: | + POD=$(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}') + TOKEN=$(oc whoami -t) + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc exec -n openshift-kube-apiserver $POD -- curl -sS https://172.25.0.1/api/v1/nodes/$NODE_NAME/proxy/configz -k -H "Authorization:Bearer $TOKEN" | jq -r '.kubeletconfig.authorization.mode' 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: "Connection timed out" + remediation: | + None required. Unauthenticated/Unauthorized users have no access to OpenShift nodes. + scored: false + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host grep clientCAFile /etc/kubernetes/kubelet.conf 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: '"clientCAFile": "/etc/kubernetes/kubelet-ca.crt"' + remediation: | + None required. Changing the clientCAFile value is unsupported. 
+ scored: true + + - id: 4.2.4 + text: "Verify that the read only port is not used or is set to 0 (Automated)" + audit: | + echo `oc -n openshift-kube-apiserver get cm kube-apiserver-pod -o yaml | grep --color read-only-port` 2> /dev/null + echo `oc -n openshift-kube-apiserver get cm config -o yaml | grep --color "read-only-port"` 2> /dev/null + tests: + bin_op: or + test_items: + - flag: "read-only-port" + compare: + op: has + value: "[\"0\"]" + - flag: "read-only-port" + set: false + remediation: | + In earlier versions of OpenShift 4, the read-only-port argument is not used. + Follow the instructions in the documentation to create a Kubelet config CRD + and set the --read-only-port is set to 0. + scored: true + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" + audit: | + # Should return 1 for node + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/${NODE_NAME} -- chroot /host ps -ef | grep kubelet | grep streaming-connection-idle-timeout 2> /dev/null + echo exit_code=$? + # Should return 1 for node + oc debug node/${NODE_NAME} -- chroot /host grep streamingConnectionIdleTimeout /etc/kubernetes/kubelet.conf 2> /dev/null + echo exit_code=$? + use_multiple_values: true + tests: + bin_op: or + test_items: + - flag: --streaming-connection-idle-timeout + compare: + op: noteq + value: 0 + - flag: streamingConnectionIdleTimeout + compare: + op: noteq + value: 0s + - flag: "exit_code" + compare: + op: eq + value: 1 + remediation: | + Follow the instructions in the documentation to create a Kubelet config CRD and set + the --streaming-connection-idle-timeout to the desired value. Do not set the value to 0. + scored: true + + - id: 4.2.6 + text: "Ensure that the --protect-kernel-defaults argument is not set (Manual)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host more /etc/kubernetes/kubelet.conf 2> /dev/null + tests: + test_items: + - flag: protectKernelDefaults + set: false + remediation: | + None required. The OpenShift 4 kubelet modifies the system tunable; + using the protect-kernel-defaults flag will cause the kubelet to fail on start if the tunables + don't match the kubelet configuration and the OpenShift node will fail to start. + scored: false + + - id: 4.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Manual)" + audit: | + /bin/bash + flag=make-iptables-util-chains + opt=makeIPTablesUtilChains + # look at each machineconfigpool + while read -r pool nodeconfig; do + # true by default + value='true' + # first look for the flag + oc get machineconfig $nodeconfig -o json | jq -r '.spec.config.systemd[][] | select(.name=="kubelet.service") | .contents' | sed -n "/^ExecStart=/,/^\$/ { /^\\s*--$flag=false/ q 100 }" + # if the above command exited with 100, the flag was false + [ $? == 100 ] && value='false' + # now look in the yaml KubeletConfig + yamlconfig=$(oc get machineconfig $nodeconfig -o json | jq -r '.spec.config.storage.files[] | select(.path=="/etc/kubernetes/kubelet.conf") | .contents.source ' | sed 's/^data:,//' | while read; do echo -e ${REPLY//%/\\x}; done) + echo "$yamlconfig" | sed -n "/^$opt:\\s*false\\s*$/ q 100" + [ $? 
== 100 ] && value='false'
+ echo "Pool $pool has $flag ($opt) set to $value"
+ done < <(oc get machineconfigpools -o json | jq -r '.items[] | select(.status.machineCount>0) | .metadata.name + " " + .spec.configuration.name')
+ use_multiple_values: true
+ tests:
+ test_items:
+ - flag: "set to true"
+ remediation: |
+ None required. The --make-iptables-util-chains argument is set to true by default.
+ scored: false
+
+ - id: 4.2.8
+ text: "Ensure that the --hostname-override argument is not set (Manual)"
+ audit: |
+ echo `oc get machineconfig 01-worker-kubelet -o yaml | grep hostname-override`
+ echo `oc get machineconfig 01-master-kubelet -o yaml | grep hostname-override`
+ tests:
+ test_items:
+ - flag: hostname-override
+ set: false
+ remediation: |
+ By default, the --hostname-override argument is not set.
+ scored: false
+
+ - id: 4.2.9
+ text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (Manual)"
+ audit: |
+ NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+ oc debug node/${NODE_NAME} -- chroot /host cat /etc/kubernetes/kubelet.conf;
+ oc get machineconfig 01-worker-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050
+ oc get machineconfig 01-master-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050
+ type: "manual"
+ remediation: |
+ Follow the documentation to edit kubelet parameters
+ https://docs.openshift.com/container-platform/4.5/scalability_and_performance/recommended-host-practices.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters
+ KubeAPIQPS:
+ scored: false
+
+ - id: 4.2.10
+ text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+ audit: |
+ oc get configmap config -n openshift-kube-apiserver -o json \
+ | jq -r '.data["config.yaml"]' \
+ | jq -r '.apiServerArguments |
+ .["kubelet-client-certificate"][0],
+ .["kubelet-client-key"][0]
+ '
+ tests:
+ bin_op: and
+ test_items:
+ - flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.crt"
+ - flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.key"
+ remediation: |
+ OpenShift automatically manages TLS authentication for the API server communication with the node/kubelet.
+ This is not configurable.
+ scored: true
+
+ - id: 4.2.11
+ text: "Ensure that the --rotate-certificates argument is not set to false (Manual)"
+ audit: |
+ #Verify the rotateKubeletClientCertificate feature gate is not set to false
+ NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+ oc debug node/${NODE_NAME} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep RotateKubeletClientCertificate 2> /dev/null
+ # Verify the rotateCertificates argument is set to true
+ oc debug node/${NODE_NAME} -- chroot /host grep rotate /etc/kubernetes/kubelet.conf 2> /dev/null
+ use_multiple_values: true
+ tests:
+ bin_op: or
+ test_items:
+ - flag: rotateCertificates
+ compare:
+ op: eq
+ value: true
+ - flag: rotateKubeletClientCertificates
+ compare:
+ op: noteq
+ value: false
+ - flag: rotateKubeletClientCertificates
+ set: false
+ remediation: |
+ None required.
+ scored: false
+
+ - id: 4.2.12
+ text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
+ audit: |
+ #Verify the rotateKubeletServerCertificate feature gate is on
+ NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+ oc debug node/${NODE_NAME} -- chroot /host grep RotateKubeletServerCertificate /etc/kubernetes/kubelet.conf 2> /dev/null
+ # Verify the rotateCertificates argument is set to true
+ oc debug node/${NODE_NAME} -- chroot /host grep rotate /etc/kubernetes/kubelet.conf 2> /dev/null
+ use_multiple_values: true
+ tests:
+ bin_op: or
+ test_items:
+ - flag: rotateCertificates
+ compare:
+ op: eq
+ value: true
+ - flag: RotateKubeletServerCertificate
+ compare:
+ op: eq
+ value: true
+ remediation: |
+ By default, kubelet server certificate rotation is disabled.
+ scored: false
+
+ - id: 4.2.13
+ text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
+ audit: |
+ # needs verification
+ # verify cipher suites
+ oc describe --namespace=openshift-ingress-operator ingresscontroller/default
+ oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.observedConfig.servingInfo
+ oc get openshiftapiservers.operator.openshift.io cluster -o json |jq .spec.observedConfig.servingInfo
+ oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo
+ #check value for tlsSecurityProfile; null is returned if default is used
+ oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.tlsSecurityProfile
+ type: manual
+ remediation: |
+ Follow the directions above and in the OpenShift documentation to configure the tlsSecurityProfile.
+ Configuring Ingress
+ scored: false
diff --git a/cfg/rh-1.4.0/policies.yaml b/cfg/rh-1.4.0/policies.yaml
new file mode 100644
index 000000000..e90cd877f
--- /dev/null
+++ b/cfg/rh-1.4.0/policies.yaml
@@ -0,0 +1,287 @@
+---
+controls:
+version: rh-1.0
+id: 5
+text: "Kubernetes Policies"
+type: "policies"
+groups:
+ - id: 5.1
+ text: "RBAC and Service Accounts"
+ checks:
+ - id: 5.1.1
+ text: "Ensure that the cluster-admin role is only used where required (Manual)"
+ type: "manual"
+ audit: |
+ #To get a list of users and service accounts with the cluster-admin role
+ oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind |
+ grep cluster-admin
+ #To verify that kubeadmin is removed, no results should be returned
+ oc get secrets kubeadmin -n kube-system
+ remediation: |
+ Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+ if they need this role or if they could use a role with fewer privileges.
+ Where possible, first bind users to a lower privileged role and then remove the
+ clusterrolebinding to the cluster-admin role:
+ kubectl delete clusterrolebinding [name]
+ scored: false
+
+ - id: 5.1.2
+ text: "Minimize access to secrets (Manual)"
+ type: "manual"
+ remediation: |
+ Where possible, remove get, list and watch access to secret objects in the cluster.
+ scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + audit: | + #needs verification + oc get roles --all-namespaces -o yaml + for i in $(oc get roles -A -o jsonpath='{.items[*].metadata.name}'); do oc + describe clusterrole ${i}; done + #Retrieve the cluster roles defined in the cluster and review for wildcards + oc get clusterroles -o yaml + for i in $(oc get clusterroles -o jsonpath='{.items[*].metadata.name}'); do + oc describe clusterrole ${i}; done + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + None required. + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.2 + text: "Pod Security Policies" + checks: + - id: 5.2.1 + text: "Minimize the admission of privileged containers (Manual)" + audit: | + # needs verification + oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegedContainer:.allowPrivilegedContainer + tests: + test_items: + - flag: "false" + remediation: | + Create a SCC as described in the OpenShift documentation, ensuring that the Allow + Privileged field is set to false. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" + audit: | + oc get scc -o=custom-columns=NAME:.metadata.name,allowHostPID:.allowHostPID + tests: + test_items: + - flag: "false" + remediation: | + Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host + PID field is set to false. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" + audit: | + oc get scc -o=custom-columns=NAME:.metadata.name,allowHostIPC:.allowHostIPC + tests: + test_items: + - flag: "false" + remediation: | + Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host + IPC field is set to false. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" + audit: | + oc get scc -o=custom-columns=NAME:.metadata.name,allowHostNetwork:.allowHostNetwork + tests: + test_items: + - flag: "false" + remediation: | + Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host + Network field is omitted or set to false. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" + audit: | + oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegeEscalation:.allowPrivilegeEscalation + tests: + test_items: + - flag: "false" + remediation: | + Create a SCC as described in the OpenShift documentation, ensuring that the Allow + Privilege Escalation field is omitted or set to false. 
+ scored: false
+
+ - id: 5.2.6
+ text: "Minimize the admission of root containers (Manual)"
+ audit: |
+ # needs verification # | awk 'NR>1 {gsub("map\\[type:", "", $2); gsub("\\]$", "", $2); print $1 ":" $2}'
+ oc get scc -o=custom-columns=NAME:.metadata.name,runAsUser:.runAsUser.type
+ #For SCCs with MustRunAs verify that the range of UIDs does not include 0
+ oc get scc -o=custom-columns=NAME:.metadata.name,uidRangeMin:.runAsUser.uidRangeMin,uidRangeMax:.runAsUser.uidRangeMax
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "MustRunAsNonRoot"
+ - flag: "MustRunAs"
+ compare:
+ op: nothave
+ value: 0
+ remediation: |
+ None required. By default, OpenShift includes the non-root SCC with the Run As User
+ Strategy set to MustRunAsNonRoot. If additional SCCs are appropriate, follow the
+ OpenShift documentation to create custom SCCs.
+ scored: false
+
+ - id: 5.2.7
+ text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
+ audit: |
+ # needs verification
+ oc get scc -o=custom-columns=NAME:.metadata.name,requiredDropCapabilities:.requiredDropCapabilities
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "ALL"
+ - flag: "NET_RAW"
+ remediation: |
+ Create a SCC as described in the OpenShift documentation, ensuring that the Required
+ Drop Capabilities is set to include either NET_RAW or ALL.
+ scored: false
+
+ - id: 5.2.8
+ text: "Minimize the admission of containers with added capabilities (Manual)"
+ type: "manual"
+ remediation: |
+ Ensure that Allowed Capabilities is set to an empty array for every SCC in the cluster
+ except for the privileged SCC.
+ scored: false
+
+ - id: 5.2.9
+ text: "Minimize the admission of containers with capabilities assigned (Manual)"
+ type: "manual"
+ remediation: |
+ Review the use of capabilities in applications running on your cluster. Where a namespace
+ contains applications which do not require any Linux capabilities to operate, consider
+ adding a SCC which forbids the admission of containers which do not drop all capabilities.
+ scored: false
+
+ - id: 5.3
+ text: "Network Policies and CNI"
+ checks:
+ - id: 5.3.1
+ text: "Ensure that the CNI in use supports Network Policies (Manual)"
+ type: "manual"
+ remediation: |
+ None required.
+ scored: false
+
+ - id: 5.3.2
+ text: "Ensure that all Namespaces have Network Policies defined (Manual)"
+ type: "manual"
+ audit: |
+ #Run the following command and review the NetworkPolicy objects created in the cluster.
+ oc -n all get networkpolicy
+ remediation: |
+ Follow the documentation and create NetworkPolicy objects as you need them.
+ scored: false
+
+ - id: 5.4
+ text: "Secrets Management"
+ checks:
+ - id: 5.4.1
+ text: "Prefer using secrets as files over secrets as environment variables (Manual)"
+ type: "manual"
+ audit: |
+ #Run the following command to find references to objects which use environment variables defined from secrets.
+ oc get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind}
+ {.metadata.name} {"\n"}{end}' -A
+ remediation: |
+ If possible, rewrite application code to read secrets from mounted secret files, rather than
+ from environment variables.
+ scored: false
+
+ - id: 5.4.2
+ text: "Consider external secret storage (Manual)"
+ type: "manual"
+ remediation: |
+ Refer to the secrets management options offered by your cloud provider or a third-party
+ secrets management solution.
+ scored: false
+
+ - id: 5.5
+ text: "Extensible Admission Control"
+ checks:
+ - id: 5.5.1
+ text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
+ type: "manual"
+ remediation: |
+ Follow the OpenShift documentation: [Image configuration resources](https://docs.openshift.com/container-platform/4.5/openshift_images/image-configuration.html)
+ scored: false
+
+ - id: 5.7
+ text: "General Policies"
+ checks:
+ - id: 5.7.1
+ text: "Create administrative boundaries between resources using namespaces (Manual)"
+ type: "manual"
+ audit: |
+ #Run the following command and review the namespaces created in the cluster.
+ oc get namespaces
+ #Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.
+ remediation: |
+ Follow the documentation and create namespaces for objects in your deployment as you need
+ them.
+ scored: false
+
+ - id: 5.7.2
+ text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)"
+ type: "manual"
+ remediation: |
+ To enable the default seccomp profile, use the reserved value /runtime/default that will
+ make sure that the pod uses the default policy available on the host.
+ scored: false
+
+ - id: 5.7.3
+ text: "Apply Security Context to Your Pods and Containers (Manual)"
+ type: "manual"
+ remediation: |
+ Follow the Kubernetes documentation and apply security contexts to your pods. For a
+ suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
+ Containers.
+ scored: false
+
+ - id: 5.7.4
+ text: "The default namespace should not be used (Manual)"
+ type: "manual"
+ audit: |
+ #Run this command to list objects in default namespace
+ oc project default
+ oc get all
+ #The only entries there should be system managed resources such as the kubernetes and openshift service
+ remediation: |
+ Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+ resources and that all new resources are created in a specific namespace.
+ scored: false
diff --git a/cfg/rh-1.6.0/config.yaml b/cfg/rh-1.6.0/config.yaml
new file mode 100644
index 000000000..b7839455a
--- /dev/null
+++ b/cfg/rh-1.6.0/config.yaml
@@ -0,0 +1,2 @@
+---
+## Version-specific settings that override the values in cfg/config.yaml
diff --git a/cfg/rh-1.6.0/controlplane.yaml b/cfg/rh-1.6.0/controlplane.yaml
new file mode 100644
index 000000000..606194ddf
--- /dev/null
+++ b/cfg/rh-1.6.0/controlplane.yaml
@@ -0,0 +1,62 @@
+---
+controls:
+version: rh-1.0
+id: 3
+text: "Control Plane Configuration"
+type: "controlplane"
+groups:
+ - id: 3.1
+ text: "Authentication and Authorization"
+ checks:
+ - id: 3.1.1
+ text: "Client certificate authentication should not be used for users (Manual)"
+ audit: |
+ # To verify user authentication is enabled
+ oc describe authentication
+ # To verify that an identity provider is configured
+ oc get identity
+ # To verify that a custom cluster-admin user exists
+ oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | grep cluster-admin | grep User
+ # To verify that kubeadmin is removed, no results should be returned
+ oc get secrets kubeadmin -n kube-system
+ type: manual
+ remediation: |
+ Configure an identity provider for the OpenShift cluster.
+ Understanding identity provider configuration | Authentication | OpenShift
+ Container Platform 4.5. Once an identity provider has been defined,
+ you can use RBAC to define and apply permissions.
+ After you define an identity provider and create a new cluster-admin user, + remove the kubeadmin user to improve cluster security. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + audit: | + #To view kube apiserver log files + oc adm node-logs --role=master --path=kube-apiserver/ + #To view openshift apiserver log files + oc adm node-logs --role=master --path=openshift-apiserver/ + #To verify kube apiserver audit config + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?' + #To verify openshift apiserver audit config + oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?' + type: manual + remediation: | + No remediation required. + scored: false + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + audit: | + #To verify openshift apiserver audit config + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' + #To verify kube apiserver audit config + oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' + type: manual + remediation: | + In OpenShift 4.6 and higher, if appropriate for your needs, + modify the audit policy. + scored: false diff --git a/cfg/rh-1.6.0/etcd.yaml b/cfg/rh-1.6.0/etcd.yaml new file mode 100644 index 000000000..4398d9cc1 --- /dev/null +++ b/cfg/rh-1.6.0/etcd.yaml @@ -0,0 +1,183 @@ +--- +controls: +version: rh-1.0 +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration Files" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/' + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/' + fi + use_multiple_values: true + tests: + test_items: + - flag: "file" + compare: + op: regex + value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-serving\/etcd-serving-.*\.(?:crt|key)' + remediation: | + OpenShift does not use the etcd-certfile or etcd-keyfile flags. + Certificates for etcd are managed by the etcd cluster operator. + scored: false + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." 
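+ # (no etcd pod is scheduled on this node, so there is nothing to audit here)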
+ else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/' + fi + use_multiple_values: true + tests: + test_items: + - flag: "--client-cert-auth" + compare: + op: eq + value: true + remediation: | + This setting is managed by the cluster etcd operator. No remediation required." + scored: false + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Manual)" + audit: | + # Returns 0 if found, 1 if not found + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$? + fi + use_multiple_values: true + tests: + test_items: + - flag: "exit_code" + compare: + op: eq + value: "1" + remediation: | + This setting is managed by the cluster etcd operator. No remediation required. + scored: false + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/' + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/' + fi + use_multiple_values: true + tests: + test_items: + - flag: "file" + compare: + op: regex + value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-peer\/etcd-peer-.*\.(?:crt|key)' + remediation: | + None. This configuration is managed by the etcd operator. + scored: false + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/' + fi + use_multiple_values: true + tests: + test_items: + - flag: "--peer-client-cert-auth" + compare: + op: eq + value: true + remediation: | + This setting is managed by the cluster etcd operator. No remediation required. 
+ scored: false + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Manual)" + audit: | + # Returns 0 if found, 1 if not found + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$? + fi + use_multiple_values: true + tests: + test_items: + - flag: "exit_code" + compare: + op: eq + value: "1" + remediation: | + This setting is managed by the cluster etcd operator. No remediation required. + scored: false + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/' + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/' + fi + use_multiple_values: true + tests: + test_items: + - flag: "file" + compare: + op: regex + value: '\/etc\/kubernetes\/static-pod-certs\/configmaps\/etcd-(?:serving|peer-client)-ca\/ca-bundle\.(?:crt|key)' + remediation: | + None required. Certificates for etcd are managed by the OpenShift cluster etcd operator. + scored: false diff --git a/cfg/rh-1.6.0/master.yaml b/cfg/rh-1.6.0/master.yaml new file mode 100644 index 000000000..37b50f033 --- /dev/null +++ b/cfg/rh-1.6.0/master.yaml @@ -0,0 +1,1445 @@ +--- +controls: +version: rh-1.0 +id: 1 +text: "Master Node Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Master Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-apiserver namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-apiserver "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + No remediation required; file permissions are managed by the operator. 
+ scored: false + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-apiserver namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-apiserver "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. 
+ scored: false + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-scheduler namespace + POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Manual))" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-scheduler namespace + POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Manual))" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc rsh -n openshift-etcd "$POD_NAME" stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/manifests/etcd-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
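+ # (skip the ownership check, since this node does not run an etcd static pod)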
+ else + # Execute the stat command + oc rsh -n openshift-etcd "$POD_NAME" stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/manifests/etcd-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # For CNI multus + # Get the pod name in the openshift-multus namespace + POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/etc/cni/net.d/*.conf"; 2>/dev/null + oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/var/run/multus/cni/net.d/*.conf"; 2>/dev/null + fi + # For SDN pods + POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + fi + + # For OVS pods + POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # For CNI multus + # Get the pod name in the openshift-multus namespace + POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
+ else + # Execute the stat command + oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c '$i %n %U:%G' /host/etc/cni/net.d/*.conf" 2>/dev/null + oc exec -n openshift-multus $i -- /bin/bash -c "stat -c '$i %n %U:%G' /host/var/run/multus/cni/net.d/*.conf" 2>/dev/null + fi + # For SDN pods + POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null + fi + # For OVS pods in 4.5 + POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /var/lib/etcd/member + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /var/lib/etcd/member + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. 
+ scored: false + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Manual))" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubeconfig 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Manual)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubeconfig 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-scheduler namespace + POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-scheduler namespace + POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. 
+ scored: false + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Manual)" + audit: | + # Should return root:root for all files and directories + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
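+ # (without a kube-apiserver pod on this node, the PKI files cannot be inspected)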
+ else + # echo $i static-pod-certs + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; + # echo $i static-pod-resources + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.20 + text: "Ensure that the OpenShift PKI certificate file permissions are set to 644 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-apiserver namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.crt' -exec stat -c "$POD_NAME %n permissions=%a" {} \; + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.1.21 + text: "Ensure that the OpenShift PKI key file permissions are set to 600 (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-apiserver namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.key' -exec stat -c "$POD_NAME %n permissions=%a" {} \; + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that anonymous requests are authorized (Manual)" + audit: | + # To verify that userGroups include system:unauthenticated + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' 
+ # To verify that userGroups include system:unauthenticated + oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?.userGroups' + # To verify RBAC is enabled + oc get clusterrolebinding + oc get clusterrole + oc get rolebinding + oc get role + tests: + test_items: + - flag: "system:unauthenticated" + remediation: | + None required. The default configuration should not be modified. + scored: false + + - id: 1.2.2 + text: "Ensure that the --basic-auth-file argument is not set (Manual)" + audit: | + oc -n openshift-kube-apiserver get cm config -o yaml | grep --color "basic-auth" + oc -n openshift-apiserver get cm config -o yaml | grep --color "basic-auth" + # Add | awk '$3 != "AVAILABLE" { if ($3){print "available=true"}else{print "available=false"} }; to create AVAILABLE = true/false form + oc get clusteroperator authentication | awk '$3 != "AVAILABLE" { if ($3){print "available=true"}else{print "available=false"} }' + tests: + bin_op: and + test_items: + - flag: "basic-auth-file" + set: false + - flag: "available" + compare: + op: eq + value: true + remediation: | + None required. --basic-auth-file cannot be configured on OpenShift. + scored: false + + - id: 1.2.3 + text: "Ensure that the --token-auth-file parameter is not set (Manual)" + audit: | + # Verify that the token-auth-file flag is not present + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments' + #Verify that the authentication operator is running + oc get clusteroperator authentication | awk '$3 != "AVAILABLE" { if ($3){print "available=true"}else{print "available=false"} }' + tests: + bin_op: and + test_items: + - flag: "token-auth-file" + set: false + - flag: "available" + compare: + op: eq + value: true + remediation: | + None is required. + scored: false + + - id: 1.2.4 + text: "Use https for kubelet connections (Manual)" + audit: | + #for 4.5 + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo' + #for 4.6 + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + #for both 4.5 and 4.6 + oc -n openshift-apiserver describe secret serving-cert + tests: + bin_op: and + test_items: + - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.crt" + - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.key" + remediation: | + No remediation is required. + OpenShift platform components use X.509 certificates for authentication. + OpenShift manages the CAs and certificates for platform components. This is not configurable. 
+ scored: false + + - id: 1.2.5 + text: "Ensure that the kubelet uses certificates to authenticate (Manual)" + audit: | + #for 4.5 + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo' + #for 4.6 + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + #for both 4.5 and 4.6 + oc -n openshift-apiserver describe secret serving-cert + tests: + bin_op: and + test_items: + - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.crt" + - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.key" + remediation: | + No remediation is required. + OpenShift platform components use X.509 certificates for authentication. + OpenShift manages the CAs and certificates for platform components. + This is not configurable. + scored: false + + - id: 1.2.6 + text: "Verify that the kubelet certificate authority is set as appropriate (Manual)" + audit: | + # for 4.5 + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo' + # for 4.6 + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + tests: + test_items: + - flag: "/etc/kubernetes/static-pod-resources/configmaps/kubelet-serving-ca/ca-bundle.crt" + remediation: | + No remediation is required. + OpenShift platform components use X.509 certificates for authentication. + OpenShift manages the CAs and certificates for platform components. + This is not configurable. + scored: false + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Manual)" + audit: | + # To verify that the authorization-mode argument is not used + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + #Check that no overrides are configured + oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' + # To verify RBAC is configured: + oc get clusterrolebinding + oc get clusterrole + oc get rolebinding + oc get role + audit_config: | + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + tests: + bin_op: or + test_items: + - path: "{.authorization-mode}" + compare: + op: nothave + value: "AlwaysAllow" + - path: "{.authorization-mode}" + flag: "authorization-mode" + set: false + remediation: | + None. RBAC is always on and the OpenShift API server does not use the values assigned to the flag authorization-mode. 
+ scored: false + + - id: 1.2.8 + text: "Verify that the Node authorizer is enabled (Manual)" + audit: | + # For OCP 4.5 and earlier verify that authorization-mode is not used + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments' + # For OCP 4.5 and earlier verify that authorization-mode is not used + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host cat /etc/kubernetes/kubelet.conf | grep authorization-mode 2> /dev/null + oc debug node/$NODE_NAME -- chroot /host ps -aux | grep kubelet | grep authorization-mode 2> /dev/null + #Check that no overrides are configured + oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' + audit_config: | + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + tests: + bin_op: or + test_items: + - path: "{.authorization-mode}" + compare: + op: has + value: "Node" + - path: "{.authorization-mode}" + flag: "authorization-mode" + set: false + remediation: | + No remediation is required. + scored: false + + - id: 1.2.9 + text: "Verify that RBAC is enabled (Manual)" + audit: | + # For 4.5 To verify that the authorization-mode argument is not used + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + #Check that no overrides are configured + oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' + # To verify RBAC is used + oc get clusterrolebinding + oc get clusterrole + oc get rolebinding + oc get role + # For 4.6, verify that the authorization-mode argument includes RBAC + audit_config: | + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + tests: + bin_op: or + test_items: + - path: "{.authorization-mode}" + compare: + op: has + value: "RBAC" + - path: "{.authorization-mode}" + flag: "authorization-mode" + set: false + remediation: | + None. It is not possible to disable RBAC. 
+ scored: false + + - id: 1.2.10 + text: "Ensure that the APIPriorityAndFairness feature gate is enabled (Manual)" + audit: | + #Verify the APIPriorityAndFairness feature-gate + oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments' + #Verify the set of admission-plugins for OCP 4.6 and higher + oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' + #Check that no overrides are configured + oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' + tests: + bin_op: and + test_items: + - flag: "APIPriorityAndFairness=true" + - flag: "EventRateLimit" + set: false + remediation: | + No remediation is required + scored: false + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Manual)" + audit: | + #Verify the set of admission-plugins for OCP 4.6 and higher + oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' + #Check that no overrides are configured + oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' + tests: + test_items: + - flag: "AlwaysAdmit" + set: false + remediation: | + No remediation is required. The AlwaysAdmit admission controller cannot be enabled in OpenShift. + scored: false + + - id: 1.2.12 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: | + #Verify the set of admissi on-plugins for OCP 4.6 and higher + oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' + #Check that no overrides are configured + oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' + tests: + test_items: + - flag: "AlwaysPullImages" + set: false + remediation: | + None required. + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin SecurityContextDeny is not set (Manual)" + audit: | + #Verify the set of admission-plugins for OCP 4.6 and higher + oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' + output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') + [ "$output" == "null" ] && echo "ocp 4.5 has SecurityContextDeny and SecurityContextConstraint compiled" || echo $output + #Check that no overrides are configured + oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' + #Verify that SecurityContextConstraints are deployed + oc get scc + oc describe scc restricted + tests: + bin_op: and + test_items: + - flag: "SecurityContextConstraint" + set: true + - flag: "anyuid" + - flag: "hostaccess" + - flag: "hostmount-anyuid" + - flag: "hostnetwork" + - flag: "node-exporter" + - flag: "nonroot" + - flag: "privileged" + - flag: "restricted" + remediation: | + None required. The Security Context Constraint admission controller cannot be disabled in OpenShift 4. 
+ scored: false + + - id: 1.2.14 + text: "Ensure that the admission control plugin ServiceAccount is set (Manual)" + audit: | + #Verify the list of admission controllers for 4.6 and higher + oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' + output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') + [ "$output" == "null" ] && echo "ocp 4.5 has ServiceAccount compiled" || echo $output + #Check that no overrides are configured + oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' + #Verify that Service Accounts are present + oc get sa -A + tests: + test_items: + - flag: "ServiceAccount" + set: true + remediation: | + None required. OpenShift is configured to use service accounts by default. + scored: false + + - id: 1.2.15 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Manual)" + audit: | + #Verify the list of admission controllers for 4.6 and higher + oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' + output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') + [ "$output" == "null" ] && echo "ocp 4.5 has NamespaceLifecycle compiled" || echo $output + #Check that no overrides are configured + oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' + tests: + test_items: + - flag: "NamespaceLifecycle" + remediation: | + Ensure that the --disable-admission-plugins parameter does not include NamespaceLifecycle. + scored: false + + - id: 1.2.16 + text: "Ensure that the admission control plugin SecurityContextConstraint is set (Manual)" + audit: | + #Verify the set of admission-plugins for OCP 4.6 and higher + oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' + output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') + [ "$output" == "null" ] && echo "ocp 4.5 has SecurityContextConstraint compiled" || echo $output + #Check that no overrides are configured + oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' + #Verify that SecurityContextConstraints are deployed + oc get scc + oc describe scc restricted + tests: + bin_op: and + test_items: + - flag: "SecurityContextConstraint" + - flag: "anyuid" + - flag: "hostaccess" + - flag: "hostmount-anyuid" + - flag: "hostnetwork" + - flag: "node-exporter" + - flag: "nonroot" + - flag: "privileged" + - flag: "restricted" + remediation: | + None required. Security Context Constraints are enabled by default in OpenShift and cannot be disabled. 
+ scored: false + + - id: 1.2.17 + text: "Ensure that the admission control plugin NodeRestriction is set (Manual)" + audit: | + # For 4.5, review the control plane manifest https://github.com/openshift/origin/blob/release-4.5/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane/manifests.go#L132 + #Verify the set of admission-plugins for OCP 4.6 and higher + oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' + output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') + [ "$output" == "null" ] && echo "ocp 4.5 has NodeRestriction compiled" || echo $output + #Check that no overrides are configured + oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' + tests: + test_items: + - flag: "NodeRestriction" + remediation: | + The NodeRestriction plugin cannot be disabled. + scored: false + + - id: 1.2.18 + text: "Ensure that the --insecure-bind-address argument is not set (Manual)" + audit: | + # InsecureBindAddress=true should not be in the results + oc get kubeapiservers.operator.openshift.io cluster -o jsonpath='{range .spec.observedConfig.apiServerArguments.feature-gates[*]}{@}{"\n"}{end}' + # Result should be only 6443 + oc -n openshift-kube-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}' + # Result should be only 8443 + oc -n openshift-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}' + tests: + bin_op: and + test_items: + - flag: "insecure-bind-address" + set: false + - flag: 6443 + - flag: 8443 + remediation: | + None required. + scored: false + + - id: 1.2.19 + text: "Ensure that the --insecure-port argument is set to 0 (Manual)" + audit: | + # Should return 6443 + oc -n openshift-kube-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}' + # For OCP 4.6 and above + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments["insecure-port"]' + output=$(oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments["insecure-port"]') + [ "$output" == "null" ] && echo "ocp 4.5 has insecure-port set to \"0\" compiled" || echo $output + tests: + bin_op: and + test_items: + - flag: "\"0\"" + - flag: "6443" + remediation: | + None required. The configuration is managed by the API server operator. + scored: false + + - id: 1.2.20 + text: "Ensure that the --secure-port argument is not set to 0 (Manual)" + audit: | + oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig' + # Should return only 6443 + echo ports=`oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[*].spec.containers[?(@.name=="kube-apiserver")].ports[*].containerPort}'` + tests: + bin_op: and + test_items: + - flag: '"bindAddress": "0.0.0.0:6443"' + - flag: "ports" + compare: + op: regex + value: '\s*(?:6443\s*){1,}$' + remediation: | + None required. 
+ scored: false + + - id: 1.2.21 + text: "Ensure that the healthz endpoint is protected by RBAC (Manual)" + type: manual + audit: | + # Verify endpoints + oc -n openshift-kube-apiserver describe endpoints + # Check config for ports, livenessProbe, readinessProbe, healthz + oc -n openshift-kube-apiserver get cm kube-apiserver-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers' + # Test to validate RBAC enabled on the apiserver endpoint; check with non-admin role + oc project openshift-kube-apiserver POD=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}') PORT=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].spec.containers[0].ports[0].hostPort}') + # Following should return 403 Forbidden + oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -k + # Create a service account to test RBAC + oc create -n openshift-kube-apiserver sa permission-test-sa + # Should return 403 Forbidden + SA_TOKEN=$(oc sa -n openshift-kube-apiserver get-token permission-test-sa) + oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k + # Cleanup + oc delete -n openshift-kube-apiserver sa permission-test-sa + # As cluster admin, should succeed + CLUSTER_ADMIN_TOKEN=$(oc whoami -t) + oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k + remediation: | + None required as profiling data is protected by RBAC. + scored: false + + - id: 1.2.22 + text: "Ensure that the --audit-log-path argument is set (Manual)" + audit: | + # Should return “/var/log/kube-apiserver/audit.log" + output=$(oc get configmap config -n openshift-kube-apiserver -o jsonpath="{['.data.config\.yaml']}" | jq '.auditConfig.auditFilePath') + [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true + output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-path"][]?') + [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true + POD=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}') + oc rsh -n openshift-kube-apiserver -c kube-apiserver $POD ls /var/log/kube-apiserver/audit.log 2>/dev/null + # Should return 0 + echo exit_code=$? + # Should return "/var/log/openshift-apiserver/audit.log" + output=$(oc get configmap config -n openshift-apiserver -o jsonpath="{['.data.config\.yaml']}" | jq '.auditConfig.auditFilePath') + [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true + output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-path"][]?') + [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true + POD=$(oc get pods -n openshift-apiserver -l apiserver=true -o jsonpath='{.items[0].metadata.name}') + oc rsh -n openshift-apiserver $POD ls /var/log/openshift-apiserver/audit.log 2>/dev/null + # Should return 0 + echo exit_code=$? + use_multiple_values: true + tests: + bin_op: or + test_items: + - flag: "/var/log/kube-apiserver/audit.log" + - flag: "/var/log/openshift-apiserver/audit.log" + - flag: "exit_code=0" + - flag: "null" + remediation: | + None required. This is managed by the cluster apiserver operator. 
+ scored: false + + - id: 1.2.23 + text: "Ensure that the audit logs are forwarded off the cluster for retention (Manual)" + type: "manual" + remediation: | + Follow the documentation for log forwarding. Forwarding logs to third party systems + https://docs.openshift.com/container-platform/4.5/logging/cluster-logging-external.html + scored: false + + - id: 1.2.24 + text: "Ensure that the maximumRetainedFiles argument is set to 10 or as appropriate (Manual)" + audit: | + #NOTICE + output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumRetainedFiles) + [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumRetainedFiles=$output" || true + output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumRetainedFiles) + [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumRetainedFiles=$output" || true + output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxbackup"][]?') + [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxbackup=$output" || true + output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxbackup"][]?') + [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxbackup=$output" || true + use_multiple_values: true + tests: + bin_op: or + test_items: + - flag: "maximumRetainedFiles" + compare: + op: gte + value: 10 + - flag: "audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Set the maximumRetainedFiles parameter to 10 or as an appropriate number of files. maximumRetainedFiles: 10 + scored: false + + - id: 1.2.25 + text: "Ensure that the maximumFileSizeMegabytes argument is set to 100 or as appropriate (Manual)" + audit: | + #NOTICE + output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumFileSizeMegabytes) + [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumFileSizeMegabytes=$output" || true + output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumFileSizeMegabytes) + [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumFileSizeMegabytes=$output" || true + output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxsize"][]?') + [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxsize=$output" || true + output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxsize"][]?') + [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxsize=$output" || true + use_multiple_values: true + tests: + bin_op: or + test_items: + - flag: "maximumFileSizeMegabytes" + compare: + op: gte + value: 100 + - flag: "audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Set the audit-log-maxsize parameter to 100 or as an appropriate number. 
+ maximumFileSizeMegabytes: 100 + scored: false + + - id: 1.2.26 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: | + echo requestTimeoutSeconds=`oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.requestTimeoutSeconds` + tests: + test_items: + - flag: "requestTimeoutSeconds" + remediation: | + TBD + scored: false + + - id: 1.2.27 + text: "Ensure that the --service-account-lookup argument is set to true (Manual)" + audit: | + # For OCP 4.5 + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' | grep service-account-lookup + # For OCP 4.6 and above + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["service-account-lookup"]' + output=$(oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["service-account-lookup"][0]') + [ "$output" == "null" ] && echo "ocp 4.5 has service-account-lookup=true compiled" || echo service-account-lookup=$output + tests: + test_items: + - flag: "service-account-lookup=true" + remediation: | + TBD + scored: false + + - id: 1.2.28 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Manual)" + audit: | + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .serviceAccountPublicKeyFiles[] + tests: + bin_op: and + test_items: + - flag: "/etc/kubernetes/static-pod-resources/configmaps/sa-token-signing-certs" + - flag: "/etc/kubernetes/static-pod-resources/configmaps/bound-sa-token-signing-certs" + remediation: | + The OpenShift API server does not use the service-account-key-file argument. + The ServiceAccount token authenticator is configured with serviceAccountConfig.publicKeyFiles. + OpenShift does not reuse the apiserver TLS key. This is not configurable. + scored: false + + - id: 1.2.29 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Manual)" + audit: | + # etcd Certificate File + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.certFile + # etcd Key File + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.keyFile + # NOTICE 4.6 extention + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-certfile"]' + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-keyfile"]' + tests: + bin_op: and + test_items: + - flag: "/etc/kubernetes/static-pod-resources/secrets/etcd-client/tls.crt" + - flag: "/etc/kubernetes/static-pod-resources/secrets/etcd-client/tls.key" + remediation: | + OpenShift automatically manages TLS and client certificate authentication for etcd. + This is not configurable. 
+        scored: false
+
+      - id: 1.2.30
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
+        audit: |
+          # TLS Cert File - openshift-kube-apiserver
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.certFile
+          # TLS Key File
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.servingInfo.keyFile'
+          # NOTICE 4.6 extension
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["tls-cert-file"]'
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["tls-private-key-file"]'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.crt"
+            - flag: "/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.key"
+        remediation: |
+          OpenShift automatically manages TLS authentication for the API server communication with the node/kubelet.
+          This is not configurable. You may optionally set a custom default certificate to be used by the API server
+          when serving content in order to enable clients to access the API server at a different host name or without
+          the need to distribute the cluster-managed certificate authority (CA) certificates to the clients.
+          Follow the directions in the OpenShift documentation User-provided certificates for the API server
+        scored: false
+
+      - id: 1.2.31
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Manual)"
+        audit: |
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.clientCA
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["client-ca-file"]'
+        tests:
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt"
+        remediation: |
+          OpenShift automatically manages TLS authentication for the API server communication with the node/kubelet.
+          This is not configurable. You may optionally set a custom default certificate to be used by the API
+          server when serving content in order to enable clients to access the API server at a different host name
+          or without the need to distribute the cluster-managed certificate authority (CA) certificates to the clients.
+
+          User-provided certificates must be provided in a kubernetes.io/tls type Secret in the openshift-config namespace.
+          Update the API server cluster configuration,
+          the apiserver/cluster resource, to enable the use of the user-provided certificate.
+        scored: false
+
+      - id: 1.2.32
+        text: "Ensure that the --etcd-cafile argument is set as appropriate (Manual)"
+        audit: |
+          #etcd CA File
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.ca
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-cafile"]'
+        tests:
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-resources/configmaps/etcd-serving-ca/ca-bundle.crt"
+        remediation: |
+          None required. OpenShift generates the etcd-cafile and sets the arguments appropriately in the API server. Communication with etcd is secured by the etcd serving CA.
+        scored: false
+
+      - id: 1.2.33
+        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
+        audit: |
+          # encrypt the etcd datastore
+          oc get openshiftapiserver -o=jsonpath='{range.items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}'
+        tests:
+          test_items:
+            - flag: "EncryptionCompleted"
+        remediation: |
+          Follow the OpenShift documentation for Encrypting etcd data | Authentication | OpenShift Container Platform 4.5
+          https://docs.openshift.com/container-platform/4.5/security/encrypting-etcd.html
+        scored: false
+
+      - id: 1.2.34
+        text: "Ensure that encryption providers are appropriately configured (Manual)"
+        audit: |
+          # encrypt the etcd datastore
+          oc get openshiftapiserver -o=jsonpath='{range.items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}'
+        tests:
+          test_items:
+            - flag: "EncryptionCompleted"
+        remediation: |
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
+          In this file, choose aescbc, kms or secretbox as the encryption provider.
+        scored: false
+
+      - id: 1.2.35
+        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
+        type: manual
+        audit: |
+          # verify cipher suites
+          oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq .spec.observedConfig.servingInfo
+          oc get openshiftapiservers.operator.openshift.io cluster -o json | jq .spec.observedConfig.servingInfo
+          oc describe --namespace=openshift-ingress-operator ingresscontroller/default
+        remediation: |
+          Verify that the tlsSecurityProfile is set to the value you chose.
+          Note: The HAProxy Ingress controller image does not support TLS 1.3
+          and because the Modern profile requires TLS 1.3, it is not supported.
+          The Ingress Operator converts the Modern profile to Intermediate.
+          The Ingress Operator also converts the TLS 1.0 of an Old or Custom profile to 1.1,
+          and TLS 1.3 of a Custom profile to 1.2.
+ scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that garbage collection is configured as appropriate (Manual)" + type: manual + remediation: | + To configure, follow the directions in Configuring garbage collection for containers and images + https://docs.openshift.com/container-platform/4.5/nodes/nodes/nodes-nodes-garbage-collection.html#nodes-nodes-garbage-collection-configuring_nodes-nodes-configuring + scored: false + + - id: 1.3.2 + text: "Ensure that controller manager healthz endpoints are protected by RBAC (Manual)" + type: manual + audit: | + # Verify configuration for ports, livenessProbe, readinessProbe, healthz + oc -n openshift-kube-controller-manager get cm kube-controller-manager-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers' + # Verify endpoints + oc -n openshift-kube-controller-manager describe endpoints + # Test to validate RBAC enabled on the controller endpoint; check with non-admin role + oc project openshift-kube-controller-manage + POD=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].metadata.name}') + PORT=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].spec.containers[0].ports[0].hostPort}') + # Following should return 403 Forbidden + oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -k + # Create a service account to test RBAC + oc create -n openshift-kube-controller-manager sa permission-test-sa + # Should return 403 Forbidden + SA_TOKEN=$(oc sa -n openshift-kube-controller-manager get-token permission-test-sa) + oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k + # Cleanup + oc delete -n openshift-kube-controller-manager sa permission-test-sa + # As cluster admin, should succeed + CLUSTER_ADMIN_TOKEN=$(oc whoami -t) + oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k + remediation: | + None required; profiling is protected by RBAC. + scored: false + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Manual)" + audit: | + echo use-service-account-credentials=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["use-service-account-credentials"][]'` + tests: + test_items: + - flag: "use-service-account-credentials" + compare: + op: eq + value: true + remediation: | + The OpenShift Controller Manager operator manages and updates the OpenShift Controller Manager. + The Kubernetes Controller Manager operator manages and updates the Kubernetes Controller Manager deployed on top of OpenShift. + This operator is configured via KubeControllerManager custom resource. + scored: false + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Manual)" + audit: | + oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["service-account-private-key-file"][]' + tests: + test_items: + - flag: "/etc/kubernetes/static-pod-resources/secrets/service-account-private-key/service-account.key" + remediation: | + None required. + OpenShift manages the service account credentials for the scheduler automatically. 
+ scored: false + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Manual)" + audit: | + oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["root-ca-file"][]' + tests: + test_items: + - flag: "/etc/kubernetes/static-pod-resources/configmaps/serviceaccount-ca/ca-bundle.crt" + remediation: | + None required. + Certificates for OpenShift platform components are automatically created and rotated by the OpenShift Container Platform. + scored: false + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: | + oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["feature-gates"][]' + tests: + test_items: + - flag: "RotateKubeletServerCertificate" + compare: + op: eq + value: "true" + remediation: | + None required. + Certificates for OpenShift platform components are automatically created and rotated by the OpenShift Container Platform. + scored: false + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Manual)" + audit: | + echo port=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq '.extendedArguments["port"][]'` + echo secure-port=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq '.extendedArguments["secure-port"][]'` + #Following should fail with a http code 403 + POD=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].metadata.name}') + oc rsh -n openshift-kube-controller-manager -c kube-controller-manager $POD curl https://localhost:10257/metrics -k + tests: + bin_op: and + test_items: + - flag: "secure-port" + compare: + op: eq + value: "\"10257\"" + - flag: "port" + compare: + op: eq + value: "\"0\"" + - flag: "\"code\": 403" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the master node and ensure the correct value for the --bind-address parameter + scored: false + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the healthz endpoints for the scheduler are protected by RBAC (Manual)" + type: manual + audit: | + # check configuration for ports, livenessProbe, readinessProbe, healthz + oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers' + # Test to verify endpoints + oc -n openshift-kube-scheduler describe endpoints + # Test to validate RBAC enabled on the scheduler endpoint; check with non-admin role + oc project openshift-kube-scheduler + POD=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}') + PORT=$(oc get pod $POD -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}') + # Should return 403 Forbidden + oc rsh ${POD} curl http://localhost:${PORT}/metrics -k + # Create a service account to test RBAC + oc create sa permission-test-sa + # Should return 403 Forbidden + SA_TOKEN=$(oc sa get-token permission-test-sa) + oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k + # Cleanup + oc delete sa permission-test-sa + # As cluster admin, should succeed + CLUSTER_ADMIN_TOKEN=$(oc whoami -t) + oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k + remediation: | + A fix to this issue: 
https://bugzilla.redhat.com/show_bug.cgi?id=1889488 None required. + Profiling is protected by RBAC and cannot be disabled. + scored: false + + - id: 1.4.2 + text: "Verify that the scheduler API service is protected by authentication and authorization (Manual)" + type: manual + audit: | + # To verify endpoints + oc -n openshift-kube-scheduler describe endpoints + # To verify that bind-adress is not used in the configuration and that port is set to 0 + oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers' + # To test for RBAC: + oc project openshift-kube-scheduler + POD=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}') + POD_IP=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].status.podIP}') + PORT=$(oc get pod $POD -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}') + # Should return a 403 + oc rsh ${POD} curl http://${POD_IP}:${PORT}/metrics + # Create a service account to test RBAC + oc create sa permission-test-sa + # Should return 403 Forbidden + SA_TOKEN=$(oc sa get-token permission-test-sa) + oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k + # Cleanup + oc delete sa permission-test-sa + # As cluster admin, should succeed + CLUSTER_ADMIN_TOKEN=$(oc whoami -t) + oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k + remediation: | + By default, the --bind-address argument is not present, + the readinessProbe and livenessProbe arguments are set to 10251 and the port argument is set to 0. + Check the status of this issue: https://bugzilla.redhat.com/show_bug.cgi?id=1889488 + scored: false diff --git a/cfg/rh-1.6.0/node.yaml b/cfg/rh-1.6.0/node.yaml new file mode 100644 index 000000000..fb982d6f3 --- /dev/null +++ b/cfg/rh-1.6.0/node.yaml @@ -0,0 +1,429 @@ +--- +controls: +version: rh-1.0 +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/systemd/system/kubelet.service 2> /dev/null + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + By default, the kubelet service file has permissions of 644. + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + audit: | + # Should return root:root for each node + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/systemd/system/kubelet.service 2> /dev/null + tests: + test_items: + - flag: root:root + remediation: | + By default, the kubelet service file has ownership of root:root. 
+        scored: true
+
+      - id: 4.1.3
+        text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # Get the pod name in the openshift-sdn namespace
+          POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$i %n permissions=%a" /config/kube-proxy-config.yaml 2>/dev/null
+          fi
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              set: true
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          None needed.
+        scored: false
+
+      - id: 4.1.4
+        text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # Get the pod name in the openshift-sdn namespace
+          POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$i %n %U:%G" /config/kube-proxy-config.yaml 2>/dev/null
+          fi
+        use_multiple_values: true
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+        remediation: |
+          None required. The configuration is managed by OpenShift operators.
+        scored: false
+
+      - id: 4.1.5
+        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          # Check permissions
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet.conf 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          None required.
+        scored: false
+
+      - id: 4.1.6
+        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Manual)"
+        audit: |
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet.conf 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          None required.
+        scored: false
+
+      - id: 4.1.7
+        text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated)"
+        audit: |
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet-ca.crt 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          None required.
+ scored: true + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet-ca.crt 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: root:root + remediation: | + None required. + scored: true + + - id: 4.1.9 + text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/lib/kubelet/kubeconfig 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + None required. + scored: true + + - id: 4.1.10 + text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /var/lib/kubelet/kubeconfig 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: root:root + remediation: | + None required. + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host grep -B4 -A1 anonymous /etc/kubernetes/kubelet.conf 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: "enabled: true" + set: false + remediation: | + Follow the instructions in the documentation to create a Kubelet config CRD + and set the anonymous-auth is set to false. + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Manual)" + type: manual + # Takes a lot of time for connection to fail and + audit: | + POD=$(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}') + TOKEN=$(oc whoami -t) + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc exec -n openshift-kube-apiserver $POD -- curl -sS https://172.25.0.1/api/v1/nodes/$NODE_NAME/proxy/configz -k -H "Authorization:Bearer $TOKEN" | jq -r '.kubeletconfig.authorization.mode' 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: "Connection timed out" + remediation: | + None required. Unauthenticated/Unauthorized users have no access to OpenShift nodes. + scored: false + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host grep clientCAFile /etc/kubernetes/kubelet.conf 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: '"clientCAFile": "/etc/kubernetes/kubelet-ca.crt"' + remediation: | + None required. Changing the clientCAFile value is unsupported. 
+ scored: true + + - id: 4.2.4 + text: "Verify that the read only port is not used or is set to 0 (Automated)" + audit: | + echo `oc -n openshift-kube-apiserver get cm kube-apiserver-pod -o yaml | grep --color read-only-port` 2> /dev/null + echo `oc -n openshift-kube-apiserver get cm config -o yaml | grep --color "read-only-port"` 2> /dev/null + tests: + bin_op: or + test_items: + - flag: "read-only-port" + compare: + op: has + value: "[\"0\"]" + - flag: "read-only-port" + set: false + remediation: | + In earlier versions of OpenShift 4, the read-only-port argument is not used. + Follow the instructions in the documentation to create a Kubelet config CRD + and set the --read-only-port is set to 0. + scored: true + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" + audit: | + # Should return 1 for node + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/${NODE_NAME} -- chroot /host ps -ef | grep kubelet | grep streaming-connection-idle-timeout 2> /dev/null + echo exit_code=$? + # Should return 1 for node + oc debug node/${NODE_NAME} -- chroot /host grep streamingConnectionIdleTimeout /etc/kubernetes/kubelet.conf 2> /dev/null + echo exit_code=$? + use_multiple_values: true + tests: + bin_op: or + test_items: + - flag: --streaming-connection-idle-timeout + compare: + op: noteq + value: 0 + - flag: streamingConnectionIdleTimeout + compare: + op: noteq + value: 0s + - flag: "exit_code" + compare: + op: eq + value: 1 + remediation: | + Follow the instructions in the documentation to create a Kubelet config CRD and set + the --streaming-connection-idle-timeout to the desired value. Do not set the value to 0. + scored: true + + - id: 4.2.6 + text: "Ensure that the --protect-kernel-defaults argument is not set (Manual)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host more /etc/kubernetes/kubelet.conf 2> /dev/null + tests: + test_items: + - flag: protectKernelDefaults + set: false + remediation: | + None required. The OpenShift 4 kubelet modifies the system tunable; + using the protect-kernel-defaults flag will cause the kubelet to fail on start if the tunables + don't match the kubelet configuration and the OpenShift node will fail to start. + scored: false + + - id: 4.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Manual)" + audit: | + /bin/bash + flag=make-iptables-util-chains + opt=makeIPTablesUtilChains + # look at each machineconfigpool + while read -r pool nodeconfig; do + # true by default + value='true' + # first look for the flag + oc get machineconfig $nodeconfig -o json | jq -r '.spec.config.systemd[][] | select(.name=="kubelet.service") | .contents' | sed -n "/^ExecStart=/,/^\$/ { /^\\s*--$flag=false/ q 100 }" + # if the above command exited with 100, the flag was false + [ $? == 100 ] && value='false' + # now look in the yaml KubeletConfig + yamlconfig=$(oc get machineconfig $nodeconfig -o json | jq -r '.spec.config.storage.files[] | select(.path=="/etc/kubernetes/kubelet.conf") | .contents.source ' | sed 's/^data:,//' | while read; do echo -e ${REPLY//%/\\x}; done) + echo "$yamlconfig" | sed -n "/^$opt:\\s*false\\s*$/ q 100" + [ $? 
== 100 ] && value='false' + echo "Pool $pool has $flag ($opt) set to $value" + done < <(oc get machineconfigpools -o json | jq -r '.items[] | select(.status.machineCount>0) | .metadata.name + " " + .spec.configuration.name') + use_multiple_values: true + tests: + test_items: + - flag: "set to true" + remediation: | + None required. The --make-iptables-util-chains argument is set to true by default. + scored: false + + - id: 4.2.8 + text: "Ensure that the --hostname-override argument is not set (Manual)" + audit: | + echo `oc get machineconfig 01-worker-kubelet -o yaml | grep hostname-override` + echo `oc get machineconfig 01-master-kubelet -o yaml | grep hostname-override` + tests: + test_items: + - flag: hostname-override + set: false + remediation: | + By default, --hostname-override argument is not set. + scored: false + + - id: 4.2.9 + text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (Manual)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/${NODE_NAME} -- chroot /host cat /etc/kubernetes/kubelet.conf; + oc get machineconfig 01-worker-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050 + oc get machineconfig 01-master-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050 + type: "manual" + remediation: | + Follow the documentation to edit kubelet parameters + https://docs.openshift.com/container-platform/4.5/scalability_and_performance/recommended-host-practices.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters + KubeAPIQPS: + scored: false + + - id: 4.2.10 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: | + oc get configmap config -n openshift-kube-apiserver -o json \ + | jq -r '.data["config.yaml"]' \ + | jq -r '.apiServerArguments | + .["kubelet-client-certificate"][0], + .["kubelet-client-key"][0] + ' + tests: + bin_op: and + test_items: + - flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.crt" + - flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.key" + remediation: | + OpenShift automatically manages TLS authentication for the API server communication with the node/kublet. + This is not configurable. + scored: true + + - id: 4.2.11 + text: "Ensure that the --rotate-certificates argument is not set to false (Manual)" + audit: | + #Verify the rotateKubeletClientCertificate feature gate is not set to false + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/${NODE_NAME} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep RotateKubeletClientCertificate 2> /dev/null + # Verify the rotateCertificates argument is set to true + oc debug node/${NODE_NAME} -- chroot host grep rotate /etc/kubernetes/kubelet.conf 2> /dev/null + use_multiple_values: true + tests: + bin_op: or + test_items: + - flag: rotateCertificates + compare: + op: eq + value: true + - flag: rotateKubeletClientCertificates + compare: + op: noteq + value: false + - flag: rotateKubeletClientCertificates + set: false + remediation: | + None required. 
+        scored: false
+
+      - id: 4.2.12
+        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
+        audit: |
+          #Verify the rotateKubeletServerCertificate feature gate is on
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/${NODE_NAME} -- chroot /host grep RotateKubeletServerCertificate /etc/kubernetes/kubelet.conf 2> /dev/null
+          # Verify the rotateCertificates argument is set to true
+          oc debug node/${NODE_NAME} -- chroot host grep rotate /etc/kubernetes/kubelet.conf 2> /dev/null
+        use_multiple_values: true
+        tests:
+          bin_op: or
+          test_items:
+            - flag: rotateCertificates
+              compare:
+                op: eq
+                value: true
+            - flag: RotateKubeletServerCertificate
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          By default, kubelet server certificate rotation is disabled.
+        scored: false
+
+      - id: 4.2.13
+        text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
+        audit: |
+          # needs verification
+          # verify cipher suites
+          oc describe --namespace=openshift-ingress-operator ingresscontroller/default
+          oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.observedConfig.servingInfo
+          oc get openshiftapiservers.operator.openshift.io cluster -o json |jq .spec.observedConfig.servingInfo
+          oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo
+          #check value for tlsSecurityProfile; null is returned if default is used
+          oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.tlsSecurityProfile
+        type: manual
+        remediation: |
+          Follow the directions above and in the OpenShift documentation to configure the tlsSecurityProfile.
+          Configuring Ingress
+        scored: false
diff --git a/cfg/rh-1.6.0/policies.yaml b/cfg/rh-1.6.0/policies.yaml
new file mode 100644
index 000000000..e90cd877f
--- /dev/null
+++ b/cfg/rh-1.6.0/policies.yaml
@@ -0,0 +1,287 @@
+---
+controls:
+version: rh-1.0
+id: 5
+text: "Kubernetes Policies"
+type: "policies"
+groups:
+  - id: 5.1
+    text: "RBAC and Service Accounts"
+    checks:
+      - id: 5.1.1
+        text: "Ensure that the cluster-admin role is only used where required (Manual)"
+        type: "manual"
+        audit: |
+          #To get a list of users and service accounts with the cluster-admin role
+          oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind |
+          grep cluster-admin
+          #To verify that kubeadmin is removed, no results should be returned
+          oc get secrets kubeadmin -n kube-system
+        remediation: |
+          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+          if they need this role or if they could use a role with fewer privileges.
+          Where possible, first bind users to a lower privileged role and then remove the
+          clusterrolebinding to the cluster-admin role :
+          kubectl delete clusterrolebinding [name]
+        scored: false
+
+      - id: 5.1.2
+        text: "Minimize access to secrets (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove get, list and watch access to secret objects in the cluster.
+ scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + audit: | + #needs verification + oc get roles --all-namespaces -o yaml + for i in $(oc get roles -A -o jsonpath='{.items[*].metadata.name}'); do oc + describe clusterrole ${i}; done + #Retrieve the cluster roles defined in the cluster and review for wildcards + oc get clusterroles -o yaml + for i in $(oc get clusterroles -o jsonpath='{.items[*].metadata.name}'); do + oc describe clusterrole ${i}; done + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + None required. + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.2 + text: "Pod Security Policies" + checks: + - id: 5.2.1 + text: "Minimize the admission of privileged containers (Manual)" + audit: | + # needs verification + oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegedContainer:.allowPrivilegedContainer + tests: + test_items: + - flag: "false" + remediation: | + Create a SCC as described in the OpenShift documentation, ensuring that the Allow + Privileged field is set to false. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" + audit: | + oc get scc -o=custom-columns=NAME:.metadata.name,allowHostPID:.allowHostPID + tests: + test_items: + - flag: "false" + remediation: | + Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host + PID field is set to false. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" + audit: | + oc get scc -o=custom-columns=NAME:.metadata.name,allowHostIPC:.allowHostIPC + tests: + test_items: + - flag: "false" + remediation: | + Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host + IPC field is set to false. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" + audit: | + oc get scc -o=custom-columns=NAME:.metadata.name,allowHostNetwork:.allowHostNetwork + tests: + test_items: + - flag: "false" + remediation: | + Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host + Network field is omitted or set to false. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" + audit: | + oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegeEscalation:.allowPrivilegeEscalation + tests: + test_items: + - flag: "false" + remediation: | + Create a SCC as described in the OpenShift documentation, ensuring that the Allow + Privilege Escalation field is omitted or set to false. 
+ scored: false + + - id: 5.2.6 + text: "Minimize the admission of root containers (Manual)" + audit: | + # needs verification # | awk 'NR>1 {gsub("map\\[type:", "", $2); gsub("\\]$", "", $2); print $1 ":" $2}' + oc get scc -o=custom-columns=NAME:.metadata.name,runAsUser:.runAsUser.type + #For SCCs with MustRunAs verify that the range of UIDs does not include 0 + oc get scc -o=custom-columns=NAME:.metadata.name,uidRangeMin:.runAsUser.uidRangeMin,uidRangeMax:.runAsUser.uidRangeMax + tests: + bin_op: or + test_items: + - flag: "MustRunAsNonRoot" + - flag: "MustRunAs" + compare: + op: nothave + value: 0 + remediation: | + None required. By default, OpenShift includes the non-root SCC with the the Run As User + Strategy is set to either MustRunAsNonRoot. If additional SCCs are appropriate, follow the + OpenShift documentation to create custom SCCs. + scored: false + + - id: 5.2.7 + text: "Minimize the admission of containers with the NET_RAW capability (Manual)" + audit: | + # needs verification + oc get scc -o=custom-columns=NAME:.metadata.name,requiredDropCapabilities:.requiredDropCapabilities + tests: + bin_op: or + test_items: + - flag: "ALL" + - flag: "NET_RAW" + remediation: | + Create a SCC as described in the OpenShift documentation, ensuring that the Required + Drop Capabilities is set to include either NET_RAW or ALL. + scored: false + + - id: 5.2.8 + text: "Minimize the admission of containers with added capabilities (Manual)" + type: "manual" + remediation: | + Ensure that Allowed Capabilities is set to an empty array for every SCC in the cluster + except for the privileged SCC. + scored: false + + - id: 5.2.9 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilites in applications running on your cluster. Where a namespace + contains applicaions which do not require any Linux capabities to operate consider + adding a SCC which forbids the admission of containers which do not drop all capabilities. + scored: false + + - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports Network Policies (Manual)" + type: "manual" + remediation: | + None required. + scored: false + + - id: 5.3.2 + text: "Ensure that all Namespaces have Network Policies defined (Manual)" + type: "manual" + audit: | + #Run the following command and review the NetworkPolicy objects created in the cluster. + oc -n all get networkpolicy + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using secrets as files over secrets as environment variables (Manual)" + type: "manual" + audit: | + #Run the following command to find references to objects which use environment variables defined from secrets. + oc get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} + {.metadata.name} {"\n"}{end}' -A + remediation: | + If possible, rewrite application code to read secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the secrets management options offered by your cloud provider or a third-party + secrets management solution. 
+
+  - id: 5.5
+    text: "Extensible Admission Control"
+    checks:
+      - id: 5.5.1
+        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the OpenShift documentation: [Image configuration resources](https://docs.openshift.com/container-platform/4.5/openshift_images/image-configuration.html)
+        scored: false
+
+  - id: 5.7
+    text: "General Policies"
+    checks:
+      - id: 5.7.1
+        text: "Create administrative boundaries between resources using namespaces (Manual)"
+        type: "manual"
+        audit: |
+          #Run the following command and review the namespaces created in the cluster.
+          oc get namespaces
+          #Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.
+        remediation: |
+          Follow the documentation and create namespaces for objects in your deployment as you need
+          them.
+        scored: false
+
+      - id: 5.7.2
+        text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)"
+        type: "manual"
+        remediation: |
+          To enable the default seccomp profile, use the reserved value /runtime/default that will
+          make sure that the pod uses the default policy available on the host.
+        scored: false
+
+      - id: 5.7.3
+        text: "Apply Security Context to Your Pods and Containers (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and apply security contexts to your pods. For a
+          suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
+          Containers.
+        scored: false
+
+      - id: 5.7.4
+        text: "The default namespace should not be used (Manual)"
+        type: "manual"
+        audit: |
+          #Run this command to list objects in default namespace
+          oc project default
+          oc get all
+          #The only entries there should be system managed resources such as the kubernetes and openshift service
+        remediation: |
+          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+          resources and that all new resources are created in a specific namespace.
+        scored: false

From 2668e266874ae65d42c3e597471840228b06b30a Mon Sep 17 00:00:00 2001
From: deboshree-b 
Date: Mon, 26 Aug 2024 07:30:24 +0530
Subject: [PATCH 08/13] Revert "NDEV-20011 : initial commit for other benchmarks"

This reverts commit 96a8081e8d2e3f7e73aea1a5223f1530c7f49fe1.
--- cfg/aks-1.5.0/config.yaml | 2 - cfg/aks-1.5.0/controlplane.yaml | 31 - cfg/aks-1.5.0/managedservices.yaml | 144 --- cfg/aks-1.5.0/master.yaml | 6 - cfg/aks-1.5.0/node.yaml | 321 ------ cfg/aks-1.5.0/policies.yaml | 214 ---- cfg/eks-1.5.0/config.yaml | 9 - cfg/eks-1.5.0/controlplane.yaml | 14 - cfg/eks-1.5.0/managedservices.yaml | 154 --- cfg/eks-1.5.0/master.yaml | 6 - cfg/eks-1.5.0/node.yaml | 330 ------- cfg/eks-1.5.0/policies.yaml | 213 ---- cfg/oke-1.5.0/config.yaml | 2 - cfg/oke-1.5.0/controlplane.yaml | 62 -- cfg/oke-1.5.0/node.yaml | 327 ------- cfg/oke-1.5.0/policies.yaml | 287 ------ cfg/rh-1.4.0/config.yaml | 2 - cfg/rh-1.4.0/controlplane.yaml | 62 -- cfg/rh-1.4.0/etcd.yaml | 183 ---- cfg/rh-1.4.0/master.yaml | 1445 ---------------------------- cfg/rh-1.4.0/node.yaml | 429 --------- cfg/rh-1.4.0/policies.yaml | 287 ------ cfg/rh-1.6.0/config.yaml | 2 - cfg/rh-1.6.0/controlplane.yaml | 62 -- cfg/rh-1.6.0/etcd.yaml | 183 ---- cfg/rh-1.6.0/master.yaml | 1445 ---------------------------- cfg/rh-1.6.0/node.yaml | 429 --------- cfg/rh-1.6.0/policies.yaml | 287 ------ 28 files changed, 6938 deletions(-) delete mode 100644 cfg/aks-1.5.0/config.yaml delete mode 100644 cfg/aks-1.5.0/controlplane.yaml delete mode 100644 cfg/aks-1.5.0/managedservices.yaml delete mode 100644 cfg/aks-1.5.0/master.yaml delete mode 100644 cfg/aks-1.5.0/node.yaml delete mode 100644 cfg/aks-1.5.0/policies.yaml delete mode 100644 cfg/eks-1.5.0/config.yaml delete mode 100644 cfg/eks-1.5.0/controlplane.yaml delete mode 100644 cfg/eks-1.5.0/managedservices.yaml delete mode 100644 cfg/eks-1.5.0/master.yaml delete mode 100644 cfg/eks-1.5.0/node.yaml delete mode 100644 cfg/eks-1.5.0/policies.yaml delete mode 100644 cfg/oke-1.5.0/config.yaml delete mode 100644 cfg/oke-1.5.0/controlplane.yaml delete mode 100644 cfg/oke-1.5.0/node.yaml delete mode 100644 cfg/oke-1.5.0/policies.yaml delete mode 100644 cfg/rh-1.4.0/config.yaml delete mode 100644 cfg/rh-1.4.0/controlplane.yaml delete mode 100644 cfg/rh-1.4.0/etcd.yaml delete mode 100644 cfg/rh-1.4.0/master.yaml delete mode 100644 cfg/rh-1.4.0/node.yaml delete mode 100644 cfg/rh-1.4.0/policies.yaml delete mode 100644 cfg/rh-1.6.0/config.yaml delete mode 100644 cfg/rh-1.6.0/controlplane.yaml delete mode 100644 cfg/rh-1.6.0/etcd.yaml delete mode 100644 cfg/rh-1.6.0/master.yaml delete mode 100644 cfg/rh-1.6.0/node.yaml delete mode 100644 cfg/rh-1.6.0/policies.yaml diff --git a/cfg/aks-1.5.0/config.yaml b/cfg/aks-1.5.0/config.yaml deleted file mode 100644 index b7839455a..000000000 --- a/cfg/aks-1.5.0/config.yaml +++ /dev/null @@ -1,2 +0,0 @@ ---- -## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/aks-1.5.0/controlplane.yaml b/cfg/aks-1.5.0/controlplane.yaml deleted file mode 100644 index 44d1f8907..000000000 --- a/cfg/aks-1.5.0/controlplane.yaml +++ /dev/null @@ -1,31 +0,0 @@ ---- -controls: -version: "aks-1.0" -id: 2 -text: "Control Plane Configuration" -type: "controlplane" -groups: - - id: 2.1 - text: "Logging" - checks: - - id: 2.1.1 - text: "Enable audit Logs" - type: "manual" - remediation: | - Azure audit logs are enabled and managed in the Azure portal. To enable log collection for - the Kubernetes master components in your AKS cluster, open the Azure portal in a web - browser and complete the following steps: - 1. Select the resource group for your AKS cluster, such as myResourceGroup. Don't - select the resource group that contains your individual AKS cluster resources, such - as MC_myResourceGroup_myAKSCluster_eastus. - 2. 
On the left-hand side, choose Diagnostic settings. - 3. Select your AKS cluster, such as myAKSCluster, then choose to Add diagnostic setting. - 4. Enter a name, such as myAKSClusterLogs, then select the option to Send to Log Analytics. - 5. Select an existing workspace or create a new one. If you create a workspace, provide - a workspace name, a resource group, and a location. - 6. In the list of available logs, select the logs you wish to enable. For this example, - enable the kube-audit and kube-audit-admin logs. Common logs include the kube- - apiserver, kube-controller-manager, and kube-scheduler. You can return and change - the collected logs once Log Analytics workspaces are enabled. - 7. When ready, select Save to enable collection of the selected logs. - scored: false diff --git a/cfg/aks-1.5.0/managedservices.yaml b/cfg/aks-1.5.0/managedservices.yaml deleted file mode 100644 index 7e5646aac..000000000 --- a/cfg/aks-1.5.0/managedservices.yaml +++ /dev/null @@ -1,144 +0,0 @@ ---- -controls: -version: "aks-1.0" -id: 5 -text: "Managed Services" -type: "managedservices" -groups: - - id: 5.1 - text: "Image Registry and Image Scanning" - checks: - - id: 5.1.1 - text: "Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider (Manual)" - type: "manual" - remediation: "No remediation" - scored: false - - - id: 5.1.2 - text: "Minimize user access to Azure Container Registry (ACR) (Manual)" - type: "manual" - remediation: | - Azure Container Registry - If you use Azure Container Registry (ACR) as your container image store, you need to grant - permissions to the service principal for your AKS cluster to read and pull images. Currently, - the recommended configuration is to use the az aks create or az aks update command to - integrate with a registry and assign the appropriate role for the service principal. For - detailed steps, see Authenticate with Azure Container Registry from Azure Kubernetes - Service. - To avoid needing an Owner or Azure account administrator role, you can configure a - service principal manually or use an existing service principal to authenticate ACR from - AKS. For more information, see ACR authentication with service principals or Authenticate - from Kubernetes with a pull secret. - scored: false - - - id: 5.1.3 - text: "Minimize cluster access to read-only for Azure Container Registry (ACR) (Manual)" - type: "manual" - remediation: "No remediation" - scored: false - - - id: 5.1.4 - text: "Minimize Container Registries to only those approved (Manual)" - type: "manual" - remediation: "No remediation" - scored: false - - - id: 5.2 - text: "Access and identity options for Azure Kubernetes Service (AKS)" - checks: - - id: 5.2.1 - text: "Prefer using dedicated AKS Service Accounts (Manual)" - type: "manual" - remediation: | - Azure Active Directory integration - The security of AKS clusters can be enhanced with the integration of Azure Active Directory - (AD). Built on decades of enterprise identity management, Azure AD is a multi-tenant, - cloud-based directory, and identity management service that combines core directory - services, application access management, and identity protection. With Azure AD, you can - integrate on-premises identities into AKS clusters to provide a single source for account - management and security. - Azure Active Directory integration with AKS clusters - With Azure AD-integrated AKS clusters, you can grant users or groups access to Kubernetes - resources within a namespace or across the cluster. 
To obtain a kubectl configuration - context, a user can run the az aks get-credentials command. When a user then interacts - with the AKS cluster with kubectl, they're prompted to sign in with their Azure AD - credentials. This approach provides a single source for user account management and - password credentials. The user can only access the resources as defined by the cluster - administrator. - Azure AD authentication is provided to AKS clusters with OpenID Connect. OpenID Connect - is an identity layer built on top of the OAuth 2.0 protocol. For more information on OpenID - Connect, see the Open ID connect documentation. From inside of the Kubernetes cluster, - Webhook Token Authentication is used to verify authentication tokens. Webhook token - authentication is configured and managed as part of the AKS cluster. - scored: false - - - id: 5.3 - text: "Key Management Service (KMS)" - checks: - - id: 5.3.1 - text: "Ensure Kubernetes Secrets are encrypted (Manual)" - type: "manual" - remediation: "No remediation" - scored: false - - - id: 5.4 - text: "Cluster Networking" - checks: - - id: 5.4.1 - text: "Restrict Access to the Control Plane Endpoint (Manual)" - type: "manual" - remediation: "No remediation" - scored: false - - - id: 5.4.2 - text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Manual)" - type: "manual" - remediation: "No remediation" - scored: false - - - id: 5.4.3 - text: "Ensure clusters are created with Private Nodes (Manual)" - type: "manual" - remediation: "No remediation" - scored: false - - - id: 5.4.4 - text: "Ensure Network Policy is Enabled and set as appropriate (Manual)" - type: "manual" - remediation: "No remediation" - scored: false - - - id: 5.4.5 - text: "Encrypt traffic to HTTPS load balancers with TLS certificates (Manual)" - type: "manual" - remediation: "No remediation" - scored: false - - - - id: 5.5 - text: "Authentication and Authorization" - checks: - - id: 5.5.1 - text: "Manage Kubernetes RBAC users with Azure AD (Manual)" - type: "manual" - remediation: "No remediation" - scored: false - - id: 5.5.2 - text: "Use Azure RBAC for Kubernetes Authorization (Manual)" - type: "manual" - remediation: "No remediation" - scored: false - - - id: 5.6 - text: "Other Cluster Configurations" - checks: - - id: 5.6.1 - text: "Restrict untrusted workloads (Manual)" - type: "manual" - remediation: "No remediation" - scored: false - - id: 5.6.2 - text: "Hostile multi-tenant workloads (Manual)" - type: "manual" - remediation: "No remediation" - scored: false diff --git a/cfg/aks-1.5.0/master.yaml b/cfg/aks-1.5.0/master.yaml deleted file mode 100644 index 7ec9eae88..000000000 --- a/cfg/aks-1.5.0/master.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -controls: -version: "aks-1.0" -id: 1 -text: "Control Plane Components" -type: "master" diff --git a/cfg/aks-1.5.0/node.yaml b/cfg/aks-1.5.0/node.yaml deleted file mode 100644 index 78fda584d..000000000 --- a/cfg/aks-1.5.0/node.yaml +++ /dev/null @@ -1,321 +0,0 @@ ---- -controls: -version: "aks-1.0" -id: 3 -text: "Worker Node Security Configuration" -type: "node" -groups: - - id: 3.1 - text: "Worker Node Configuration Files" - checks: - - id: 3.1.1 - text: "Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Manual)" - audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - Run the below command (based on the 
file location on your system) on the each worker node. - For example, - chmod 644 $kubeletkubeconfig - scored: false - - - id: 3.1.2 - text: "Ensure that the kubelet kubeconfig file ownership is set to root:root (Manual)" - audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' - tests: - test_items: - - flag: root:root - remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, - chown root:root $kubeletkubeconfig - scored: false - - - id: 3.1.3 - text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Manual)" - audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - Run the following command (using the config file location identified in the Audit step) - chmod 644 $kubeletconf - scored: false - - - id: 3.1.4 - text: "Ensure that the kubelet configuration file ownership is set to root:root (Manual)" - audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' - tests: - test_items: - - flag: root:root - remediation: | - Run the following command (using the config file location identified in the Audit step) - chown root:root $kubeletconf - scored: false - - - id: 3.2 - text: "Kubelet" - checks: - - id: 3.2.1 - text: "Ensure that the --anonymous-auth argument is set to false (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: "--anonymous-auth" - path: '{.authentication.anonymous.enabled}' - compare: - op: eq - value: false - remediation: | - If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to - false. - If using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --anonymous-auth=false - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 3.2.2 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --authorization-mode - path: '{.authorization.mode}' - compare: - op: nothave - value: AlwaysAllow - remediation: | - If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If - using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. - --authorization-mode=Webhook - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 3.2.3 - text: "Ensure that the --client-ca-file argument is set as appropriate (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --client-ca-file - path: '{.authentication.x509.clientCAFile}' - set: true - remediation: | - If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to - the location of the client CA file. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. 
- --client-ca-file= - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 3.2.4 - text: "Ensure that the --read-only-port argument is set to 0 (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: "--read-only-port" - path: '{.readOnlyPort}' - set: true - compare: - op: eq - value: 0 - remediation: | - If using a Kubelet config file, edit the file to set readOnlyPort to 0. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --read-only-port=0 - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 3.2.5 - text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --streaming-connection-idle-timeout - path: '{.streamingConnectionIdleTimeout}' - set: true - compare: - op: noteq - value: 0 - - flag: --streaming-connection-idle-timeout - path: '{.streamingConnectionIdleTimeout}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a - value other than 0. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --streaming-connection-idle-timeout=5m - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 3.2.6 - text: "Ensure that the --protect-kernel-defaults argument is set to true (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --protect-kernel-defaults - path: '{.protectKernelDefaults}' - set: true - compare: - op: eq - value: true - remediation: | - If using a Kubelet config file, edit the file to set protectKernelDefaults: true. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --protect-kernel-defaults=true - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 3.2.7 - text: "Ensure that the --make-iptables-util-chains argument is set to true (Manual) " - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --make-iptables-util-chains - path: '{.makeIPTablesUtilChains}' - set: true - compare: - op: eq - value: true - - flag: --make-iptables-util-chains - path: '{.makeIPTablesUtilChains}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - remove the --make-iptables-util-chains argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. 
For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 3.2.8 - text: "Ensure that the --hostname-override argument is not set (Manual)" - # This is one of those properties that can only be set as a command line argument. - # To check if the property is set as expected, we need to parse the kubelet command - # instead reading the Kubelet Configuration file. - audit: "/bin/ps -fC $kubeletbin " - tests: - test_items: - - flag: --hostname-override - set: false - remediation: | - Edit the kubelet service file $kubeletsvc - on each worker node and remove the --hostname-override argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 3.2.9 - text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --event-qps - path: '{.eventRecordQPS}' - set: true - compare: - op: eq - value: 0 - remediation: | - If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 3.2.10 - text: "Ensure that the --rotate-certificates argument is not set to false (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --rotate-certificates - path: '{.rotateCertificates}' - set: true - compare: - op: eq - value: true - - flag: --rotate-certificates - path: '{.rotateCertificates}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to add the line rotateCertificates: true or - remove it altogether to use the default value. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS - variable. - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 3.2.11 - text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: RotateKubeletServerCertificate - path: '{.featureGates.RotateKubeletServerCertificate}' - set: true - compare: - op: eq - value: true - remediation: | - Edit the kubelet service file $kubeletsvc - on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. - --feature-gates=RotateKubeletServerCertificate=true - Based on your system, restart the kubelet service. 
For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false diff --git a/cfg/aks-1.5.0/policies.yaml b/cfg/aks-1.5.0/policies.yaml deleted file mode 100644 index 9cfde1e3f..000000000 --- a/cfg/aks-1.5.0/policies.yaml +++ /dev/null @@ -1,214 +0,0 @@ ---- -controls: -version: "aks-1.0" -id: 4 -text: "Policies" -type: "policies" -groups: - - id: 4.1 - text: "RBAC and Service Accounts" - checks: - - id: 4.1.1 - text: "Ensure that the cluster-admin role is only used where required (Manual)" - type: "manual" - remediation: | - Identify all clusterrolebindings to the cluster-admin role. Check if they are used and - if they need this role or if they could use a role with fewer privileges. - Where possible, first bind users to a lower privileged role and then remove the - clusterrolebinding to the cluster-admin role : - kubectl delete clusterrolebinding [name] - scored: false - - - id: 4.1.2 - text: "Minimize access to secrets (Manual)" - type: "manual" - remediation: | - Where possible, remove get, list and watch access to secret objects in the cluster. - scored: false - - - id: 4.1.3 - text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" - type: "manual" - remediation: | - Where possible replace any use of wildcards in clusterroles and roles with specific - objects or actions. - scored: false - - - id: 4.1.4 - text: "Minimize access to create pods (Manual)" - type: "manual" - remediation: | - Where possible, remove create access to pod objects in the cluster. - scored: false - - - id: 4.1.5 - text: "Ensure that default service accounts are not actively used. (Manual)" - type: "manual" - remediation: | - Create explicit service accounts wherever a Kubernetes workload requires specific access - to the Kubernetes API server. - Modify the configuration of each default service account to include this value - automountServiceAccountToken: false - scored: false - - - id: 4.1.6 - text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" - type: "manual" - remediation: | - Modify the definition of pods and service accounts which do not need to mount service - account tokens to disable it. - scored: false - - - id: 4.2 - text: "Pod Security Policies" - checks: - - id: 4.2.1 - text: "Minimize the admission of privileged containers (Automated)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that - the .spec.privileged field is omitted or set to false. - scored: false - - - id: 4.2.2 - text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - .spec.hostPID field is omitted or set to false. - scored: false - - - id: 4.2.3 - text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - .spec.hostIPC field is omitted or set to false. - scored: false - - - id: 4.2.4 - text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - .spec.hostNetwork field is omitted or set to false. 
- scored: false - - - id: 4.2.5 - text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - .spec.allowPrivilegeEscalation field is omitted or set to false. - scored: false - - - id: 4.2.6 - text: "Minimize the admission of root containers (Automated)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of - UIDs not including 0. - scored: false - - - id: 4.2.7 - text: "Minimize the admission of containers with the NET_RAW capability (Automated)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - .spec.requiredDropCapabilities is set to include either NET_RAW or ALL. - scored: false - - - id: 4.2.8 - text: "Minimize the admission of containers with added capabilities (Automated)" - type: "manual" - remediation: | - Ensure that allowedCapabilities is not present in PSPs for the cluster unless - it is set to an empty array. - scored: false - - - id: 4.2.9 - text: "Minimize the admission of containers with capabilities assigned (Manual)" - type: "manual" - remediation: | - Review the use of capabilities in applications running on your cluster. Where a namespace - contains applications which do not require any Linux capabities to operate consider adding - a PSP which forbids the admission of containers which do not drop all capabilities. - scored: false - - - id: 4.3 - text: "Azure Policy / OPA" - checks: [] - - - id: 4.4 - text: "CNI Plugin" - checks: - - id: 4.4.1 - text: "Ensure that the latest CNI version is used (Manual)" - type: "manual" - remediation: | - Review the documentation of AWS CNI plugin, and ensure latest CNI version is used. - scored: false - - - id: 4.4.2 - text: "Ensure that all Namespaces have Network Policies defined (Manual)" - type: "manual" - remediation: | - Follow the documentation and create NetworkPolicy objects as you need them. - scored: false - - - id: 4.5 - text: "Secrets Management" - checks: - - id: 4.5.1 - text: "Prefer using secrets as files over secrets as environment variables (Manual)" - type: "manual" - remediation: | - If possible, rewrite application code to read secrets from mounted secret files, rather than - from environment variables. - scored: false - - - id: 4.5.2 - text: "Consider external secret storage (Manual)" - type: "manual" - remediation: | - Refer to the secrets management options offered by your cloud provider or a third-party - secrets management solution. - scored: false - - - id: 4.6 - text: "Extensible Admission Control" - checks: - - id: 4.6.1 - text: "Verify that admission controllers are working as expected (Manual)" - type: "manual" - remediation: "No remediation" - scored: false - - - id: 4.7 - text: "General Policies" - checks: - - id: 4.7.1 - text: "Create administrative boundaries between resources using namespaces (Manual)" - type: "manual" - remediation: | - Follow the documentation and create namespaces for objects in your deployment as you need - them. - scored: false - - - id: 4.7.2 - text: "Apply Security Context to Your Pods and Containers (Manual)" - type: "manual" - remediation: | - Follow the Kubernetes documentation and apply security contexts to your pods. 
For a - suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker - Containers. - scored: false - - - id: 4.7.3 - text: "The default namespace should not be used (Manual)" - type: "manual" - remediation: | - Ensure that namespaces are created to allow for appropriate segregation of Kubernetes - resources and that all new resources are created in a specific namespace. - scored: false diff --git a/cfg/eks-1.5.0/config.yaml b/cfg/eks-1.5.0/config.yaml deleted file mode 100644 index 17301a751..000000000 --- a/cfg/eks-1.5.0/config.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -## Version-specific settings that override the values in cfg/config.yaml -## These settings are required if you are using the --asff option to report findings to AWS Security Hub -## AWS account number is required. -AWS_ACCOUNT: "" -## AWS region is required. -AWS_REGION: "" -## EKS Cluster ARN is required. -CLUSTER_ARN: "" diff --git a/cfg/eks-1.5.0/controlplane.yaml b/cfg/eks-1.5.0/controlplane.yaml deleted file mode 100644 index 687ee6df1..000000000 --- a/cfg/eks-1.5.0/controlplane.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -controls: -version: "eks-1.2.0" -id: 2 -text: "Control Plane Configuration" -type: "controlplane" -groups: - - id: 2.1 - text: "Logging" - checks: - - id: 2.1.1 - text: "Enable audit logs (Automated)" - remediation: "Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler." - scored: false diff --git a/cfg/eks-1.5.0/managedservices.yaml b/cfg/eks-1.5.0/managedservices.yaml deleted file mode 100644 index c9ae5ff3f..000000000 --- a/cfg/eks-1.5.0/managedservices.yaml +++ /dev/null @@ -1,154 +0,0 @@ ---- -controls: -version: "eks-1.2.0" -id: 5 -text: "Managed Services" -type: "managedservices" -groups: - - id: 5.1 - text: "Image Registry and Image Scanning" - checks: - - id: 5.1.1 - text: "Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third-party provider (Manual)" - type: "manual" - remediation: | - To utilize AWS ECR for Image scanning please follow the steps below: - - To create a repository configured for scan on push (AWS CLI): - aws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE - - To edit the settings of an existing repository (AWS CLI): - aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE - - Use the following steps to start a manual image scan using the AWS Management Console. - Open the Amazon ECR console at https://console.aws.amazon.com/ecr/repositories. - From the navigation bar, choose the Region to create your repository in. - In the navigation pane, choose Repositories. - On the Repositories page, choose the repository that contains the image to scan. - On the Images page, select the image to scan and then choose Scan. - scored: false - - - id: 5.1.2 - text: "Minimize user access to Amazon ECR (Manual)" - type: "manual" - remediation: | - Before you use IAM to manage access to Amazon ECR, you should understand what IAM features - are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other - AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide. 
- scored: false - - - id: 5.1.3 - text: "Minimize cluster access to read-only for Amazon ECR (Manual)" - type: "manual" - remediation: | - You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites. - - The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess - the following IAM policy permissions for Amazon ECR. - - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ecr:BatchCheckLayerAvailability", - "ecr:BatchGetImage", - "ecr:GetDownloadUrlForLayer", - "ecr:GetAuthorizationToken" - ], - "Resource": "*" - } - ] - } - scored: false - - - id: 5.1.4 - text: "Minimize Container Registries to only those approved (Manual)" - type: "manual" - remediation: "No remediation" - scored: false - - - id: 5.2 - text: "Identity and Access Management (IAM)" - checks: - - id: 5.2.1 - text: "Prefer using dedicated Amazon EKS Service Accounts (Manual)" - type: "manual" - remediation: "No remediation" - scored: false - - - id: 5.3 - text: "AWS Key Management Service (KMS)" - checks: - - id: 5.3.1 - text: "Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS (Manual)" - type: "manual" - remediation: | - This process can only be performed during Cluster Creation. - - Enable 'Secrets Encryption' during Amazon EKS cluster creation as described - in the links within the 'References' section. - scored: false - - - id: 5.4 - text: "Cluster Networking" - checks: - - id: 5.4.1 - text: "Restrict Access to the Control Plane Endpoint (Manual)" - type: "manual" - remediation: "No remediation" - scored: false - - - id: 5.4.2 - text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Manual)" - type: "manual" - remediation: "No remediation" - scored: false - - - id: 5.4.3 - text: "Ensure clusters are created with Private Nodes (Manual)" - type: "manual" - remediation: "No remediation" - scored: false - - - id: 5.4.4 - text: "Ensure Network Policy is Enabled and set as appropriate (Manual)" - type: "manual" - remediation: "No remediation" - scored: false - - - id: 5.4.5 - text: "Encrypt traffic to HTTPS load balancers with TLS certificates (Manual)" - type: "manual" - remediation: "No remediation" - scored: false - - - - id: 5.5 - text: "Authentication and Authorization" - checks: - - id: 5.5.1 - text: "Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes (Manual)" - type: "manual" - remediation: | - Refer to the 'Managing users or IAM roles for your cluster' in Amazon EKS documentation. - scored: false - - - - id: 5.6 - text: "Other Cluster Configurations" - checks: - - id: 5.6.1 - text: "Consider Fargate for running untrusted workloads (Manual)" - type: "manual" - remediation: | - Create a Fargate profile for your cluster Before you can schedule pods running on Fargate - in your cluster, you must define a Fargate profile that specifies which pods should use - Fargate when they are launched. For more information, see AWS Fargate profile. - - Note: If you created your cluster with eksctl using the --fargate option, then a Fargate profile has - already been created for your cluster with selectors for all pods in the kube-system - and default namespaces. Use the following procedure to create Fargate profiles for - any other namespaces you would like to use with Fargate. 
- scored: false diff --git a/cfg/eks-1.5.0/master.yaml b/cfg/eks-1.5.0/master.yaml deleted file mode 100644 index 8da0179b3..000000000 --- a/cfg/eks-1.5.0/master.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -controls: -version: "eks-1.2.0" -id: 1 -text: "Control Plane Components" -type: "master" diff --git a/cfg/eks-1.5.0/node.yaml b/cfg/eks-1.5.0/node.yaml deleted file mode 100644 index c7f47c04d..000000000 --- a/cfg/eks-1.5.0/node.yaml +++ /dev/null @@ -1,330 +0,0 @@ ---- -controls: -version: "eks-1.2.0" -id: 3 -text: "Worker Node Security Configuration" -type: "node" -groups: - - id: 3.1 - text: "Worker Node Configuration Files" - checks: - - id: 3.1.1 - text: "Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Auomated)" - audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, - chmod 644 $kubeletkubeconfig - scored: false - - - id: 3.1.2 - text: "Ensure that the kubelet kubeconfig file ownership is set to root:root (Auomated)" - audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' - tests: - test_items: - - flag: root:root - remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, - chown root:root $kubeletkubeconfig - scored: false - - - id: 3.1.3 - text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Atomated)" - audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - Run the following command (using the config file location identified in the Audit step) - chmod 644 $kubeletconf - scored: false - - - id: 3.1.4 - text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' - tests: - test_items: - - flag: root:root - remediation: | - Run the following command (using the config file location identified in the Audit step) - chown root:root $kubeletconf - scored: false - - - id: 3.2 - text: "Kubelet" - checks: - - id: 3.2.1 - text: "Ensure that the Anonymous Auth is Not Enabled (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: "--anonymous-auth" - path: '{.authentication.anonymous.enabled}' - set: true - compare: - op: eq - value: false - remediation: | - If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to - false. - If using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --anonymous-auth=false - Based on your system, restart the kubelet service. 
For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 3.2.2 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --authorization-mode - path: '{.authorization.mode}' - set: true - compare: - op: nothave - value: AlwaysAllow - remediation: | - If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If - using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. - --authorization-mode=Webhook - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 3.2.3 - text: "Ensure that a Client CA File is Configured (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --client-ca-file - path: '{.authentication.x509.clientCAFile}' - set: true - remediation: | - If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to - the location of the client CA file. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. - --client-ca-file= - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 3.2.4 - text: "Ensure that the --read-only-port is disabled (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: "--read-only-port" - path: '{.readOnlyPort}' - set: true - compare: - op: eq - value: 0 - remediation: | - If using a Kubelet config file, edit the file to set readOnlyPort to 0. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --read-only-port=0 - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 3.2.5 - text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --streaming-connection-idle-timeout - path: '{.streamingConnectionIdleTimeout}' - set: true - compare: - op: noteq - value: 0 - - flag: --streaming-connection-idle-timeout - path: '{.streamingConnectionIdleTimeout}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a - value other than 0. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --streaming-connection-idle-timeout=5m - Based on your system, restart the kubelet service. 
For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 3.2.6 - text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --protect-kernel-defaults - path: '{.protectKernelDefaults}' - set: true - compare: - op: eq - value: true - remediation: | - If using a Kubelet config file, edit the file to set protectKernelDefaults: true. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --protect-kernel-defaults=true - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 3.2.7 - text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated) " - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --make-iptables-util-chains - path: '{.makeIPTablesUtilChains}' - set: true - compare: - op: eq - value: true - - flag: --make-iptables-util-chains - path: '{.makeIPTablesUtilChains}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - remove the --make-iptables-util-chains argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 3.2.8 - text: "Ensure that the --hostname-override argument is not set (Manual)" - # This is one of those properties that can only be set as a command line argument. - # To check if the property is set as expected, we need to parse the kubelet command - # instead reading the Kubelet Configuration file. - audit: "/bin/ps -fC $kubeletbin " - tests: - test_items: - - flag: --hostname-override - set: false - remediation: | - Edit the kubelet service file $kubeletsvc - on each worker node and remove the --hostname-override argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 3.2.9 - text: "Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --event-qps - path: '{.eventRecordQPS}' - set: true - compare: - op: gte - value: 0 - remediation: | - If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. 
For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 3.2.10 - text: "Ensure that the --rotate-certificates argument is not present or is set to true (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --rotate-certificates - path: '{.rotateCertificates}' - set: true - compare: - op: eq - value: true - - flag: --rotate-certificates - path: '{.rotateCertificates}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to add the line rotateCertificates: true or - remove it altogether to use the default value. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS - variable. - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 3.2.11 - text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: RotateKubeletServerCertificate - path: '{.featureGates.RotateKubeletServerCertificate}' - set: true - compare: - op: eq - value: true - remediation: | - Edit the kubelet service file $kubeletsvc - on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. - --feature-gates=RotateKubeletServerCertificate=true - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - id: 3.3 - text: "Container Optimized OS" - checks: - - id: 3.3.1 - text: "Prefer using a container-optimized OS when possible (Manual)" - remediation: "No remediation" - scored: false diff --git a/cfg/eks-1.5.0/policies.yaml b/cfg/eks-1.5.0/policies.yaml deleted file mode 100644 index 7f5db95d9..000000000 --- a/cfg/eks-1.5.0/policies.yaml +++ /dev/null @@ -1,213 +0,0 @@ ---- -controls: -version: "eks-1.2.0" -id: 4 -text: "Policies" -type: "policies" -groups: - - id: 4.1 - text: "RBAC and Service Accounts" - checks: - - id: 4.1.1 - text: "Ensure that the cluster-admin role is only used where required (Manual)" - type: "manual" - remediation: | - Identify all clusterrolebindings to the cluster-admin role. Check if they are used and - if they need this role or if they could use a role with fewer privileges. - Where possible, first bind users to a lower privileged role and then remove the - clusterrolebinding to the cluster-admin role : - kubectl delete clusterrolebinding [name] - scored: false - - - id: 4.1.2 - text: "Minimize access to secrets (Manual)" - type: "manual" - remediation: | - Where possible, remove get, list and watch access to secret objects in the cluster. - scored: false - - - id: 4.1.3 - text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" - type: "manual" - remediation: | - Where possible replace any use of wildcards in clusterroles and roles with specific - objects or actions. - scored: false - - - id: 4.1.4 - text: "Minimize access to create pods (Manual)" - type: "manual" - remediation: | - Where possible, remove create access to pod objects in the cluster. - scored: false - - - id: 4.1.5 - text: "Ensure that default service accounts are not actively used. 
(Manual)" - type: "manual" - remediation: | - Create explicit service accounts wherever a Kubernetes workload requires specific access - to the Kubernetes API server. - Modify the configuration of each default service account to include this value - automountServiceAccountToken: false - scored: false - - - id: 4.1.6 - text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" - type: "manual" - remediation: | - Modify the definition of pods and service accounts which do not need to mount service - account tokens to disable it. - scored: false - - - id: 4.1.7 - text: "Avoid use of system:masters group (Manual)" - type: "manual" - remediation: | - Remove the system:masters group from all users in the cluster. - scored: false - - - id: 4.1.8 - text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" - type: "manual" - remediation: | - Where possible, remove the impersonate, bind and escalate rights from subjects. - scored: false - - - id: 4.2 - text: "Pod Security Policies" - checks: - - id: 4.2.1 - text: "Minimize the admission of privileged containers (Automated)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that - the .spec.privileged field is omitted or set to false. - scored: false - - - id: 4.2.2 - text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - .spec.hostPID field is omitted or set to false. - scored: false - - - id: 4.2.3 - text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - .spec.hostIPC field is omitted or set to false. - scored: false - - - id: 4.2.4 - text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - .spec.hostNetwork field is omitted or set to false. - scored: false - - - id: 4.2.5 - text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - .spec.allowPrivilegeEscalation field is omitted or set to false. - scored: false - - - id: 4.2.6 - text: "Minimize the admission of root containers (Automated)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of - UIDs not including 0. - scored: false - - - id: 4.2.7 - text: "Minimize the admission of containers with added capabilities (Manual)" - type: "manual" - remediation: | - Ensure that allowedCapabilities is not present in PSPs for the cluster unless - it is set to an empty array. - scored: false - - - id: 4.2.8 - text: "Minimize the admission of containers with capabilities assigned (Manual)" - type: "manual" - remediation: | - Review the use of capabilities in applications running on your cluster. Where a namespace - contains applications which do not require any Linux capabities to operate consider adding - a PSP which forbids the admission of containers which do not drop all capabilities. 
- scored: false - - - id: 4.3 - text: "CNI Plugin" - checks: - - id: 4.3.1 - text: "Ensure CNI plugin supports network policies (Manual)" - type: "manual" - remediation: | - As with RBAC policies, network policies should adhere to the policy of least privileged - access. Start by creating a deny all policy that restricts all inbound and outbound traffic - from a namespace or create a global policy using Calico. - scored: false - - - id: 4.3.2 - text: "Ensure that all Namespaces have Network Policies defined (Manual)" - type: "manual" - remediation: | - Follow the documentation and create NetworkPolicy objects as you need them. - scored: false - - - id: 4.4 - text: "Secrets Management" - checks: - - id: 4.4.1 - text: "Prefer using secrets as files over secrets as environment variables (Manual)" - type: "manual" - remediation: | - If possible, rewrite application code to read secrets from mounted secret files, rather than - from environment variables. - scored: false - - - id: 4.4.2 - text: "Consider external secret storage (Manual)" - type: "manual" - remediation: | - Refer to the secrets management options offered by your cloud provider or a third-party - secrets management solution. - scored: false - - - id: 4.5 - text: "Extensible Admission Control" - checks: [] - - - id: 4.6 - text: "General Policies" - checks: - - id: 4.6.1 - text: "Create administrative boundaries between resources using namespaces (Manual)" - type: "manual" - remediation: | - Follow the documentation and create namespaces for objects in your deployment as you need - them. - scored: false - - - id: 4.6.2 - text: "Apply Security Context to Your Pods and Containers (Manual)" - type: "manual" - remediation: | - Follow the Kubernetes documentation and apply security contexts to your pods. For a - suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker - Containers. - scored: false - - - id: 4.6.3 - text: "The default namespace should not be used (Manual)" - type: "manual" - remediation: | - Ensure that namespaces are created to allow for appropriate segregation of Kubernetes - resources and that all new resources are created in a specific namespace. - scored: false diff --git a/cfg/oke-1.5.0/config.yaml b/cfg/oke-1.5.0/config.yaml deleted file mode 100644 index 4cbf4cf00..000000000 --- a/cfg/oke-1.5.0/config.yaml +++ /dev/null @@ -1,2 +0,0 @@ ---- -## Version-specific settings that override the values in cfg/config.yaml \ No newline at end of file diff --git a/cfg/oke-1.5.0/controlplane.yaml b/cfg/oke-1.5.0/controlplane.yaml deleted file mode 100644 index 5c227aab0..000000000 --- a/cfg/oke-1.5.0/controlplane.yaml +++ /dev/null @@ -1,62 +0,0 @@ ---- -controls: -version: oke-1.5.0 -id: 3 -text: "Control Plane Configuration" -type: "controlplane" -groups: - - id: 2.1 - text: "Authentication and Authorization" - checks: - - id: 2.1.1 - text: "Client certificate authentication should not be used for users (Auomated)" - audit: | - # To verify user authentication is enabled - oc describe authentication - # To verify that an identity provider is configured - oc get identity - # To verify that a custom cluster-admin user exists - oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | grep cluster-admin | grep User - # To verity that kbueadmin is removed, no results should be returned - oc get secrets kubeadmin -n kube-system - type: manual - remediation: | - Configure an identity provider for the OpenShift cluster. 
- Understanding identity provider configuration | Authentication | OpenShift - Container Platform 4.5. Once an identity provider has been defined, - you can use RBAC to define and apply permissions. - After you define an identity provider and create a new cluster-admin user, - remove the kubeadmin user to improve cluster security. - scored: false - - - id: 3.2 - text: "Logging" - checks: - - id: 3.2.1 - text: "Ensure that a minimal audit policy is created (Manual)" - audit: | - #To view kube apiserver log files - oc adm node-logs --role=master --path=kube-apiserver/ - #To view openshift apiserver log files - oc adm node-logs --role=master --path=openshift-apiserver/ - #To verify kube apiserver audit config - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?' - #To verify openshift apiserver audit config - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?' - type: manual - remediation: | - No remediation required. - scored: false - - - id: 3.2.2 - text: "Ensure that the audit policy covers key security concerns (Manual)" - audit: | - #To verify openshift apiserver audit config - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' - #To verify kube apiserver audit config - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' - type: manual - remediation: | - In OpenShift 4.6 and higher, if appropriate for your needs, - modify the audit policy. - scored: false diff --git a/cfg/oke-1.5.0/node.yaml b/cfg/oke-1.5.0/node.yaml deleted file mode 100644 index 2e5b005b5..000000000 --- a/cfg/oke-1.5.0/node.yaml +++ /dev/null @@ -1,327 +0,0 @@ ---- -controls: -version: "oke-1.26" -id: 3 -text: "Worker Node Security Configuration" -type: "node" -groups: - - id: 3.1 - text: "Worker Node Configuration Files" - checks: - - id: 3.1.1 - text: "Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Manual)" - audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, - chmod 644 $kubeletkubeconfig - scored: false - - - id: 3.1.2 - text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)" - audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' - tests: - bin_op: or - test_items: - - flag: root:root - - flag: "$proxykubeconfig" - set: false - remediation: | - Run the below command (based on the file location on your system) on the each worker node. 
- For example, chown root:root $proxykubeconfig - scored: false - - id: 3.1.3 - text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Manual)" - audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' - tests: - test_items: - - flag: "permissions" - set: true - compare: - op: bitmask - value: "644" - remediation: | - Run the following command (using the config file location identified in the Audit step) - chmod 644 $kubeletconf - scored: true - - id: 3.1.4 - text: "Ensure that the kubelet configuration file ownership is set to root:root (Manual)" - audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' - tests: - test_items: - - flag: root:root - remediation: | - Run the following command (using the config file location identied in the Audit step) - chown root:root /etc/kubernetes/kubelet.conf - scored: false - - id: 3.2 - text: "Kubelet" - checks: - - id: 3.2.1 - text: "Ensure that the --anonymous-auth argument is set to false (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: "--anonymous-auth" - path: '{.authentication.anonymous.enabled}' - compare: - op: eq - value: false - remediation: | - If modifying the Kubelet service config file, edit the kubelet.service file - /etc/kubernetes/kubelet-config.json and set the below parameter - --anonymous-auth=false - Based on your system, restart the kubelet service and check status - systemctl daemon-reload - systemctl restart kubelet.service - systemctl status kubelet -l - scored: true - - id: 3.2.2 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --authorization-mode - path: '{.authorization.mode}' - compare: - op: nothave - value: AlwaysAllow - remediation: | - iff modifying the Kubelet service config file, edit the kubelet.service file - /etc/kubernetes/kubelet-config.json and set the below parameter - --authorization-mode=Webhook - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - systemctl status kubelet -l - scored: true - - id: 3.2.3 - text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --client-ca-file - path: '{.authentication.x509.clientCAFile}' - remediation: | - If modifying the Kubelet service config file, edit the kubelet.service file - /etc/kubernetes/kubelet-config.json and set the below parameter - --client-ca-file=/etc/kubernetes/ca.crt \ - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - systemctl status kubelet -l - scored: false - - id: 3.2.4 - text: "Ensure that the --read-only-port argument is set to 0 (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - bin_op: or - test_items: - - flag: "--read-only-port" - path: '{.readOnlyPort}' - compare: - op: eq - value: 0 - - flag: "--read-only-port" - path: '{.readOnlyPort}' - set: false - remediation: | - If modifying the Kubelet config file, edit the kubelet.service file - /etc/sytemd/system/kubelet.service and set the below parameter - --read-only-port=0 - Based on your system, restart the kubelet service. 
For example: - systemctl daemon-reload - systemctl restart kubelet.service - systemctl status kubelet -l - scored: false - - id: 3.2.5 - text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --streaming-connection-idle-timeout - path: '{.streamingConnectionIdleTimeout}' - compare: - op: noteq - value: 0 - - flag: --streaming-connection-idle-timeout - path: '{.streamingConnectionIdleTimeout}' - set: false - bin_op: or - remediation: | - If modifying the Kubelet service config file, edit the kubelet.service file - /etc/kubernetes/kubelet-config.json and set the below parameter - --streaming-connection-idle-timeout - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - systemctl status kubelet -l - scored: false - - id: 3.2.6 - text: "Ensure that the --protect-kernel-defaults argument is set to true (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --protect-kernel-defaults - path: '{.protectKernelDefaults}' - set: true - compare: - op: eq - value: true - remediation: | - If modifying the Kubelet service config file, edit the kubelet.service file - /etc/kubernetes/kubelet-config.json and set the below parameter - --protect-kernel-defaults=true - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - systemctl status kubelet -l - scored: false - - id: 3.2.7 - text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --make-iptables-util-chains - path: '{.makeIPTablesUtilChains}' - compare: - op: eq - value: true - - flag: --make-iptables-util-chains - path: '{.makeIPTablesUtilChains}' - set: false - bin_op: or - remediation: | - If modifying the Kubelet service config file, edit the kubelet.service file - /etc/kubernetes/kubelet-config.json and set the below parameter - --make-iptables-util-chains:true - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - systemctl status kubelet -l - scored: true - - id: 3.2.8 - text: "Ensure that the --hostname-override argument is not set (Manual)" - # This is one of those properties that can only be set as a command line argument. - # To check if the property is set as expected, we need to parse the kubelet command - # instead reading the Kubelet Configuration file. - audit: "/bin/ps -fC $kubeletbin " - tests: - test_items: - - flag: --hostname-override - set: false - remediation: | - If modifying the Kubelet config file, edit the kubelet.service file - /etc/systemd/system/kubelet-.service and set the below parameter - --hostname-override=NODE NAME (where NODE NAME is the internal IP ex. - 10.0.10.4, as assigned my OKE on build) - Based on your system, restart the kubelet service. 
For example: - systemctl daemon-reload - systemctl restart kubelet.service - systemctl status kubelet -l - scored: false - - id: 3.2.9 - text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --event-qps - path: '{.eventRecordQPS}' - set: true - compare: - op: eq - value: 0 - remediation: | - If modifying the Kubelet service config file, edit the kubelet.service file - /etc/kubernetes/kubelet-config.json and set the below parameter - --event-qps=0 - If using command line arguments, edit the kubelet service file /etc/kubernetes/kubelet-config.json.d/10-kubeadm.conf - on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - systemctl status kubelet -l - scored: true - - id: 3.2.10 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - bin_op: and - test_items: - - flag: --tls-cert-file - path: '{.tlsCertFile}' - - flag: --tls-private-key-file - path: '{.tlsPrivateKeyFile}' - remediation: | - If modifying the Kubelet service config file, edit the kubelet.service file - /etc/kubernetes/kubelet-config.json and set the below parameter - Verify that the `tls-cert-file=/var/lib/kubelet/pki/tls.pem`. - Verify that the `tls-private-key-file=/var/lib/kubelet/pki/tls.key`. - Based on your system, restart the kubelet service and check status - systemctl daemon-reload - systemctl restart kubelet.service - systemctl status kubelet -l - scored: true - - id: 3.2.11 - text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --rotate-certificates - path: '{.rotateCertificates}' - compare: - op: eq - value: true - - flag: --rotate-certificates - path: '{.rotateCertificates}' - set: false - bin_op: or - remediation: | - If modifying the Kubelet service config file, edit the kubelet.service file - /etc/kubernetes/kubelet-config.json and set the below parameter - Verify that the `--rotate-certificates` is present. - Based on your system, restart the kubelet service. 
For example, - systemctl daemon-reload - systemctl restart kubelet.service - systemctl status kubelet -l - scored: true - - id: 3.2.12 - text: "Ensure that the --rotate-server-certificates argument is set to true (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --rotate-certificates - path: '{.rotateCertificates}' - compare: - op: eq - value: true - - flag: --rotate-certificates - path: '{.rotateCertificates}' - set: false - bin_op: or - remediation: | - If modifying the Kubelet service config file, edit the kubelet.service file - /etc/kubernetes/kubelet-config.json and set the below parameter - --rotate-server-certificates=true - Based on your system, restart the kubelet service and check status - systemctl daemon-reload - systemctl restart kubelet.service - systemctl status kubelet -l - scored: true \ No newline at end of file diff --git a/cfg/oke-1.5.0/policies.yaml b/cfg/oke-1.5.0/policies.yaml deleted file mode 100644 index e90cd877f..000000000 --- a/cfg/oke-1.5.0/policies.yaml +++ /dev/null @@ -1,287 +0,0 @@ ---- -controls: -version: rh-1.0 -id: 5 -text: "Kubernetes Policies" -type: "policies" -groups: - - id: 5.1 - text: "RBAC and Service Accounts" - checks: - - id: 5.1.1 - text: "Ensure that the cluster-admin role is only used where required (Manual)" - type: "manual" - audit: | - #To get a list of users and service accounts with the cluster-admin role - oc get clusterrolebindings -o=customcolumns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | - grep cluster-admin - #To verity that kbueadmin is removed, no results should be returned - oc get secrets kubeadmin -n kube-system - remediation: | - Identify all clusterrolebindings to the cluster-admin role. Check if they are used and - if they need this role or if they could use a role with fewer privileges. - Where possible, first bind users to a lower privileged role and then remove the - clusterrolebinding to the cluster-admin role : - kubectl delete clusterrolebinding [name] - scored: false - - - id: 5.1.2 - text: "Minimize access to secrets (Manual)" - type: "manual" - remediation: | - Where possible, remove get, list and watch access to secret objects in the cluster. - scored: false - - - id: 5.1.3 - text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" - type: "manual" - audit: | - #needs verification - oc get roles --all-namespaces -o yaml - for i in $(oc get roles -A -o jsonpath='{.items[*].metadata.name}'); do oc - describe clusterrole ${i}; done - #Retrieve the cluster roles defined in the cluster and review for wildcards - oc get clusterroles -o yaml - for i in $(oc get clusterroles -o jsonpath='{.items[*].metadata.name}'); do - oc describe clusterrole ${i}; done - remediation: | - Where possible replace any use of wildcards in clusterroles and roles with specific - objects or actions. - scored: false - - - id: 5.1.4 - text: "Minimize access to create pods (Manual)" - type: "manual" - remediation: | - Where possible, remove create access to pod objects in the cluster. - scored: false - - - id: 5.1.5 - text: "Ensure that default service accounts are not actively used. (Manual)" - type: "manual" - remediation: | - None required. - scored: false - - - id: 5.1.6 - text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" - type: "manual" - remediation: | - Modify the definition of pods and service accounts which do not need to mount service - account tokens to disable it. 
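For illustration, one minimal way to apply that remediation is to disable token automount on the service account itself (the names below are placeholders):
  kubectl patch serviceaccount app-sa -n app-team \
    -p '{"automountServiceAccountToken": false}'
  # or set automountServiceAccountToken: false directly in the Pod spec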
- scored: false - - - id: 5.2 - text: "Pod Security Policies" - checks: - - id: 5.2.1 - text: "Minimize the admission of privileged containers (Manual)" - audit: | - # needs verification - oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegedContainer:.allowPrivilegedContainer - tests: - test_items: - - flag: "false" - remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Allow - Privileged field is set to false. - scored: false - - - id: 5.2.2 - text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" - audit: | - oc get scc -o=custom-columns=NAME:.metadata.name,allowHostPID:.allowHostPID - tests: - test_items: - - flag: "false" - remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host - PID field is set to false. - scored: false - - - id: 5.2.3 - text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" - audit: | - oc get scc -o=custom-columns=NAME:.metadata.name,allowHostIPC:.allowHostIPC - tests: - test_items: - - flag: "false" - remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host - IPC field is set to false. - scored: false - - - id: 5.2.4 - text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" - audit: | - oc get scc -o=custom-columns=NAME:.metadata.name,allowHostNetwork:.allowHostNetwork - tests: - test_items: - - flag: "false" - remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host - Network field is omitted or set to false. - scored: false - - - id: 5.2.5 - text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" - audit: | - oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegeEscalation:.allowPrivilegeEscalation - tests: - test_items: - - flag: "false" - remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Allow - Privilege Escalation field is omitted or set to false. - scored: false - - - id: 5.2.6 - text: "Minimize the admission of root containers (Manual)" - audit: | - # needs verification # | awk 'NR>1 {gsub("map\\[type:", "", $2); gsub("\\]$", "", $2); print $1 ":" $2}' - oc get scc -o=custom-columns=NAME:.metadata.name,runAsUser:.runAsUser.type - #For SCCs with MustRunAs verify that the range of UIDs does not include 0 - oc get scc -o=custom-columns=NAME:.metadata.name,uidRangeMin:.runAsUser.uidRangeMin,uidRangeMax:.runAsUser.uidRangeMax - tests: - bin_op: or - test_items: - - flag: "MustRunAsNonRoot" - - flag: "MustRunAs" - compare: - op: nothave - value: 0 - remediation: | - None required. By default, OpenShift includes the non-root SCC with the the Run As User - Strategy is set to either MustRunAsNonRoot. If additional SCCs are appropriate, follow the - OpenShift documentation to create custom SCCs. - scored: false - - - id: 5.2.7 - text: "Minimize the admission of containers with the NET_RAW capability (Manual)" - audit: | - # needs verification - oc get scc -o=custom-columns=NAME:.metadata.name,requiredDropCapabilities:.requiredDropCapabilities - tests: - bin_op: or - test_items: - - flag: "ALL" - - flag: "NET_RAW" - remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Required - Drop Capabilities is set to include either NET_RAW or ALL. 
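As a sketch of that remediation, the relevant fragment of such an SCC could look like the following (not a complete SecurityContextConstraints object):
  requiredDropCapabilities:
  - NET_RAW   # or drop ALL capabilities instead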
- scored: false - - - id: 5.2.8 - text: "Minimize the admission of containers with added capabilities (Manual)" - type: "manual" - remediation: | - Ensure that Allowed Capabilities is set to an empty array for every SCC in the cluster - except for the privileged SCC. - scored: false - - - id: 5.2.9 - text: "Minimize the admission of containers with capabilities assigned (Manual)" - type: "manual" - remediation: | - Review the use of capabilites in applications running on your cluster. Where a namespace - contains applicaions which do not require any Linux capabities to operate consider - adding a SCC which forbids the admission of containers which do not drop all capabilities. - scored: false - - - id: 5.3 - text: "Network Policies and CNI" - checks: - - id: 5.3.1 - text: "Ensure that the CNI in use supports Network Policies (Manual)" - type: "manual" - remediation: | - None required. - scored: false - - - id: 5.3.2 - text: "Ensure that all Namespaces have Network Policies defined (Manual)" - type: "manual" - audit: | - #Run the following command and review the NetworkPolicy objects created in the cluster. - oc -n all get networkpolicy - remediation: | - Follow the documentation and create NetworkPolicy objects as you need them. - scored: false - - - id: 5.4 - text: "Secrets Management" - checks: - - id: 5.4.1 - text: "Prefer using secrets as files over secrets as environment variables (Manual)" - type: "manual" - audit: | - #Run the following command to find references to objects which use environment variables defined from secrets. - oc get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} - {.metadata.name} {"\n"}{end}' -A - remediation: | - If possible, rewrite application code to read secrets from mounted secret files, rather than - from environment variables. - scored: false - - - id: 5.4.2 - text: "Consider external secret storage (Manual)" - type: "manual" - remediation: | - Refer to the secrets management options offered by your cloud provider or a third-party - secrets management solution. - scored: false - - - id: 5.5 - text: "Extensible Admission Control" - checks: - - id: 5.5.1 - text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" - type: "manual" - remediation: | - Follow the OpenShift documentation: [Image configuration resources](https://docs.openshift.com/container-platform/4.5/openshift_images/image-configuration.html - scored: false - - - id: 5.7 - text: "General Policies" - checks: - - id: 5.7.1 - text: "Create administrative boundaries between resources using namespaces (Manual)" - type: "manual" - audit: | - #Run the following command and review the namespaces created in the cluster. - oc get namespaces - #Ensure that these namespaces are the ones you need and are adequately administered as per your requirements. - remediation: | - Follow the documentation and create namespaces for objects in your deployment as you need - them. - scored: false - - - id: 5.7.2 - text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)" - type: "manual" - remediation: | - To enable the default seccomp profile, use the reserved value /runtime/default that will - make sure that the pod uses the default policy available on the host. - scored: false - - - id: 5.7.3 - text: "Apply Security Context to Your Pods and Containers (Manual)" - type: "manual" - remediation: | - Follow the Kubernetes documentation and apply security contexts to your pods. 
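As a minimal illustration of such a context (values are examples only, adjust per workload), a restrictive container-level securityContext might look like:
  securityContext:
    runAsNonRoot: true
    allowPrivilegeEscalation: false
    capabilities:
      drop: ["ALL"]
    seccompProfile:
      type: RuntimeDefault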
For a - suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker - Containers. - scored: false - - - id: 5.7.4 - text: "The default namespace should not be used (Manual)" - type: "manual" - audit: | - #Run this command to list objects in default namespace - oc project default - oc get all - #The only entries there should be system managed resources such as the kubernetes and openshift service - remediation: | - Ensure that namespaces are created to allow for appropriate segregation of Kubernetes - resources and that all new resources are created in a specific namespace. - scored: false diff --git a/cfg/rh-1.4.0/config.yaml b/cfg/rh-1.4.0/config.yaml deleted file mode 100644 index b7839455a..000000000 --- a/cfg/rh-1.4.0/config.yaml +++ /dev/null @@ -1,2 +0,0 @@ ---- -## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/rh-1.4.0/controlplane.yaml b/cfg/rh-1.4.0/controlplane.yaml deleted file mode 100644 index 606194ddf..000000000 --- a/cfg/rh-1.4.0/controlplane.yaml +++ /dev/null @@ -1,62 +0,0 @@ ---- -controls: -version: rh-1.0 -id: 3 -text: "Control Plane Configuration" -type: "controlplane" -groups: - - id: 3.1 - text: "Authentication and Authorization" - checks: - - id: 3.1.1 - text: "Client certificate authentication should not be used for users (Manual)" - audit: | - # To verify user authentication is enabled - oc describe authentication - # To verify that an identity provider is configured - oc get identity - # To verify that a custom cluster-admin user exists - oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | grep cluster-admin | grep User - # To verity that kbueadmin is removed, no results should be returned - oc get secrets kubeadmin -n kube-system - type: manual - remediation: | - Configure an identity provider for the OpenShift cluster. - Understanding identity provider configuration | Authentication | OpenShift - Container Platform 4.5. Once an identity provider has been defined, - you can use RBAC to define and apply permissions. - After you define an identity provider and create a new cluster-admin user, - remove the kubeadmin user to improve cluster security. - scored: false - - - id: 3.2 - text: "Logging" - checks: - - id: 3.2.1 - text: "Ensure that a minimal audit policy is created (Manual)" - audit: | - #To view kube apiserver log files - oc adm node-logs --role=master --path=kube-apiserver/ - #To view openshift apiserver log files - oc adm node-logs --role=master --path=openshift-apiserver/ - #To verify kube apiserver audit config - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?' - #To verify openshift apiserver audit config - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?' - type: manual - remediation: | - No remediation required. - scored: false - - - id: 3.2.2 - text: "Ensure that the audit policy covers key security concerns (Manual)" - audit: | - #To verify openshift apiserver audit config - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' - #To verify kube apiserver audit config - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' 
- type: manual - remediation: | - In OpenShift 4.6 and higher, if appropriate for your needs, - modify the audit policy. - scored: false diff --git a/cfg/rh-1.4.0/etcd.yaml b/cfg/rh-1.4.0/etcd.yaml deleted file mode 100644 index 4398d9cc1..000000000 --- a/cfg/rh-1.4.0/etcd.yaml +++ /dev/null @@ -1,183 +0,0 @@ ---- -controls: -version: rh-1.0 -id: 2 -text: "Etcd Node Configuration" -type: "etcd" -groups: - - id: 2 - text: "Etcd Node Configuration Files" - checks: - - id: 2.1 - text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - # Get the pod name in the openshift-etcd namespace - POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." - else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/' - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/' - fi - use_multiple_values: true - tests: - test_items: - - flag: "file" - compare: - op: regex - value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-serving\/etcd-serving-.*\.(?:crt|key)' - remediation: | - OpenShift does not use the etcd-certfile or etcd-keyfile flags. - Certificates for etcd are managed by the etcd cluster operator. - scored: false - - - id: 2.2 - text: "Ensure that the --client-cert-auth argument is set to true (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - # Get the pod name in the openshift-etcd namespace - POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." - else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/' - fi - use_multiple_values: true - tests: - test_items: - - flag: "--client-cert-auth" - compare: - op: eq - value: true - remediation: | - This setting is managed by the cluster etcd operator. No remediation required." - scored: false - - - id: 2.3 - text: "Ensure that the --auto-tls argument is not set to true (Manual)" - audit: | - # Returns 0 if found, 1 if not found - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - # Get the pod name in the openshift-etcd namespace - POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." - else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$? - fi - use_multiple_values: true - tests: - test_items: - - flag: "exit_code" - compare: - op: eq - value: "1" - remediation: | - This setting is managed by the cluster etcd operator. No remediation required. 
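A side note on the exit_code pattern used by several of the etcd audits here: it relies on grep returning a non-zero status when nothing matches, so a hardened node prints exit_code=1. A standalone sketch:
  ps -o command= -C etcd | grep -- --auto-tls=true; echo exit_code=$?
  # prints exit_code=1 when --auto-tls=true is absent from the etcd command line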
- scored: false - - - id: 2.4 - text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - # Get the pod name in the openshift-etcd namespace - POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." - else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/' - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/' - fi - use_multiple_values: true - tests: - test_items: - - flag: "file" - compare: - op: regex - value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-peer\/etcd-peer-.*\.(?:crt|key)' - remediation: | - None. This configuration is managed by the etcd operator. - scored: false - - - id: 2.5 - text: "Ensure that the --peer-client-cert-auth argument is set to true (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - # Get the pod name in the openshift-etcd namespace - POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." - else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/' - fi - use_multiple_values: true - tests: - test_items: - - flag: "--peer-client-cert-auth" - compare: - op: eq - value: true - remediation: | - This setting is managed by the cluster etcd operator. No remediation required. - scored: false - - - id: 2.6 - text: "Ensure that the --peer-auto-tls argument is not set to true (Manual)" - audit: | - # Returns 0 if found, 1 if not found - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - # Get the pod name in the openshift-etcd namespace - POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." - else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$? - fi - use_multiple_values: true - tests: - test_items: - - flag: "exit_code" - compare: - op: eq - value: "1" - remediation: | - This setting is managed by the cluster etcd operator. No remediation required. - scored: false - - - id: 2.7 - text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - # Get the pod name in the openshift-etcd namespace - POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." 
- else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/' - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/' - fi - use_multiple_values: true - tests: - test_items: - - flag: "file" - compare: - op: regex - value: '\/etc\/kubernetes\/static-pod-certs\/configmaps\/etcd-(?:serving|peer-client)-ca\/ca-bundle\.(?:crt|key)' - remediation: | - None required. Certificates for etcd are managed by the OpenShift cluster etcd operator. - scored: false diff --git a/cfg/rh-1.4.0/master.yaml b/cfg/rh-1.4.0/master.yaml deleted file mode 100644 index 37b50f033..000000000 --- a/cfg/rh-1.4.0/master.yaml +++ /dev/null @@ -1,1445 +0,0 @@ ---- -controls: -version: rh-1.0 -id: 1 -text: "Master Node Security Configuration" -type: "master" -groups: - - id: 1.1 - text: "Master Node Configuration Files" - checks: - - id: 1.1.1 - text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-apiserver namespace - POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-kube-apiserver "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml - fi - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.2 - text: "Ensure that the API server pod specification file ownership is set to root:root (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-apiserver namespace - POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-kube-apiserver "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml - fi - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.3 - text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-controller-manager namespace - POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." 
- else - # Execute the stat command - oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml - fi - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.4 - text: "Ensure that the controller manager pod specification file ownership is set to root:root (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-controller-manager namespace - POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml - fi - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.5 - text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-scheduler namespace - POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml - fi - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.6 - text: "Ensure that the scheduler pod specification file ownership is set to root:root (Manual))" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-scheduler namespace - POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml - fi - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - No remediation required; file permissions are managed by the operator. 
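For readers less familiar with the stat format strings these audits rely on: %a prints the octal mode, %U:%G the owner and group, and %n the file name. For example (placeholder path):
  stat -c "%n %U:%G permissions=%a" /etc/kubernetes/manifests/example-pod.yaml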
- scored: false - - - id: 1.1.7 - text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Manual))" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-etcd namespace - POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc rsh -n openshift-etcd "$POD_NAME" stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/manifests/etcd-pod.yaml - fi - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.8 - text: "Ensure that the etcd pod specification file ownership is set to root:root (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-etcd namespace - POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc rsh -n openshift-etcd "$POD_NAME" stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/manifests/etcd-pod.yaml - fi - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.9 - text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - # For CNI multus - # Get the pod name in the openshift-multus namespace - POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/etc/cni/net.d/*.conf"; 2>/dev/null - oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/var/run/multus/cni/net.d/*.conf"; 2>/dev/null - fi - # For SDN pods - POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null - oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null - fi - - # For OVS pods - POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." 
- else - # Execute the stat command - oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null - oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null - oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null - fi - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.10 - text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - # For CNI multus - # Get the pod name in the openshift-multus namespace - POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c '$i %n %U:%G' /host/etc/cni/net.d/*.conf" 2>/dev/null - oc exec -n openshift-multus $i -- /bin/bash -c "stat -c '$i %n %U:%G' /host/var/run/multus/cni/net.d/*.conf" 2>/dev/null - fi - # For SDN pods - POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null - oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null - fi - # For OVS pods in 4.5 - POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null - oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null - oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null - fi - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.11 - text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-etcd namespace - POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." 
- else - # Execute the stat command - oc exec -n openshift-etcd "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /var/lib/etcd/member - fi - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "700" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.12 - text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-etcd namespace - POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-etcd "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /var/lib/etcd/member - fi - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.13 - text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Manual))" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubeconfig 2> /dev/null - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.14 - text: "Ensure that the admin.conf file ownership is set to root:root (Manual)" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubeconfig 2> /dev/null - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.15 - text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-scheduler namespace - POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig - fi - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - No remediation required; file permissions are managed by the operator. 
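A note on the permission tests in this file: the bitmask comparison in effect treats the benchmark value as an upper bound, so a mode that sets no bits beyond 644 (such as 600 or 640) also passes, while 664 would not. Tightening a file by hand is simply:
  chmod 600 /path/to/kubeconfig   # placeholder path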
- scored: false - - - id: 1.1.16 - text: "Ensure that the scheduler.conf file ownership is set to root:root (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-scheduler namespace - POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig - fi - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.17 - text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-controller-manager namespace - POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig - fi - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.18 - text: "Ensure that the controller-manager.conf file ownership is set to root:root (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-controller-manager namespace - POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig - fi - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - No remediation required; file permissions are managed by the operator. 
- scored: false - - - id: 1.1.19 - text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Manual)" - audit: | - # Should return root:root for all files and directories - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-controller-manager namespace - POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # echo $i static-pod-certs - oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; - oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; - # echo $i static-pod-resources - oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; - oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; - fi - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.20 - text: "Ensure that the OpenShift PKI certificate file permissions are set to 644 or more restrictive (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-apiserver namespace - POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.crt' -exec stat -c "$POD_NAME %n permissions=%a" {} \; - fi - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.21 - text: "Ensure that the OpenShift PKI key file permissions are set to 600 (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-apiserver namespace - POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." 
- else - # Execute the stat command - oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.key' -exec stat -c "$POD_NAME %n permissions=%a" {} \; - fi - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.2 - text: "API Server" - checks: - - id: 1.2.1 - text: "Ensure that anonymous requests are authorized (Manual)" - audit: | - # To verify that userGroups include system:unauthenticated - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' - # To verify that userGroups include system:unauthenticated - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?.userGroups' - # To verify RBAC is enabled - oc get clusterrolebinding - oc get clusterrole - oc get rolebinding - oc get role - tests: - test_items: - - flag: "system:unauthenticated" - remediation: | - None required. The default configuration should not be modified. - scored: false - - - id: 1.2.2 - text: "Ensure that the --basic-auth-file argument is not set (Manual)" - audit: | - oc -n openshift-kube-apiserver get cm config -o yaml | grep --color "basic-auth" - oc -n openshift-apiserver get cm config -o yaml | grep --color "basic-auth" - # Add | awk '$3 != "AVAILABLE" { if ($3){print "available=true"}else{print "available=false"} }; to create AVAILABLE = true/false form - oc get clusteroperator authentication | awk '$3 != "AVAILABLE" { if ($3){print "available=true"}else{print "available=false"} }' - tests: - bin_op: and - test_items: - - flag: "basic-auth-file" - set: false - - flag: "available" - compare: - op: eq - value: true - remediation: | - None required. --basic-auth-file cannot be configured on OpenShift. - scored: false - - - id: 1.2.3 - text: "Ensure that the --token-auth-file parameter is not set (Manual)" - audit: | - # Verify that the token-auth-file flag is not present - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments' - #Verify that the authentication operator is running - oc get clusteroperator authentication | awk '$3 != "AVAILABLE" { if ($3){print "available=true"}else{print "available=false"} }' - tests: - bin_op: and - test_items: - - flag: "token-auth-file" - set: false - - flag: "available" - compare: - op: eq - value: true - remediation: | - None is required. 
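For context on the awk filter used by these authentication checks: it skips the header row and emits available=true whenever the AVAILABLE column is non-empty. A self-contained illustration with synthetic input:
  printf 'NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE\nauthentication 4.13.0 True False False 30d\n' \
    | awk '$3 != "AVAILABLE" { if ($3){print "available=true"} else {print "available=false"} }'
  # prints available=true for the synthetic data row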
- scored: false - - - id: 1.2.4 - text: "Use https for kubelet connections (Manual)" - audit: | - #for 4.5 - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo' - #for 4.6 - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - #for both 4.5 and 4.6 - oc -n openshift-apiserver describe secret serving-cert - tests: - bin_op: and - test_items: - - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.crt" - - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.key" - remediation: | - No remediation is required. - OpenShift platform components use X.509 certificates for authentication. - OpenShift manages the CAs and certificates for platform components. This is not configurable. - scored: false - - - id: 1.2.5 - text: "Ensure that the kubelet uses certificates to authenticate (Manual)" - audit: | - #for 4.5 - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo' - #for 4.6 - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - #for both 4.5 and 4.6 - oc -n openshift-apiserver describe secret serving-cert - tests: - bin_op: and - test_items: - - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.crt" - - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.key" - remediation: | - No remediation is required. - OpenShift platform components use X.509 certificates for authentication. - OpenShift manages the CAs and certificates for platform components. - This is not configurable. - scored: false - - - id: 1.2.6 - text: "Verify that the kubelet certificate authority is set as appropriate (Manual)" - audit: | - # for 4.5 - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo' - # for 4.6 - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - tests: - test_items: - - flag: "/etc/kubernetes/static-pod-resources/configmaps/kubelet-serving-ca/ca-bundle.crt" - remediation: | - No remediation is required. - OpenShift platform components use X.509 certificates for authentication. - OpenShift manages the CAs and certificates for platform components. - This is not configurable. - scored: false - - - id: 1.2.7 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Manual)" - audit: | - # To verify that the authorization-mode argument is not used - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - # To verify RBAC is configured: - oc get clusterrolebinding - oc get clusterrole - oc get rolebinding - oc get role - audit_config: | - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - tests: - bin_op: or - test_items: - - path: "{.authorization-mode}" - compare: - op: nothave - value: "AlwaysAllow" - - path: "{.authorization-mode}" - flag: "authorization-mode" - set: false - remediation: | - None. 
RBAC is always on and the OpenShift API server does not use the values assigned to the flag authorization-mode. - scored: false - - - id: 1.2.8 - text: "Verify that the Node authorizer is enabled (Manual)" - audit: | - # For OCP 4.5 and earlier verify that authorization-mode is not used - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments' - # For OCP 4.5 and earlier verify that authorization-mode is not used - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host cat /etc/kubernetes/kubelet.conf | grep authorization-mode 2> /dev/null - oc debug node/$NODE_NAME -- chroot /host ps -aux | grep kubelet | grep authorization-mode 2> /dev/null - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - audit_config: | - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - tests: - bin_op: or - test_items: - - path: "{.authorization-mode}" - compare: - op: has - value: "Node" - - path: "{.authorization-mode}" - flag: "authorization-mode" - set: false - remediation: | - No remediation is required. - scored: false - - - id: 1.2.9 - text: "Verify that RBAC is enabled (Manual)" - audit: | - # For 4.5 To verify that the authorization-mode argument is not used - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - # To verify RBAC is used - oc get clusterrolebinding - oc get clusterrole - oc get rolebinding - oc get role - # For 4.6, verify that the authorization-mode argument includes RBAC - audit_config: | - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - tests: - bin_op: or - test_items: - - path: "{.authorization-mode}" - compare: - op: has - value: "RBAC" - - path: "{.authorization-mode}" - flag: "authorization-mode" - set: false - remediation: | - None. It is not possible to disable RBAC. 
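To make the Node/RBAC authorizer checks above concrete, here is a minimal sketch that reads the observed apiserver arguments with the same `oc`/`jq` calls used in the audits; it assumes an authenticated `oc` session, and an absent argument simply means the platform defaults are in effect.

```bash
#!/usr/bin/env bash
# Sketch: report whether the observed authorization-mode list includes
# Node and RBAC (checks 1.2.8 and 1.2.9).
set -euo pipefail

modes=$(oc get kubeapiservers.operator.openshift.io cluster -o json \
  | jq -r '.spec.observedConfig.apiServerArguments["authorization-mode"] // empty | .[]?')

if [ -z "$modes" ]; then
  echo "authorization-mode not set explicitly (platform defaults in effect)"
else
  for want in Node RBAC; do
    echo "$modes" | grep -qx "$want" \
      && echo "PASS: authorization-mode includes $want" \
      || echo "WARN: authorization-mode does not list $want"
  done
fi
```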
- scored: false - - - id: 1.2.10 - text: "Ensure that the APIPriorityAndFairness feature gate is enabled (Manual)" - audit: | - #Verify the APIPriorityAndFairness feature-gate - oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments' - #Verify the set of admission-plugins for OCP 4.6 and higher - oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - tests: - bin_op: and - test_items: - - flag: "APIPriorityAndFairness=true" - - flag: "EventRateLimit" - set: false - remediation: | - No remediation is required - scored: false - - - id: 1.2.11 - text: "Ensure that the admission control plugin AlwaysAdmit is not set (Manual)" - audit: | - #Verify the set of admission-plugins for OCP 4.6 and higher - oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - tests: - test_items: - - flag: "AlwaysAdmit" - set: false - remediation: | - No remediation is required. The AlwaysAdmit admission controller cannot be enabled in OpenShift. - scored: false - - - id: 1.2.12 - text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" - audit: | - #Verify the set of admissi on-plugins for OCP 4.6 and higher - oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - tests: - test_items: - - flag: "AlwaysPullImages" - set: false - remediation: | - None required. - scored: false - - - id: 1.2.13 - text: "Ensure that the admission control plugin SecurityContextDeny is not set (Manual)" - audit: | - #Verify the set of admission-plugins for OCP 4.6 and higher - oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') - [ "$output" == "null" ] && echo "ocp 4.5 has SecurityContextDeny and SecurityContextConstraint compiled" || echo $output - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - #Verify that SecurityContextConstraints are deployed - oc get scc - oc describe scc restricted - tests: - bin_op: and - test_items: - - flag: "SecurityContextConstraint" - set: true - - flag: "anyuid" - - flag: "hostaccess" - - flag: "hostmount-anyuid" - - flag: "hostnetwork" - - flag: "node-exporter" - - flag: "nonroot" - - flag: "privileged" - - flag: "restricted" - remediation: | - None required. The Security Context Constraint admission controller cannot be disabled in OpenShift 4. 
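The admission-plugin checks above all start from the same configmap query. A compact sketch of that query follows; it assumes an authenticated `oc` session, and a null result means the plugin set is compiled in with no override, as the audits note for 4.5.

```bash
#!/usr/bin/env bash
# Sketch: list explicitly enabled admission plugins (OCP 4.6+) and flag the
# ones checks 1.2.11 and 1.2.12 look for.
set -euo pipefail

plugins=$(oc -n openshift-kube-apiserver get configmap config -o json \
  | jq -r '.data."config.yaml"' \
  | jq -r '.apiServerArguments."enable-admission-plugins" // empty | .[]?')

if [ -z "$plugins" ]; then
  echo "enable-admission-plugins not present (compiled-in defaults)"
else
  echo "$plugins"
  for bad in AlwaysAdmit AlwaysPullImages; do
    echo "$plugins" | grep -qx "$bad" && echo "WARN: $bad is enabled"
  done
fi
```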
- scored: false - - - id: 1.2.14 - text: "Ensure that the admission control plugin ServiceAccount is set (Manual)" - audit: | - #Verify the list of admission controllers for 4.6 and higher - oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') - [ "$output" == "null" ] && echo "ocp 4.5 has ServiceAccount compiled" || echo $output - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - #Verify that Service Accounts are present - oc get sa -A - tests: - test_items: - - flag: "ServiceAccount" - set: true - remediation: | - None required. OpenShift is configured to use service accounts by default. - scored: false - - - id: 1.2.15 - text: "Ensure that the admission control plugin NamespaceLifecycle is set (Manual)" - audit: | - #Verify the list of admission controllers for 4.6 and higher - oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') - [ "$output" == "null" ] && echo "ocp 4.5 has NamespaceLifecycle compiled" || echo $output - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - tests: - test_items: - - flag: "NamespaceLifecycle" - remediation: | - Ensure that the --disable-admission-plugins parameter does not include NamespaceLifecycle. - scored: false - - - id: 1.2.16 - text: "Ensure that the admission control plugin SecurityContextConstraint is set (Manual)" - audit: | - #Verify the set of admission-plugins for OCP 4.6 and higher - oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') - [ "$output" == "null" ] && echo "ocp 4.5 has SecurityContextConstraint compiled" || echo $output - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - #Verify that SecurityContextConstraints are deployed - oc get scc - oc describe scc restricted - tests: - bin_op: and - test_items: - - flag: "SecurityContextConstraint" - - flag: "anyuid" - - flag: "hostaccess" - - flag: "hostmount-anyuid" - - flag: "hostnetwork" - - flag: "node-exporter" - - flag: "nonroot" - - flag: "privileged" - - flag: "restricted" - remediation: | - None required. Security Context Constraints are enabled by default in OpenShift and cannot be disabled. 
- scored: false - - - id: 1.2.17 - text: "Ensure that the admission control plugin NodeRestriction is set (Manual)" - audit: | - # For 4.5, review the control plane manifest https://github.com/openshift/origin/blob/release-4.5/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane/manifests.go#L132 - #Verify the set of admission-plugins for OCP 4.6 and higher - oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') - [ "$output" == "null" ] && echo "ocp 4.5 has NodeRestriction compiled" || echo $output - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - tests: - test_items: - - flag: "NodeRestriction" - remediation: | - The NodeRestriction plugin cannot be disabled. - scored: false - - - id: 1.2.18 - text: "Ensure that the --insecure-bind-address argument is not set (Manual)" - audit: | - # InsecureBindAddress=true should not be in the results - oc get kubeapiservers.operator.openshift.io cluster -o jsonpath='{range .spec.observedConfig.apiServerArguments.feature-gates[*]}{@}{"\n"}{end}' - # Result should be only 6443 - oc -n openshift-kube-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}' - # Result should be only 8443 - oc -n openshift-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}' - tests: - bin_op: and - test_items: - - flag: "insecure-bind-address" - set: false - - flag: 6443 - - flag: 8443 - remediation: | - None required. - scored: false - - - id: 1.2.19 - text: "Ensure that the --insecure-port argument is set to 0 (Manual)" - audit: | - # Should return 6443 - oc -n openshift-kube-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}' - # For OCP 4.6 and above - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments["insecure-port"]' - output=$(oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments["insecure-port"]') - [ "$output" == "null" ] && echo "ocp 4.5 has insecure-port set to \"0\" compiled" || echo $output - tests: - bin_op: and - test_items: - - flag: "\"0\"" - - flag: "6443" - remediation: | - None required. The configuration is managed by the API server operator. - scored: false - - - id: 1.2.20 - text: "Ensure that the --secure-port argument is not set to 0 (Manual)" - audit: | - oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig' - # Should return only 6443 - echo ports=`oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[*].spec.containers[?(@.name=="kube-apiserver")].ports[*].containerPort}'` - tests: - bin_op: and - test_items: - - flag: '"bindAddress": "0.0.0.0:6443"' - - flag: "ports" - compare: - op: regex - value: '\s*(?:6443\s*){1,}$' - remediation: | - None required. 
- scored: false - - - id: 1.2.21 - text: "Ensure that the healthz endpoint is protected by RBAC (Manual)" - type: manual - audit: | - # Verify endpoints - oc -n openshift-kube-apiserver describe endpoints - # Check config for ports, livenessProbe, readinessProbe, healthz - oc -n openshift-kube-apiserver get cm kube-apiserver-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers' - # Test to validate RBAC enabled on the apiserver endpoint; check with non-admin role - oc project openshift-kube-apiserver POD=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}') PORT=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].spec.containers[0].ports[0].hostPort}') - # Following should return 403 Forbidden - oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -k - # Create a service account to test RBAC - oc create -n openshift-kube-apiserver sa permission-test-sa - # Should return 403 Forbidden - SA_TOKEN=$(oc sa -n openshift-kube-apiserver get-token permission-test-sa) - oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k - # Cleanup - oc delete -n openshift-kube-apiserver sa permission-test-sa - # As cluster admin, should succeed - CLUSTER_ADMIN_TOKEN=$(oc whoami -t) - oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k - remediation: | - None required as profiling data is protected by RBAC. - scored: false - - - id: 1.2.22 - text: "Ensure that the --audit-log-path argument is set (Manual)" - audit: | - # Should return “/var/log/kube-apiserver/audit.log" - output=$(oc get configmap config -n openshift-kube-apiserver -o jsonpath="{['.data.config\.yaml']}" | jq '.auditConfig.auditFilePath') - [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true - output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-path"][]?') - [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true - POD=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}') - oc rsh -n openshift-kube-apiserver -c kube-apiserver $POD ls /var/log/kube-apiserver/audit.log 2>/dev/null - # Should return 0 - echo exit_code=$? - # Should return "/var/log/openshift-apiserver/audit.log" - output=$(oc get configmap config -n openshift-apiserver -o jsonpath="{['.data.config\.yaml']}" | jq '.auditConfig.auditFilePath') - [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true - output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-path"][]?') - [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true - POD=$(oc get pods -n openshift-apiserver -l apiserver=true -o jsonpath='{.items[0].metadata.name}') - oc rsh -n openshift-apiserver $POD ls /var/log/openshift-apiserver/audit.log 2>/dev/null - # Should return 0 - echo exit_code=$? - use_multiple_values: true - tests: - bin_op: or - test_items: - - flag: "/var/log/kube-apiserver/audit.log" - - flag: "/var/log/openshift-apiserver/audit.log" - - flag: "exit_code=0" - - flag: "null" - remediation: | - None required. This is managed by the cluster apiserver operator. 
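A short sketch of the audit-log-path verification follows, reduced to the file-existence step; it assumes cluster-admin access for `oc rsh` into the apiserver pods, as the audit above already requires.

```bash
#!/usr/bin/env bash
# Sketch: confirm the audit log files referenced in check 1.2.22 exist
# inside the running apiserver pods.
set -euo pipefail

POD=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver \
  -o jsonpath='{.items[0].metadata.name}')
oc rsh -n openshift-kube-apiserver -c kube-apiserver "$POD" \
  ls -l /var/log/kube-apiserver/audit.log

POD=$(oc get pods -n openshift-apiserver -l apiserver=true \
  -o jsonpath='{.items[0].metadata.name}')
oc rsh -n openshift-apiserver "$POD" \
  ls -l /var/log/openshift-apiserver/audit.log
```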
- scored: false - - - id: 1.2.23 - text: "Ensure that the audit logs are forwarded off the cluster for retention (Manual)" - type: "manual" - remediation: | - Follow the documentation for log forwarding. Forwarding logs to third party systems - https://docs.openshift.com/container-platform/4.5/logging/cluster-logging-external.html - scored: false - - - id: 1.2.24 - text: "Ensure that the maximumRetainedFiles argument is set to 10 or as appropriate (Manual)" - audit: | - #NOTICE - output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumRetainedFiles) - [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumRetainedFiles=$output" || true - output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumRetainedFiles) - [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumRetainedFiles=$output" || true - output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxbackup"][]?') - [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxbackup=$output" || true - output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxbackup"][]?') - [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxbackup=$output" || true - use_multiple_values: true - tests: - bin_op: or - test_items: - - flag: "maximumRetainedFiles" - compare: - op: gte - value: 10 - - flag: "audit-log-maxbackup" - compare: - op: gte - value: 10 - remediation: | - Set the maximumRetainedFiles parameter to 10 or as an appropriate number of files. maximumRetainedFiles: 10 - scored: false - - - id: 1.2.25 - text: "Ensure that the maximumFileSizeMegabytes argument is set to 100 or as appropriate (Manual)" - audit: | - #NOTICE - output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumFileSizeMegabytes) - [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumFileSizeMegabytes=$output" || true - output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumFileSizeMegabytes) - [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumFileSizeMegabytes=$output" || true - output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxsize"][]?') - [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxsize=$output" || true - output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxsize"][]?') - [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxsize=$output" || true - use_multiple_values: true - tests: - bin_op: or - test_items: - - flag: "maximumFileSizeMegabytes" - compare: - op: gte - value: 100 - - flag: "audit-log-maxsize" - compare: - op: gte - value: 100 - remediation: | - Set the audit-log-maxsize parameter to 100 or as an appropriate number. 
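For the retention checks above, a minimal sketch that extracts both arguments in one pass is shown below; it assumes OCP 4.6 or later, where the values live under apiServerArguments, and an authenticated `oc` session.

```bash
#!/usr/bin/env bash
# Sketch: pull the audit log retention arguments and compare them against
# the thresholds used in checks 1.2.24 and 1.2.25.
set -euo pipefail

cfg=$(oc get configmap config -n openshift-kube-apiserver -o json \
  | jq -r '.data["config.yaml"]')

maxbackup=$(echo "$cfg" | jq -r '.apiServerArguments["audit-log-maxbackup"][0]? // "unset"')
maxsize=$(echo "$cfg" | jq -r '.apiServerArguments["audit-log-maxsize"][0]? // "unset"')

echo "audit-log-maxbackup=$maxbackup  (expect >= 10)"
echo "audit-log-maxsize=$maxsize      (expect >= 100)"
```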
- maximumFileSizeMegabytes: 100 - scored: false - - - id: 1.2.26 - text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" - audit: | - echo requestTimeoutSeconds=`oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.requestTimeoutSeconds` - tests: - test_items: - - flag: "requestTimeoutSeconds" - remediation: | - TBD - scored: false - - - id: 1.2.27 - text: "Ensure that the --service-account-lookup argument is set to true (Manual)" - audit: | - # For OCP 4.5 - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' | grep service-account-lookup - # For OCP 4.6 and above - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["service-account-lookup"]' - output=$(oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["service-account-lookup"][0]') - [ "$output" == "null" ] && echo "ocp 4.5 has service-account-lookup=true compiled" || echo service-account-lookup=$output - tests: - test_items: - - flag: "service-account-lookup=true" - remediation: | - TBD - scored: false - - - id: 1.2.28 - text: "Ensure that the --service-account-key-file argument is set as appropriate (Manual)" - audit: | - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .serviceAccountPublicKeyFiles[] - tests: - bin_op: and - test_items: - - flag: "/etc/kubernetes/static-pod-resources/configmaps/sa-token-signing-certs" - - flag: "/etc/kubernetes/static-pod-resources/configmaps/bound-sa-token-signing-certs" - remediation: | - The OpenShift API server does not use the service-account-key-file argument. - The ServiceAccount token authenticator is configured with serviceAccountConfig.publicKeyFiles. - OpenShift does not reuse the apiserver TLS key. This is not configurable. - scored: false - - - id: 1.2.29 - text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Manual)" - audit: | - # etcd Certificate File - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.certFile - # etcd Key File - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.keyFile - # NOTICE 4.6 extention - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-certfile"]' - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-keyfile"]' - tests: - bin_op: and - test_items: - - flag: "/etc/kubernetes/static-pod-resources/secrets/etcd-client/tls.crt" - - flag: "/etc/kubernetes/static-pod-resources/secrets/etcd-client/tls.key" - remediation: | - OpenShift automatically manages TLS and client certificate authentication for etcd. - This is not configurable. 
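The etcd client certificate check above queries two places depending on release. A combined sketch follows, assuming an authenticated `oc` session; "unset" simply means that form is not used on the release being audited.

```bash
#!/usr/bin/env bash
# Sketch: print the etcd client certificate and key paths the kube-apiserver
# uses, covering both the storageConfig form and the 4.6+ argument form
# (check 1.2.29).
set -euo pipefail

cfg=$(oc get configmap config -n openshift-kube-apiserver -o json \
  | jq -r '.data["config.yaml"]')

echo "storageConfig.certFile: $(echo "$cfg" | jq -r '.storageConfig.certFile // "unset"')"
echo "storageConfig.keyFile:  $(echo "$cfg" | jq -r '.storageConfig.keyFile // "unset"')"
echo "etcd-certfile:          $(echo "$cfg" | jq -r '.apiServerArguments["etcd-certfile"][0]? // "unset"')"
echo "etcd-keyfile:           $(echo "$cfg" | jq -r '.apiServerArguments["etcd-keyfile"][0]? // "unset"')"
```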
- scored: false - - - id: 1.2.30 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" - audit: | - # TLS Cert File - openshift-kube-apiserver - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.certFile - # TLS Key File - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.servingInfo.keyFile' - # NOTECI 4.6 extention - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["tls-cert-file"]' - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["tls-private-key-file"]' - tests: - bin_op: and - test_items: - - flag: "/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.crt" - - flag: "/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.key" - remediation: | - OpenShift automatically manages TLS authentication for the API server communication with the node/kublet. - This is not configurable. You may optionally set a custom default certificate to be used by the API server - when serving content in order to enable clients to access the API server at a different host name or without - the need to distribute the cluster-managed certificate authority (CA) certificates to the clients. - Follow the directions in the OpenShift documentation User-provided certificates for the API server - scored: false - - - id: 1.2.31 - text: "Ensure that the --client-ca-file argument is set as appropriate (Manual)" - audit: | - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.clientCA - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["client-ca-file"]' - tests: - test_items: - - flag: "/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt" - remediation: | - OpenShift automatically manages TLS authentication for the API server communication with the node/kublet. - This is not configurable. You may optionally set a custom default certificate to be used by the API - server when serving content in order to enable clients to access the API server at a different host name - or without the need to distribute the cluster-managed certificate authority (CA) certificates to the clients. - - User-provided certificates must be provided in a kubernetes.io/tls type Secret in the openshift-config namespace. - Update the API server cluster configuration, - the apiserver/cluster resource, to enable the use of the user-provided certificate. - scored: false - - - id: 1.2.32 - text: "Ensure that the --etcd-cafile argument is set as appropriate (Manual)" - audit: | - #etcd CA File - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.ca - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-cafile"]' - tests: - test_items: - - flag: "/etc/kubernetes/static-pod-resources/configmaps/etcd-serving-ca/ca-bundle.crt" - remediation: | - None required. OpenShift generates the etcd-cafile and sets the arguments appropriately in the API server. Communication with etcd is secured by the etcd serving CA. 
- scored: false - - - id: 1.2.33 - text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" - audit: | - # encrypt the etcd datastore - oc get openshiftapiserver -o=jsonpath='{range.items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}' - tests: - test_items: - - flag: "EncryptionCompleted" - remediation: | - Follow the OpenShift documentation for Encrypting etcd data | Authentication | OpenShift Container Platform 4.5 - https://docs.openshift.com/container-platform/4.5/security/encrypting-etcd.html - scored: false - - - id: 1.2.34 - text: "Ensure that encryption providers are appropriately configured (Manual)" - audit: | - # encrypt the etcd datastore - oc get openshiftapiserver -o=jsonpath='{range.items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}' - tests: - test_items: - - flag: "EncryptionCompleted" - remediation: | - Follow the Kubernetes documentation and configure a EncryptionConfig file. - In this file, choose aescbc, kms or secretbox as the encryption provider. - scored: false - - - id: 1.2.35 - text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" - type: manual - audit: | - # verify cipher suites - oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo - oc get kubeapiservers.operator.openshift.io cluster -o json |jq.spec.observedConfig.servingInfo - oc get openshiftapiservers.operator.openshift.io cluster -o json |jq.spec.observedConfig.servingInfo - oc describe --namespace=openshift-ingress-operator ingresscontroller/default - remediation: | - Verify that the tlsSecurityProfile is set to the value you chose. - Note: The HAProxy Ingress controller image does not support TLS 1.3 - and because the Modern profile requires TLS 1.3, it is not supported. - The Ingress Operator converts the Modern profile to Intermediate. - The Ingress Operator also converts the TLS 1.0 of an Old or Custom profile to 1.1, - and TLS 1.3 of a Custom profile to 1.2. 
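For the encryption and cipher checks above, a minimal sketch is given below. It assumes an authenticated `oc` session; reading `.spec.tlsSecurityProfile` on the default IngressController is an assumption of where the profile is exposed, and an empty result means the platform default profile is in use.

```bash
#!/usr/bin/env bash
# Sketch: report the etcd encryption status (checks 1.2.33/1.2.34) and the
# ingress TLS security profile (check 1.2.35).
set -euo pipefail

oc get openshiftapiserver \
  -o=jsonpath='{range .items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}{end}'

oc get ingresscontroller default -n openshift-ingress-operator \
  -o jsonpath='{.spec.tlsSecurityProfile}'
echo
```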
- scored: false - - - id: 1.3 - text: "Controller Manager" - checks: - - id: 1.3.1 - text: "Ensure that garbage collection is configured as appropriate (Manual)" - type: manual - remediation: | - To configure, follow the directions in Configuring garbage collection for containers and images - https://docs.openshift.com/container-platform/4.5/nodes/nodes/nodes-nodes-garbage-collection.html#nodes-nodes-garbage-collection-configuring_nodes-nodes-configuring - scored: false - - - id: 1.3.2 - text: "Ensure that controller manager healthz endpoints are protected by RBAC (Manual)" - type: manual - audit: | - # Verify configuration for ports, livenessProbe, readinessProbe, healthz - oc -n openshift-kube-controller-manager get cm kube-controller-manager-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers' - # Verify endpoints - oc -n openshift-kube-controller-manager describe endpoints - # Test to validate RBAC enabled on the controller endpoint; check with non-admin role - oc project openshift-kube-controller-manage - POD=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].metadata.name}') - PORT=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].spec.containers[0].ports[0].hostPort}') - # Following should return 403 Forbidden - oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -k - # Create a service account to test RBAC - oc create -n openshift-kube-controller-manager sa permission-test-sa - # Should return 403 Forbidden - SA_TOKEN=$(oc sa -n openshift-kube-controller-manager get-token permission-test-sa) - oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k - # Cleanup - oc delete -n openshift-kube-controller-manager sa permission-test-sa - # As cluster admin, should succeed - CLUSTER_ADMIN_TOKEN=$(oc whoami -t) - oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k - remediation: | - None required; profiling is protected by RBAC. - scored: false - - - id: 1.3.3 - text: "Ensure that the --use-service-account-credentials argument is set to true (Manual)" - audit: | - echo use-service-account-credentials=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["use-service-account-credentials"][]'` - tests: - test_items: - - flag: "use-service-account-credentials" - compare: - op: eq - value: true - remediation: | - The OpenShift Controller Manager operator manages and updates the OpenShift Controller Manager. - The Kubernetes Controller Manager operator manages and updates the Kubernetes Controller Manager deployed on top of OpenShift. - This operator is configured via KubeControllerManager custom resource. - scored: false - - - id: 1.3.4 - text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Manual)" - audit: | - oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["service-account-private-key-file"][]' - tests: - test_items: - - flag: "/etc/kubernetes/static-pod-resources/secrets/service-account-private-key/service-account.key" - remediation: | - None required. - OpenShift manages the service account credentials for the scheduler automatically. 
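The controller-manager checks in this group all read extendedArguments from the same configmap, so a single sketch can cover them; it assumes an authenticated `oc` session and simply prints each argument the checks inspect.

```bash
#!/usr/bin/env bash
# Sketch: dump the kube-controller-manager extendedArguments inspected by
# checks 1.3.3 through 1.3.6 in one pass.
set -euo pipefail

cfg=$(oc get configmaps config -n openshift-kube-controller-manager -o json \
  | jq -r '.data["config.yaml"]')

for arg in use-service-account-credentials service-account-private-key-file root-ca-file feature-gates; do
  echo "$arg:"
  echo "$cfg" | jq -r --arg a "$arg" '.extendedArguments[$a][]? // "unset"'
done
```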
- scored: false - - - id: 1.3.5 - text: "Ensure that the --root-ca-file argument is set as appropriate (Manual)" - audit: | - oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["root-ca-file"][]' - tests: - test_items: - - flag: "/etc/kubernetes/static-pod-resources/configmaps/serviceaccount-ca/ca-bundle.crt" - remediation: | - None required. - Certificates for OpenShift platform components are automatically created and rotated by the OpenShift Container Platform. - scored: false - - - id: 1.3.6 - text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Manual)" - audit: | - oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["feature-gates"][]' - tests: - test_items: - - flag: "RotateKubeletServerCertificate" - compare: - op: eq - value: "true" - remediation: | - None required. - Certificates for OpenShift platform components are automatically created and rotated by the OpenShift Container Platform. - scored: false - - - id: 1.3.7 - text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Manual)" - audit: | - echo port=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq '.extendedArguments["port"][]'` - echo secure-port=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq '.extendedArguments["secure-port"][]'` - #Following should fail with a http code 403 - POD=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].metadata.name}') - oc rsh -n openshift-kube-controller-manager -c kube-controller-manager $POD curl https://localhost:10257/metrics -k - tests: - bin_op: and - test_items: - - flag: "secure-port" - compare: - op: eq - value: "\"10257\"" - - flag: "port" - compare: - op: eq - value: "\"0\"" - - flag: "\"code\": 403" - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the master node and ensure the correct value for the --bind-address parameter - scored: false - - - id: 1.4 - text: "Scheduler" - checks: - - id: 1.4.1 - text: "Ensure that the healthz endpoints for the scheduler are protected by RBAC (Manual)" - type: manual - audit: | - # check configuration for ports, livenessProbe, readinessProbe, healthz - oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers' - # Test to verify endpoints - oc -n openshift-kube-scheduler describe endpoints - # Test to validate RBAC enabled on the scheduler endpoint; check with non-admin role - oc project openshift-kube-scheduler - POD=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}') - PORT=$(oc get pod $POD -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}') - # Should return 403 Forbidden - oc rsh ${POD} curl http://localhost:${PORT}/metrics -k - # Create a service account to test RBAC - oc create sa permission-test-sa - # Should return 403 Forbidden - SA_TOKEN=$(oc sa get-token permission-test-sa) - oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k - # Cleanup - oc delete sa permission-test-sa - # As cluster admin, should succeed - CLUSTER_ADMIN_TOKEN=$(oc whoami -t) - oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k - remediation: | - A fix to this issue: 
https://bugzilla.redhat.com/show_bug.cgi?id=1889488 None required. - Profiling is protected by RBAC and cannot be disabled. - scored: false - - - id: 1.4.2 - text: "Verify that the scheduler API service is protected by authentication and authorization (Manual)" - type: manual - audit: | - # To verify endpoints - oc -n openshift-kube-scheduler describe endpoints - # To verify that bind-adress is not used in the configuration and that port is set to 0 - oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers' - # To test for RBAC: - oc project openshift-kube-scheduler - POD=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}') - POD_IP=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].status.podIP}') - PORT=$(oc get pod $POD -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}') - # Should return a 403 - oc rsh ${POD} curl http://${POD_IP}:${PORT}/metrics - # Create a service account to test RBAC - oc create sa permission-test-sa - # Should return 403 Forbidden - SA_TOKEN=$(oc sa get-token permission-test-sa) - oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k - # Cleanup - oc delete sa permission-test-sa - # As cluster admin, should succeed - CLUSTER_ADMIN_TOKEN=$(oc whoami -t) - oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k - remediation: | - By default, the --bind-address argument is not present, - the readinessProbe and livenessProbe arguments are set to 10251 and the port argument is set to 0. - Check the status of this issue: https://bugzilla.redhat.com/show_bug.cgi?id=1889488 - scored: false diff --git a/cfg/rh-1.4.0/node.yaml b/cfg/rh-1.4.0/node.yaml deleted file mode 100644 index fb982d6f3..000000000 --- a/cfg/rh-1.4.0/node.yaml +++ /dev/null @@ -1,429 +0,0 @@ ---- -controls: -version: rh-1.0 -id: 4 -text: "Worker Node Security Configuration" -type: "node" -groups: - - id: 4.1 - text: "Worker Node Configuration Files" - checks: - - id: 4.1.1 - text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/systemd/system/kubelet.service 2> /dev/null - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - By default, the kubelet service file has permissions of 644. - scored: true - - - id: 4.1.2 - text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" - audit: | - # Should return root:root for each node - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/systemd/system/kubelet.service 2> /dev/null - tests: - test_items: - - flag: root:root - remediation: | - By default, the kubelet service file has ownership of root:root. 
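The worker-node file checks that follow all rely on the same `oc debug node` pattern. A minimal sketch of that pattern is shown below; like the audits themselves, it assumes it runs inside a pod (so `$HOSTNAME` resolves to the pod name) and that `oc debug` against the node is permitted.

```bash
#!/usr/bin/env bash
# Sketch: reproduce checks 4.1.1/4.1.2 by hand for the node this pod is
# scheduled on: kubelet.service should be 644 or tighter and owned root:root.
set -euo pipefail

NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')

oc debug node/"$NODE_NAME" -- chroot /host \
  stat -c "$NODE_NAME %n permissions=%a owner=%U:%G" /etc/systemd/system/kubelet.service
```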
- scored: true - - - id: 4.1.3 - text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - # Get the pod name in the openshift-sdn namespace - POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-sdn "$POD_NAME" - stat -Lc "$i %n permissions=%a" /config/kube-proxy-config.yaml 2>/dev/null - fi - tests: - bin_op: or - test_items: - - flag: "permissions" - set: true - compare: - op: bitmask - value: "644" - remediation: | - None needed. - scored: false - - - id: 4.1.4 - text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - # Get the pod name in the openshift-sdn namespace - POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$i %n %U:%G" /config/kube-proxy-config.yaml 2>/dev/null - fi - use_multiple_values: true - tests: - bin_op: or - test_items: - - flag: root:root - remediation: | - None required. The configuration is managed by OpenShift operators. - scored: false - - - id: 4.1.5 - text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Manual)" - audit: | - # Check permissions - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet.conf 2> /dev/null - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - None required. - scored: false - - - id: 4.1.6 - text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Manual)" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet.conf 2> /dev/null - use_multiple_values: true - tests: - test_items: - - flag: root:root - remediation: | - None required. - scored: false - - - id: 4.1.7 - text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated)" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet-ca.crt 2> /dev/null - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - None required. 
- scored: true - - - id: 4.1.8 - text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet-ca.crt 2> /dev/null - use_multiple_values: true - tests: - test_items: - - flag: root:root - remediation: | - None required. - scored: true - - - id: 4.1.9 - text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/lib/kubelet/kubeconfig 2> /dev/null - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - None required. - scored: true - - - id: 4.1.10 - text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /var/lib/kubelet/kubeconfig 2> /dev/null - use_multiple_values: true - tests: - test_items: - - flag: root:root - remediation: | - None required. - scored: true - - - id: 4.2 - text: "Kubelet" - checks: - - id: 4.2.1 - text: "Ensure that the --anonymous-auth argument is set to false (Automated)" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host grep -B4 -A1 anonymous /etc/kubernetes/kubelet.conf 2> /dev/null - use_multiple_values: true - tests: - test_items: - - flag: "enabled: true" - set: false - remediation: | - Follow the instructions in the documentation to create a Kubelet config CRD - and set the anonymous-auth is set to false. - scored: true - - - id: 4.2.2 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Manual)" - type: manual - # Takes a lot of time for connection to fail and - audit: | - POD=$(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}') - TOKEN=$(oc whoami -t) - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc exec -n openshift-kube-apiserver $POD -- curl -sS https://172.25.0.1/api/v1/nodes/$NODE_NAME/proxy/configz -k -H "Authorization:Bearer $TOKEN" | jq -r '.kubeletconfig.authorization.mode' 2> /dev/null - use_multiple_values: true - tests: - test_items: - - flag: "Connection timed out" - remediation: | - None required. Unauthenticated/Unauthorized users have no access to OpenShift nodes. - scored: false - - - id: 4.2.3 - text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host grep clientCAFile /etc/kubernetes/kubelet.conf 2> /dev/null - use_multiple_values: true - tests: - test_items: - - flag: '"clientCAFile": "/etc/kubernetes/kubelet-ca.crt"' - remediation: | - None required. Changing the clientCAFile value is unsupported. 
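For the kubelet configuration checks above, the sketch below inspects the rendered kubelet.conf on one node with the same grep patterns used by the audits; it assumes the same in-pod, `oc debug` context as the file-permission sketch earlier.

```bash
#!/usr/bin/env bash
# Sketch: inspect the kubelet configuration on one node for the settings
# behind checks 4.2.1 (anonymous auth) and 4.2.3 (client CA file).
set -euo pipefail

NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')

# Anonymous authentication must not be enabled.
oc debug node/"$NODE_NAME" -- chroot /host grep -B4 -A1 anonymous /etc/kubernetes/kubelet.conf

# The kubelet must trust the cluster-managed client CA bundle.
oc debug node/"$NODE_NAME" -- chroot /host grep clientCAFile /etc/kubernetes/kubelet.conf
```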
- scored: true - - - id: 4.2.4 - text: "Verify that the read only port is not used or is set to 0 (Automated)" - audit: | - echo `oc -n openshift-kube-apiserver get cm kube-apiserver-pod -o yaml | grep --color read-only-port` 2> /dev/null - echo `oc -n openshift-kube-apiserver get cm config -o yaml | grep --color "read-only-port"` 2> /dev/null - tests: - bin_op: or - test_items: - - flag: "read-only-port" - compare: - op: has - value: "[\"0\"]" - - flag: "read-only-port" - set: false - remediation: | - In earlier versions of OpenShift 4, the read-only-port argument is not used. - Follow the instructions in the documentation to create a Kubelet config CRD - and set the --read-only-port is set to 0. - scored: true - - - id: 4.2.5 - text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" - audit: | - # Should return 1 for node - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/${NODE_NAME} -- chroot /host ps -ef | grep kubelet | grep streaming-connection-idle-timeout 2> /dev/null - echo exit_code=$? - # Should return 1 for node - oc debug node/${NODE_NAME} -- chroot /host grep streamingConnectionIdleTimeout /etc/kubernetes/kubelet.conf 2> /dev/null - echo exit_code=$? - use_multiple_values: true - tests: - bin_op: or - test_items: - - flag: --streaming-connection-idle-timeout - compare: - op: noteq - value: 0 - - flag: streamingConnectionIdleTimeout - compare: - op: noteq - value: 0s - - flag: "exit_code" - compare: - op: eq - value: 1 - remediation: | - Follow the instructions in the documentation to create a Kubelet config CRD and set - the --streaming-connection-idle-timeout to the desired value. Do not set the value to 0. - scored: true - - - id: 4.2.6 - text: "Ensure that the --protect-kernel-defaults argument is not set (Manual)" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host more /etc/kubernetes/kubelet.conf 2> /dev/null - tests: - test_items: - - flag: protectKernelDefaults - set: false - remediation: | - None required. The OpenShift 4 kubelet modifies the system tunable; - using the protect-kernel-defaults flag will cause the kubelet to fail on start if the tunables - don't match the kubelet configuration and the OpenShift node will fail to start. - scored: false - - - id: 4.2.7 - text: "Ensure that the --make-iptables-util-chains argument is set to true (Manual)" - audit: | - /bin/bash - flag=make-iptables-util-chains - opt=makeIPTablesUtilChains - # look at each machineconfigpool - while read -r pool nodeconfig; do - # true by default - value='true' - # first look for the flag - oc get machineconfig $nodeconfig -o json | jq -r '.spec.config.systemd[][] | select(.name=="kubelet.service") | .contents' | sed -n "/^ExecStart=/,/^\$/ { /^\\s*--$flag=false/ q 100 }" - # if the above command exited with 100, the flag was false - [ $? == 100 ] && value='false' - # now look in the yaml KubeletConfig - yamlconfig=$(oc get machineconfig $nodeconfig -o json | jq -r '.spec.config.storage.files[] | select(.path=="/etc/kubernetes/kubelet.conf") | .contents.source ' | sed 's/^data:,//' | while read; do echo -e ${REPLY//%/\\x}; done) - echo "$yamlconfig" | sed -n "/^$opt:\\s*false\\s*$/ q 100" - [ $? 
== 100 ] && value='false' - echo "Pool $pool has $flag ($opt) set to $value" - done < <(oc get machineconfigpools -o json | jq -r '.items[] | select(.status.machineCount>0) | .metadata.name + " " + .spec.configuration.name') - use_multiple_values: true - tests: - test_items: - - flag: "set to true" - remediation: | - None required. The --make-iptables-util-chains argument is set to true by default. - scored: false - - - id: 4.2.8 - text: "Ensure that the --hostname-override argument is not set (Manual)" - audit: | - echo `oc get machineconfig 01-worker-kubelet -o yaml | grep hostname-override` - echo `oc get machineconfig 01-master-kubelet -o yaml | grep hostname-override` - tests: - test_items: - - flag: hostname-override - set: false - remediation: | - By default, --hostname-override argument is not set. - scored: false - - - id: 4.2.9 - text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (Manual)" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/${NODE_NAME} -- chroot /host cat /etc/kubernetes/kubelet.conf; - oc get machineconfig 01-worker-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050 - oc get machineconfig 01-master-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050 - type: "manual" - remediation: | - Follow the documentation to edit kubelet parameters - https://docs.openshift.com/container-platform/4.5/scalability_and_performance/recommended-host-practices.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters - KubeAPIQPS: - scored: false - - - id: 4.2.10 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" - audit: | - oc get configmap config -n openshift-kube-apiserver -o json \ - | jq -r '.data["config.yaml"]' \ - | jq -r '.apiServerArguments | - .["kubelet-client-certificate"][0], - .["kubelet-client-key"][0] - ' - tests: - bin_op: and - test_items: - - flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.crt" - - flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.key" - remediation: | - OpenShift automatically manages TLS authentication for the API server communication with the node/kublet. - This is not configurable. - scored: true - - - id: 4.2.11 - text: "Ensure that the --rotate-certificates argument is not set to false (Manual)" - audit: | - #Verify the rotateKubeletClientCertificate feature gate is not set to false - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/${NODE_NAME} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep RotateKubeletClientCertificate 2> /dev/null - # Verify the rotateCertificates argument is set to true - oc debug node/${NODE_NAME} -- chroot host grep rotate /etc/kubernetes/kubelet.conf 2> /dev/null - use_multiple_values: true - tests: - bin_op: or - test_items: - - flag: rotateCertificates - compare: - op: eq - value: true - - flag: rotateKubeletClientCertificates - compare: - op: noteq - value: false - - flag: rotateKubeletClientCertificates - set: false - remediation: | - None required. 
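The TLS check above (4.2.10) can be condensed into one pipeline, shown in the sketch below; it assumes an authenticated `oc` session and only re-runs the query already given in the audit.

```bash
#!/usr/bin/env bash
# Sketch: print the kubelet client certificate and key the kube-apiserver
# presents to kubelets (check 4.2.10).
set -euo pipefail

oc get configmap config -n openshift-kube-apiserver -o json \
  | jq -r '.data["config.yaml"]' \
  | jq -r '.apiServerArguments
           | .["kubelet-client-certificate"][0],
             .["kubelet-client-key"][0]'
```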
- scored: false - - - id: 4.2.12 - text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" - audit: | - #Verify the rotateKubeletServerCertificate feature gate is on - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/${NODE_NAME} -- chroot /host grep RotateKubeletServerCertificate /etc/kubernetes/kubelet.conf 2> /dev/null - # Verify the rotateCertificates argument is set to true - oc debug node/${NODE_NAME} -- chroot host grep rotate /etc/kubernetes/kubelet.conf 2> /dev/null - use_multiple_values: true - tests: - bin_op: or - test_items: - - flag: rotateCertificates - compare: - op: eq - value: true - - flag: RotateKubeletServerCertificate - compare: - op: eq - value: true - remediation: | - By default, kubelet server certificate rotation is disabled. - scored: false - - - id: 4.2.13 - text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" - audit: | - # needs verification - # verify cipher suites - oc describe --namespace=openshift-ingress-operator ingresscontroller/default - oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.observedConfig.servingInfo - oc get openshiftapiservers.operator.openshift.io cluster -o json |jq .spec.observedConfig.servingInfo - oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo - #check value for tlsSecurityProfile; null is returned if default is used - oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.tlsSecurityProfile - type: manual - remediation: | - Follow the directions above and in the OpenShift documentation to configure the tlsSecurityProfile. - Configuring Ingress - scored: false diff --git a/cfg/rh-1.4.0/policies.yaml b/cfg/rh-1.4.0/policies.yaml deleted file mode 100644 index e90cd877f..000000000 --- a/cfg/rh-1.4.0/policies.yaml +++ /dev/null @@ -1,287 +0,0 @@ ---- -controls: -version: rh-1.0 -id: 5 -text: "Kubernetes Policies" -type: "policies" -groups: - - id: 5.1 - text: "RBAC and Service Accounts" - checks: - - id: 5.1.1 - text: "Ensure that the cluster-admin role is only used where required (Manual)" - type: "manual" - audit: | - #To get a list of users and service accounts with the cluster-admin role - oc get clusterrolebindings -o=customcolumns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | - grep cluster-admin - #To verity that kbueadmin is removed, no results should be returned - oc get secrets kubeadmin -n kube-system - remediation: | - Identify all clusterrolebindings to the cluster-admin role. Check if they are used and - if they need this role or if they could use a role with fewer privileges. - Where possible, first bind users to a lower privileged role and then remove the - clusterrolebinding to the cluster-admin role : - kubectl delete clusterrolebinding [name] - scored: false - - - id: 5.1.2 - text: "Minimize access to secrets (Manual)" - type: "manual" - remediation: | - Where possible, remove get, list and watch access to secret objects in the cluster. 
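A short sketch of the cluster-admin audit from 5.1.1 follows, with the custom-columns flag written out in full; it assumes an authenticated `oc` session with permission to read kube-system secrets, and a NotFound error on the second command is the expected result once kubeadmin has been removed.

```bash
#!/usr/bin/env bash
# Sketch: list clusterrolebindings that grant cluster-admin and confirm the
# temporary kubeadmin secret is gone (check 5.1.1).
set -euo pipefail

oc get clusterrolebindings \
  -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind \
  | grep cluster-admin || echo "no cluster-admin bindings matched"

# Expect "Error from server (NotFound)" here on a hardened cluster.
oc get secrets kubeadmin -n kube-system || true
```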
- scored: false - - - id: 5.1.3 - text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" - type: "manual" - audit: | - #needs verification - oc get roles --all-namespaces -o yaml - for i in $(oc get roles -A -o jsonpath='{.items[*].metadata.name}'); do oc - describe clusterrole ${i}; done - #Retrieve the cluster roles defined in the cluster and review for wildcards - oc get clusterroles -o yaml - for i in $(oc get clusterroles -o jsonpath='{.items[*].metadata.name}'); do - oc describe clusterrole ${i}; done - remediation: | - Where possible replace any use of wildcards in clusterroles and roles with specific - objects or actions. - scored: false - - - id: 5.1.4 - text: "Minimize access to create pods (Manual)" - type: "manual" - remediation: | - Where possible, remove create access to pod objects in the cluster. - scored: false - - - id: 5.1.5 - text: "Ensure that default service accounts are not actively used. (Manual)" - type: "manual" - remediation: | - None required. - scored: false - - - id: 5.1.6 - text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" - type: "manual" - remediation: | - Modify the definition of pods and service accounts which do not need to mount service - account tokens to disable it. - scored: false - - - id: 5.2 - text: "Pod Security Policies" - checks: - - id: 5.2.1 - text: "Minimize the admission of privileged containers (Manual)" - audit: | - # needs verification - oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegedContainer:.allowPrivilegedContainer - tests: - test_items: - - flag: "false" - remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Allow - Privileged field is set to false. - scored: false - - - id: 5.2.2 - text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" - audit: | - oc get scc -o=custom-columns=NAME:.metadata.name,allowHostPID:.allowHostPID - tests: - test_items: - - flag: "false" - remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host - PID field is set to false. - scored: false - - - id: 5.2.3 - text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" - audit: | - oc get scc -o=custom-columns=NAME:.metadata.name,allowHostIPC:.allowHostIPC - tests: - test_items: - - flag: "false" - remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host - IPC field is set to false. - scored: false - - - id: 5.2.4 - text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" - audit: | - oc get scc -o=custom-columns=NAME:.metadata.name,allowHostNetwork:.allowHostNetwork - tests: - test_items: - - flag: "false" - remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host - Network field is omitted or set to false. - scored: false - - - id: 5.2.5 - text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" - audit: | - oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegeEscalation:.allowPrivilegeEscalation - tests: - test_items: - - flag: "false" - remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Allow - Privilege Escalation field is omitted or set to false. 
- scored: false - - - id: 5.2.6 - text: "Minimize the admission of root containers (Manual)" - audit: | - # needs verification # | awk 'NR>1 {gsub("map\\[type:", "", $2); gsub("\\]$", "", $2); print $1 ":" $2}' - oc get scc -o=custom-columns=NAME:.metadata.name,runAsUser:.runAsUser.type - #For SCCs with MustRunAs verify that the range of UIDs does not include 0 - oc get scc -o=custom-columns=NAME:.metadata.name,uidRangeMin:.runAsUser.uidRangeMin,uidRangeMax:.runAsUser.uidRangeMax - tests: - bin_op: or - test_items: - - flag: "MustRunAsNonRoot" - - flag: "MustRunAs" - compare: - op: nothave - value: 0 - remediation: | - None required. By default, OpenShift includes the non-root SCC with the the Run As User - Strategy is set to either MustRunAsNonRoot. If additional SCCs are appropriate, follow the - OpenShift documentation to create custom SCCs. - scored: false - - - id: 5.2.7 - text: "Minimize the admission of containers with the NET_RAW capability (Manual)" - audit: | - # needs verification - oc get scc -o=custom-columns=NAME:.metadata.name,requiredDropCapabilities:.requiredDropCapabilities - tests: - bin_op: or - test_items: - - flag: "ALL" - - flag: "NET_RAW" - remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Required - Drop Capabilities is set to include either NET_RAW or ALL. - scored: false - - - id: 5.2.8 - text: "Minimize the admission of containers with added capabilities (Manual)" - type: "manual" - remediation: | - Ensure that Allowed Capabilities is set to an empty array for every SCC in the cluster - except for the privileged SCC. - scored: false - - - id: 5.2.9 - text: "Minimize the admission of containers with capabilities assigned (Manual)" - type: "manual" - remediation: | - Review the use of capabilites in applications running on your cluster. Where a namespace - contains applicaions which do not require any Linux capabities to operate consider - adding a SCC which forbids the admission of containers which do not drop all capabilities. - scored: false - - - id: 5.3 - text: "Network Policies and CNI" - checks: - - id: 5.3.1 - text: "Ensure that the CNI in use supports Network Policies (Manual)" - type: "manual" - remediation: | - None required. - scored: false - - - id: 5.3.2 - text: "Ensure that all Namespaces have Network Policies defined (Manual)" - type: "manual" - audit: | - #Run the following command and review the NetworkPolicy objects created in the cluster. - oc -n all get networkpolicy - remediation: | - Follow the documentation and create NetworkPolicy objects as you need them. - scored: false - - - id: 5.4 - text: "Secrets Management" - checks: - - id: 5.4.1 - text: "Prefer using secrets as files over secrets as environment variables (Manual)" - type: "manual" - audit: | - #Run the following command to find references to objects which use environment variables defined from secrets. - oc get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} - {.metadata.name} {"\n"}{end}' -A - remediation: | - If possible, rewrite application code to read secrets from mounted secret files, rather than - from environment variables. - scored: false - - - id: 5.4.2 - text: "Consider external secret storage (Manual)" - type: "manual" - remediation: | - Refer to the secrets management options offered by your cloud provider or a third-party - secrets management solution. 
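The SCC checks in group 5.2 each query one field with custom-columns; the sketch below folds them into a single table for review. It assumes an authenticated `oc` session and only combines the columns already named in the audits above.

```bash
#!/usr/bin/env bash
# Sketch: review the SecurityContextConstraints fields behind checks
# 5.2.1 through 5.2.5 in one table.
set -euo pipefail

oc get scc -o=custom-columns=\
NAME:.metadata.name,\
PRIV:.allowPrivilegedContainer,\
HOSTPID:.allowHostPID,\
HOSTIPC:.allowHostIPC,\
HOSTNET:.allowHostNetwork,\
PRIVESC:.allowPrivilegeEscalation
```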
- scored: false - - - id: 5.5 - text: "Extensible Admission Control" - checks: - - id: 5.5.1 - text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" - type: "manual" - remediation: | - Follow the OpenShift documentation: [Image configuration resources](https://docs.openshift.com/container-platform/4.5/openshift_images/image-configuration.html - scored: false - - - id: 5.7 - text: "General Policies" - checks: - - id: 5.7.1 - text: "Create administrative boundaries between resources using namespaces (Manual)" - type: "manual" - audit: | - #Run the following command and review the namespaces created in the cluster. - oc get namespaces - #Ensure that these namespaces are the ones you need and are adequately administered as per your requirements. - remediation: | - Follow the documentation and create namespaces for objects in your deployment as you need - them. - scored: false - - - id: 5.7.2 - text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)" - type: "manual" - remediation: | - To enable the default seccomp profile, use the reserved value /runtime/default that will - make sure that the pod uses the default policy available on the host. - scored: false - - - id: 5.7.3 - text: "Apply Security Context to Your Pods and Containers (Manual)" - type: "manual" - remediation: | - Follow the Kubernetes documentation and apply security contexts to your pods. For a - suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker - Containers. - scored: false - - - id: 5.7.4 - text: "The default namespace should not be used (Manual)" - type: "manual" - audit: | - #Run this command to list objects in default namespace - oc project default - oc get all - #The only entries there should be system managed resources such as the kubernetes and openshift service - remediation: | - Ensure that namespaces are created to allow for appropriate segregation of Kubernetes - resources and that all new resources are created in a specific namespace. - scored: false diff --git a/cfg/rh-1.6.0/config.yaml b/cfg/rh-1.6.0/config.yaml deleted file mode 100644 index b7839455a..000000000 --- a/cfg/rh-1.6.0/config.yaml +++ /dev/null @@ -1,2 +0,0 @@ ---- -## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/rh-1.6.0/controlplane.yaml b/cfg/rh-1.6.0/controlplane.yaml deleted file mode 100644 index 606194ddf..000000000 --- a/cfg/rh-1.6.0/controlplane.yaml +++ /dev/null @@ -1,62 +0,0 @@ ---- -controls: -version: rh-1.0 -id: 3 -text: "Control Plane Configuration" -type: "controlplane" -groups: - - id: 3.1 - text: "Authentication and Authorization" - checks: - - id: 3.1.1 - text: "Client certificate authentication should not be used for users (Manual)" - audit: | - # To verify user authentication is enabled - oc describe authentication - # To verify that an identity provider is configured - oc get identity - # To verify that a custom cluster-admin user exists - oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | grep cluster-admin | grep User - # To verity that kbueadmin is removed, no results should be returned - oc get secrets kubeadmin -n kube-system - type: manual - remediation: | - Configure an identity provider for the OpenShift cluster. - Understanding identity provider configuration | Authentication | OpenShift - Container Platform 4.5. 
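As an illustration of the identity provider step in the remediation above, an HTPasswd provider configured through the cluster OAuth resource might look roughly like this; the provider and secret names are illustrative, and the referenced Secret is assumed to have been created beforehand in the openshift-config namespace:

apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  identityProviders:
    - name: htpasswd-provider        # illustrative provider name
      mappingMethod: claim
      type: HTPasswd
      htpasswd:
        fileData:
          name: htpass-secret        # assumed pre-existing Secret in openshift-config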
Once an identity provider has been defined, - you can use RBAC to define and apply permissions. - After you define an identity provider and create a new cluster-admin user, - remove the kubeadmin user to improve cluster security. - scored: false - - - id: 3.2 - text: "Logging" - checks: - - id: 3.2.1 - text: "Ensure that a minimal audit policy is created (Manual)" - audit: | - #To view kube apiserver log files - oc adm node-logs --role=master --path=kube-apiserver/ - #To view openshift apiserver log files - oc adm node-logs --role=master --path=openshift-apiserver/ - #To verify kube apiserver audit config - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?' - #To verify openshift apiserver audit config - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?' - type: manual - remediation: | - No remediation required. - scored: false - - - id: 3.2.2 - text: "Ensure that the audit policy covers key security concerns (Manual)" - audit: | - #To verify openshift apiserver audit config - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' - #To verify kube apiserver audit config - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' - type: manual - remediation: | - In OpenShift 4.6 and higher, if appropriate for your needs, - modify the audit policy. - scored: false diff --git a/cfg/rh-1.6.0/etcd.yaml b/cfg/rh-1.6.0/etcd.yaml deleted file mode 100644 index 4398d9cc1..000000000 --- a/cfg/rh-1.6.0/etcd.yaml +++ /dev/null @@ -1,183 +0,0 @@ ---- -controls: -version: rh-1.0 -id: 2 -text: "Etcd Node Configuration" -type: "etcd" -groups: - - id: 2 - text: "Etcd Node Configuration Files" - checks: - - id: 2.1 - text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - # Get the pod name in the openshift-etcd namespace - POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." - else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/' - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/' - fi - use_multiple_values: true - tests: - test_items: - - flag: "file" - compare: - op: regex - value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-serving\/etcd-serving-.*\.(?:crt|key)' - remediation: | - OpenShift does not use the etcd-certfile or etcd-keyfile flags. - Certificates for etcd are managed by the etcd cluster operator. - scored: false - - - id: 2.2 - text: "Ensure that the --client-cert-auth argument is set to true (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - # Get the pod name in the openshift-etcd namespace - POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." 
- else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/' - fi - use_multiple_values: true - tests: - test_items: - - flag: "--client-cert-auth" - compare: - op: eq - value: true - remediation: | - This setting is managed by the cluster etcd operator. No remediation required." - scored: false - - - id: 2.3 - text: "Ensure that the --auto-tls argument is not set to true (Manual)" - audit: | - # Returns 0 if found, 1 if not found - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - # Get the pod name in the openshift-etcd namespace - POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." - else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$? - fi - use_multiple_values: true - tests: - test_items: - - flag: "exit_code" - compare: - op: eq - value: "1" - remediation: | - This setting is managed by the cluster etcd operator. No remediation required. - scored: false - - - id: 2.4 - text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - # Get the pod name in the openshift-etcd namespace - POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." - else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/' - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/' - fi - use_multiple_values: true - tests: - test_items: - - flag: "file" - compare: - op: regex - value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-peer\/etcd-peer-.*\.(?:crt|key)' - remediation: | - None. This configuration is managed by the etcd operator. - scored: false - - - id: 2.5 - text: "Ensure that the --peer-client-cert-auth argument is set to true (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - # Get the pod name in the openshift-etcd namespace - POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." - else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/' - fi - use_multiple_values: true - tests: - test_items: - - flag: "--peer-client-cert-auth" - compare: - op: eq - value: true - remediation: | - This setting is managed by the cluster etcd operator. No remediation required. 
- scored: false - - - id: 2.6 - text: "Ensure that the --peer-auto-tls argument is not set to true (Manual)" - audit: | - # Returns 0 if found, 1 if not found - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - # Get the pod name in the openshift-etcd namespace - POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." - else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$? - fi - use_multiple_values: true - tests: - test_items: - - flag: "exit_code" - compare: - op: eq - value: "1" - remediation: | - This setting is managed by the cluster etcd operator. No remediation required. - scored: false - - - id: 2.7 - text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - # Get the pod name in the openshift-etcd namespace - POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." - else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/' - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/' - fi - use_multiple_values: true - tests: - test_items: - - flag: "file" - compare: - op: regex - value: '\/etc\/kubernetes\/static-pod-certs\/configmaps\/etcd-(?:serving|peer-client)-ca\/ca-bundle\.(?:crt|key)' - remediation: | - None required. Certificates for etcd are managed by the OpenShift cluster etcd operator. - scored: false diff --git a/cfg/rh-1.6.0/master.yaml b/cfg/rh-1.6.0/master.yaml deleted file mode 100644 index 37b50f033..000000000 --- a/cfg/rh-1.6.0/master.yaml +++ /dev/null @@ -1,1445 +0,0 @@ ---- -controls: -version: rh-1.0 -id: 1 -text: "Master Node Security Configuration" -type: "master" -groups: - - id: 1.1 - text: "Master Node Configuration Files" - checks: - - id: 1.1.1 - text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-apiserver namespace - POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-kube-apiserver "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml - fi - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - No remediation required; file permissions are managed by the operator. 
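Most of the file permission checks in section 1.1 follow the same pattern as 1.1.1 above: the audit command prints a permissions= token and the test compares it with op: bitmask. As I read kube-bench's bitmask semantics, the check passes when the observed mode sets no bits beyond the expected value, so 600 or 640 pass a 644 check while 664 or 755 fail. A stripped-down sketch of that pattern (id and path illustrative):

  - id: x.y.z
    text: "Example permission check (illustrative)"
    audit: "stat -c 'permissions=%a' /etc/kubernetes/some-file.yaml"
    tests:
      test_items:
        - flag: "permissions"
          compare:
            op: bitmask   # passes for 644, 640, 600, 400; fails for 664, 755
            value: "644"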
- scored: false - - - id: 1.1.2 - text: "Ensure that the API server pod specification file ownership is set to root:root (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-apiserver namespace - POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-kube-apiserver "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml - fi - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.3 - text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-controller-manager namespace - POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml - fi - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.4 - text: "Ensure that the controller manager pod specification file ownership is set to root:root (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-controller-manager namespace - POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml - fi - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - No remediation required; file permissions are managed by the operator. 
- scored: false - - - id: 1.1.5 - text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-scheduler namespace - POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml - fi - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.6 - text: "Ensure that the scheduler pod specification file ownership is set to root:root (Manual))" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-scheduler namespace - POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml - fi - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.7 - text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Manual))" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-etcd namespace - POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc rsh -n openshift-etcd "$POD_NAME" stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/manifests/etcd-pod.yaml - fi - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.8 - text: "Ensure that the etcd pod specification file ownership is set to root:root (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-etcd namespace - POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." 
- else - # Execute the stat command - oc rsh -n openshift-etcd "$POD_NAME" stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/manifests/etcd-pod.yaml - fi - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.9 - text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - # For CNI multus - # Get the pod name in the openshift-multus namespace - POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/etc/cni/net.d/*.conf"; 2>/dev/null - oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/var/run/multus/cni/net.d/*.conf"; 2>/dev/null - fi - # For SDN pods - POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null - oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null - fi - - # For OVS pods - POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null - oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null - oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null - fi - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.10 - text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - # For CNI multus - # Get the pod name in the openshift-multus namespace - POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." 
- else - # Execute the stat command - oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c '$i %n %U:%G' /host/etc/cni/net.d/*.conf" 2>/dev/null - oc exec -n openshift-multus $i -- /bin/bash -c "stat -c '$i %n %U:%G' /host/var/run/multus/cni/net.d/*.conf" 2>/dev/null - fi - # For SDN pods - POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null - oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null - fi - # For OVS pods in 4.5 - POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null - oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null - oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null - fi - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.11 - text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-etcd namespace - POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-etcd "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /var/lib/etcd/member - fi - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "700" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.12 - text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-etcd namespace - POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-etcd "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /var/lib/etcd/member - fi - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - No remediation required; file permissions are managed by the operator. 
- scored: false - - - id: 1.1.13 - text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Manual))" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubeconfig 2> /dev/null - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.14 - text: "Ensure that the admin.conf file ownership is set to root:root (Manual)" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubeconfig 2> /dev/null - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.15 - text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-scheduler namespace - POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig - fi - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.16 - text: "Ensure that the scheduler.conf file ownership is set to root:root (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-scheduler namespace - POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig - fi - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - No remediation required; file permissions are managed by the operator. 
- scored: false - - - id: 1.1.17 - text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-controller-manager namespace - POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig - fi - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.18 - text: "Ensure that the controller-manager.conf file ownership is set to root:root (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-controller-manager namespace - POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig - fi - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.19 - text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Manual)" - audit: | - # Should return root:root for all files and directories - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-controller-manager namespace - POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." 
- else - # echo $i static-pod-certs - oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; - oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; - # echo $i static-pod-resources - oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; - oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; - fi - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.20 - text: "Ensure that the OpenShift PKI certificate file permissions are set to 644 or more restrictive (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-apiserver namespace - POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.crt' -exec stat -c "$POD_NAME %n permissions=%a" {} \; - fi - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.1.21 - text: "Ensure that the OpenShift PKI key file permissions are set to 600 (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - - # Get the pod name in the openshift-kube-apiserver namespace - POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.key' -exec stat -c "$POD_NAME %n permissions=%a" {} \; - fi - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - No remediation required; file permissions are managed by the operator. - scored: false - - - id: 1.2 - text: "API Server" - checks: - - id: 1.2.1 - text: "Ensure that anonymous requests are authorized (Manual)" - audit: | - # To verify that userGroups include system:unauthenticated - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' 
- # To verify that userGroups include system:unauthenticated - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?.userGroups' - # To verify RBAC is enabled - oc get clusterrolebinding - oc get clusterrole - oc get rolebinding - oc get role - tests: - test_items: - - flag: "system:unauthenticated" - remediation: | - None required. The default configuration should not be modified. - scored: false - - - id: 1.2.2 - text: "Ensure that the --basic-auth-file argument is not set (Manual)" - audit: | - oc -n openshift-kube-apiserver get cm config -o yaml | grep --color "basic-auth" - oc -n openshift-apiserver get cm config -o yaml | grep --color "basic-auth" - # Add | awk '$3 != "AVAILABLE" { if ($3){print "available=true"}else{print "available=false"} }; to create AVAILABLE = true/false form - oc get clusteroperator authentication | awk '$3 != "AVAILABLE" { if ($3){print "available=true"}else{print "available=false"} }' - tests: - bin_op: and - test_items: - - flag: "basic-auth-file" - set: false - - flag: "available" - compare: - op: eq - value: true - remediation: | - None required. --basic-auth-file cannot be configured on OpenShift. - scored: false - - - id: 1.2.3 - text: "Ensure that the --token-auth-file parameter is not set (Manual)" - audit: | - # Verify that the token-auth-file flag is not present - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments' - #Verify that the authentication operator is running - oc get clusteroperator authentication | awk '$3 != "AVAILABLE" { if ($3){print "available=true"}else{print "available=false"} }' - tests: - bin_op: and - test_items: - - flag: "token-auth-file" - set: false - - flag: "available" - compare: - op: eq - value: true - remediation: | - None is required. - scored: false - - - id: 1.2.4 - text: "Use https for kubelet connections (Manual)" - audit: | - #for 4.5 - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo' - #for 4.6 - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - #for both 4.5 and 4.6 - oc -n openshift-apiserver describe secret serving-cert - tests: - bin_op: and - test_items: - - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.crt" - - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.key" - remediation: | - No remediation is required. - OpenShift platform components use X.509 certificates for authentication. - OpenShift manages the CAs and certificates for platform components. This is not configurable. 
- scored: false - - - id: 1.2.5 - text: "Ensure that the kubelet uses certificates to authenticate (Manual)" - audit: | - #for 4.5 - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo' - #for 4.6 - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - #for both 4.5 and 4.6 - oc -n openshift-apiserver describe secret serving-cert - tests: - bin_op: and - test_items: - - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.crt" - - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.key" - remediation: | - No remediation is required. - OpenShift platform components use X.509 certificates for authentication. - OpenShift manages the CAs and certificates for platform components. - This is not configurable. - scored: false - - - id: 1.2.6 - text: "Verify that the kubelet certificate authority is set as appropriate (Manual)" - audit: | - # for 4.5 - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo' - # for 4.6 - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - tests: - test_items: - - flag: "/etc/kubernetes/static-pod-resources/configmaps/kubelet-serving-ca/ca-bundle.crt" - remediation: | - No remediation is required. - OpenShift platform components use X.509 certificates for authentication. - OpenShift manages the CAs and certificates for platform components. - This is not configurable. - scored: false - - - id: 1.2.7 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Manual)" - audit: | - # To verify that the authorization-mode argument is not used - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - # To verify RBAC is configured: - oc get clusterrolebinding - oc get clusterrole - oc get rolebinding - oc get role - audit_config: | - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - tests: - bin_op: or - test_items: - - path: "{.authorization-mode}" - compare: - op: nothave - value: "AlwaysAllow" - - path: "{.authorization-mode}" - flag: "authorization-mode" - set: false - remediation: | - None. RBAC is always on and the OpenShift API server does not use the values assigned to the flag authorization-mode. 
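Checks such as 1.2.7 above combine an audit command with an audit_config command and use both flag: and path: in their test items. As I understand kube-bench's behaviour (worth confirming against the kube-bench documentation), flag: entries are matched against the audit output while path: entries are evaluated as JSONPath against the parsed audit_config output, so a hedged paraphrase of the 1.2.7 test reads roughly:

# Hedged paraphrase, not the literal upstream check:
tests:
  bin_op: or
  test_items:
    - path: "{.authorization-mode}"     # JSONPath into the audit_config JSON
      compare:
        op: nothave
        value: "AlwaysAllow"            # pass if the mode list lacks AlwaysAllow
    - path: "{.authorization-mode}"
      flag: "authorization-mode"
      set: false                        # or pass if the argument is absent entirely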
- scored: false - - - id: 1.2.8 - text: "Verify that the Node authorizer is enabled (Manual)" - audit: | - # For OCP 4.5 and earlier verify that authorization-mode is not used - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments' - # For OCP 4.5 and earlier verify that authorization-mode is not used - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host cat /etc/kubernetes/kubelet.conf | grep authorization-mode 2> /dev/null - oc debug node/$NODE_NAME -- chroot /host ps -aux | grep kubelet | grep authorization-mode 2> /dev/null - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - audit_config: | - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - tests: - bin_op: or - test_items: - - path: "{.authorization-mode}" - compare: - op: has - value: "Node" - - path: "{.authorization-mode}" - flag: "authorization-mode" - set: false - remediation: | - No remediation is required. - scored: false - - - id: 1.2.9 - text: "Verify that RBAC is enabled (Manual)" - audit: | - # For 4.5 To verify that the authorization-mode argument is not used - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - # To verify RBAC is used - oc get clusterrolebinding - oc get clusterrole - oc get rolebinding - oc get role - # For 4.6, verify that the authorization-mode argument includes RBAC - audit_config: | - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - tests: - bin_op: or - test_items: - - path: "{.authorization-mode}" - compare: - op: has - value: "RBAC" - - path: "{.authorization-mode}" - flag: "authorization-mode" - set: false - remediation: | - None. It is not possible to disable RBAC. 
- scored: false - - - id: 1.2.10 - text: "Ensure that the APIPriorityAndFairness feature gate is enabled (Manual)" - audit: | - #Verify the APIPriorityAndFairness feature-gate - oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments' - #Verify the set of admission-plugins for OCP 4.6 and higher - oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - tests: - bin_op: and - test_items: - - flag: "APIPriorityAndFairness=true" - - flag: "EventRateLimit" - set: false - remediation: | - No remediation is required - scored: false - - - id: 1.2.11 - text: "Ensure that the admission control plugin AlwaysAdmit is not set (Manual)" - audit: | - #Verify the set of admission-plugins for OCP 4.6 and higher - oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - tests: - test_items: - - flag: "AlwaysAdmit" - set: false - remediation: | - No remediation is required. The AlwaysAdmit admission controller cannot be enabled in OpenShift. - scored: false - - - id: 1.2.12 - text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" - audit: | - #Verify the set of admissi on-plugins for OCP 4.6 and higher - oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - tests: - test_items: - - flag: "AlwaysPullImages" - set: false - remediation: | - None required. - scored: false - - - id: 1.2.13 - text: "Ensure that the admission control plugin SecurityContextDeny is not set (Manual)" - audit: | - #Verify the set of admission-plugins for OCP 4.6 and higher - oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') - [ "$output" == "null" ] && echo "ocp 4.5 has SecurityContextDeny and SecurityContextConstraint compiled" || echo $output - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - #Verify that SecurityContextConstraints are deployed - oc get scc - oc describe scc restricted - tests: - bin_op: and - test_items: - - flag: "SecurityContextConstraint" - set: true - - flag: "anyuid" - - flag: "hostaccess" - - flag: "hostmount-anyuid" - - flag: "hostnetwork" - - flag: "node-exporter" - - flag: "nonroot" - - flag: "privileged" - - flag: "restricted" - remediation: | - None required. The Security Context Constraint admission controller cannot be disabled in OpenShift 4. 
- scored: false - - - id: 1.2.14 - text: "Ensure that the admission control plugin ServiceAccount is set (Manual)" - audit: | - #Verify the list of admission controllers for 4.6 and higher - oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') - [ "$output" == "null" ] && echo "ocp 4.5 has ServiceAccount compiled" || echo $output - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - #Verify that Service Accounts are present - oc get sa -A - tests: - test_items: - - flag: "ServiceAccount" - set: true - remediation: | - None required. OpenShift is configured to use service accounts by default. - scored: false - - - id: 1.2.15 - text: "Ensure that the admission control plugin NamespaceLifecycle is set (Manual)" - audit: | - #Verify the list of admission controllers for 4.6 and higher - oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') - [ "$output" == "null" ] && echo "ocp 4.5 has NamespaceLifecycle compiled" || echo $output - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - tests: - test_items: - - flag: "NamespaceLifecycle" - remediation: | - Ensure that the --disable-admission-plugins parameter does not include NamespaceLifecycle. - scored: false - - - id: 1.2.16 - text: "Ensure that the admission control plugin SecurityContextConstraint is set (Manual)" - audit: | - #Verify the set of admission-plugins for OCP 4.6 and higher - oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') - [ "$output" == "null" ] && echo "ocp 4.5 has SecurityContextConstraint compiled" || echo $output - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - #Verify that SecurityContextConstraints are deployed - oc get scc - oc describe scc restricted - tests: - bin_op: and - test_items: - - flag: "SecurityContextConstraint" - - flag: "anyuid" - - flag: "hostaccess" - - flag: "hostmount-anyuid" - - flag: "hostnetwork" - - flag: "node-exporter" - - flag: "nonroot" - - flag: "privileged" - - flag: "restricted" - remediation: | - None required. Security Context Constraints are enabled by default in OpenShift and cannot be disabled. 
- scored: false - - - id: 1.2.17 - text: "Ensure that the admission control plugin NodeRestriction is set (Manual)" - audit: | - # For 4.5, review the control plane manifest https://github.com/openshift/origin/blob/release-4.5/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane/manifests.go#L132 - #Verify the set of admission-plugins for OCP 4.6 and higher - oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') - [ "$output" == "null" ] && echo "ocp 4.5 has NodeRestriction compiled" || echo $output - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - tests: - test_items: - - flag: "NodeRestriction" - remediation: | - The NodeRestriction plugin cannot be disabled. - scored: false - - - id: 1.2.18 - text: "Ensure that the --insecure-bind-address argument is not set (Manual)" - audit: | - # InsecureBindAddress=true should not be in the results - oc get kubeapiservers.operator.openshift.io cluster -o jsonpath='{range .spec.observedConfig.apiServerArguments.feature-gates[*]}{@}{"\n"}{end}' - # Result should be only 6443 - oc -n openshift-kube-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}' - # Result should be only 8443 - oc -n openshift-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}' - tests: - bin_op: and - test_items: - - flag: "insecure-bind-address" - set: false - - flag: 6443 - - flag: 8443 - remediation: | - None required. - scored: false - - - id: 1.2.19 - text: "Ensure that the --insecure-port argument is set to 0 (Manual)" - audit: | - # Should return 6443 - oc -n openshift-kube-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}' - # For OCP 4.6 and above - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments["insecure-port"]' - output=$(oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments["insecure-port"]') - [ "$output" == "null" ] && echo "ocp 4.5 has insecure-port set to \"0\" compiled" || echo $output - tests: - bin_op: and - test_items: - - flag: "\"0\"" - - flag: "6443" - remediation: | - None required. The configuration is managed by the API server operator. - scored: false - - - id: 1.2.20 - text: "Ensure that the --secure-port argument is not set to 0 (Manual)" - audit: | - oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig' - # Should return only 6443 - echo ports=`oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[*].spec.containers[?(@.name=="kube-apiserver")].ports[*].containerPort}'` - tests: - bin_op: and - test_items: - - flag: '"bindAddress": "0.0.0.0:6443"' - - flag: "ports" - compare: - op: regex - value: '\s*(?:6443\s*){1,}$' - remediation: | - None required. 
- scored: false - - - id: 1.2.21 - text: "Ensure that the healthz endpoint is protected by RBAC (Manual)" - type: manual - audit: | - # Verify endpoints - oc -n openshift-kube-apiserver describe endpoints - # Check config for ports, livenessProbe, readinessProbe, healthz - oc -n openshift-kube-apiserver get cm kube-apiserver-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers' - # Test to validate RBAC enabled on the apiserver endpoint; check with non-admin role - oc project openshift-kube-apiserver POD=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}') PORT=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].spec.containers[0].ports[0].hostPort}') - # Following should return 403 Forbidden - oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -k - # Create a service account to test RBAC - oc create -n openshift-kube-apiserver sa permission-test-sa - # Should return 403 Forbidden - SA_TOKEN=$(oc sa -n openshift-kube-apiserver get-token permission-test-sa) - oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k - # Cleanup - oc delete -n openshift-kube-apiserver sa permission-test-sa - # As cluster admin, should succeed - CLUSTER_ADMIN_TOKEN=$(oc whoami -t) - oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k - remediation: | - None required as profiling data is protected by RBAC. - scored: false - - - id: 1.2.22 - text: "Ensure that the --audit-log-path argument is set (Manual)" - audit: | - # Should return “/var/log/kube-apiserver/audit.log" - output=$(oc get configmap config -n openshift-kube-apiserver -o jsonpath="{['.data.config\.yaml']}" | jq '.auditConfig.auditFilePath') - [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true - output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-path"][]?') - [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true - POD=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}') - oc rsh -n openshift-kube-apiserver -c kube-apiserver $POD ls /var/log/kube-apiserver/audit.log 2>/dev/null - # Should return 0 - echo exit_code=$? - # Should return "/var/log/openshift-apiserver/audit.log" - output=$(oc get configmap config -n openshift-apiserver -o jsonpath="{['.data.config\.yaml']}" | jq '.auditConfig.auditFilePath') - [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true - output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-path"][]?') - [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true - POD=$(oc get pods -n openshift-apiserver -l apiserver=true -o jsonpath='{.items[0].metadata.name}') - oc rsh -n openshift-apiserver $POD ls /var/log/openshift-apiserver/audit.log 2>/dev/null - # Should return 0 - echo exit_code=$? - use_multiple_values: true - tests: - bin_op: or - test_items: - - flag: "/var/log/kube-apiserver/audit.log" - - flag: "/var/log/openshift-apiserver/audit.log" - - flag: "exit_code=0" - - flag: "null" - remediation: | - None required. This is managed by the cluster apiserver operator. 
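The 1.2.21 audit above obtains a service account token with oc sa get-token, which has been removed from newer oc releases. On clusters where that subcommand is unavailable, a hedged equivalent of that step (assuming oc 4.11 or later, where oc create token exists; POD and PORT come from the earlier lines of the same audit) might look like:

  audit: |
    # Token request for the test service account (assumes oc >= 4.11)
    SA_TOKEN=$(oc create token permission-test-sa -n openshift-kube-apiserver)
    # Should still return 403 Forbidden for the unprivileged account
    oc rsh -n openshift-kube-apiserver ${POD} curl -k -H "Authorization: Bearer ${SA_TOKEN}" https://localhost:${PORT}/metrics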
- scored: false - - - id: 1.2.23 - text: "Ensure that the audit logs are forwarded off the cluster for retention (Manual)" - type: "manual" - remediation: | - Follow the documentation for log forwarding. Forwarding logs to third party systems - https://docs.openshift.com/container-platform/4.5/logging/cluster-logging-external.html - scored: false - - - id: 1.2.24 - text: "Ensure that the maximumRetainedFiles argument is set to 10 or as appropriate (Manual)" - audit: | - #NOTICE - output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumRetainedFiles) - [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumRetainedFiles=$output" || true - output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumRetainedFiles) - [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumRetainedFiles=$output" || true - output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxbackup"][]?') - [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxbackup=$output" || true - output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxbackup"][]?') - [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxbackup=$output" || true - use_multiple_values: true - tests: - bin_op: or - test_items: - - flag: "maximumRetainedFiles" - compare: - op: gte - value: 10 - - flag: "audit-log-maxbackup" - compare: - op: gte - value: 10 - remediation: | - Set the maximumRetainedFiles parameter to 10 or as an appropriate number of files. maximumRetainedFiles: 10 - scored: false - - - id: 1.2.25 - text: "Ensure that the maximumFileSizeMegabytes argument is set to 100 or as appropriate (Manual)" - audit: | - #NOTICE - output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumFileSizeMegabytes) - [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumFileSizeMegabytes=$output" || true - output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumFileSizeMegabytes) - [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumFileSizeMegabytes=$output" || true - output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxsize"][]?') - [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxsize=$output" || true - output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxsize"][]?') - [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxsize=$output" || true - use_multiple_values: true - tests: - bin_op: or - test_items: - - flag: "maximumFileSizeMegabytes" - compare: - op: gte - value: 100 - - flag: "audit-log-maxsize" - compare: - op: gte - value: 100 - remediation: | - Set the audit-log-maxsize parameter to 100 or as an appropriate number. 
- maximumFileSizeMegabytes: 100 - scored: false - - - id: 1.2.26 - text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" - audit: | - echo requestTimeoutSeconds=`oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.requestTimeoutSeconds` - tests: - test_items: - - flag: "requestTimeoutSeconds" - remediation: | - TBD - scored: false - - - id: 1.2.27 - text: "Ensure that the --service-account-lookup argument is set to true (Manual)" - audit: | - # For OCP 4.5 - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' | grep service-account-lookup - # For OCP 4.6 and above - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["service-account-lookup"]' - output=$(oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["service-account-lookup"][0]') - [ "$output" == "null" ] && echo "ocp 4.5 has service-account-lookup=true compiled" || echo service-account-lookup=$output - tests: - test_items: - - flag: "service-account-lookup=true" - remediation: | - TBD - scored: false - - - id: 1.2.28 - text: "Ensure that the --service-account-key-file argument is set as appropriate (Manual)" - audit: | - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .serviceAccountPublicKeyFiles[] - tests: - bin_op: and - test_items: - - flag: "/etc/kubernetes/static-pod-resources/configmaps/sa-token-signing-certs" - - flag: "/etc/kubernetes/static-pod-resources/configmaps/bound-sa-token-signing-certs" - remediation: | - The OpenShift API server does not use the service-account-key-file argument. - The ServiceAccount token authenticator is configured with serviceAccountConfig.publicKeyFiles. - OpenShift does not reuse the apiserver TLS key. This is not configurable. - scored: false - - - id: 1.2.29 - text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Manual)" - audit: | - # etcd Certificate File - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.certFile - # etcd Key File - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.keyFile - # NOTICE 4.6 extention - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-certfile"]' - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-keyfile"]' - tests: - bin_op: and - test_items: - - flag: "/etc/kubernetes/static-pod-resources/secrets/etcd-client/tls.crt" - - flag: "/etc/kubernetes/static-pod-resources/secrets/etcd-client/tls.key" - remediation: | - OpenShift automatically manages TLS and client certificate authentication for etcd. - This is not configurable. 
- scored: false - - - id: 1.2.30 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" - audit: | - # TLS Cert File - openshift-kube-apiserver - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.certFile - # TLS Key File - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.servingInfo.keyFile' - # NOTECI 4.6 extention - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["tls-cert-file"]' - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["tls-private-key-file"]' - tests: - bin_op: and - test_items: - - flag: "/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.crt" - - flag: "/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.key" - remediation: | - OpenShift automatically manages TLS authentication for the API server communication with the node/kublet. - This is not configurable. You may optionally set a custom default certificate to be used by the API server - when serving content in order to enable clients to access the API server at a different host name or without - the need to distribute the cluster-managed certificate authority (CA) certificates to the clients. - Follow the directions in the OpenShift documentation User-provided certificates for the API server - scored: false - - - id: 1.2.31 - text: "Ensure that the --client-ca-file argument is set as appropriate (Manual)" - audit: | - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.clientCA - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["client-ca-file"]' - tests: - test_items: - - flag: "/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt" - remediation: | - OpenShift automatically manages TLS authentication for the API server communication with the node/kublet. - This is not configurable. You may optionally set a custom default certificate to be used by the API - server when serving content in order to enable clients to access the API server at a different host name - or without the need to distribute the cluster-managed certificate authority (CA) certificates to the clients. - - User-provided certificates must be provided in a kubernetes.io/tls type Secret in the openshift-config namespace. - Update the API server cluster configuration, - the apiserver/cluster resource, to enable the use of the user-provided certificate. - scored: false - - - id: 1.2.32 - text: "Ensure that the --etcd-cafile argument is set as appropriate (Manual)" - audit: | - #etcd CA File - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.ca - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-cafile"]' - tests: - test_items: - - flag: "/etc/kubernetes/static-pod-resources/configmaps/etcd-serving-ca/ca-bundle.crt" - remediation: | - None required. OpenShift generates the etcd-cafile and sets the arguments appropriately in the API server. Communication with etcd is secured by the etcd serving CA. 
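The optional "user-provided certificates for the API server" flow referenced in 1.2.30 and 1.2.31 is driven from the apiserver/cluster resource; a hedged sketch of that flow, with the secret name, file paths and host name treated as placeholders:

    # Store the certificate chain and key in the openshift-config namespace.
    oc create secret tls custom-api-cert \
      --cert=/path/to/fullchain.crt --key=/path/to/private.key \
      -n openshift-config
    # Reference it from the APIServer cluster configuration.
    oc patch apiserver cluster --type=merge -p \
      '{"spec":{"servingCerts":{"namedCertificates":[{"names":["api.example.com"],"servingCertificate":{"name":"custom-api-cert"}}]}}}'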
- scored: false - - - id: 1.2.33 - text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" - audit: | - # encrypt the etcd datastore - oc get openshiftapiserver -o=jsonpath='{range.items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}' - tests: - test_items: - - flag: "EncryptionCompleted" - remediation: | - Follow the OpenShift documentation for Encrypting etcd data | Authentication | OpenShift Container Platform 4.5 - https://docs.openshift.com/container-platform/4.5/security/encrypting-etcd.html - scored: false - - - id: 1.2.34 - text: "Ensure that encryption providers are appropriately configured (Manual)" - audit: | - # encrypt the etcd datastore - oc get openshiftapiserver -o=jsonpath='{range.items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}' - tests: - test_items: - - flag: "EncryptionCompleted" - remediation: | - Follow the Kubernetes documentation and configure a EncryptionConfig file. - In this file, choose aescbc, kms or secretbox as the encryption provider. - scored: false - - - id: 1.2.35 - text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" - type: manual - audit: | - # verify cipher suites - oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo - oc get kubeapiservers.operator.openshift.io cluster -o json |jq.spec.observedConfig.servingInfo - oc get openshiftapiservers.operator.openshift.io cluster -o json |jq.spec.observedConfig.servingInfo - oc describe --namespace=openshift-ingress-operator ingresscontroller/default - remediation: | - Verify that the tlsSecurityProfile is set to the value you chose. - Note: The HAProxy Ingress controller image does not support TLS 1.3 - and because the Modern profile requires TLS 1.3, it is not supported. - The Ingress Operator converts the Modern profile to Intermediate. - The Ingress Operator also converts the TLS 1.0 of an Old or Custom profile to 1.1, - and TLS 1.3 of a Custom profile to 1.2. 
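The tlsSecurityProfile discussed in 1.2.35 is configured on the operator resources rather than via kube-apiserver flags; a hedged sketch for the default IngressController, assuming its tlsSecurityProfile field and using Intermediate only as an illustrative choice:

    oc patch ingresscontroller/default -n openshift-ingress-operator --type=merge -p \
      '{"spec":{"tlsSecurityProfile":{"type":"Intermediate","intermediate":{}}}}'
    # Re-run the describe command from the audit above to confirm the profile took effect.
    oc describe --namespace=openshift-ingress-operator ingresscontroller/default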
- scored: false - - - id: 1.3 - text: "Controller Manager" - checks: - - id: 1.3.1 - text: "Ensure that garbage collection is configured as appropriate (Manual)" - type: manual - remediation: | - To configure, follow the directions in Configuring garbage collection for containers and images - https://docs.openshift.com/container-platform/4.5/nodes/nodes/nodes-nodes-garbage-collection.html#nodes-nodes-garbage-collection-configuring_nodes-nodes-configuring - scored: false - - - id: 1.3.2 - text: "Ensure that controller manager healthz endpoints are protected by RBAC (Manual)" - type: manual - audit: | - # Verify configuration for ports, livenessProbe, readinessProbe, healthz - oc -n openshift-kube-controller-manager get cm kube-controller-manager-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers' - # Verify endpoints - oc -n openshift-kube-controller-manager describe endpoints - # Test to validate RBAC enabled on the controller endpoint; check with non-admin role - oc project openshift-kube-controller-manage - POD=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].metadata.name}') - PORT=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].spec.containers[0].ports[0].hostPort}') - # Following should return 403 Forbidden - oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -k - # Create a service account to test RBAC - oc create -n openshift-kube-controller-manager sa permission-test-sa - # Should return 403 Forbidden - SA_TOKEN=$(oc sa -n openshift-kube-controller-manager get-token permission-test-sa) - oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k - # Cleanup - oc delete -n openshift-kube-controller-manager sa permission-test-sa - # As cluster admin, should succeed - CLUSTER_ADMIN_TOKEN=$(oc whoami -t) - oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k - remediation: | - None required; profiling is protected by RBAC. - scored: false - - - id: 1.3.3 - text: "Ensure that the --use-service-account-credentials argument is set to true (Manual)" - audit: | - echo use-service-account-credentials=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["use-service-account-credentials"][]'` - tests: - test_items: - - flag: "use-service-account-credentials" - compare: - op: eq - value: true - remediation: | - The OpenShift Controller Manager operator manages and updates the OpenShift Controller Manager. - The Kubernetes Controller Manager operator manages and updates the Kubernetes Controller Manager deployed on top of OpenShift. - This operator is configured via KubeControllerManager custom resource. - scored: false - - - id: 1.3.4 - text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Manual)" - audit: | - oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["service-account-private-key-file"][]' - tests: - test_items: - - flag: "/etc/kubernetes/static-pod-resources/secrets/service-account-private-key/service-account.key" - remediation: | - None required. - OpenShift manages the service account credentials for the scheduler automatically. 
- scored: false - - - id: 1.3.5 - text: "Ensure that the --root-ca-file argument is set as appropriate (Manual)" - audit: | - oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["root-ca-file"][]' - tests: - test_items: - - flag: "/etc/kubernetes/static-pod-resources/configmaps/serviceaccount-ca/ca-bundle.crt" - remediation: | - None required. - Certificates for OpenShift platform components are automatically created and rotated by the OpenShift Container Platform. - scored: false - - - id: 1.3.6 - text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Manual)" - audit: | - oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["feature-gates"][]' - tests: - test_items: - - flag: "RotateKubeletServerCertificate" - compare: - op: eq - value: "true" - remediation: | - None required. - Certificates for OpenShift platform components are automatically created and rotated by the OpenShift Container Platform. - scored: false - - - id: 1.3.7 - text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Manual)" - audit: | - echo port=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq '.extendedArguments["port"][]'` - echo secure-port=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq '.extendedArguments["secure-port"][]'` - #Following should fail with a http code 403 - POD=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].metadata.name}') - oc rsh -n openshift-kube-controller-manager -c kube-controller-manager $POD curl https://localhost:10257/metrics -k - tests: - bin_op: and - test_items: - - flag: "secure-port" - compare: - op: eq - value: "\"10257\"" - - flag: "port" - compare: - op: eq - value: "\"0\"" - - flag: "\"code\": 403" - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the master node and ensure the correct value for the --bind-address parameter - scored: false - - - id: 1.4 - text: "Scheduler" - checks: - - id: 1.4.1 - text: "Ensure that the healthz endpoints for the scheduler are protected by RBAC (Manual)" - type: manual - audit: | - # check configuration for ports, livenessProbe, readinessProbe, healthz - oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers' - # Test to verify endpoints - oc -n openshift-kube-scheduler describe endpoints - # Test to validate RBAC enabled on the scheduler endpoint; check with non-admin role - oc project openshift-kube-scheduler - POD=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}') - PORT=$(oc get pod $POD -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}') - # Should return 403 Forbidden - oc rsh ${POD} curl http://localhost:${PORT}/metrics -k - # Create a service account to test RBAC - oc create sa permission-test-sa - # Should return 403 Forbidden - SA_TOKEN=$(oc sa get-token permission-test-sa) - oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k - # Cleanup - oc delete sa permission-test-sa - # As cluster admin, should succeed - CLUSTER_ADMIN_TOKEN=$(oc whoami -t) - oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k - remediation: | - A fix to this issue: 
https://bugzilla.redhat.com/show_bug.cgi?id=1889488 None required. - Profiling is protected by RBAC and cannot be disabled. - scored: false - - - id: 1.4.2 - text: "Verify that the scheduler API service is protected by authentication and authorization (Manual)" - type: manual - audit: | - # To verify endpoints - oc -n openshift-kube-scheduler describe endpoints - # To verify that bind-adress is not used in the configuration and that port is set to 0 - oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers' - # To test for RBAC: - oc project openshift-kube-scheduler - POD=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}') - POD_IP=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].status.podIP}') - PORT=$(oc get pod $POD -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}') - # Should return a 403 - oc rsh ${POD} curl http://${POD_IP}:${PORT}/metrics - # Create a service account to test RBAC - oc create sa permission-test-sa - # Should return 403 Forbidden - SA_TOKEN=$(oc sa get-token permission-test-sa) - oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k - # Cleanup - oc delete sa permission-test-sa - # As cluster admin, should succeed - CLUSTER_ADMIN_TOKEN=$(oc whoami -t) - oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k - remediation: | - By default, the --bind-address argument is not present, - the readinessProbe and livenessProbe arguments are set to 10251 and the port argument is set to 0. - Check the status of this issue: https://bugzilla.redhat.com/show_bug.cgi?id=1889488 - scored: false diff --git a/cfg/rh-1.6.0/node.yaml b/cfg/rh-1.6.0/node.yaml deleted file mode 100644 index fb982d6f3..000000000 --- a/cfg/rh-1.6.0/node.yaml +++ /dev/null @@ -1,429 +0,0 @@ ---- -controls: -version: rh-1.0 -id: 4 -text: "Worker Node Security Configuration" -type: "node" -groups: - - id: 4.1 - text: "Worker Node Configuration Files" - checks: - - id: 4.1.1 - text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/systemd/system/kubelet.service 2> /dev/null - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - By default, the kubelet service file has permissions of 644. - scored: true - - - id: 4.1.2 - text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" - audit: | - # Should return root:root for each node - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/systemd/system/kubelet.service 2> /dev/null - tests: - test_items: - - flag: root:root - remediation: | - By default, the kubelet service file has ownership of root:root. 
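The bitmask comparison used by the permission checks in this group (for example 4.1.1) is a "no more permissive than" test: a mode such as 600 or 640 satisfies a 644 requirement, while 664 does not. A small shell illustration of the same idea, with the file path as a placeholder:

    # Emulate the "644 or more restrictive" bitmask check for one file.
    perm=$(stat -c %a /etc/systemd/system/kubelet.service)
    if [ $(( 0$perm & ~0644 & 0777 )) -eq 0 ]; then
      echo "PASS: $perm is 644 or more restrictive"
    else
      echo "FAIL: $perm grants bits outside 644"
    fi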
- scored: true - - - id: 4.1.3 - text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - # Get the pod name in the openshift-sdn namespace - POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-sdn "$POD_NAME" - stat -Lc "$i %n permissions=%a" /config/kube-proxy-config.yaml 2>/dev/null - fi - tests: - bin_op: or - test_items: - - flag: "permissions" - set: true - compare: - op: bitmask - value: "644" - remediation: | - None needed. - scored: false - - - id: 4.1.4 - text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)" - audit: | - # Get the node name where the pod is running - NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') - # Get the pod name in the openshift-sdn namespace - POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - - if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." - else - # Execute the stat command - oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$i %n %U:%G" /config/kube-proxy-config.yaml 2>/dev/null - fi - use_multiple_values: true - tests: - bin_op: or - test_items: - - flag: root:root - remediation: | - None required. The configuration is managed by OpenShift operators. - scored: false - - - id: 4.1.5 - text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Manual)" - audit: | - # Check permissions - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet.conf 2> /dev/null - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - None required. - scored: false - - - id: 4.1.6 - text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Manual)" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet.conf 2> /dev/null - use_multiple_values: true - tests: - test_items: - - flag: root:root - remediation: | - None required. - scored: false - - - id: 4.1.7 - text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated)" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet-ca.crt 2> /dev/null - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - None required. 
- scored: true - - - id: 4.1.8 - text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet-ca.crt 2> /dev/null - use_multiple_values: true - tests: - test_items: - - flag: root:root - remediation: | - None required. - scored: true - - - id: 4.1.9 - text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/lib/kubelet/kubeconfig 2> /dev/null - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - None required. - scored: true - - - id: 4.1.10 - text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /var/lib/kubelet/kubeconfig 2> /dev/null - use_multiple_values: true - tests: - test_items: - - flag: root:root - remediation: | - None required. - scored: true - - - id: 4.2 - text: "Kubelet" - checks: - - id: 4.2.1 - text: "Ensure that the --anonymous-auth argument is set to false (Automated)" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host grep -B4 -A1 anonymous /etc/kubernetes/kubelet.conf 2> /dev/null - use_multiple_values: true - tests: - test_items: - - flag: "enabled: true" - set: false - remediation: | - Follow the instructions in the documentation to create a Kubelet config CRD - and set the anonymous-auth is set to false. - scored: true - - - id: 4.2.2 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Manual)" - type: manual - # Takes a lot of time for connection to fail and - audit: | - POD=$(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}') - TOKEN=$(oc whoami -t) - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc exec -n openshift-kube-apiserver $POD -- curl -sS https://172.25.0.1/api/v1/nodes/$NODE_NAME/proxy/configz -k -H "Authorization:Bearer $TOKEN" | jq -r '.kubeletconfig.authorization.mode' 2> /dev/null - use_multiple_values: true - tests: - test_items: - - flag: "Connection timed out" - remediation: | - None required. Unauthenticated/Unauthorized users have no access to OpenShift nodes. - scored: false - - - id: 4.2.3 - text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host grep clientCAFile /etc/kubernetes/kubelet.conf 2> /dev/null - use_multiple_values: true - tests: - test_items: - - flag: '"clientCAFile": "/etc/kubernetes/kubelet-ca.crt"' - remediation: | - None required. Changing the clientCAFile value is unsupported. 
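The "create a Kubelet config CRD" remediation wording used in this group (for example 4.2.1) maps to OpenShift's KubeletConfig resource applied through a MachineConfigPool; a hedged sketch, with the resource name and the pool label treated as placeholders:

    apiVersion: machineconfiguration.openshift.io/v1
    kind: KubeletConfig
    metadata:
      name: disable-anonymous-auth            # illustrative name
    spec:
      machineConfigPoolSelector:
        matchLabels:
          pools.operator.machineconfiguration.openshift.io/worker: ""   # assumes the stock worker pool label
      kubeletConfig:
        authentication:
          anonymous:
            enabled: false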
- scored: true - - - id: 4.2.4 - text: "Verify that the read only port is not used or is set to 0 (Automated)" - audit: | - echo `oc -n openshift-kube-apiserver get cm kube-apiserver-pod -o yaml | grep --color read-only-port` 2> /dev/null - echo `oc -n openshift-kube-apiserver get cm config -o yaml | grep --color "read-only-port"` 2> /dev/null - tests: - bin_op: or - test_items: - - flag: "read-only-port" - compare: - op: has - value: "[\"0\"]" - - flag: "read-only-port" - set: false - remediation: | - In earlier versions of OpenShift 4, the read-only-port argument is not used. - Follow the instructions in the documentation to create a Kubelet config CRD - and set the --read-only-port is set to 0. - scored: true - - - id: 4.2.5 - text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" - audit: | - # Should return 1 for node - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/${NODE_NAME} -- chroot /host ps -ef | grep kubelet | grep streaming-connection-idle-timeout 2> /dev/null - echo exit_code=$? - # Should return 1 for node - oc debug node/${NODE_NAME} -- chroot /host grep streamingConnectionIdleTimeout /etc/kubernetes/kubelet.conf 2> /dev/null - echo exit_code=$? - use_multiple_values: true - tests: - bin_op: or - test_items: - - flag: --streaming-connection-idle-timeout - compare: - op: noteq - value: 0 - - flag: streamingConnectionIdleTimeout - compare: - op: noteq - value: 0s - - flag: "exit_code" - compare: - op: eq - value: 1 - remediation: | - Follow the instructions in the documentation to create a Kubelet config CRD and set - the --streaming-connection-idle-timeout to the desired value. Do not set the value to 0. - scored: true - - - id: 4.2.6 - text: "Ensure that the --protect-kernel-defaults argument is not set (Manual)" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host more /etc/kubernetes/kubelet.conf 2> /dev/null - tests: - test_items: - - flag: protectKernelDefaults - set: false - remediation: | - None required. The OpenShift 4 kubelet modifies the system tunable; - using the protect-kernel-defaults flag will cause the kubelet to fail on start if the tunables - don't match the kubelet configuration and the OpenShift node will fail to start. - scored: false - - - id: 4.2.7 - text: "Ensure that the --make-iptables-util-chains argument is set to true (Manual)" - audit: | - /bin/bash - flag=make-iptables-util-chains - opt=makeIPTablesUtilChains - # look at each machineconfigpool - while read -r pool nodeconfig; do - # true by default - value='true' - # first look for the flag - oc get machineconfig $nodeconfig -o json | jq -r '.spec.config.systemd[][] | select(.name=="kubelet.service") | .contents' | sed -n "/^ExecStart=/,/^\$/ { /^\\s*--$flag=false/ q 100 }" - # if the above command exited with 100, the flag was false - [ $? == 100 ] && value='false' - # now look in the yaml KubeletConfig - yamlconfig=$(oc get machineconfig $nodeconfig -o json | jq -r '.spec.config.storage.files[] | select(.path=="/etc/kubernetes/kubelet.conf") | .contents.source ' | sed 's/^data:,//' | while read; do echo -e ${REPLY//%/\\x}; done) - echo "$yamlconfig" | sed -n "/^$opt:\\s*false\\s*$/ q 100" - [ $? 
== 100 ] && value='false' - echo "Pool $pool has $flag ($opt) set to $value" - done < <(oc get machineconfigpools -o json | jq -r '.items[] | select(.status.machineCount>0) | .metadata.name + " " + .spec.configuration.name') - use_multiple_values: true - tests: - test_items: - - flag: "set to true" - remediation: | - None required. The --make-iptables-util-chains argument is set to true by default. - scored: false - - - id: 4.2.8 - text: "Ensure that the --hostname-override argument is not set (Manual)" - audit: | - echo `oc get machineconfig 01-worker-kubelet -o yaml | grep hostname-override` - echo `oc get machineconfig 01-master-kubelet -o yaml | grep hostname-override` - tests: - test_items: - - flag: hostname-override - set: false - remediation: | - By default, --hostname-override argument is not set. - scored: false - - - id: 4.2.9 - text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (Manual)" - audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/${NODE_NAME} -- chroot /host cat /etc/kubernetes/kubelet.conf; - oc get machineconfig 01-worker-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050 - oc get machineconfig 01-master-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050 - type: "manual" - remediation: | - Follow the documentation to edit kubelet parameters - https://docs.openshift.com/container-platform/4.5/scalability_and_performance/recommended-host-practices.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters - KubeAPIQPS: - scored: false - - - id: 4.2.10 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" - audit: | - oc get configmap config -n openshift-kube-apiserver -o json \ - | jq -r '.data["config.yaml"]' \ - | jq -r '.apiServerArguments | - .["kubelet-client-certificate"][0], - .["kubelet-client-key"][0] - ' - tests: - bin_op: and - test_items: - - flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.crt" - - flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.key" - remediation: | - OpenShift automatically manages TLS authentication for the API server communication with the node/kublet. - This is not configurable. - scored: true - - - id: 4.2.11 - text: "Ensure that the --rotate-certificates argument is not set to false (Manual)" - audit: | - #Verify the rotateKubeletClientCertificate feature gate is not set to false - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/${NODE_NAME} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep RotateKubeletClientCertificate 2> /dev/null - # Verify the rotateCertificates argument is set to true - oc debug node/${NODE_NAME} -- chroot host grep rotate /etc/kubernetes/kubelet.conf 2> /dev/null - use_multiple_values: true - tests: - bin_op: or - test_items: - - flag: rotateCertificates - compare: - op: eq - value: true - - flag: rotateKubeletClientCertificates - compare: - op: noteq - value: false - - flag: rotateKubeletClientCertificates - set: false - remediation: | - None required. 
- scored: false - - - id: 4.2.12 - text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" - audit: | - #Verify the rotateKubeletServerCertificate feature gate is on - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/${NODE_NAME} -- chroot /host grep RotateKubeletServerCertificate /etc/kubernetes/kubelet.conf 2> /dev/null - # Verify the rotateCertificates argument is set to true - oc debug node/${NODE_NAME} -- chroot host grep rotate /etc/kubernetes/kubelet.conf 2> /dev/null - use_multiple_values: true - tests: - bin_op: or - test_items: - - flag: rotateCertificates - compare: - op: eq - value: true - - flag: RotateKubeletServerCertificate - compare: - op: eq - value: true - remediation: | - By default, kubelet server certificate rotation is disabled. - scored: false - - - id: 4.2.13 - text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" - audit: | - # needs verification - # verify cipher suites - oc describe --namespace=openshift-ingress-operator ingresscontroller/default - oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.observedConfig.servingInfo - oc get openshiftapiservers.operator.openshift.io cluster -o json |jq .spec.observedConfig.servingInfo - oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo - #check value for tlsSecurityProfile; null is returned if default is used - oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.tlsSecurityProfile - type: manual - remediation: | - Follow the directions above and in the OpenShift documentation to configure the tlsSecurityProfile. - Configuring Ingress - scored: false diff --git a/cfg/rh-1.6.0/policies.yaml b/cfg/rh-1.6.0/policies.yaml deleted file mode 100644 index e90cd877f..000000000 --- a/cfg/rh-1.6.0/policies.yaml +++ /dev/null @@ -1,287 +0,0 @@ ---- -controls: -version: rh-1.0 -id: 5 -text: "Kubernetes Policies" -type: "policies" -groups: - - id: 5.1 - text: "RBAC and Service Accounts" - checks: - - id: 5.1.1 - text: "Ensure that the cluster-admin role is only used where required (Manual)" - type: "manual" - audit: | - #To get a list of users and service accounts with the cluster-admin role - oc get clusterrolebindings -o=customcolumns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | - grep cluster-admin - #To verity that kbueadmin is removed, no results should be returned - oc get secrets kubeadmin -n kube-system - remediation: | - Identify all clusterrolebindings to the cluster-admin role. Check if they are used and - if they need this role or if they could use a role with fewer privileges. - Where possible, first bind users to a lower privileged role and then remove the - clusterrolebinding to the cluster-admin role : - kubectl delete clusterrolebinding [name] - scored: false - - - id: 5.1.2 - text: "Minimize access to secrets (Manual)" - type: "manual" - remediation: | - Where possible, remove get, list and watch access to secret objects in the cluster. 
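Before trimming roles as suggested in 5.1.2, it helps to see which subjects currently hold that access; a short sketch using oc's who-can helper (the namespace name is a placeholder):

    # Subjects able to read secrets cluster-wide and in one namespace.
    oc adm policy who-can get secrets
    oc adm policy who-can list secrets -n my-namespace
    oc adm policy who-can watch secrets -n my-namespace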
- scored: false - - - id: 5.1.3 - text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" - type: "manual" - audit: | - #needs verification - oc get roles --all-namespaces -o yaml - for i in $(oc get roles -A -o jsonpath='{.items[*].metadata.name}'); do oc - describe clusterrole ${i}; done - #Retrieve the cluster roles defined in the cluster and review for wildcards - oc get clusterroles -o yaml - for i in $(oc get clusterroles -o jsonpath='{.items[*].metadata.name}'); do - oc describe clusterrole ${i}; done - remediation: | - Where possible replace any use of wildcards in clusterroles and roles with specific - objects or actions. - scored: false - - - id: 5.1.4 - text: "Minimize access to create pods (Manual)" - type: "manual" - remediation: | - Where possible, remove create access to pod objects in the cluster. - scored: false - - - id: 5.1.5 - text: "Ensure that default service accounts are not actively used. (Manual)" - type: "manual" - remediation: | - None required. - scored: false - - - id: 5.1.6 - text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" - type: "manual" - remediation: | - Modify the definition of pods and service accounts which do not need to mount service - account tokens to disable it. - scored: false - - - id: 5.2 - text: "Pod Security Policies" - checks: - - id: 5.2.1 - text: "Minimize the admission of privileged containers (Manual)" - audit: | - # needs verification - oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegedContainer:.allowPrivilegedContainer - tests: - test_items: - - flag: "false" - remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Allow - Privileged field is set to false. - scored: false - - - id: 5.2.2 - text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" - audit: | - oc get scc -o=custom-columns=NAME:.metadata.name,allowHostPID:.allowHostPID - tests: - test_items: - - flag: "false" - remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host - PID field is set to false. - scored: false - - - id: 5.2.3 - text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" - audit: | - oc get scc -o=custom-columns=NAME:.metadata.name,allowHostIPC:.allowHostIPC - tests: - test_items: - - flag: "false" - remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host - IPC field is set to false. - scored: false - - - id: 5.2.4 - text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" - audit: | - oc get scc -o=custom-columns=NAME:.metadata.name,allowHostNetwork:.allowHostNetwork - tests: - test_items: - - flag: "false" - remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host - Network field is omitted or set to false. - scored: false - - - id: 5.2.5 - text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" - audit: | - oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegeEscalation:.allowPrivilegeEscalation - tests: - test_items: - - flag: "false" - remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Allow - Privilege Escalation field is omitted or set to false. 
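Taken together, checks 5.2.1 through 5.2.5 describe an SCC shaped roughly like the sketch below; the name is illustrative, the users/groups bindings are left empty, and OpenShift's built-in restricted SCC already enforces most of this:

    apiVersion: security.openshift.io/v1
    kind: SecurityContextConstraints
    metadata:
      name: cis-restricted              # illustrative name
    allowPrivilegedContainer: false     # 5.2.1
    allowHostPID: false                 # 5.2.2
    allowHostIPC: false                 # 5.2.3
    allowHostNetwork: false             # 5.2.4
    allowPrivilegeEscalation: false     # 5.2.5
    requiredDropCapabilities: ["ALL"]
    runAsUser:
      type: MustRunAsNonRoot
    seLinuxContext:
      type: MustRunAs
    fsGroup:
      type: MustRunAs
    supplementalGroups:
      type: RunAsAny
    readOnlyRootFilesystem: false
    volumes: ["configMap", "downwardAPI", "emptyDir", "persistentVolumeClaim", "projected", "secret"]
    users: []
    groups: []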
- scored: false - - - id: 5.2.6 - text: "Minimize the admission of root containers (Manual)" - audit: | - # needs verification # | awk 'NR>1 {gsub("map\\[type:", "", $2); gsub("\\]$", "", $2); print $1 ":" $2}' - oc get scc -o=custom-columns=NAME:.metadata.name,runAsUser:.runAsUser.type - #For SCCs with MustRunAs verify that the range of UIDs does not include 0 - oc get scc -o=custom-columns=NAME:.metadata.name,uidRangeMin:.runAsUser.uidRangeMin,uidRangeMax:.runAsUser.uidRangeMax - tests: - bin_op: or - test_items: - - flag: "MustRunAsNonRoot" - - flag: "MustRunAs" - compare: - op: nothave - value: 0 - remediation: | - None required. By default, OpenShift includes the non-root SCC with the the Run As User - Strategy is set to either MustRunAsNonRoot. If additional SCCs are appropriate, follow the - OpenShift documentation to create custom SCCs. - scored: false - - - id: 5.2.7 - text: "Minimize the admission of containers with the NET_RAW capability (Manual)" - audit: | - # needs verification - oc get scc -o=custom-columns=NAME:.metadata.name,requiredDropCapabilities:.requiredDropCapabilities - tests: - bin_op: or - test_items: - - flag: "ALL" - - flag: "NET_RAW" - remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Required - Drop Capabilities is set to include either NET_RAW or ALL. - scored: false - - - id: 5.2.8 - text: "Minimize the admission of containers with added capabilities (Manual)" - type: "manual" - remediation: | - Ensure that Allowed Capabilities is set to an empty array for every SCC in the cluster - except for the privileged SCC. - scored: false - - - id: 5.2.9 - text: "Minimize the admission of containers with capabilities assigned (Manual)" - type: "manual" - remediation: | - Review the use of capabilites in applications running on your cluster. Where a namespace - contains applicaions which do not require any Linux capabities to operate consider - adding a SCC which forbids the admission of containers which do not drop all capabilities. - scored: false - - - id: 5.3 - text: "Network Policies and CNI" - checks: - - id: 5.3.1 - text: "Ensure that the CNI in use supports Network Policies (Manual)" - type: "manual" - remediation: | - None required. - scored: false - - - id: 5.3.2 - text: "Ensure that all Namespaces have Network Policies defined (Manual)" - type: "manual" - audit: | - #Run the following command and review the NetworkPolicy objects created in the cluster. - oc -n all get networkpolicy - remediation: | - Follow the documentation and create NetworkPolicy objects as you need them. - scored: false - - - id: 5.4 - text: "Secrets Management" - checks: - - id: 5.4.1 - text: "Prefer using secrets as files over secrets as environment variables (Manual)" - type: "manual" - audit: | - #Run the following command to find references to objects which use environment variables defined from secrets. - oc get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} - {.metadata.name} {"\n"}{end}' -A - remediation: | - If possible, rewrite application code to read secrets from mounted secret files, rather than - from environment variables. - scored: false - - - id: 5.4.2 - text: "Consider external secret storage (Manual)" - type: "manual" - remediation: | - Refer to the secrets management options offered by your cloud provider or a third-party - secrets management solution. 
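The 5.4.1 recommendation amounts to mounting the secret as a read-only volume instead of exposing it through secretKeyRef environment variables; a minimal sketch with placeholder names:

    apiVersion: v1
    kind: Pod
    metadata:
      name: secret-as-file-example               # illustrative
    spec:
      containers:
        - name: app
          image: registry.example.com/app:latest # placeholder image
          volumeMounts:
            - name: app-credentials
              mountPath: /etc/app/creds
              readOnly: true
      volumes:
        - name: app-credentials
          secret:
            secretName: app-credentials           # placeholder Secret name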
- scored: false - - - id: 5.5 - text: "Extensible Admission Control" - checks: - - id: 5.5.1 - text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" - type: "manual" - remediation: | - Follow the OpenShift documentation: [Image configuration resources](https://docs.openshift.com/container-platform/4.5/openshift_images/image-configuration.html - scored: false - - - id: 5.7 - text: "General Policies" - checks: - - id: 5.7.1 - text: "Create administrative boundaries between resources using namespaces (Manual)" - type: "manual" - audit: | - #Run the following command and review the namespaces created in the cluster. - oc get namespaces - #Ensure that these namespaces are the ones you need and are adequately administered as per your requirements. - remediation: | - Follow the documentation and create namespaces for objects in your deployment as you need - them. - scored: false - - - id: 5.7.2 - text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)" - type: "manual" - remediation: | - To enable the default seccomp profile, use the reserved value /runtime/default that will - make sure that the pod uses the default policy available on the host. - scored: false - - - id: 5.7.3 - text: "Apply Security Context to Your Pods and Containers (Manual)" - type: "manual" - remediation: | - Follow the Kubernetes documentation and apply security contexts to your pods. For a - suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker - Containers. - scored: false - - - id: 5.7.4 - text: "The default namespace should not be used (Manual)" - type: "manual" - audit: | - #Run this command to list objects in default namespace - oc project default - oc get all - #The only entries there should be system managed resources such as the kubernetes and openshift service - remediation: | - Ensure that namespaces are created to allow for appropriate segregation of Kubernetes - resources and that all new resources are created in a specific namespace. - scored: false From e85614e3e26616e3265aaaf7cac7a69b7354c396 Mon Sep 17 00:00:00 2001 From: deboshree-b Date: Mon, 26 Aug 2024 07:31:24 +0530 Subject: [PATCH 09/13] Revert "NDEV-20011 : adding CIS GKE-1.6.0 benchmarks" This reverts commit 75ead5448091d9ec9c088f5dfb71b1d140b1a90e. 
--- cfg/gke-1.6.0/config.yaml | 2 - cfg/gke-1.6.0/controlplane.yaml | 35 -- cfg/gke-1.6.0/managedservices.yaml | 706 ----------------------------- cfg/gke-1.6.0/master.yaml | 6 - cfg/gke-1.6.0/node.yaml | 335 -------------- cfg/gke-1.6.0/policies.yaml | 239 ---------- 6 files changed, 1323 deletions(-) delete mode 100644 cfg/gke-1.6.0/config.yaml delete mode 100644 cfg/gke-1.6.0/controlplane.yaml delete mode 100644 cfg/gke-1.6.0/managedservices.yaml delete mode 100644 cfg/gke-1.6.0/master.yaml delete mode 100644 cfg/gke-1.6.0/node.yaml delete mode 100644 cfg/gke-1.6.0/policies.yaml diff --git a/cfg/gke-1.6.0/config.yaml b/cfg/gke-1.6.0/config.yaml deleted file mode 100644 index b7839455a..000000000 --- a/cfg/gke-1.6.0/config.yaml +++ /dev/null @@ -1,2 +0,0 @@ ---- -## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/gke-1.6.0/controlplane.yaml b/cfg/gke-1.6.0/controlplane.yaml deleted file mode 100644 index 515a24752..000000000 --- a/cfg/gke-1.6.0/controlplane.yaml +++ /dev/null @@ -1,35 +0,0 @@ ---- -controls: -version: "gke-1.6.0" -id: 2 -text: "Control Plane Configuration" -type: "controlplane" -groups: - - id: 2.1 - text: "Authentication and Authorization" - checks: - - id: 2.1.1 - text: "Client certificate authentication should not be used for users (Manual)" - type: "manual" - remediation: | - Alternative mechanisms provided by Kubernetes such as the use of OIDC should be - implemented in place of client certificates. - You can remediate the availability of client certificates in your GKE cluster. See - Recommendation 5.8.1. - scored: false - - - id: 2.2 - text: "Logging" - type: skip - checks: - - id: 2.2.1 - text: "Ensure that a minimal audit policy is created (Manual)" - type: "manual" - remediation: "This control cannot be modified in GKE." - scored: false - - - id: 2.2.2 - text: "Ensure that the audit policy covers key security concerns (Manual)" - type: "manual" - remediation: "This control cannot be modified in GKE." 
- scored: false diff --git a/cfg/gke-1.6.0/managedservices.yaml b/cfg/gke-1.6.0/managedservices.yaml deleted file mode 100644 index a15f49c9c..000000000 --- a/cfg/gke-1.6.0/managedservices.yaml +++ /dev/null @@ -1,706 +0,0 @@ ---- -controls: -version: "gke-1.6.0" -id: 5 -text: "Managed Services" -type: "managedservices" -groups: - - id: 5.1 - text: "Image Registry and Image Scanning" - checks: - - id: 5.1.1 - text: "Ensure Image Vulnerability Scanning using GCR Container Analysis - or a third-party provider (Manual)" - type: "manual" - remediation: | - Using Command Line: - - gcloud services enable containerscanning.googleapis.com - scored: false - - - id: 5.1.2 - text: "Minimize user access to GCR (Manual)" - type: "manual" - remediation: | - Using Command Line: - To change roles at the GCR bucket level: - Firstly, run the following if read permissions are required: - - gsutil iam ch [TYPE]:[EMAIL-ADDRESS]:objectViewer - gs://artifacts.[PROJECT_ID].appspot.com - - Then remove the excessively privileged role (Storage Admin / Storage Object Admin / - Storage Object Creator) using: - - gsutil iam ch -d [TYPE]:[EMAIL-ADDRESS]:[ROLE] - gs://artifacts.[PROJECT_ID].appspot.com - - where: - [TYPE] can be one of the following: - o user, if the [EMAIL-ADDRESS] is a Google account - o serviceAccount, if [EMAIL-ADDRESS] specifies a Service account - [EMAIL-ADDRESS] can be one of the following: - o a Google account (for example, someone@example.com) - o a Cloud IAM service account - To modify roles defined at the project level and subsequently inherited within the GCR - bucket, or the Service Account User role, extract the IAM policy file, modify it accordingly - and apply it using: - - gcloud projects set-iam-policy [PROJECT_ID] [POLICY_FILE] - scored: false - - - id: 5.1.3 - text: "Minimize cluster access to read-only for GCR (Manual)" - type: "manual" - remediation: | - Using Command Line: - For an account explicitly granted to the bucket. First, add read access to the Kubernetes - Service Account - - gsutil iam ch [TYPE]:[EMAIL-ADDRESS]:objectViewer - gs://artifacts.[PROJECT_ID].appspot.com - - where: - [TYPE] can be one of the following: - o user, if the [EMAIL-ADDRESS] is a Google account - o serviceAccount, if [EMAIL-ADDRESS] specifies a Service account - [EMAIL-ADDRESS] can be one of the following: - o a Google account (for example, someone@example.com) - o a Cloud IAM service account - - Then remove the excessively privileged role (Storage Admin / Storage Object Admin / - Storage Object Creator) using: - - gsutil iam ch -d [TYPE]:[EMAIL-ADDRESS]:[ROLE] - gs://artifacts.[PROJECT_ID].appspot.com - - For an account that inherits access to the GCR Bucket through Project level permissions, - modify the Projects IAM policy file accordingly, then upload it using: - - gcloud projects set-iam-policy [PROJECT_ID] [POLICY_FILE] - scored: false - - - id: 5.1.4 - text: "Minimize Container Registries to only those approved (Manual)" - type: "manual" - remediation: | - Using Command Line: - First, update the cluster to enable Binary Authorization: - - gcloud container cluster update [CLUSTER_NAME] \ - --enable-binauthz - - Create a Binary Authorization Policy using the Binary Authorization Policy Reference - (https://cloud.google.com/binary-authorization/docs/policy-yaml-reference) for guidance. 
- Import the policy file into Binary Authorization: - - gcloud container binauthz policy import [YAML_POLICY] - scored: false - - - id: 5.2 - text: "Identity and Access Management (IAM)" - checks: - - id: 5.2.1 - text: "Ensure GKE clusters are not running using the Compute Engine - default service account (Manual)" - type: "manual" - remediation: | - Using Command Line: - Firstly, create a minimally privileged service account: - - gcloud iam service-accounts create [SA_NAME] \ - --display-name "GKE Node Service Account" - export NODE_SA_EMAIL=`gcloud iam service-accounts list \ - --format='value(email)' \ - --filter='displayName:GKE Node Service Account'` - - Grant the following roles to the service account: - - export PROJECT_ID=`gcloud config get-value project` - gcloud projects add-iam-policy-binding $PROJECT_ID \ - --member serviceAccount:$NODE_SA_EMAIL \ - --role roles/monitoring.metricWriter - gcloud projects add-iam-policy-binding $PROJECT_ID \ - --member serviceAccount:$NODE_SA_EMAIL \ - --role roles/monitoring.viewer - gcloud projects add-iam-policy-binding $PROJECT_ID \ - --member serviceAccount:$NODE_SA_EMAIL \ - --role roles/logging.logWriter - - To create a new Node pool using the Service account, run the following command: - - gcloud container node-pools create [NODE_POOL] \ - --service-account=[SA_NAME]@[PROJECT_ID].iam.gserviceaccount.com \ - --cluster=[CLUSTER_NAME] --zone [COMPUTE_ZONE] - - You will need to migrate your workloads to the new Node pool, and delete Node pools that - use the default service account to complete the remediation. - scored: false - - - id: 5.2.2 - text: "Prefer using dedicated GCP Service Accounts and Workload Identity (Manual)" - type: "manual" - remediation: | - Using Command Line: - - gcloud beta container clusters update [CLUSTER_NAME] --zone [CLUSTER_ZONE] \ - --identity-namespace=[PROJECT_ID].svc.id.goog - - Note that existing Node pools are unaffected. New Node pools default to --workload- - metadata-from-node=GKE_METADATA_SERVER . - - Then, modify existing Node pools to enable GKE_METADATA_SERVER: - - gcloud beta container node-pools update [NODEPOOL_NAME] \ - --cluster=[CLUSTER_NAME] --zone [CLUSTER_ZONE] \ - --workload-metadata-from-node=GKE_METADATA_SERVER - - You may also need to modify workloads in order for them to use Workload Identity as - described within https://cloud.google.com/kubernetes-engine/docs/how-to/workload- - identity. Also consider the effects on the availability of your hosted workloads as Node - pools are updated, it may be more appropriate to create new Node Pools. 
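The "modify workloads to use Workload Identity" step above generally means linking a Kubernetes ServiceAccount to a GCP service account; a sketch with placeholder project, namespace and account names:

    # Allow the Kubernetes SA to impersonate the GCP SA.
    gcloud iam service-accounts add-iam-policy-binding GSA_NAME@PROJECT_ID.iam.gserviceaccount.com \
      --role roles/iam.workloadIdentityUser \
      --member "serviceAccount:PROJECT_ID.svc.id.goog[NAMESPACE/KSA_NAME]"
    # Annotate the Kubernetes SA so pods using it assume the GCP SA's identity.
    kubectl annotate serviceaccount KSA_NAME -n NAMESPACE \
      iam.gke.io/gcp-service-account=GSA_NAME@PROJECT_ID.iam.gserviceaccount.com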
- scored: false - - - id: 5.3 - text: "Cloud Key Management Service (Cloud KMS)" - checks: - - id: 5.3.1 - text: "Ensure Kubernetes Secrets are encrypted using keys managed in Cloud KMS (Manual)" - type: "manual" - remediation: | - Using Command Line: - To create a key - - Create a key ring: - - gcloud kms keyrings create [RING_NAME] \ - --location [LOCATION] \ - --project [KEY_PROJECT_ID] - - Create a key: - - gcloud kms keys create [KEY_NAME] \ - --location [LOCATION] \ - --keyring [RING_NAME] \ - --purpose encryption \ - --project [KEY_PROJECT_ID] - - Grant the Kubernetes Engine Service Agent service account the Cloud KMS CryptoKey - Encrypter/Decrypter role: - - gcloud kms keys add-iam-policy-binding [KEY_NAME] \ - --location [LOCATION] \ - --keyring [RING_NAME] \ - --member serviceAccount:[SERVICE_ACCOUNT_NAME] \ - --role roles/cloudkms.cryptoKeyEncrypterDecrypter \ - --project [KEY_PROJECT_ID] - - To create a new cluster with Application-layer Secrets Encryption: - - gcloud container clusters create [CLUSTER_NAME] \ - --cluster-version=latest \ - --zone [ZONE] \ - --database-encryption-key projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKey s/[KEY_NAME] \ - --project [CLUSTER_PROJECT_ID] - - To enable on an existing cluster: - - gcloud container clusters update [CLUSTER_NAME] \ - --zone [ZONE] \ - --database-encryption-key projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKey s/[KEY_NAME] \ - --project [CLUSTER_PROJECT_ID] - scored: false - - - id: 5.4 - text: "Node Metadata" - checks: - - id: 5.4.1 - text: "Ensure legacy Compute Engine instance metadata APIs are Disabled (Automated)" - type: "manual" - remediation: | - Using Command Line: - To update an existing cluster, create a new Node pool with the legacy GCE metadata - endpoint disabled: - - gcloud container node-pools create [POOL_NAME] \ - --metadata disable-legacy-endpoints=true \ - --cluster [CLUSTER_NAME] \ - --zone [COMPUTE_ZONE] - - You will need to migrate workloads from any existing non-conforming Node pools, to the - new Node pool, then delete non-conforming Node pools to complete the remediation. - scored: false - - - id: 5.4.2 - text: "Ensure the GKE Metadata Server is Enabled (Automated)" - type: "manual" - remediation: | - Using Command Line: - gcloud beta container clusters update [CLUSTER_NAME] \ - --identity-namespace=[PROJECT_ID].svc.id.goog - Note that existing Node pools are unaffected. New Node pools default to --workload- - metadata-from-node=GKE_METADATA_SERVER . - - To modify an existing Node pool to enable GKE Metadata Server: - - gcloud beta container node-pools update [NODEPOOL_NAME] \ - --cluster=[CLUSTER_NAME] \ - --workload-metadata-from-node=GKE_METADATA_SERVER - - You may also need to modify workloads in order for them to use Workload Identity as - described within https://cloud.google.com/kubernetes-engine/docs/how-to/workload- - identity. 
- scored: false - - - id: 5.5 - text: "Node Configuration and Maintenance" - checks: - - id: 5.5.1 - text: "Ensure Container-Optimized OS (COS) is used for GKE node images (Automated)" - type: "manual" - remediation: | - Using Command Line: - To set the node image to cos for an existing cluster's Node pool: - - gcloud container clusters upgrade [CLUSTER_NAME]\ - --image-type cos \ - --zone [COMPUTE_ZONE] --node-pool [POOL_NAME] - scored: false - - - id: 5.5.2 - text: "Ensure Node Auto-Repair is enabled for GKE nodes (Automated)" - type: "manual" - remediation: | - Using Command Line: - To enable node auto-repair for an existing cluster with Node pool, run the following - command: - - gcloud container node-pools update [POOL_NAME] \ - --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ - --enable-autorepair - scored: false - - - id: 5.5.3 - text: "Ensure Node Auto-Upgrade is enabled for GKE nodes (Automated)" - type: "manual" - remediation: | - Using Command Line: - To enable node auto-upgrade for an existing cluster's Node pool, run the following - command: - - gcloud container node-pools update [NODE_POOL] \ - --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ - --enable-autoupgrade - scored: false - - - id: 5.5.4 - text: "Automate GKE version management using Release Channels (Manual)" - type: "manual" - remediation: | - Using Command Line: - Create a new cluster by running the following command: - - gcloud beta container clusters create [CLUSTER_NAME] \ - --zone [COMPUTE_ZONE] \ - --release-channel [RELEASE_CHANNEL] - - where [RELEASE_CHANNEL] is stable or regular according to your needs. - scored: false - - - id: 5.5.5 - text: "Ensure Shielded GKE Nodes are Enabled (Manual)" - type: "manual" - remediation: | - Using Command Line: - To create a Node pool within the cluster with Integrity Monitoring enabled, run the - following command: - - gcloud beta container node-pools create [NODEPOOL_NAME] \ - --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ - --shielded-integrity-monitoring - - You will also need to migrate workloads from existing non-conforming Node pools to the - newly created Node pool, then delete the non-conforming pools. - scored: false - - - id: 5.5.6 - text: "Ensure Integrity Monitoring for Shielded GKE Nodes is Enabled (Automated)" - type: "manual" - remediation: | - Using Command Line: - To create a Node pool within the cluster with Integrity Monitoring enabled, run the - following command: - - gcloud beta container node-pools create [NODEPOOL_NAME] \ - --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ - --shielded-integrity-monitoring - - You will also need to migrate workloads from existing non-conforming Node pools to the newly created Node pool, - then delete the non-conforming pools. - scored: false - - - id: 5.5.7 - text: "Ensure Secure Boot for Shielded GKE Nodes is Enabled (Automated)" - type: "manual" - remediation: | - Using Command Line: - To create a Node pool within the cluster with Secure Boot enabled, run the following - command: - - gcloud beta container node-pools create [NODEPOOL_NAME] \ - --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ - --shielded-secure-boot - - You will also need to migrate workloads from existing non-conforming Node pools to the - newly created Node pool, then delete the non-conforming pools. 
- scored: false - - - id: 5.6 - text: "Cluster Networking" - checks: - - id: 5.6.1 - text: "Enable VPC Flow Logs and Intranode Visibility (Automated)" - type: "manual" - remediation: | - Using Command Line: - To enable intranode visibility on an existing cluster, run the following command: - - gcloud beta container clusters update [CLUSTER_NAME] \ - --enable-intra-node-visibility - scored: false - - - id: 5.6.2 - text: "Ensure use of VPC-native clusters (Automated)" - type: "manual" - remediation: | - Using Command Line: - To enable Alias IP on a new cluster, run the following command: - - gcloud container clusters create [CLUSTER_NAME] \ - --zone [COMPUTE_ZONE] \ - --enable-ip-alias - scored: false - - - id: 5.6.3 - text: "Ensure Master Authorized Networks is Enabled (Manual)" - type: "manual" - remediation: | - Using Command Line: - To check Master Authorized Networks status for an existing cluster, run the following - command; - - gcloud container clusters describe [CLUSTER_NAME] \ - --zone [COMPUTE_ZONE] \ - --format json | jq '.masterAuthorizedNetworksConfig' - - The output should return - - { - "enabled": true - } - - if Master Authorized Networks is enabled. - - If Master Authorized Networks is disabled, the - above command will return null ( { } ). - scored: false - - - id: 5.6.4 - text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Manual)" - type: "manual" - remediation: | - Using Command Line: - Create a cluster with a Private Endpoint enabled and Public Access disabled by including - the --enable-private-endpoint flag within the cluster create command: - - gcloud container clusters create [CLUSTER_NAME] \ - --enable-private-endpoint - - Setting this flag also requires the setting of --enable-private-nodes , --enable-ip-alias - and --master-ipv4-cidr=[MASTER_CIDR_RANGE] . - scored: false - - - id: 5.6.5 - text: "Ensure clusters are created with Private Nodes (Manual)" - type: "manual" - remediation: | - Using Command Line: - To create a cluster with Private Nodes enabled, include the --enable-private-nodes flag - within the cluster create command: - - gcloud container clusters create [CLUSTER_NAME] \ - --enable-private-nodes - - Setting this flag also requires the setting of --enable-ip-alias and --master-ipv4- - cidr=[MASTER_CIDR_RANGE] . - scored: false - - - id: 5.6.6 - text: "Consider firewalling GKE worker nodes (Manual)" - type: "manual" - remediation: | - Using Command Line: - Use the following command to generate firewall rules, setting the variables as appropriate. - You may want to use the target [TAG] and [SERVICE_ACCOUNT] previously identified. 
- - gcloud compute firewall-rules create FIREWALL_RULE_NAME \ - --network [NETWORK] \ - --priority [PRIORITY] \ - --direction [DIRECTION] \ - --action [ACTION] \ - --target-tags [TAG] \ - --target-service-accounts [SERVICE_ACCOUNT] \ - --source-ranges [SOURCE_CIDR-RANGE] \ - --source-tags [SOURCE_TAGS] \ - --source-service-accounts=[SOURCE_SERVICE_ACCOUNT] \ - --destination-ranges [DESTINATION_CIDR_RANGE] \ - --rules [RULES] - scored: false - - - id: 5.6.7 - text: "Ensure Network Policy is Enabled and set as appropriate (Manual)" - type: "manual" - remediation: | - Using Command Line: - To enable Network Policy for an existing cluster, firstly enable the Network Policy add-on: - - gcloud container clusters update [CLUSTER_NAME] \ - --zone [COMPUTE_ZONE] \ - --update-addons NetworkPolicy=ENABLED - - Then, enable Network Policy: - - gcloud container clusters update [CLUSTER_NAME] \ - --zone [COMPUTE_ZONE] \ - --enable-network-policy - scored: false - - - id: 5.6.8 - text: "Ensure use of Google-managed SSL Certificates (Manual)" - type: "manual" - remediation: | - If services of type:LoadBalancer are discovered, consider replacing the Service with an - Ingress. - - To configure the Ingress and use Google-managed SSL certificates, follow the instructions - as listed at https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs. - scored: false - - - id: 5.7 - text: "Logging" - checks: - - id: 5.7.1 - text: "Ensure Stackdriver Kubernetes Logging and Monitoring is Enabled (Automated)" - type: "manual" - remediation: | - Using Command Line: - - STACKDRIVER KUBERNETES ENGINE MONITORING SUPPORT (PREFERRED): - To enable Stackdriver Kubernetes Engine Monitoring for an existing cluster, run the - following command: - - gcloud container clusters update [CLUSTER_NAME] \ - --zone [COMPUTE_ZONE] \ - --enable-stackdriver-kubernetes - - LEGACY STACKDRIVER SUPPORT: - Both Logging and Monitoring support must be enabled. - To enable Legacy Stackdriver Logging for an existing cluster, run the following command: - - gcloud container clusters update [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ - --logging-service logging.googleapis.com - - To enable Legacy Stackdriver Monitoring for an existing cluster, run the following - command: - - gcloud container clusters update [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ - --monitoring-service monitoring.googleapis.com - scored: false - - - id: 5.7.2 - text: "Enable Linux auditd logging (Manual)" - type: "manual" - remediation: | - Using Command Line: - Download the example manifests: - - curl https://raw.githubusercontent.com/GoogleCloudPlatform/k8s-node-tools/master/os-audit/cos-auditd-logging.yaml \ - > cos-auditd-logging.yaml - - Edit the example manifests if needed. Then, deploy them: - - kubectl apply -f cos-auditd-logging.yaml - - Verify that the logging Pods have started. 
If you defined a different Namespace in your - manifests, replace cos-auditd with the name of the namespace you're using: - - kubectl get pods --namespace=cos-auditd - scored: false - - - id: 5.8 - text: "Authentication and Authorization" - checks: - - id: 5.8.1 - text: "Ensure Basic Authentication using static passwords is Disabled (Automated)" - type: "manual" - remediation: | - Using Command Line: - To update an existing cluster and disable Basic Authentication by removing the static - password: - - gcloud container clusters update [CLUSTER_NAME] \ - --no-enable-basic-auth - scored: false - - - id: 5.8.2 - text: "Ensure authentication using Client Certificates is Disabled (Automated)" - type: "manual" - remediation: | - Using Command Line: - Create a new cluster without a Client Certificate: - - gcloud container clusters create [CLUSTER_NAME] \ - --no-issue-client-certificate - scored: false - - - id: 5.8.3 - text: "Manage Kubernetes RBAC users with Google Groups for GKE (Manual)" - type: "manual" - remediation: | - Using Command Line: - Follow the G Suite Groups instructions at https://cloud.google.com/kubernetes- - engine/docs/how-to/role-based-access-control#google-groups-for-gke. - - Then, create a cluster with - - gcloud beta container clusters create my-cluster \ - --security-group="gke-security-groups@[yourdomain.com]" - - Finally create Roles, ClusterRoles, RoleBindings, and ClusterRoleBindings that - reference your G Suite Groups. - scored: false - - - id: 5.8.4 - text: "Ensure Legacy Authorization (ABAC) is Disabled (Automated)" - type: "manual" - remediation: | - Using Command Line: - To disable Legacy Authorization for an existing cluster, run the following command: - - gcloud container clusters update [CLUSTER_NAME] \ - --zone [COMPUTE_ZONE] \ - --no-enable-legacy-authorization - scored: false - - - id: 5.9 - text: "Storage" - checks: - - id: 5.9.1 - text: "Enable Customer-Managed Encryption Keys (CMEK) for GKE Persistent Disks (PD) (Manual)" - type: "manual" - remediation: | - Using Command Line: - FOR NODE BOOT DISKS: - Create a new node pool using customer-managed encryption keys for the node boot disk, of - [DISK_TYPE] either pd-standard or pd-ssd : - - gcloud beta container node-pools create [CLUSTER_NAME] \ - --disk-type [DISK_TYPE] \ - --boot-disk-kms-key \ - projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME] - - Create a cluster using customer-managed encryption keys for the node boot disk, of - [DISK_TYPE] either pd-standard or pd-ssd : - - gcloud beta container clusters create [CLUSTER_NAME] \ - --disk-type [DISK_TYPE] \ - --boot-disk-kms-key \ - projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME] - - FOR ATTACHED DISKS: - Follow the instructions detailed at https://cloud.google.com/kubernetes- - engine/docs/how-to/using-cmek. 
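As a rough verification sketch (placeholder names, not part of the benchmark YAML), the key configured for a node pool's boot disk can usually be read back with a describe call:

      gcloud container node-pools describe [POOL_NAME] \
        --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \
        --format json | jq '.config.bootDiskKmsKey'

An empty or null result suggests the default Google-managed key is still in use.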
- scored: false - - - id: 5.10 - text: "Other Cluster Configurations" - checks: - - id: 5.10.1 - text: "Ensure Kubernetes Web UI is Disabled (Automated)" - type: "manual" - remediation: | - Using Command Line: - To disable the Kubernetes Dashboard on an existing cluster, run the following command: - - gcloud container clusters update [CLUSTER_NAME] \ - --zone [ZONE] \ - --update-addons=KubernetesDashboard=DISABLED - scored: false - - - id: 5.10.2 - text: "Ensure that Alpha clusters are not used for production workloads (Automated)" - type: "manual" - remediation: | - Using Command Line: - Upon creating a new cluster - - gcloud container clusters create [CLUSTER_NAME] \ - --zone [COMPUTE_ZONE] - - Do not use the --enable-kubernetes-alpha argument. - scored: false - - - id: 5.10.3 - text: "Ensure Pod Security Policy is Enabled and set as appropriate (Manual)" - type: "manual" - remediation: | - Using Command Line: - To enable Pod Security Policy for an existing cluster, run the following command: - - gcloud beta container clusters update [CLUSTER_NAME] \ - --zone [COMPUTE_ZONE] \ - --enable-pod-security-policy - scored: false - - - id: 5.10.4 - text: "Consider GKE Sandbox for running untrusted workloads (Manual)" - type: "manual" - remediation: | - Using Command Line: - To enable GKE Sandbox on an existing cluster, a new Node pool must be created. - - gcloud container node-pools create [NODE_POOL_NAME] \ - --zone=[COMPUTE-ZONE] \ - --cluster=[CLUSTER_NAME] \ - --image-type=cos_containerd \ - --sandbox type=gvisor - scored: false - - - id: 5.10.5 - text: "Ensure use of Binary Authorization (Automated)" - type: "manual" - remediation: | - Using Command Line: - Firstly, update the cluster to enable Binary Authorization: - - gcloud container cluster update [CLUSTER_NAME] \ - --zone [COMPUTE-ZONE] \ - --enable-binauthz - - Create a Binary Authorization Policy using the Binary Authorization Policy Reference - (https://cloud.google.com/binary-authorization/docs/policy-yaml-reference) for - guidance. - - Import the policy file into Binary Authorization: - - gcloud container binauthz policy import [YAML_POLICY] - scored: false - - - id: 5.10.6 - text: "Enable Cloud Security Command Center (Cloud SCC) (Manual)" - type: "manual" - remediation: | - Using Command Line: - Follow the instructions at https://cloud.google.com/security-command- - center/docs/quickstart-scc-setup. - scored: false diff --git a/cfg/gke-1.6.0/master.yaml b/cfg/gke-1.6.0/master.yaml deleted file mode 100644 index 9686bf2f8..000000000 --- a/cfg/gke-1.6.0/master.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -controls: -version: "gke-1.6.0" -id: 1 -text: "Control Plane Components" -type: "master" diff --git a/cfg/gke-1.6.0/node.yaml b/cfg/gke-1.6.0/node.yaml deleted file mode 100644 index 30e9aa8c6..000000000 --- a/cfg/gke-1.6.0/node.yaml +++ /dev/null @@ -1,335 +0,0 @@ ---- -controls: -version: "gke-1.6.0" -id: 3 -text: "Worker Node Security Configuration" -type: "node" -groups: - - id: 3.1 - text: "Worker Node Configuration Files" - checks: - - id: 3.1.1 - text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Automated)" - audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - Run the below command (based on the file location on your system) on each worker node. 
- For example, - chmod 644 $proxykubeconfig - scored: false - - - id: 3.1.2 - text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Automated)" - audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' - tests: - test_items: - - flag: root:root - remediation: | - Run the below command (based on the file location on your system) on each worker node. - For example, chown root:root $proxykubeconfig - scored: false - - - id: 3.1.3 - text: "Ensure that the kubelet configuration file permissions has permissions set to 600 (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - Run the following command (using the config file location identied in the Audit step) - chmod 644 /var/lib/kubelet/config.yaml - scored: false - - - id: 3.1.4 - text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' - tests: - test_items: - - flag: root:root - remediation: | - Run the following command (using the config file location identied in the Audit step) - chown root:root /etc/kubernetes/kubelet.conf - scored: false - - - id: 3.2 - text: "Kubelet" - checks: - - id: 3.2.1 - text: "Ensure that the --anonymous-auth argument is set to false (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: "--anonymous-auth" - path: '{.authentication.anonymous.enabled}' - compare: - op: eq - value: false - remediation: | - If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to - false. - If using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --anonymous-auth=false - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - systemctl status kubelet -l - scored: true - - - id: 3.2.2 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --authorization-mode - path: '{.authorization.mode}' - compare: - op: nothave - value: AlwaysAllow - remediation: | - If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If - using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. - --authorization-mode=Webhook - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 3.2.3 - text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --client-ca-file - path: '{.authentication.x509.clientCAFile}' - set: true - remediation: | - If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to - the location of the client CA file. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. 
- --client-ca-file= - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 3.2.4 - text: "Ensure that the --read-only-port argument is set to 0 (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: "--read-only-port" - path: '{.readOnlyPort}' - set: true - compare: - op: eq - value: 0 - remediation: | - If using a Kubelet config file, edit the file to set readOnlyPort to 0. - If using command line arguments, edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf - on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --read-only-port=0 - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 3.2.5 - text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --streaming-connection-idle-timeout - path: '{.streamingConnectionIdleTimeout}' - compare: - op: noteq - value: 0 - - flag: --streaming-connection-idle-timeout - path: '{.streamingConnectionIdleTimeout}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a - value other than 0. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --streaming-connection-idle-timeout=5m - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 3.2.6 - text: "Ensure that the --protect-kernel-defaults argument is set to true (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --protect-kernel-defaults - path: '{.protectKernelDefaults}' - set: true - compare: - op: eq - value: true - remediation: | - If using a Kubelet config file, edit the file to set protectKernelDefaults: true. - If using command line arguments, edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf - on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --protect-kernel-defaults=true - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 3.2.7 - text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated) " - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --make-iptables-util-chains - path: '{.makeIPTablesUtilChains}' - compare: - op: eq - value: true - - flag: --make-iptables-util-chains - path: '{.makeIPTablesUtilChains}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - remove the --make-iptables-util-chains argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. 
For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 3.2.8 - text: "Ensure that the --hostname-override argument is not set (Manual)" - audit: "/bin/ps -fC $kubeletbin " - tests: - test_items: - - flag: --hostname-override - set: false - remediation: | - Edit the kubelet service file $kubeletsvc - on each worker node and remove the --hostname-override argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 3.2.9 - text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --event-qps - path: '{.eventRecordQPS}' - set: true - compare: - op: eq - value: 0 - remediation: | - If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. - If using command line arguments, edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf - on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 3.2.10 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --tls-cert-file - path: '{.tlsCertFile}' - - flag: --tls-private-key-file - path: '{.tlsPrivateKeyFile}' - remediation: | - If using a Kubelet config file, edit the file to set tlsCertFile to the location - of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile - to the location of the corresponding private key file. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameters in KUBELET_CERTIFICATE_ARGS variable. - --tls-cert-file= - --tls-private-key-file= - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 3.2.11 - text: "Ensure that the --rotate-certificates argument is not set to false (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --rotate-certificates - path: '{.rotateCertificates}' - compare: - op: eq - value: true - - flag: --rotate-certificates - path: '{.rotateCertificates}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to add the line rotateCertificates: true or - remove it altogether to use the default value. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS - variable. - Based on your system, restart the kubelet service. 
For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 3.2.12 - text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: RotateKubeletServerCertificate - path: '{.featureGates.RotateKubeletServerCertificate}' - compare: - op: eq - value: true - remediation: | - Edit the kubelet service file $kubeletsvc - on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. - --feature-gates=RotateKubeletServerCertificate=true - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true diff --git a/cfg/gke-1.6.0/policies.yaml b/cfg/gke-1.6.0/policies.yaml deleted file mode 100644 index 6b3ccda78..000000000 --- a/cfg/gke-1.6.0/policies.yaml +++ /dev/null @@ -1,239 +0,0 @@ ---- -controls: -version: "gke-1.6.0" -id: 4 -text: "Kubernetes Policies" -type: "policies" -groups: - - id: 4.1 - text: "RBAC and Service Accounts" - checks: - - id: 4.1.1 - text: "Ensure that the cluster-admin role is only used where required (Manual)" - type: "manual" - remediation: | - Identify all clusterrolebindings to the cluster-admin role. Check if they are used and - if they need this role or if they could use a role with fewer privileges. - Where possible, first bind users to a lower privileged role and then remove the - clusterrolebinding to the cluster-admin role : - kubectl delete clusterrolebinding [name] - scored: false - - - id: 4.1.2 - text: "Minimize access to secrets (Manual)" - type: "manual" - remediation: | - Where possible, remove get, list and watch access to secret objects in the cluster. - scored: false - - - id: 4.1.3 - text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" - type: "manual" - remediation: | - Where possible replace any use of wildcards in clusterroles and roles with specific - objects or actions. - scored: false - - - id: 4.1.4 - text: "Minimize access to create pods (Manual)" - type: "manual" - remediation: | - Where possible, remove create access to pod objects in the cluster. - scored: false - - - id: 4.1.5 - text: "Ensure that default service accounts are not actively used. (Manual)" - type: "manual" - remediation: | - Create explicit service accounts wherever a Kubernetes workload requires specific access - to the Kubernetes API server. - Modify the configuration of each default service account to include this value - automountServiceAccountToken: false - scored: true - - - id: 4.1.6 - text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" - type: "manual" - remediation: | - Modify the definition of pods and service accounts which do not need to mount service - account tokens to disable it. - scored: false - - - id: 4.2 - text: "Pod Security Policies" - checks: - - id: 4.2.1 - text: "Minimize the admission of privileged containers (Automated)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that - the .spec.privileged field is omitted or set to false. - scored: false - - - id: 4.2.2 - text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - .spec.hostPID field is omitted or set to false. 
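As an illustrative sketch of the kind of PodSecurityPolicy these checks describe (the name restricted-example is a placeholder and the field set is illustrative, not exhaustive):

      apiVersion: policy/v1beta1
      kind: PodSecurityPolicy
      metadata:
        name: restricted-example
      spec:
        privileged: false
        hostPID: false
        hostIPC: false
        hostNetwork: false
        allowPrivilegeEscalation: false
        requiredDropCapabilities:
          - ALL
        runAsUser:
          rule: MustRunAsNonRoot
        seLinux:
          rule: RunAsAny
        supplementalGroups:
          rule: RunAsAny
        fsGroup:
          rule: RunAsAny
        volumes:
          - configMap
          - secret
          - emptyDir
          - projected
          - downwardAPI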
- scored: false - - - id: 4.2.3 - text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - .spec.hostIPC field is omitted or set to false. - scored: false - - - id: 4.2.4 - text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - .spec.hostNetwork field is omitted or set to false. - scored: false - - - id: 4.2.5 - text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - .spec.allowPrivilegeEscalation field is omitted or set to false. - scored: false - - - id: 4.2.6 - text: "Minimize the admission of root containers (Automated)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of - UIDs not including 0. - scored: false - - - id: 4.2.7 - text: "Minimize the admission of containers with the NET_RAW capability (Automated)" - type: "manual" - remediation: | - Create a PSP as described in the Kubernetes documentation, ensuring that the - .spec.requiredDropCapabilities is set to include either NET_RAW or ALL. - scored: false - - - id: 4.2.8 - text: "Minimize the admission of containers with added capabilities (Automated)" - type: "manual" - remediation: | - Ensure that allowedCapabilities is not present in PSPs for the cluster unless - it is set to an empty array. - scored: false - - - id: 4.2.9 - text: "Minimize the admission of containers with capabilities assigned (Manual) " - type: "manual" - remediation: | - Review the use of capabilites in applications running on your cluster. Where a namespace - contains applications which do not require any Linux capabities to operate consider adding - a PSP which forbids the admission of containers which do not drop all capabilities. - scored: false - - - id: 4.3 - text: "Network Policies and CNI" - checks: - - id: 4.3.1 - text: "Ensure that the CNI in use supports Network Policies (Manual)" - type: "manual" - remediation: | - To use a CNI plugin with Network Policy, enable Network Policy in GKE, and the CNI plugin - will be updated. See Recommendation 6.6.7. - scored: false - - - id: 4.3.2 - text: "Ensure that all Namespaces have Network Policies defined (Manual)" - type: "manual" - remediation: | - Follow the documentation and create NetworkPolicy objects as you need them. - scored: false - - - id: 4.4 - text: "Secrets Management" - checks: - - id: 4.4.1 - text: "Prefer using secrets as files over secrets as environment variables (Manual)" - type: "manual" - remediation: | - if possible, rewrite application code to read secrets from mounted secret files, rather than - from environment variables. - scored: false - - - id: 4.4.2 - text: "Consider external secret storage (Manual)" - type: "manual" - remediation: | - Refer to the secrets management options offered by your cloud provider or a third-party - secrets management solution. 
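As an illustrative sketch of the pattern described in 4.4.1 (the names app and app-secret and the mount path are placeholders), a Pod can consume a Secret as a mounted file rather than as environment variables:

      apiVersion: v1
      kind: Pod
      metadata:
        name: app
      spec:
        containers:
          - name: app
            image: nginx
            volumeMounts:
              - name: app-secret
                mountPath: /etc/app-secret
                readOnly: true
        volumes:
          - name: app-secret
            secret:
              secretName: app-secret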
- scored: false - - - id: 4.5 - text: "Extensible Admission Control" - checks: - - id: 4.5.1 - text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" - type: "manual" - remediation: | - Follow the Kubernetes documentation and setup image provenance. - See also Recommendation 6.10.5 for GKE specifically. - scored: false - - - id: 4.6 - text: "General Policies" - checks: - - id: 4.6.1 - text: "Create administrative boundaries between resources using namespaces (Manual)" - type: "manual" - remediation: | - Follow the documentation and create namespaces for objects in your deployment as you need - them. - scored: false - - - id: 4.6.2 - text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)" - type: "manual" - remediation: | - Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you - would need to enable alpha features in the apiserver by passing "--feature- - gates=AllAlpha=true" argument. - Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS - parameter to "--feature-gates=AllAlpha=true" - KUBE_API_ARGS="--feature-gates=AllAlpha=true" - Based on your system, restart the kube-apiserver service. For example: - systemctl restart kube-apiserver.service - Use annotations to enable the docker/default seccomp profile in your pod definitions. An - example is as below: - apiVersion: v1 - kind: Pod - metadata: - name: trustworthy-pod - annotations: - seccomp.security.alpha.kubernetes.io/pod: docker/default - spec: - containers: - - name: trustworthy-container - image: sotrustworthy:latest - scored: false - - - id: 4.6.3 - text: "Apply Security Context to Your Pods and Containers (Manual)" - type: "manual" - remediation: | - Follow the Kubernetes documentation and apply security contexts to your pods. For a - suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker - Containers. - scored: false - - - id: 4.6.4 - text: "The default namespace should not be used (Manual)" - type: "manual" - remediation: | - Ensure that namespaces are created to allow for appropriate segregation of Kubernetes - resources and that all new resources are created in a specific namespace. 
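For example (the namespace name is a placeholder), workloads can be segregated by creating dedicated namespaces and confirming that nothing user-created remains in default:

      kubectl create namespace [NAMESPACE]
      kubectl get all --namespace default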
- scored: false From f55346bea9bb42693a1ce6ae09a81f0b89480829 Mon Sep 17 00:00:00 2001 From: deboshree-b Date: Tue, 10 Sep 2024 15:02:48 +0530 Subject: [PATCH 10/13] NDEV-20011 : adding gke 1.6.0 benchmark --- cfg/gke-1.6.0/config.yaml | 2 + cfg/gke-1.6.0/controlplane.yaml | 35 ++ cfg/gke-1.6.0/managedservices.yaml | 625 +++++++++++++++++++++++++++++ cfg/gke-1.6.0/master.yaml | 6 + cfg/gke-1.6.0/node.yaml | 273 +++++++++++++ cfg/gke-1.6.0/policies.yaml | 197 +++++++++ 6 files changed, 1138 insertions(+) create mode 100644 cfg/gke-1.6.0/config.yaml create mode 100644 cfg/gke-1.6.0/controlplane.yaml create mode 100644 cfg/gke-1.6.0/managedservices.yaml create mode 100644 cfg/gke-1.6.0/master.yaml create mode 100644 cfg/gke-1.6.0/node.yaml create mode 100644 cfg/gke-1.6.0/policies.yaml diff --git a/cfg/gke-1.6.0/config.yaml b/cfg/gke-1.6.0/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/gke-1.6.0/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/gke-1.6.0/controlplane.yaml b/cfg/gke-1.6.0/controlplane.yaml new file mode 100644 index 000000000..99bbf6f16 --- /dev/null +++ b/cfg/gke-1.6.0/controlplane.yaml @@ -0,0 +1,35 @@ +--- +controls: +version: "gke-1.6.0" +id: 2 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 2.1 + text: "Authentication and Authorization" + checks: + - id: 2.1.1 + text: "Client certificate authentication should not be used for users (Automated)" + audit: "kubectl get secrets --namespace kube-system -o json" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + You can remediate the availability of client certificates in your GKE cluster. See + Recommendation 5.8.1. + scored: false + + - id: 2.2 + text: "Logging" + type: skip + checks: + - id: 2.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + type: "manual" + remediation: "This control cannot be modified in GKE." + scored: false + + - id: 2.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: "This control cannot be modified in GKE." 
+ scored: false diff --git a/cfg/gke-1.6.0/managedservices.yaml b/cfg/gke-1.6.0/managedservices.yaml new file mode 100644 index 000000000..b593d1de9 --- /dev/null +++ b/cfg/gke-1.6.0/managedservices.yaml @@ -0,0 +1,625 @@ +--- +controls: +version: "gke-1.6.0" +id: 5 +text: "Managed Services" +type: "managedservices" +groups: + - id: 5.1 + text: "Image Registry and Image Scanning" + checks: + - id: 5.1.1 + text: "Ensure Image Vulnerability Scanning using GCR Container Analysis + or a third-party provider (Automated)" + remediation: | + Using Command Line: + + gcloud services enable containerscanning.googleapis.com + scored: false + + - id: 5.1.2 + text: "Minimize user access to GCR (Automated)" + remediation: | + Using Command Line: + To change roles at the GCR bucket level: + Firstly, run the following if read permissions are required: + + gsutil iam ch [TYPE]:[EMAIL-ADDRESS]:objectViewer + gs://artifacts.[PROJECT_ID].appspot.com + + Then remove the excessively privileged role (Storage Admin / Storage Object Admin / + Storage Object Creator) using: + + gsutil iam ch -d [TYPE]:[EMAIL-ADDRESS]:[ROLE] + gs://artifacts.[PROJECT_ID].appspot.com + + where: + [TYPE] can be one of the following: + o user, if the [EMAIL-ADDRESS] is a Google account + o serviceAccount, if [EMAIL-ADDRESS] specifies a Service account + [EMAIL-ADDRESS] can be one of the following: + o a Google account (for example, someone@example.com) + o a Cloud IAM service account + To modify roles defined at the project level and subsequently inherited within the GCR + bucket, or the Service Account User role, extract the IAM policy file, modify it accordingly + and apply it using: + + gcloud projects set-iam-policy [PROJECT_ID] [POLICY_FILE] + scored: false + + - id: 5.1.3 + text: "Minimize cluster access to read-only for GCR (Manual)" + remediation: | + Using Command Line: + For an account explicitly granted to the bucket. First, add read access to the Kubernetes + Service Account + + gsutil iam ch [TYPE]:[EMAIL-ADDRESS]:objectViewer + gs://artifacts.[PROJECT_ID].appspot.com + + where: + [TYPE] can be one of the following: + o user, if the [EMAIL-ADDRESS] is a Google account + o serviceAccount, if [EMAIL-ADDRESS] specifies a Service account + [EMAIL-ADDRESS] can be one of the following: + o a Google account (for example, someone@example.com) + o a Cloud IAM service account + + Then remove the excessively privileged role (Storage Admin / Storage Object Admin / + Storage Object Creator) using: + + gsutil iam ch -d [TYPE]:[EMAIL-ADDRESS]:[ROLE] + gs://artifacts.[PROJECT_ID].appspot.com + + For an account that inherits access to the GCR Bucket through Project level permissions, + modify the Projects IAM policy file accordingly, then upload it using: + + gcloud projects set-iam-policy [PROJECT_ID] [POLICY_FILE] + scored: false + + - id: 5.1.4 + text: "Minimize Container Registries to only those approved (Manual)" + remediation: | + Using Command Line: + First, update the cluster to enable Binary Authorization: + + gcloud container cluster update [CLUSTER_NAME] \ + --enable-binauthz + + Create a Binary Authorization Policy using the Binary Authorization Policy Reference + (https://cloud.google.com/binary-authorization/docs/policy-yaml-reference) for guidance. 
+ Import the policy file into Binary Authorization: + + gcloud container binauthz policy import [YAML_POLICY] + scored: false + + - id: 5.2 + text: "Identity and Access Management (IAM)" + checks: + - id: 5.2.1 + text: "Ensure GKE clusters are not running using the Compute Engine + default service account (Automated)" + remediation: | + Using Command Line: + Firstly, create a minimally privileged service account: + + gcloud iam service-accounts create [SA_NAME] \ + --display-name "GKE Node Service Account" + export NODE_SA_EMAIL=`gcloud iam service-accounts list \ + --format='value(email)' \ + --filter='displayName:GKE Node Service Account'` + + Grant the following roles to the service account: + + export PROJECT_ID=`gcloud config get-value project` + gcloud projects add-iam-policy-binding $PROJECT_ID \ + --member serviceAccount:$NODE_SA_EMAIL \ + --role roles/monitoring.metricWriter + gcloud projects add-iam-policy-binding $PROJECT_ID \ + --member serviceAccount:$NODE_SA_EMAIL \ + --role roles/monitoring.viewer + gcloud projects add-iam-policy-binding $PROJECT_ID \ + --member serviceAccount:$NODE_SA_EMAIL \ + --role roles/logging.logWriter + + To create a new Node pool using the Service account, run the following command: + + gcloud container node-pools create [NODE_POOL] \ + --service-account=[SA_NAME]@[PROJECT_ID].iam.gserviceaccount.com \ + --cluster=[CLUSTER_NAME] --zone [COMPUTE_ZONE] + + You will need to migrate your workloads to the new Node pool, and delete Node pools that + use the default service account to complete the remediation. + scored: false + + - id: 5.2.2 + text: "Prefer using dedicated GCP Service Accounts and Workload Identity (Manual)" + remediation: | + Using Command Line: + + gcloud container clusters update [CLUSTER_NAME] --zone [CLUSTER_ZONE] \ + --identity-namespace=[PROJECT_ID].svc.id.goog + + Note that existing Node pools are unaffected. New Node pools default to --workload- + metadata-from-node=GKE_METADATA_SERVER . + + Then, modify existing Node pools to enable GKE_METADATA_SERVER: + + gcloud container node-pools update [NODEPOOL_NAME] \ + --cluster=[CLUSTER_NAME] --zone [CLUSTER_ZONE] \ + --workload-metadata-from-node=GKE_METADATA_SERVER + + You may also need to modify workloads in order for them to use Workload Identity as + described within https://cloud.google.com/kubernetes-engine/docs/how-to/workload- + identity. Also consider the effects on the availability of your hosted workloads as Node + pools are updated, it may be more appropriate to create new Node Pools. 
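As a quick verification sketch (cluster and zone names are placeholders), whether Workload Identity is already configured on a cluster can typically be confirmed with:

      gcloud container clusters describe [CLUSTER_NAME] \
        --zone [CLUSTER_ZONE] \
        --format json | jq '.workloadIdentityConfig'

A populated workload pool value indicates Workload Identity is enabled; a null result indicates it is not.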
+ scored: false + + - id: 5.3 + text: "Cloud Key Management Service (Cloud KMS)" + checks: + - id: 5.3.1 + text: "Ensure Kubernetes Secrets are encrypted using keys managed in Cloud KMS (Automated)" + remediation: | + Using Command Line: + To create a key + + Create a key ring: + + gcloud kms keyrings create [RING_NAME] \ + --location [LOCATION] \ + --project [KEY_PROJECT_ID] + + Create a key: + + gcloud kms keys create [KEY_NAME] \ + --location [LOCATION] \ + --keyring [RING_NAME] \ + --purpose encryption \ + --project [KEY_PROJECT_ID] + + Grant the Kubernetes Engine Service Agent service account the Cloud KMS CryptoKey + Encrypter/Decrypter role: + + gcloud kms keys add-iam-policy-binding [KEY_NAME] \ + --location [LOCATION] \ + --keyring [RING_NAME] \ + --member serviceAccount:[SERVICE_ACCOUNT_NAME] \ + --role roles/cloudkms.cryptoKeyEncrypterDecrypter \ + --project [KEY_PROJECT_ID] + + To create a new cluster with Application-layer Secrets Encryption: + + gcloud container clusters create [CLUSTER_NAME] \ + --cluster-version=latest \ + --zone [ZONE] \ + --database-encryption-key projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKey s/[KEY_NAME] \ + --project [CLUSTER_PROJECT_ID] + + To enable on an existing cluster: + + gcloud container clusters update [CLUSTER_NAME] \ + --zone [ZONE] \ + --database-encryption-key projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKey s/[KEY_NAME] \ + --project [CLUSTER_PROJECT_ID] + scored: false + + - id: 5.4 + text: "Node Metadata" + checks: + - id: 5.4.1 + text: "Ensure the GKE Metadata Server is Enabled (Automated)" + remediation: | + Using Command Line: + gcloud container clusters update [CLUSTER_NAME] \ + --identity-namespace=[PROJECT_ID].svc.id.goog + Note that existing Node pools are unaffected. New Node pools default to --workload- + metadata-from-node=GKE_METADATA_SERVER . + + To modify an existing Node pool to enable GKE Metadata Server: + + gcloud container node-pools update [NODEPOOL_NAME] \ + --cluster=[CLUSTER_NAME] \ + --workload-metadata-from-node=GKE_METADATA_SERVER + + You may also need to modify workloads in order for them to use Workload Identity as + described within https://cloud.google.com/kubernetes-engine/docs/how-to/workload- + identity. 
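To verify the result on a given Node pool (placeholder names, illustrative only), the workload metadata mode can usually be read back with:

      gcloud container node-pools describe [NODEPOOL_NAME] \
        --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \
        --format json | jq '.config.workloadMetadataConfig'

A mode of GKE_METADATA indicates the GKE Metadata Server is in use for that pool.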
+ scored: false + + - id: 5.5 + text: "Node Configuration and Maintenance" + checks: + - id: 5.5.1 + text: "Ensure Container-Optimized OS (COS) is used for GKE node images (Automated)" + remediation: | + Using Command Line: + To set the node image to cos for an existing cluster's Node pool: + + gcloud container clusters upgrade [CLUSTER_NAME]\ + --image-type cos \ + --zone [COMPUTE_ZONE] --node-pool [POOL_NAME] + scored: false + + - id: 5.5.2 + text: "Ensure Node Auto-Repair is enabled for GKE nodes (Automated)" + remediation: | + Using Command Line: + To enable node auto-repair for an existing cluster with Node pool, run the following + command: + + gcloud container node-pools update [POOL_NAME] \ + --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ + --enable-autorepair + scored: false + + - id: 5.5.3 + text: "Ensure Node Auto-Upgrade is enabled for GKE nodes (Automated)" + remediation: | + Using Command Line: + To enable node auto-upgrade for an existing cluster's Node pool, run the following + command: + + gcloud container node-pools update [NODE_POOL] \ + --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ + --enable-autoupgrade + scored: false + + - id: 5.5.4 + text: "Automate GKE version management using Release Channels (Automated)" + remediation: | + Using Command Line: + Create a new cluster by running the following command: + + gcloud container clusters create [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --release-channel [RELEASE_CHANNEL] + + where [RELEASE_CHANNEL] is stable or regular according to your needs. + scored: false + + - id: 5.5.5 + text: "Ensure Shielded GKE Nodes are Enabled (Automated)" + remediation: | + Using Command Line: + To migrate an existing cluster, the flag --enable-shielded-nodes needs to be + specified in the cluster update command: + + gcloud container clusters update [CLUSTER_NAME] \ + --zone [CLUSTER_ZONE] \ + --enable-shielded-nodes + scored: false + + - id: 5.5.6 + text: "Ensure Integrity Monitoring for Shielded GKE Nodes is Enabled (Automated)" + remediation: | + Using Command Line: + To create a Node pool within the cluster with Integrity Monitoring enabled, run the + following command: + + gcloud beta container node-pools create [NODEPOOL_NAME] \ + --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ + --shielded-integrity-monitoring + + You will also need to migrate workloads from existing non-conforming Node pools to the newly created Node pool, + then delete the non-conforming pools. + scored: false + + - id: 5.5.7 + text: "Ensure Secure Boot for Shielded GKE Nodes is Enabled (Automated)" + remediation: | + Using Command Line: + To create a Node pool within the cluster with Secure Boot enabled, run the following + command: + + gcloud container node-pools create [NODEPOOL_NAME] \ + --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ + --shielded-secure-boot + + You will also need to migrate workloads from existing non-conforming Node pools to the + newly created Node pool, then delete the non-conforming pools. 
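As a verification sketch (placeholder names), the Shielded Node settings of a Node pool can be inspected with:

      gcloud container node-pools describe [NODEPOOL_NAME] \
        --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \
        --format json | jq '.config.shieldedInstanceConfig'

For conforming pools, enableSecureBoot and enableIntegrityMonitoring should both report true.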
+ scored: false + + - id: 5.6 + text: "Cluster Networking" + checks: + - id: 5.6.1 + text: "Enable VPC Flow Logs and Intranode Visibility (Automated)" + remediation: | + Using Command Line: + To enable intranode visibility on an existing cluster, run the following command: + + gcloud beta container clusters update [CLUSTER_NAME] \ + --enable-intra-node-visibility + scored: false + + - id: 5.6.2 + text: "Ensure use of VPC-native clusters (Automated)" + remediation: | + Using Command Line: + To enable Alias IP on a new cluster, run the following command: + + gcloud container clusters create [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --enable-ip-alias + scored: false + + - id: 5.6.3 + text: "Ensure Control Plane Authorized Networks is Enabled (Manual)" + remediation: | + Using Command Line: + To check Master Authorized Networks status for an existing cluster, run the following + command; + + gcloud container clusters describe [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --enable -master-authorized-networks + + The output should return + + { + "enabled": true + } + + if Control Plane Authorized Networks is enabled. + + If Control Plane Authorized Networks is disabled, the + above command will return null ( { } ). + scored: false + + - id: 5.6.4 + text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Manual)" + remediation: | + Using Command Line: + Create a cluster with a Private Endpoint enabled and Public Access disabled by including + the --enable-private-endpoint flag within the cluster create command: + + gcloud container clusters create [CLUSTER_NAME] \ + --enable-private-endpoint + + Setting this flag also requires the setting of --enable-private-nodes , --enable-ip-alias + and --master-ipv4-cidr=[MASTER_CIDR_RANGE] . + scored: false + + - id: 5.6.5 + text: "Ensure clusters are created with Private Nodes (Manual)" + remediation: | + Using Command Line: + To create a cluster with Private Nodes enabled, include the --enable-private-nodes flag + within the cluster create command: + + gcloud container clusters create [CLUSTER_NAME] \ + --enable-private-nodes + + Setting this flag also requires the setting of --enable-ip-alias and --master-ipv4- + cidr=[MASTER_CIDR_RANGE] . + scored: false + + - id: 5.6.6 + text: "Consider firewalling GKE worker nodes (Manual)" + remediation: | + Using Command Line: + Use the following command to generate firewall rules, setting the variables as appropriate. + You may want to use the target [TAG] and [SERVICE_ACCOUNT] previously identified. 
+ + gcloud compute firewall-rules create FIREWALL_RULE_NAME \ + --network [NETWORK] \ + --priority [PRIORITY] \ + --direction [DIRECTION] \ + --action [ACTION] \ + --target-tags [TAG] \ + --target-service-accounts [SERVICE_ACCOUNT] \ + --source-ranges [SOURCE_CIDR-RANGE] \ + --source-tags [SOURCE_TAGS] \ + --source-service-accounts=[SOURCE_SERVICE_ACCOUNT] \ + --destination-ranges [DESTINATION_CIDR_RANGE] \ + --rules [RULES] + scored: false + + - id: 5.6.7 + text: "Ensure Network Policy is Enabled and set as appropriate (Manual)" + remediation: | + Using Command Line: + To enable Network Policy for an existing cluster, firstly enable the Network Policy add-on: + + gcloud container clusters update [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --update-addons NetworkPolicy=ENABLED + + Then, enable Network Policy: + + gcloud container clusters update [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --enable-network-policy + scored: false + + - id: 5.6.8 + text: "Ensure use of Google-managed SSL Certificates (Manual)" + remediation: | + If services of type:LoadBalancer are discovered, consider replacing the Service with an + Ingress. + + To configure the Ingress and use Google-managed SSL certificates, follow the instructions + as listed at https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs. + scored: false + + - id: 5.7 + text: "Logging" + checks: + - id: 5.7.1 + text: "Ensure Logging and Cloud Monitoring is Enabled (Automated)" + remediation: | + Using Command Line: + + To enable Logging for an existing cluster, run the + following command: + + gcloud container clusters update [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --logging [COMPONENTS_TO_BE_LOGGED] + + To enable Cloud Monitoring for an existing cluster, run the following command: + + gcloud container clusters update [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ + --monitoring [COMPONENTS_TO_BE_LOGGED] + scored: false + + - id: 5.7.2 + text: "Enable Linux auditd logging (Manual)" + remediation: | + Using Command Line: + Download the example manifests: + + curl https://raw.githubusercontent.com/GoogleCloudPlatform/k8s-node-tools/master/os-audit/cos-auditd-logging.yaml \ + > cos-auditd-logging.yaml + + Edit the example manifests if needed. Then, deploy them: + + kubectl apply -f cos-auditd-logging.yaml + + Verify that the logging Pods have started. If you defined a different Namespace in your + manifests, replace cos-auditd with the name of the namespace you're using: + + kubectl get pods --namespace=cos-auditd + scored: false + + - id: 5.8 + text: "Authentication and Authorization" + checks: + - id: 5.8.1 + text: "Ensure authentication using Client Certificates is Disabled (Automated)" + remediation: | + Using Command Line: + Create a new cluster without a Client Certificate: + + gcloud container clusters create [CLUSTER_NAME] \ + --no-issue-client-certificate + scored: false + + - id: 5.8.2 + text: "Manage Kubernetes RBAC users with Google Groups for GKE (Manual)" + remediation: | + Using Command Line: + Follow the G Suite Groups instructions at https://cloud.google.com/kubernetes- + engine/docs/how-to/role-based-access-control#google-groups-for-gke. + + Then, create a cluster with + + gcloud container clusters create [CLUSTER_NAME] \ + --security-group=[SECURITY_GROUP_NAME] + + Finally create Roles, ClusterRoles, RoleBindings, and ClusterRoleBindings that + reference your G Suite Groups. 
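An illustrative ClusterRoleBinding referencing such a group might look as follows (the binding name and group address are placeholders; the group must be a member of the configured security group):

      apiVersion: rbac.authorization.k8s.io/v1
      kind: ClusterRoleBinding
      metadata:
        name: gke-group-viewers
      subjects:
        - kind: Group
          name: cluster-viewers@example.com
          apiGroup: rbac.authorization.k8s.io
      roleRef:
        kind: ClusterRole
        name: view
        apiGroup: rbac.authorization.k8s.io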
+ scored: false + + - id: 5.8.3 + text: "Ensure Legacy Authorization (ABAC) is Disabled (Automated)" + remediation: | + Using Command Line: + To disable Legacy Authorization for an existing cluster, run the following command: + + gcloud container clusters update [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --no-enable-legacy-authorization + scored: false + + - id: 5.9 + text: "Storage" + checks: + - id: 5.9.1 + text: "Enable Customer-Managed Encryption Keys (CMEK) for GKE Persistent Disks (PD) (Automated)" + remediation: | + This cannot be remediated by updating an existing cluster. The node pool must either + be recreated or a new cluster created. + scored: false + + - id: 5.9.2 + text: "Enable Customer-Managed Encryption Keys (CMEK) for Boot Disks (PD) (Automated)" + remediation: | + Using Command Line: + FOR NODE BOOT DISKS: + Create a new node pool using customer-managed encryption keys for the node boot disk, of + [DISK_TYPE] either pd-standard or pd-ssd : + + gcloud container node-pools create [CLUSTER_NAME] \ + --disk-type [DISK_TYPE] \ + --boot-disk-kms-key \ + projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME] + + Create a cluster using customer-managed encryption keys for the node boot disk, of + [DISK_TYPE] either pd-standard or pd-ssd : + + gcloud beta container clusters create [CLUSTER_NAME] \ + --disk-type [DISK_TYPE] \ + --boot-disk-kms-key \ + projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME] + + FOR ATTACHED DISKS: + Follow the instructions detailed at https://cloud.google.com/kubernetes- + engine/docs/how-to/using-cmek. + scored: false + + - id: 5.10 + text: "Other Cluster Configurations" + checks: + - id: 5.10.1 + text: "Ensure Kubernetes Web UI is Disabled (Automated)" + remediation: | + Using Command Line: + To disable the Kubernetes Dashboard on an existing cluster, run the following command: + + gcloud container clusters update [CLUSTER_NAME] \ + --zone [ZONE] \ + --update-addons=KubernetesDashboard=DISABLED + scored: false + + - id: 5.10.2 + text: "Ensure that Alpha clusters are not used for production workloads (Automated)" + remediation: | + Using Command Line: + Upon creating a new cluster + + gcloud container clusters create [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] + + Do not use the --enable-kubernetes-alpha argument. + scored: false + + - id: 5.10.3 + text: "Consider GKE Sandbox for running untrusted workloads (Manual)" + remediation: | + Using Command Line: + To enable GKE Sandbox on an existing cluster, a new Node pool must be created. + + gcloud container node-pools create [NODE_POOL_NAME] \ + --zone=[COMPUTE-ZONE] \ + --cluster=[CLUSTER_NAME] \ + --image-type=cos_containerd \ + --sandbox type=gvisor + scored: false + + - id: 5.10.4 + text: "Ensure use of Binary Authorization (Automated)" + remediation: | + Using Command Line: + Firstly, update the cluster to enable Binary Authorization: + + gcloud container cluster update [CLUSTER_NAME] \ + --zone [COMPUTE-ZONE] \ + --binauthz-evaluation-mode=[EVALUATION_MODE] + + Create a Binary Authorization Policy using the Binary Authorization Policy Reference + (https://cloud.google.com/binary-authorization/docs/policy-yaml-reference) for + guidance. + + Import the policy file into Binary Authorization: + + gcloud container binauthz policy import [YAML_POLICY] + scored: false + + - id: 5.10.5 + text: "Enable Security Posture (Manual)" + remediation: | + Enable security posture via the UI, gCloud or API. 
+          https://cloud.google.com/kubernetes-engine/docs/how-to/protect-workload-configuration
+        scored: false
diff --git a/cfg/gke-1.6.0/master.yaml b/cfg/gke-1.6.0/master.yaml
new file mode 100644
index 000000000..9686bf2f8
--- /dev/null
+++ b/cfg/gke-1.6.0/master.yaml
@@ -0,0 +1,6 @@
+---
+controls:
+version: "gke-1.6.0"
+id: 1
+text: "Control Plane Components"
+type: "master"
diff --git a/cfg/gke-1.6.0/node.yaml b/cfg/gke-1.6.0/node.yaml
new file mode 100644
index 000000000..252dc910e
--- /dev/null
+++ b/cfg/gke-1.6.0/node.yaml
@@ -0,0 +1,273 @@
+---
+controls:
+version: "gke-1.6.0"
+id: 3
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 3.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 3.1.1
+        text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 644 $proxykubeconfig
+        scored: false
+
+      - id: 3.1.2
+        text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chown root:root $proxykubeconfig
+        scored: false
+
+      - id: 3.1.3
+        text: "Ensure that the kubelet configuration file has permissions set to 600 (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chmod 600 /var/lib/kubelet/config.yaml
+        scored: false
+
+      - id: 3.1.4
+        text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chown root:root /etc/kubernetes/kubelet.conf
+        scored: false
+
+  - id: 3.2
+    text: "Kubelet"
+    checks:
+      - id: 3.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              path: '{.authentication.anonymous.enabled}'
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
+          false.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --anonymous-auth=false
+          Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + + - id: 3.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + set: true + remediation: | + If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.4 + text: "Ensure that the --read-only-port argument is set to 0 (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: true + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set readOnlyPort to 0. + If using command line arguments, edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.6 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated) " + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.7 + text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + set: true + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. + If using command line arguments, edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.8 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line rotateCertificates: true or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.9 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: eq + value: true + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. 
For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
diff --git a/cfg/gke-1.6.0/policies.yaml b/cfg/gke-1.6.0/policies.yaml
new file mode 100644
index 000000000..b4c3d506a
--- /dev/null
+++ b/cfg/gke-1.6.0/policies.yaml
@@ -0,0 +1,197 @@
+---
+controls:
+version: "gke-1.6.0"
+id: 4
+text: "Kubernetes Policies"
+type: "policies"
+groups:
+  - id: 4.1
+    text: "RBAC and Service Accounts"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the cluster-admin role is only used where required (Automated)"
+        remediation: |
+          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+          if they need this role or if they could use a role with fewer privileges.
+          Where possible, first bind users to a lower privileged role and then remove the
+          clusterrolebinding to the cluster-admin role:
+          kubectl delete clusterrolebinding [name]
+        scored: false
+
+      - id: 4.1.2
+        text: "Minimize access to secrets (Automated)"
+        remediation: |
+          Where possible, remove get, list and watch access to secret objects in the cluster.
+        scored: false
+
+      - id: 4.1.3
+        text: "Minimize wildcard use in Roles and ClusterRoles (Automated)"
+        remediation: |
+          Where possible replace any use of wildcards in clusterroles and roles with specific
+          objects or actions.
+        scored: false
+
+      - id: 4.1.4
+        text: "Ensure that default service accounts are not actively used. (Automated)"
+        remediation: |
+          Create explicit service accounts wherever a Kubernetes workload requires specific access
+          to the Kubernetes API server.
+          Modify the configuration of each default service account to include this value
+          automountServiceAccountToken: false
+        scored: true
+
+      - id: 4.1.5
+        text: "Ensure that Service Account Tokens are only mounted where necessary (Automated)"
+        remediation: |
+          Modify the definition of pods and service accounts which do not need to mount service
+          account tokens to disable it.
+        scored: false
+
+      - id: 4.1.6
+        text: "Avoid use of system:masters group (Automated)"
+        remediation: |
+          Remove the system:masters group from all users in the cluster.
+        scored: false
+
+      - id: 4.1.7
+        text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
+        remediation: |
+          Where possible, remove the impersonate, bind and escalate rights from subjects.
+        scored: false
+
+      - id: 4.1.8
+        text: "Avoid bindings to system:anonymous (Automated)"
+        remediation: |
+          Identify all clusterrolebindings and rolebindings to the user system:anonymous. Strongly consider replacing unsafe bindings with an authenticated, user-defined group.
+          Where possible, bind to non-default, user-defined groups with least-privilege roles.
+          If there are any unsafe bindings to the user system:anonymous, proceed to delete them
+          after consideration for cluster operations with only necessary, safer bindings.
+          kubectl delete clusterrolebinding
+        scored: false
+
+      - id: 4.1.9
+        text: "Avoid non-default bindings to system:unauthenticated (Automated)"
+        remediation: |
+          Identify all non-default clusterrolebindings and rolebindings to the group system:unauthenticated.
+          Strongly consider replacing non-default, unsafe bindings with an authenticated,
+          user-defined group. Where possible, bind to non-default, user-defined groups with
+          least-privilege roles. If there are any non-default, unsafe bindings to the group system:unauthenticated,
+          proceed to delete them after consideration for cluster operations with only necessary, safer bindings.
+          kubectl delete clusterrolebinding
+        scored: false
+
+      - id: 4.1.10
+        text: "Avoid non-default bindings to system:authenticated (Automated)"
+        remediation: |
+          Identify all non-default clusterrolebindings and rolebindings to the group system:authenticated.
+          Strongly consider replacing non-default, unsafe bindings with an authenticated, user-defined group.
+          Where possible, bind to non-default, user-defined groups with least-privilege roles. If there are any
+          non-default, unsafe bindings to the group system:authenticated, proceed to delete them after consideration for cluster operations with only necessary, safer bindings.
+          kubectl delete clusterrolebinding
+        scored: false
+
+  - id: 4.2
+    text: "Pod Security Policies"
+    checks:
+      - id: 4.2.1
+        text: "Ensure that the cluster enforces Pod Security Standard Baseline profile or stricter for all namespaces. (Manual)"
+        remediation: |
+          Ensure that Pod Security Admission is in place for every namespace which contains
+          user workloads. Run the following command to enforce the Baseline profile in a namespace:
+          kubectl label namespace [NAMESPACE] pod-security.kubernetes.io/enforce=baseline
+        scored: false
+
+  - id: 4.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 4.3.1
+        text: "Ensure that the CNI in use supports Network Policies (Manual)"
+        remediation: |
+          To use a CNI plugin with Network Policy, enable Network Policy in GKE, and the CNI plugin
+          will be updated. See Recommendation 5.6.7.
+        scored: false
+
+      - id: 4.3.2
+        text: "Ensure that all Namespaces have Network Policies defined (Automated)"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+        scored: false
+
+  - id: 4.4
+    text: "Secrets Management"
+    checks:
+      - id: 4.4.1
+        text: "Prefer using secrets as files over secrets as environment variables (Automated)"
+        remediation: |
+          If possible, rewrite application code to read secrets from mounted secret files, rather than
+          from environment variables.
+        scored: false
+
+      - id: 4.4.2
+        text: "Consider external secret storage (Automated)"
+        remediation: |
+          Refer to the secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
+        scored: false
+
+  - id: 4.5
+    text: "Extensible Admission Control"
+    checks:
+      - id: 4.5.1
+        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Automated)"
+
+        remediation: |
+          Follow the Kubernetes documentation and set up image provenance.
+          See also Recommendation 6.10.5 for GKE specifically.
+        scored: false
+
+  - id: 4.6
+    text: "General Policies"
+    checks:
+      - id: 4.6.1
+        text: "Create administrative boundaries between resources using namespaces (Automated)"
+
+        remediation: |
+          Follow the documentation and create namespaces for objects in your deployment as you need
+          them.
+        scored: false
+
+      - id: 4.6.2
+        text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Automated)"
+        remediation: |
+          Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you
+          would need to enable alpha features in the apiserver by passing the
+          "--feature-gates=AllAlpha=true" argument.
+          Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS
+          parameter to "--feature-gates=AllAlpha=true"
+          KUBE_API_ARGS="--feature-gates=AllAlpha=true"
+          Based on your system, restart the kube-apiserver service. 
For example: + systemctl restart kube-apiserver.service + Use annotations to enable the docker/default seccomp profile in your pod definitions. An + example is as below: + apiVersion: v1 + kind: Pod + metadata: + name: trustworthy-pod + annotations: + seccomp.security.alpha.kubernetes.io/pod: docker/default + spec: + containers: + - name: trustworthy-container + image: sotrustworthy:latest + scored: false + + - id: 4.6.3 + text: "Apply Security Context to Your Pods and Containers (Automated)" + remediation: | + Follow the Kubernetes documentation and apply security contexts to your pods. For a + suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 4.6.4 + text: "The default namespace should not be used (Automated)" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false From 1388da7aa02fb96b55426b75bc23e2e52b9fb448 Mon Sep 17 00:00:00 2001 From: deboshree-b Date: Tue, 10 Sep 2024 15:10:41 +0530 Subject: [PATCH 11/13] NDEV-20011 : adding type and test for scored = true benchmarks --- cfg/gke-1.6.0/node.yaml | 9 ++++++++- cfg/gke-1.6.0/policies.yaml | 24 ++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/cfg/gke-1.6.0/node.yaml b/cfg/gke-1.6.0/node.yaml index 252dc910e..03b2f5455 100644 --- a/cfg/gke-1.6.0/node.yaml +++ b/cfg/gke-1.6.0/node.yaml @@ -64,6 +64,7 @@ groups: checks: - id: 3.2.1 text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + type: "automated" audit: "/bin/ps -fC $kubeletbin" audit_config: "/bin/cat $kubeletconf" tests: @@ -88,6 +89,7 @@ groups: - id: 3.2.2 text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + type: "automated" audit: "/bin/ps -fC $kubeletbin" audit_config: "/bin/cat $kubeletconf" tests: @@ -110,6 +112,7 @@ groups: - id: 3.2.3 text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + type: "automated" audit: "/bin/ps -fC $kubeletbin" audit_config: "/bin/cat $kubeletconf" tests: @@ -153,6 +156,7 @@ groups: - id: 3.2.5 text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" + type: "automated" audit: "/bin/ps -fC $kubeletbin" audit_config: "/bin/cat $kubeletconf" tests: @@ -179,7 +183,8 @@ groups: scored: true - id: 3.2.6 - text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated) " + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + type: "automated" audit: "/bin/ps -fC $kubeletbin" audit_config: "/bin/cat $kubeletconf" tests: @@ -206,6 +211,7 @@ groups: - id: 3.2.7 text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Automated)" + type: "automated" audit: "/bin/ps -fC $kubeletbin" audit_config: "/bin/cat $kubeletconf" tests: @@ -254,6 +260,7 @@ groups: - id: 3.2.9 text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + type: "automated" audit: "/bin/ps -fC $kubeletbin" audit_config: "/bin/cat $kubeletconf" tests: diff --git a/cfg/gke-1.6.0/policies.yaml b/cfg/gke-1.6.0/policies.yaml index b4c3d506a..3ee492996 100644 --- a/cfg/gke-1.6.0/policies.yaml +++ b/cfg/gke-1.6.0/policies.yaml @@ -33,6 +33,30 @@ groups: - id: 4.1.4 text: "Ensure that default service accounts are not actively used. 
(Automated)" + type: "automated" + audit: | + For each namespace in the cluster, review the rights assigned to the default service + account and ensure that it has no roles or cluster roles bound to it apart from the + defaults. + Additionally ensure that the automountServiceAccountToken: false setting is in + place for each default service account. + tests: + test_items: + - key: "metadata.name" + value: "default" + set: true + - key: "metadata.namespace" + set: true + - key: "automountServiceAccountToken" + set: true + compare: + op: eq + value: false + - key: "metadata.name" + path: '{.metadata.name}' + exclude: "default" + related_command: | + kubectl get rolebindings,clusterrolebindings -n -o json remediation: | Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server. From 5e7030ffbbe585ed2c540947826595a8a1583528 Mon Sep 17 00:00:00 2001 From: deboshree-b Date: Tue, 10 Sep 2024 17:09:02 +0530 Subject: [PATCH 12/13] NDEV-20011 : adding gke-1.6.0 in config.yaml --- cfg/config.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cfg/config.yaml b/cfg/config.yaml index 105d3c630..5c7a59239 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -285,6 +285,7 @@ version_mapping: "eks-1.2.0": "eks-1.2.0" "gke-1.0": "gke-1.0" "gke-1.2.0": "gke-1.2.0" + "gke-1.6.0": "gke-1.6.0" "ocp-3.10": "rh-0.7" "ocp-3.11": "rh-0.7" "ocp-4.0": "rh-1.0" @@ -371,6 +372,12 @@ target_mapping: - "controlplane" - "policies" - "managedservices" + "gke-1.6.0": + - "master" + - "node" + - "controlplane" + - "policies" + - "managedservices" "eks-1.0.1": - "master" - "node" From 083b0029dd4bc366dbd74069ca53b1177a50b05a Mon Sep 17 00:00:00 2001 From: deboshree-b Date: Tue, 10 Sep 2024 17:51:33 +0530 Subject: [PATCH 13/13] NDEV-20011 : updating test for gke 1.6.0 - 4.1.4 benchmark --- cfg/gke-1.6.0/policies.yaml | 26 ++++++++------------------ 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/cfg/gke-1.6.0/policies.yaml b/cfg/gke-1.6.0/policies.yaml index 3ee492996..8ca114a56 100644 --- a/cfg/gke-1.6.0/policies.yaml +++ b/cfg/gke-1.6.0/policies.yaml @@ -34,29 +34,19 @@ groups: - id: 4.1.4 text: "Ensure that default service accounts are not actively used. (Automated)" type: "automated" - audit: | - For each namespace in the cluster, review the rights assigned to the default service - account and ensure that it has no roles or cluster roles bound to it apart from the - defaults. - Additionally ensure that the automountServiceAccountToken: false setting is in - place for each default service account. + audit: "/bin/cat $apiserverconf $controller_managerconf $schedulerconf $etcdconf" + audit_config: "/bin/cat $kubeletconf" tests: test_items: - - key: "metadata.name" - value: "default" - set: true - - key: "metadata.namespace" - set: true - - key: "automountServiceAccountToken" - set: true + - flag: "automountServiceAccountToken" + path: '{.automountServiceAccountToken}' compare: op: eq value: false - - key: "metadata.name" - path: '{.metadata.name}' - exclude: "default" - related_command: | - kubectl get rolebindings,clusterrolebindings -n -o json + - flag: "roleRef.name" + path: '{.roleRef.name}' + set: false + bin_op: and remediation: | Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.
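
A minimal usage sketch for the new benchmark (assumes kube-bench is built with the gke-1.6.0 files above installed under its cfg/ directory; the run flags below are the existing kube-bench CLI options, and the target list mirrors the gke-1.6.0 target_mapping entry added to cfg/config.yaml):

    # Run only the worker-node checks against the GKE 1.6.0 benchmark
    kube-bench run --targets node --benchmark gke-1.6.0

    # Run every target mapped for gke-1.6.0
    kube-bench run --targets master,node,controlplane,policies,managedservices --benchmark gke-1.6.0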