From ff3a4c22c74cf3b0fddd7c344a116038d56eab62 Mon Sep 17 00:00:00 2001
From: Andy Pitcher
Date: Mon, 25 Nov 2024 19:18:48 +0100
Subject: [PATCH 1/4] Remove old versions in ConfigMap

- Generic:
  - cis-1.4
  - cis-1.5
  - cis-1.6
  - cis-1.20
  - cis-1.23
- K3s:
  - k3s-cis-1.6-hardened
  - k3s-cis-1.6-permissive
  - k3s-cis-1.20-hardened
  - k3s-cis-1.20-permissive
- RKE1:
  - rke-cis-1.4
  - rke-cis-1.5-hardened
  - rke-cis-1.5-permissive
  - rke-cis-1.6-hardened
  - rke-cis-1.6-permissive
  - rke-cis-1.20-hardened
  - rke-cis-1.20-permissive
- RKE2:
  - rke2-cis-1.5-hardened
  - rke2-cis-1.5-permissive
  - rke2-cis-1.6-hardened
  - rke2-cis-1.6-permissive
  - rke2-cis-1.20-hardened
  - rke2-cis-1.20-permissive

Add version_mapping entries for:
- Generic:
  - cis-1.23
  - cis-1.24
  - cis-1.7
  - cis-1.8
---
 package/cfg/config.yaml | 149 ++--------------------------------------
 1 file changed, 4 insertions(+), 145 deletions(-)

diff --git a/package/cfg/config.yaml b/package/cfg/config.yaml
index e2e16c9a..8c62135f 100644
--- a/package/cfg/config.yaml
+++ b/package/cfg/config.yaml
@@ -211,25 +211,10 @@ managedservices:
 
 # TODO: Clean up in the next refactor
 version_mapping:
-  "rke-1.13":
-    - "rke-cis-1.4"
-  "rke-1.14":
-    - "rke-cis-1.4"
-  "rke-1.15":
-    - "rke-cis-1.5-permissive"
-  "rke-1.16":
-    - "rke-cis-1.6-permissive"
-  "rke-1.17":
-    - "rke-cis-1.6-permissive"
-  "rke-1.18":
-    - "rke-cis-1.6-permissive"
-  "rke-1.19":
-    - "rke-cis-1.20-permissive"
-  "rke-1.20":
-    - "rke-cis-1.20-permissive"
-  "v1.18.10+rke2r1":
-    - "rke2-cis-1.5-hardened"
-    - "rke2-cis-1.5-permissive"
+  "1.23": "cis-1.23"
+  "1.24": "cis-1.24"
+  "1.25": "cis-1.7"
+  "1.26": "cis-1.8"
   "eks-1.2.0":
     - "eks-1.2.0"
   "gke-1.2.0":
@@ -238,10 +223,6 @@ version_mapping:
     - "gke-1.6.0"
   "aks-1.0":
     - "aks-1.0"
-  "v1.20.5+rke2r1":
-    - "rke2-cis-1.6-hardened"
-  "v1.20.5+k3s1":
-    - "k3s-cis-1.6-hardened"
   "v1.24.17+k3s1": "k3s-cis-1.24-hardened"
   "v1.25.16+k3s4": "k3s-cis-1.7-hardened"
   "v1.26.15+k3s1": "k3s-cis-1.8-hardened"
@@ -261,28 +242,6 @@ target_mapping:
     - "node"
 
   # CIS - Generic profiles
-  "cis-1.4":
-    - "master"
-    - "node"
-    - "etcd"
-  "cis-1.5":
-    - "master"
-    - "node"
-    - "controlplane"
-    - "etcd"
-    - "policies"
-  "cis-1.6":
-    - "master"
-    - "node"
-    - "controlplane"
-    - "etcd"
-    - "policies"
-  "cis-1.20":
-    - "master"
-    - "node"
-    - "controlplane"
-    - "etcd"
-    - "policies"
   "cis-1.23":
     - "master"
     - "node"
@@ -310,29 +269,7 @@ target_mapping:
 
   # RKE1
   # rke1: Generic
-  "rke-cis-1.4":
-    - "master"
-    - "node"
-    - "etcd"
   # rke1: Permissive
-  "rke-cis-1.5-permissive":
-    - "master"
-    - "node"
-    - "controlplane"
-    - "etcd"
-    - "policies"
-  "rke-cis-1.6-permissive":
-    - "master"
-    - "node"
-    - "controlplane"
-    - "etcd"
-    - "policies"
-  "rke-cis-1.20-permissive":
-    - "master"
-    - "node"
-    - "controlplane"
-    - "etcd"
-    - "policies"
   "rke-cis-1.23-permissive":
     - "master"
     - "node"
@@ -358,24 +295,6 @@ target_mapping:
     - "etcd"
     - "policies"
   # rke1 : Hardened
-  "rke-cis-1.5-hardened":
-    - "master"
-    - "node"
-    - "controlplane"
-    - "etcd"
-    - "policies"
-  "rke-cis-1.6-hardened":
-    - "master"
-    - "node"
-    - "controlplane"
-    - "etcd"
-    - "policies"
-  "rke-cis-1.20-hardened":
-    - "master"
-    - "node"
-    - "controlplane"
-    - "etcd"
-    - "policies"
   "rke-cis-1.23-hardened":
     - "master"
     - "node"
@@ -404,24 +323,6 @@ target_mapping:
   # RKE2
   # rke2: Generic
   # rke2: Permissive
-  "rke2-cis-1.5-permissive":
-    - "master"
-    - "node"
-    - "controlplane"
-    - "etcd"
-    - "policies"
-  "rke2-cis-1.6-permissive":
-    - "master"
-    - "node"
-    - "controlplane"
-    - "etcd"
-    - "policies"
-  "rke2-cis-1.20-permissive":
-    - "master"
-    - "node"
-    - "controlplane"
-    - "etcd"
-    - "policies"
   "rke2-cis-1.23-permissive":
     - "master"
     - "node"
@@ -447,24 +348,6 @@ target_mapping:
     - "etcd"
     - "policies"
   # rke2: Hardened
-  "rke2-cis-1.5-hardened":
-    - "master"
-    - "node"
-    - "controlplane"
-    - "etcd"
-    - "policies"
-  "rke2-cis-1.6-hardened":
-    - "master"
-    - "node"
-    - "controlplane"
-    - "etcd"
-    - "policies"
-  "rke2-cis-1.20-hardened":
-    - "master"
-    - "node"
-    - "controlplane"
-    - "etcd"
-    - "policies"
   "rke2-cis-1.23-hardened":
     - "master"
     - "node"
@@ -493,18 +376,6 @@ target_mapping:
   # K3S
   # k3s: Generic
   # k3s: Permissive
-  "k3s-cis-1.6-permissive":
-    - "master"
-    - "node"
-    - "controlplane"
-    - "etcd"
-    - "policies"
-  "k3s-cis-1.20-permissive":
-    - "master"
-    - "node"
-    - "controlplane"
-    - "etcd"
-    - "policies"
   "k3s-cis-1.23-permissive":
     - "master"
     - "node"
@@ -530,18 +401,6 @@ target_mapping:
     - "etcd"
     - "policies"
   # k3s: Hardened
-  "k3s-cis-1.6-hardened":
-    - "master"
-    - "node"
-    - "controlplane"
-    - "etcd"
-    - "policies"
-  "k3s-cis-1.20-hardened":
-    - "master"
-    - "node"
-    - "controlplane"
-    - "etcd"
-    - "policies"
   "k3s-cis-1.23-hardened":
     - "master"
     - "node"

From 1b169e2d7ae39f12d5950a6585d3ca8e1c161764 Mon Sep 17 00:00:00 2001
From: Andy Pitcher
Date: Mon, 25 Nov 2024 19:43:07 +0100
Subject: [PATCH 2/4] Add cis-1.9 generic version

---
 hack/e2e                | 2 +-
 package/cfg/config.yaml | 9 +++++++++
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/hack/e2e b/hack/e2e
index 023abf49..c823e4ba 100755
--- a/hack/e2e
+++ b/hack/e2e
@@ -60,7 +60,7 @@ function check_binaries(){
 
 function check_config_files(){
   echo "> Check for upstream test files:"
-  dirs="ack-1.0 aks-1.0 cis-1.23 cis-1.24 cis-1.7 cis-1.8 config.yaml eks-1.0.1 eks-1.1.0 eks-1.2.0 eks-stig-kubernetes-v1r6 gke-1.0 gke-1.2.0 gke-1.6.0 rh-0.7 rh-1.0"
+  dirs="ack-1.0 aks-1.0 cis-1.23 cis-1.24 cis-1.7 cis-1.8 cis-1.9 config.yaml eks-1.0.1 eks-1.1.0 eks-1.2.0 eks-stig-kubernetes-v1r6 gke-1.0 gke-1.2.0 gke-1.6.0 rh-0.7 rh-1.0"
 
   for d in ${dirs}; do
     if ! kubectl exec -n cis-operator-system security-scan-runner-scan-test -c rancher-cis-benchmark -- stat "/etc/kube-bench/cfg/$d"; then
diff --git a/package/cfg/config.yaml b/package/cfg/config.yaml
index 8c62135f..7bc63291 100644
--- a/package/cfg/config.yaml
+++ b/package/cfg/config.yaml
@@ -215,6 +215,9 @@ version_mapping:
   "1.24": "cis-1.24"
   "1.25": "cis-1.7"
   "1.26": "cis-1.8"
+  "1.27": "cis-1.9"
+  "1.28": "cis-1.9"
+  "1.29": "cis-1.9"
   "eks-1.2.0":
     - "eks-1.2.0"
   "gke-1.2.0":
@@ -266,6 +269,12 @@ target_mapping:
     - "controlplane"
     - "etcd"
     - "policies"
+  "cis-1.9":
+    - "master"
+    - "node"
+    - "controlplane"
+    - "etcd"
+    - "policies"
 
   # RKE1
   # rke1: Generic

From ef601fd8ed67fa05d069593a10e298da2411931a Mon Sep 17 00:00:00 2001
From: Andy Pitcher
Date: Mon, 25 Nov 2024 19:44:44 +0100
Subject: [PATCH 3/4] Remove generic profiles dir inside package/cfg

Reason: The generic profiles are downloaded from upstream, so there is no
need to keep them here anymore
(ref. https://github.com/aquasecurity/kube-bench/tree/main/cfg).
--- package/cfg/cis-1.23/config.yaml | 2 - package/cfg/cis-1.23/controlplane.yaml | 46 -- package/cfg/cis-1.23/etcd.yaml | 135 ---- package/cfg/cis-1.23/master.yaml | 956 ------------------------- package/cfg/cis-1.23/node.yaml | 456 ------------ package/cfg/cis-1.23/policies.yaml | 269 ------- package/cfg/cis-1.24/config.yaml | 2 - package/cfg/cis-1.24/controlplane.yaml | 46 -- package/cfg/cis-1.24/etcd.yaml | 135 ---- package/cfg/cis-1.24/master.yaml | 951 ------------------------ package/cfg/cis-1.24/node.yaml | 452 ------------ package/cfg/cis-1.24/policies.yaml | 269 ------- package/cfg/cis-1.7/config.yaml | 2 - package/cfg/cis-1.7/controlplane.yaml | 60 -- package/cfg/cis-1.7/etcd.yaml | 135 ---- package/cfg/cis-1.7/master.yaml | 947 ------------------------ package/cfg/cis-1.7/node.yaml | 451 ------------ package/cfg/cis-1.7/policies.yaml | 304 -------- package/cfg/cis-1.8/config.yaml | 2 - package/cfg/cis-1.8/controlplane.yaml | 58 -- package/cfg/cis-1.8/etcd.yaml | 128 ---- package/cfg/cis-1.8/master.yaml | 870 ---------------------- package/cfg/cis-1.8/node.yaml | 431 ----------- package/cfg/cis-1.8/policies.yaml | 270 ------- 24 files changed, 7377 deletions(-) delete mode 100644 package/cfg/cis-1.23/config.yaml delete mode 100644 package/cfg/cis-1.23/controlplane.yaml delete mode 100644 package/cfg/cis-1.23/etcd.yaml delete mode 100644 package/cfg/cis-1.23/master.yaml delete mode 100644 package/cfg/cis-1.23/node.yaml delete mode 100644 package/cfg/cis-1.23/policies.yaml delete mode 100644 package/cfg/cis-1.24/config.yaml delete mode 100644 package/cfg/cis-1.24/controlplane.yaml delete mode 100644 package/cfg/cis-1.24/etcd.yaml delete mode 100644 package/cfg/cis-1.24/master.yaml delete mode 100644 package/cfg/cis-1.24/node.yaml delete mode 100644 package/cfg/cis-1.24/policies.yaml delete mode 100644 package/cfg/cis-1.7/config.yaml delete mode 100644 package/cfg/cis-1.7/controlplane.yaml delete mode 100644 package/cfg/cis-1.7/etcd.yaml delete mode 100644 package/cfg/cis-1.7/master.yaml delete mode 100644 package/cfg/cis-1.7/node.yaml delete mode 100644 package/cfg/cis-1.7/policies.yaml delete mode 100644 package/cfg/cis-1.8/config.yaml delete mode 100644 package/cfg/cis-1.8/controlplane.yaml delete mode 100644 package/cfg/cis-1.8/etcd.yaml delete mode 100644 package/cfg/cis-1.8/master.yaml delete mode 100644 package/cfg/cis-1.8/node.yaml delete mode 100644 package/cfg/cis-1.8/policies.yaml diff --git a/package/cfg/cis-1.23/config.yaml b/package/cfg/cis-1.23/config.yaml deleted file mode 100644 index b7839455..00000000 --- a/package/cfg/cis-1.23/config.yaml +++ /dev/null @@ -1,2 +0,0 @@ ---- -## Version-specific settings that override the values in cfg/config.yaml diff --git a/package/cfg/cis-1.23/controlplane.yaml b/package/cfg/cis-1.23/controlplane.yaml deleted file mode 100644 index 1036d1e9..00000000 --- a/package/cfg/cis-1.23/controlplane.yaml +++ /dev/null @@ -1,46 +0,0 @@ ---- -controls: -version: "cis-1.23" -id: 3 -text: "Control Plane Configuration" -type: "controlplane" -groups: - - id: 3.1 - text: "Authentication and Authorization" - checks: - - id: 3.1.1 - text: "Client certificate authentication should not be used for users (Manual)" - type: "manual" - remediation: | - Alternative mechanisms provided by Kubernetes such as the use of OIDC should be - implemented in place of client certificates. 
- scored: false - - - id: 3.2 - text: "Logging" - checks: - - id: 3.2.1 - text: "Ensure that a minimal audit policy is created (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-policy-file" - set: true - remediation: | - Create an audit policy file for your cluster. - scored: false - - - id: 3.2.2 - text: "Ensure that the audit policy covers key security concerns (Manual)" - type: "manual" - remediation: | - Review the audit policy provided for the cluster and ensure that it covers - at least the following areas, - - Access to Secrets managed by the cluster. Care should be taken to only - log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in - order to avoid risk of logging sensitive data. - - Modification of Pod and Deployment objects. - - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. - For most requests, minimally logging at the Metadata level is recommended - (the most basic level of logging). - scored: false diff --git a/package/cfg/cis-1.23/etcd.yaml b/package/cfg/cis-1.23/etcd.yaml deleted file mode 100644 index 92b72a6f..00000000 --- a/package/cfg/cis-1.23/etcd.yaml +++ /dev/null @@ -1,135 +0,0 @@ ---- -controls: -version: "cis-1.23" -id: 2 -text: "Etcd Node Configuration" -type: "etcd" -groups: - - id: 2 - text: "Etcd Node Configuration" - checks: - - id: 2.1 - text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--cert-file" - env: "ETCD_CERT_FILE" - - flag: "--key-file" - env: "ETCD_KEY_FILE" - remediation: | - Follow the etcd service documentation and configure TLS encryption. - Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml - on the master node and set the below parameters. - --cert-file= - --key-file= - scored: true - - - id: 2.2 - text: "Ensure that the --client-cert-auth argument is set to true (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - test_items: - - flag: "--client-cert-auth" - env: "ETCD_CLIENT_CERT_AUTH" - compare: - op: eq - value: true - remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and set the below parameter. - --client-cert-auth="true" - scored: true - - - id: 2.3 - text: "Ensure that the --auto-tls argument is not set to true (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--auto-tls" - env: "ETCD_AUTO_TLS" - set: false - - flag: "--auto-tls" - env: "ETCD_AUTO_TLS" - compare: - op: eq - value: false - remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and either remove the --auto-tls parameter or set it to false. - --auto-tls=false - scored: true - - - id: 2.4 - text: "Ensure that the --peer-cert-file and --peer-key-file arguments are - set as appropriate (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--peer-cert-file" - env: "ETCD_PEER_CERT_FILE" - - flag: "--peer-key-file" - env: "ETCD_PEER_KEY_FILE" - remediation: | - Follow the etcd service documentation and configure peer TLS encryption as appropriate - for your etcd cluster. - Then, edit the etcd pod specification file $etcdconf on the - master node and set the below parameters. 
- --peer-client-file= - --peer-key-file= - scored: true - - - id: 2.5 - text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - test_items: - - flag: "--peer-client-cert-auth" - env: "ETCD_PEER_CLIENT_CERT_AUTH" - compare: - op: eq - value: true - remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and set the below parameter. - --peer-client-cert-auth=true - scored: true - - - id: 2.6 - text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--peer-auto-tls" - env: "ETCD_PEER_AUTO_TLS" - set: false - - flag: "--peer-auto-tls" - env: "ETCD_PEER_AUTO_TLS" - compare: - op: eq - value: false - remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and either remove the --peer-auto-tls parameter or set it to false. - --peer-auto-tls=false - scored: true - - - id: 2.7 - text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - test_items: - - flag: "--trusted-ca-file" - env: "ETCD_TRUSTED_CA_FILE" - remediation: | - [Manual test] - Follow the etcd documentation and create a dedicated certificate authority setup for the - etcd service. - Then, edit the etcd pod specification file $etcdconf on the - master node and set the below parameter. - --trusted-ca-file= - scored: false diff --git a/package/cfg/cis-1.23/master.yaml b/package/cfg/cis-1.23/master.yaml deleted file mode 100644 index 500dd0ed..00000000 --- a/package/cfg/cis-1.23/master.yaml +++ /dev/null @@ -1,956 +0,0 @@ ---- -controls: -version: "cis-1.23" -id: 1 -text: "Control Plane Security Configuration" -type: "master" -groups: - - id: 1.1 - text: "Control Plane Node Configuration Files" - checks: - - id: 1.1.1 - text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - Run the below command (based on the file location on your system) on the - control plane node. - For example, chmod 644 $apiserverconf - scored: true - - - id: 1.1.2 - text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root $apiserverconf - scored: true - - - id: 1.1.3 - text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. 
- For example, chmod 644 $controllermanagerconf - scored: true - - - id: 1.1.4 - text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root $controllermanagerconf - scored: true - - - id: 1.1.5 - text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chmod 644 $schedulerconf - scored: true - - - id: 1.1.6 - text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root $schedulerconf - scored: true - - - id: 1.1.7 - text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chmod 644 $etcdconf - scored: true - - - id: 1.1.8 - text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chown root:root $etcdconf - scored: true - - - id: 1.1.9 - text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)" - audit: | - ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a - find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. 
- For example, chmod 644 - scored: false - - - id: 1.1.10 - text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" - audit: | - ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G - find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chown root:root - scored: false - - - id: 1.1.11 - text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" - audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c permissions=%a - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "700" - remediation: | - On the etcd server node, get the etcd data directory, passed as an argument --data-dir, - from the command 'ps -ef | grep etcd'. - Run the below command (based on the etcd data directory found above). For example, - chmod 700 /var/lib/etcd - scored: true - - - id: 1.1.12 - text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" - audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G - tests: - test_items: - - flag: "etcd:etcd" - remediation: | - On the etcd server node, get the etcd data directory, passed as an argument --data-dir, - from the command 'ps -ef | grep etcd'. - Run the below command (based on the etcd data directory found above). - For example, chown etcd:etcd /var/lib/etcd - scored: true - - - id: 1.1.13 - text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chmod 600 /etc/kubernetes/admin.conf - scored: true - - - id: 1.1.14 - text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root /etc/kubernetes/admin.conf - scored: true - - - id: 1.1.15 - text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. 
- For example, - chmod 644 $schedulerkubeconfig - scored: true - - - id: 1.1.16 - text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chown root:root $schedulerkubeconfig - scored: true - - - id: 1.1.17 - text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chmod 644 $controllermanagerkubeconfig - scored: true - - - id: 1.1.18 - text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chown root:root $controllermanagerkubeconfig - scored: true - - - id: 1.1.19 - text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" - audit: "find /etc/kubernetes/pki/ | xargs stat -c %U:%G" - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chown -R root:root /etc/kubernetes/pki/ - scored: true - - - id: 1.1.20 - text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Manual)" - audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a" - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chmod -R 644 /etc/kubernetes/pki/*.crt - scored: false - - - id: 1.1.21 - text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" - audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a" - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chmod -R 600 /etc/kubernetes/pki/*.key - scored: false - - - id: 1.2 - text: "API Server" - checks: - - id: 1.2.1 - text: "Ensure that the --anonymous-auth argument is set to false (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--anonymous-auth" - compare: - op: eq - value: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the below parameter. 
- --anonymous-auth=false - scored: false - - - id: 1.2.2 - text: "Ensure that the --token-auth-file parameter is not set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--token-auth-file" - set: false - remediation: | - Follow the documentation and configure alternate mechanisms for authentication. Then, - edit the API server pod specification file $apiserverconf - on the control plane node and remove the --token-auth-file= parameter. - scored: true - - - id: 1.2.3 - text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--enable-admission-plugins" - compare: - op: nothave - value: "DenyServiceExternalIPs" - set: true - - flag: "--enable-admission-plugins" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and remove the `DenyServiceExternalIPs` - from enabled admission plugins. - scored: true - - - id: 1.2.4 - text: "Ensure that the --kubelet-https argument is set to true (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--kubelet-https" - compare: - op: eq - value: true - - flag: "--kubelet-https" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and remove the --kubelet-https parameter. - scored: true - - - id: 1.2.5 - text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--kubelet-client-certificate" - - flag: "--kubelet-client-key" - remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the - apiserver and kubelets. Then, edit API server pod specification file - $apiserverconf on the control plane node and set the - kubelet client certificate and key parameters as below. - --kubelet-client-certificate= - --kubelet-client-key= - scored: true - - - id: 1.2.6 - text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--kubelet-certificate-authority" - remediation: | - Follow the Kubernetes documentation and setup the TLS connection between - the apiserver and kubelets. Then, edit the API server pod specification file - $apiserverconf on the control plane node and set the - --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. - --kubelet-certificate-authority= - scored: true - - - id: 1.2.7 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--authorization-mode" - compare: - op: nothave - value: "AlwaysAllow" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. - One such example could be as below. 
- --authorization-mode=RBAC - scored: true - - - id: 1.2.8 - text: "Ensure that the --authorization-mode argument includes Node (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--authorization-mode" - compare: - op: has - value: "Node" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --authorization-mode parameter to a value that includes Node. - --authorization-mode=Node,RBAC - scored: true - - - id: 1.2.9 - text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--authorization-mode" - compare: - op: has - value: "RBAC" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, - for example `--authorization-mode=Node,RBAC`. - scored: true - - - id: 1.2.10 - text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "EventRateLimit" - remediation: | - Follow the Kubernetes documentation and set the desired limits in a configuration file. - Then, edit the API server pod specification file $apiserverconf - and set the below parameters. - --enable-admission-plugins=...,EventRateLimit,... - --admission-control-config-file= - scored: false - - - id: 1.2.11 - text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--enable-admission-plugins" - compare: - op: nothave - value: AlwaysAdmit - - flag: "--enable-admission-plugins" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a - value that does not include AlwaysAdmit. - scored: true - - - id: 1.2.12 - text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "AlwaysPullImages" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --enable-admission-plugins parameter to include - AlwaysPullImages. - --enable-admission-plugins=...,AlwaysPullImages,... - scored: false - - - id: 1.2.13 - text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "SecurityContextDeny" - - flag: "--enable-admission-plugins" - compare: - op: has - value: "PodSecurityPolicy" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --enable-admission-plugins parameter to include - SecurityContextDeny, unless PodSecurityPolicy is already in place. - --enable-admission-plugins=...,SecurityContextDeny,... 
- scored: false - - - id: 1.2.14 - text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--disable-admission-plugins" - compare: - op: nothave - value: "ServiceAccount" - - flag: "--disable-admission-plugins" - set: false - remediation: | - Follow the documentation and create ServiceAccount objects as per your environment. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and ensure that the --disable-admission-plugins parameter is set to a - value that does not include ServiceAccount. - scored: true - - - id: 1.2.15 - text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--disable-admission-plugins" - compare: - op: nothave - value: "NamespaceLifecycle" - - flag: "--disable-admission-plugins" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --disable-admission-plugins parameter to - ensure it does not include NamespaceLifecycle. - scored: true - - - id: 1.2.16 - text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "NodeRestriction" - remediation: | - Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the --enable-admission-plugins parameter to a - value that includes NodeRestriction. - --enable-admission-plugins=...,NodeRestriction,... - scored: true - - - id: 1.2.17 - text: "Ensure that the --secure-port argument is not set to 0 (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--secure-port" - compare: - op: gt - value: 0 - - flag: "--secure-port" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and either remove the --secure-port parameter or - set it to a different (non-zero) desired port. - scored: true - - - id: 1.2.18 - text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--profiling" - compare: - op: eq - value: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the below parameter. 
- --profiling=false - scored: true - - - id: 1.2.19 - text: "Ensure that the --audit-log-path argument is set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-path" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-path parameter to a suitable path and - file where you would like audit logs to be written, for example, - --audit-log-path=/var/log/apiserver/audit.log - scored: true - - - id: 1.2.20 - text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-maxage" - compare: - op: gte - value: 30 - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-maxage parameter to 30 - or as an appropriate number of days, for example, - --audit-log-maxage=30 - scored: true - - - id: 1.2.21 - text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-maxbackup" - compare: - op: gte - value: 10 - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate - value. For example, - --audit-log-maxbackup=10 - scored: true - - - id: 1.2.22 - text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-maxsize" - compare: - op: gte - value: 100 - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. - For example, to set it as 100 MB, --audit-log-maxsize=100 - scored: true - - - id: 1.2.23 - text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - type: manual - remediation: | - Edit the API server pod specification file $apiserverconf - and set the below parameter as appropriate and if needed. - For example, --request-timeout=300s - scored: false - - - id: 1.2.24 - text: "Ensure that the --service-account-lookup argument is set to true (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--service-account-lookup" - set: false - - flag: "--service-account-lookup" - compare: - op: eq - value: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the below parameter. - --service-account-lookup=true - Alternatively, you can delete the --service-account-lookup parameter from this file so - that the default takes effect. - scored: true - - - id: 1.2.25 - text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--service-account-key-file" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --service-account-key-file parameter - to the public key file for service accounts. 
For example, - --service-account-key-file= - scored: true - - - id: 1.2.26 - text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--etcd-certfile" - - flag: "--etcd-keyfile" - remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the etcd certificate and key file parameters. - --etcd-certfile= - --etcd-keyfile= - scored: true - - - id: 1.2.27 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--tls-cert-file" - - flag: "--tls-private-key-file" - remediation: | - Follow the Kubernetes documentation and set up the TLS connection on the apiserver. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the TLS certificate and private key file parameters. - --tls-cert-file= - --tls-private-key-file= - scored: true - - - id: 1.2.28 - text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--client-ca-file" - remediation: | - Follow the Kubernetes documentation and set up the TLS connection on the apiserver. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the client certificate authority file. - --client-ca-file= - scored: true - - - id: 1.2.29 - text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--etcd-cafile" - remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the etcd certificate authority file parameter. - --etcd-cafile= - scored: true - - - id: 1.2.30 - text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--encryption-provider-config" - remediation: | - Follow the Kubernetes documentation and configure a EncryptionConfig file. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the --encryption-provider-config parameter to the path of that file. - For example, --encryption-provider-config= - scored: false - - - id: 1.2.31 - text: "Ensure that encryption providers are appropriately configured (Manual)" - type: manual - audit: | - ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') - if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi - tests: - test_items: - - flag: "provider" - compare: - op: valid_elements - value: "aescbc,kms,secretbox" - remediation: | - Follow the Kubernetes documentation and configure a EncryptionConfig file. - In this file, choose aescbc, kms or secretbox as the encryption provider. 
- scored: false - - - id: 1.2.32 - text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--tls-cipher-suites" - compare: - op: valid_elements - value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" - remediation: | - Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml - on the control plane node and set the below parameter. - --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, - TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 - scored: false - - - id: 1.3 - text: "Controller Manager" - checks: - - id: 1.3.1 - text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--terminated-pod-gc-threshold" - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, - for example, --terminated-pod-gc-threshold=10 - scored: false - - - id: 1.3.2 - text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--profiling" - compare: - op: eq - value: false - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the below parameter. - --profiling=false - scored: true - - - id: 1.3.3 - text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--use-service-account-credentials" - compare: - op: noteq - value: false - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node to set the below parameter. 
- --use-service-account-credentials=true - scored: true - - - id: 1.3.4 - text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--service-account-private-key-file" - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --service-account-private-key-file parameter - to the private key file for service accounts. - --service-account-private-key-file= - scored: true - - - id: 1.3.5 - text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--root-ca-file" - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --root-ca-file parameter to the certificate bundle file`. - --root-ca-file= - scored: true - - - id: 1.3.6 - text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--feature-gates" - compare: - op: nothave - value: "RotateKubeletServerCertificate=false" - set: true - - flag: "--feature-gates" - set: false - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. - --feature-gates=RotateKubeletServerCertificate=true - scored: true - - - id: 1.3.7 - text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--bind-address" - compare: - op: eq - value: "127.0.0.1" - - flag: "--bind-address" - set: false - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and ensure the correct value for the --bind-address parameter - scored: true - - - id: 1.4 - text: "Scheduler" - checks: - - id: 1.4.1 - text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" - tests: - test_items: - - flag: "--profiling" - compare: - op: eq - value: false - remediation: | - Edit the Scheduler pod specification file $schedulerconf file - on the control plane node and set the below parameter. 
- --profiling=false - scored: true - - - id: 1.4.2 - text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" - audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--bind-address" - compare: - op: eq - value: "127.0.0.1" - - flag: "--bind-address" - set: false - remediation: | - Edit the Scheduler pod specification file $schedulerconf - on the control plane node and ensure the correct value for the --bind-address parameter - scored: true diff --git a/package/cfg/cis-1.23/node.yaml b/package/cfg/cis-1.23/node.yaml deleted file mode 100644 index 4c1cfc7f..00000000 --- a/package/cfg/cis-1.23/node.yaml +++ /dev/null @@ -1,456 +0,0 @@ ---- -controls: -version: "cis-1.23" -id: 4 -text: "Worker Node Security Configuration" -type: "node" -groups: - - id: 4.1 - text: "Worker Node Configuration Files" - checks: - - id: 4.1.1 - text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, chmod 644 $kubeletsvc - scored: true - - - id: 4.1.2 - text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' - tests: - test_items: - - flag: root:root - remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, - chown root:root $kubeletsvc - scored: true - - - id: 4.1.3 - text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)" - audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' - tests: - bin_op: or - test_items: - - flag: "permissions" - set: true - compare: - op: bitmask - value: "644" - - flag: "$proxykubeconfig" - set: false - remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, - chmod 644 $proxykubeconfig - scored: false - - - id: 4.1.4 - text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" - audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' - tests: - bin_op: or - test_items: - - flag: root:root - - flag: "$proxykubeconfig" - set: false - remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, chown root:root $proxykubeconfig - scored: false - - - id: 4.1.5 - text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - Run the below command (based on the file location on your system) on the each worker node. 
- For example, - chmod 644 $kubeletkubeconfig - scored: true - - - id: 4.1.6 - text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' - tests: - test_items: - - flag: root:root - remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, - chown root:root $kubeletkubeconfig - scored: true - - - id: 4.1.7 - text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual)" - audit: "check_cafile_permissions.sh" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - Run the following command to modify the file permissions of the - --client-ca-file chmod 644 - scored: false - - - id: 4.1.8 - text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" - audit: "check_cafile_ownership.sh" - tests: - test_items: - - flag: root:root - compare: - op: eq - value: root:root - remediation: | - Run the following command to modify the ownership of the --client-ca-file. - chown root:root - scored: false - - - id: 4.1.9 - text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - Run the following command (using the config file location identified in the Audit step) - chmod 644 $kubeletconf - scored: true - - - id: 4.1.10 - text: "Ensure that the kubelet --config configuration file ownership is set to root:root (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' - tests: - test_items: - - flag: root:root - remediation: | - Run the following command (using the config file location identified in the Audit step) - chown root:root $kubeletconf - scored: true - - - id: 4.2 - text: "Kubelet" - checks: - - id: 4.2.1 - text: "Ensure that the --anonymous-auth argument is set to false (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: "--anonymous-auth" - path: '{.authentication.anonymous.enabled}' - compare: - op: eq - value: false - remediation: | - If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to - `false`. - If using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - `--anonymous-auth=false` - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 4.2.2 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --authorization-mode - path: '{.authorization.mode}' - compare: - op: nothave - value: AlwaysAllow - remediation: | - If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If - using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. 
- --authorization-mode=Webhook - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 4.2.3 - text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --client-ca-file - path: '{.authentication.x509.clientCAFile}' - remediation: | - If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to - the location of the client CA file. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. - --client-ca-file= - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 4.2.4 - text: "Ensure that the --read-only-port argument is set to 0 (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - bin_op: or - test_items: - - flag: "--read-only-port" - path: '{.readOnlyPort}' - compare: - op: eq - value: 0 - - flag: "--read-only-port" - path: '{.readOnlyPort}' - set: false - remediation: | - If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --read-only-port=0 - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 4.2.5 - text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --streaming-connection-idle-timeout - path: '{.streamingConnectionIdleTimeout}' - compare: - op: noteq - value: 0 - - flag: --streaming-connection-idle-timeout - path: '{.streamingConnectionIdleTimeout}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a - value other than 0. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --streaming-connection-idle-timeout=5m - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 4.2.6 - text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --protect-kernel-defaults - path: '{.protectKernelDefaults}' - compare: - op: eq - value: true - remediation: | - If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --protect-kernel-defaults=true - Based on your system, restart the kubelet service. 
For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 4.2.7 - text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --make-iptables-util-chains - path: '{.makeIPTablesUtilChains}' - compare: - op: eq - value: true - - flag: --make-iptables-util-chains - path: '{.makeIPTablesUtilChains}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - remove the --make-iptables-util-chains argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 4.2.8 - text: "Ensure that the --hostname-override argument is not set (Manual)" - # This is one of those properties that can only be set as a command line argument. - # To check if the property is set as expected, we need to parse the kubelet command - # instead reading the Kubelet Configuration file. - audit: "/bin/ps -fC $kubeletbin " - tests: - test_items: - - flag: --hostname-override - set: false - remediation: | - Edit the kubelet service file $kubeletsvc - on each worker node and remove the --hostname-override argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 4.2.9 - text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --event-qps - path: '{.eventRecordQPS}' - compare: - op: eq - value: 0 - remediation: | - If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 4.2.10 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --tls-cert-file - path: '{.tlsCertFile}' - - flag: --tls-private-key-file - path: '{.tlsPrivateKeyFile}' - remediation: | - If using a Kubelet config file, edit the file to set `tlsCertFile` to the location - of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` - to the location of the corresponding private key file. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameters in KUBELET_CERTIFICATE_ARGS variable. - --tls-cert-file= - --tls-private-key-file= - Based on your system, restart the kubelet service. 
For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 4.2.11 - text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --rotate-certificates - path: '{.rotateCertificates}' - compare: - op: eq - value: true - - flag: --rotate-certificates - path: '{.rotateCertificates}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or - remove it altogether to use the default value. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS - variable. - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 4.2.12 - text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - bin_op: or - test_items: - - flag: RotateKubeletServerCertificate - path: '{.featureGates.RotateKubeletServerCertificate}' - compare: - op: nothave - value: false - - flag: RotateKubeletServerCertificate - path: '{.featureGates.RotateKubeletServerCertificate}' - set: false - remediation: | - Edit the kubelet service file $kubeletsvc - on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. - --feature-gates=RotateKubeletServerCertificate=true - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 4.2.13 - text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --tls-cipher-suites - path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' - compare: - op: valid_elements - value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - remediation: | - If using a Kubelet config file, edit the file to set `TLSCipherSuites` to - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - or to a subset of these values. - If using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the --tls-cipher-suites parameter as follows, or to a subset of these values. - --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - Based on your system, restart the kubelet service. 
For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false diff --git a/package/cfg/cis-1.23/policies.yaml b/package/cfg/cis-1.23/policies.yaml deleted file mode 100644 index 10e3c227..00000000 --- a/package/cfg/cis-1.23/policies.yaml +++ /dev/null @@ -1,269 +0,0 @@ ---- -controls: -version: "cis-1.23" -id: 5 -text: "Kubernetes Policies" -type: "policies" -groups: - - id: 5.1 - text: "RBAC and Service Accounts" - checks: - - id: 5.1.1 - text: "Ensure that the cluster-admin role is only used where required (Manual)" - type: "manual" - remediation: | - Identify all clusterrolebindings to the cluster-admin role. Check if they are used and - if they need this role or if they could use a role with fewer privileges. - Where possible, first bind users to a lower privileged role and then remove the - clusterrolebinding to the cluster-admin role : - kubectl delete clusterrolebinding [name] - scored: false - - - id: 5.1.2 - text: "Minimize access to secrets (Manual)" - type: "manual" - remediation: | - Where possible, remove get, list and watch access to Secret objects in the cluster. - scored: false - - - id: 5.1.3 - text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" - type: "manual" - remediation: | - Where possible replace any use of wildcards in clusterroles and roles with specific - objects or actions. - scored: false - - - id: 5.1.4 - text: "Minimize access to create pods (Manual)" - type: "manual" - remediation: | - Where possible, remove create access to pod objects in the cluster. - scored: false - - - id: 5.1.5 - text: "Ensure that default service accounts are not actively used. (Manual)" - type: "manual" - remediation: | - Create explicit service accounts wherever a Kubernetes workload requires specific access - to the Kubernetes API server. - Modify the configuration of each default service account to include this value - automountServiceAccountToken: false - scored: false - - - id: 5.1.6 - text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" - type: "manual" - remediation: | - Modify the definition of pods and service accounts which do not need to mount service - account tokens to disable it. - scored: false - - - id: 5.1.7 - text: "Avoid use of system:masters group (Manual)" - type: "manual" - remediation: | - Remove the system:masters group from all users in the cluster. - scored: false - - - id: 5.1.8 - text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" - type: "manual" - remediation: | - Where possible, remove the impersonate, bind and escalate rights from subjects. - scored: false - - - id: 5.2 - text: "Pod Security Standards" - checks: - - id: 5.2.1 - text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" - type: "manual" - remediation: | - Ensure that either Pod Security Admission or an external policy control system is in place - for every namespace which contains user workloads. - scored: false - - - id: 5.2.2 - text: "Minimize the admission of privileged containers (Manual)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of privileged containers. 
- scored: false - - - id: 5.2.3 - text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of `hostPID` containers. - scored: true - - - id: 5.2.4 - text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of `hostIPC` containers. - scored: true - - - id: 5.2.5 - text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of `hostNetwork` containers. - scored: true - - - id: 5.2.6 - text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. - scored: true - - - id: 5.2.7 - text: "Minimize the admission of root containers (Automated)" - type: "manual" - remediation: | - Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` - or `MustRunAs` with the range of UIDs not including 0, is set. - scored: true - - - id: 5.2.8 - text: "Minimize the admission of containers with the NET_RAW capability (Automated)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of containers with the `NET_RAW` capability. - scored: true - - - id: 5.2.9 - text: "Minimize the admission of containers with added capabilities (Automated)" - type: "manual" - remediation: | - Ensure that `allowedCapabilities` is not present in policies for the cluster unless - it is set to an empty array. - scored: true - - - id: 5.2.10 - text: "Minimize the admission of containers with capabilities assigned (Manual)" - type: "manual" - remediation: | - Review the use of capabilities in applications running on your cluster. Where a namespace - contains applications which do not require any Linux capabilities to operate, consider adding - a PSP which forbids the admission of containers which do not drop all capabilities. - scored: false - - - id: 5.2.11 - text: "Minimize the admission of Windows HostProcess containers (Manual)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. - scored: false - - - id: 5.2.12 - text: "Minimize the admission of HostPath volumes (Manual)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of containers with `hostPath` volumes. - scored: false - - - id: 5.2.13 - text: "Minimize the admission of containers which use HostPorts (Manual)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of containers which use `hostPort` sections.
- scored: false - - - id: 5.3 - text: "Network Policies and CNI" - checks: - - id: 5.3.1 - text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" - type: "manual" - remediation: | - If the CNI plugin in use does not support network policies, consideration should be given to - making use of a different plugin, or finding an alternate mechanism for restricting traffic - in the Kubernetes cluster. - scored: false - - - id: 5.3.2 - text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" - type: "manual" - remediation: | - Follow the documentation and create NetworkPolicy objects as you need them. - scored: false - - - id: 5.4 - text: "Secrets Management" - checks: - - id: 5.4.1 - text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" - type: "manual" - remediation: | - If possible, rewrite application code to read Secrets from mounted secret files, rather than - from environment variables. - scored: false - - - id: 5.4.2 - text: "Consider external secret storage (Manual)" - type: "manual" - remediation: | - Refer to the Secrets management options offered by your cloud provider or a third-party - secrets management solution. - scored: false - - - id: 5.5 - text: "Extensible Admission Control" - checks: - - id: 5.5.1 - text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" - type: "manual" - remediation: | - Follow the Kubernetes documentation and setup image provenance. - scored: false - - - id: 5.7 - text: "General Policies" - checks: - - id: 5.7.1 - text: "Create administrative boundaries between resources using namespaces (Manual)" - type: "manual" - remediation: | - Follow the documentation and create namespaces for objects in your deployment as you need - them. - scored: false - - - id: 5.7.2 - text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" - type: "manual" - remediation: | - Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. - An example is as below: - securityContext: - seccompProfile: - type: RuntimeDefault - scored: false - - - id: 5.7.3 - text: "Apply SecurityContext to your Pods and Containers (Manual)" - type: "manual" - remediation: | - Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a - suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker - Containers. - scored: false - - - id: 5.7.4 - text: "The default namespace should not be used (Manual)" - type: "manual" - remediation: | - Ensure that namespaces are created to allow for appropriate segregation of Kubernetes - resources and that all new resources are created in a specific namespace. 
- scored: false diff --git a/package/cfg/cis-1.24/config.yaml b/package/cfg/cis-1.24/config.yaml deleted file mode 100644 index b7839455..00000000 --- a/package/cfg/cis-1.24/config.yaml +++ /dev/null @@ -1,2 +0,0 @@ ---- -## Version-specific settings that override the values in cfg/config.yaml diff --git a/package/cfg/cis-1.24/controlplane.yaml b/package/cfg/cis-1.24/controlplane.yaml deleted file mode 100644 index 73e22062..00000000 --- a/package/cfg/cis-1.24/controlplane.yaml +++ /dev/null @@ -1,46 +0,0 @@ ---- -controls: -version: "cis-1.24" -id: 3 -text: "Control Plane Configuration" -type: "controlplane" -groups: - - id: 3.1 - text: "Authentication and Authorization" - checks: - - id: 3.1.1 - text: "Client certificate authentication should not be used for users (Manual)" - type: "manual" - remediation: | - Alternative mechanisms provided by Kubernetes such as the use of OIDC should be - implemented in place of client certificates. - scored: false - - - id: 3.2 - text: "Logging" - checks: - - id: 3.2.1 - text: "Ensure that a minimal audit policy is created (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-policy-file" - set: true - remediation: | - Create an audit policy file for your cluster. - scored: false - - - id: 3.2.2 - text: "Ensure that the audit policy covers key security concerns (Manual)" - type: "manual" - remediation: | - Review the audit policy provided for the cluster and ensure that it covers - at least the following areas, - - Access to Secrets managed by the cluster. Care should be taken to only - log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in - order to avoid risk of logging sensitive data. - - Modification of Pod and Deployment objects. - - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. - For most requests, minimally logging at the Metadata level is recommended - (the most basic level of logging). - scored: false diff --git a/package/cfg/cis-1.24/etcd.yaml b/package/cfg/cis-1.24/etcd.yaml deleted file mode 100644 index 918069ee..00000000 --- a/package/cfg/cis-1.24/etcd.yaml +++ /dev/null @@ -1,135 +0,0 @@ ---- -controls: -version: "cis-1.24" -id: 2 -text: "Etcd Node Configuration" -type: "etcd" -groups: - - id: 2 - text: "Etcd Node Configuration" - checks: - - id: 2.1 - text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--cert-file" - env: "ETCD_CERT_FILE" - - flag: "--key-file" - env: "ETCD_KEY_FILE" - remediation: | - Follow the etcd service documentation and configure TLS encryption. - Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml - on the master node and set the below parameters. - --cert-file= - --key-file= - scored: true - - - id: 2.2 - text: "Ensure that the --client-cert-auth argument is set to true (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - test_items: - - flag: "--client-cert-auth" - env: "ETCD_CLIENT_CERT_AUTH" - compare: - op: eq - value: true - remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and set the below parameter. 
- --client-cert-auth="true" - scored: true - - - id: 2.3 - text: "Ensure that the --auto-tls argument is not set to true (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--auto-tls" - env: "ETCD_AUTO_TLS" - set: false - - flag: "--auto-tls" - env: "ETCD_AUTO_TLS" - compare: - op: eq - value: false - remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and either remove the --auto-tls parameter or set it to false. - --auto-tls=false - scored: true - - - id: 2.4 - text: "Ensure that the --peer-cert-file and --peer-key-file arguments are - set as appropriate (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--peer-cert-file" - env: "ETCD_PEER_CERT_FILE" - - flag: "--peer-key-file" - env: "ETCD_PEER_KEY_FILE" - remediation: | - Follow the etcd service documentation and configure peer TLS encryption as appropriate - for your etcd cluster. - Then, edit the etcd pod specification file $etcdconf on the - master node and set the below parameters. - --peer-cert-file= - --peer-key-file= - scored: true - - - id: 2.5 - text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - test_items: - - flag: "--peer-client-cert-auth" - env: "ETCD_PEER_CLIENT_CERT_AUTH" - compare: - op: eq - value: true - remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and set the below parameter. - --peer-client-cert-auth=true - scored: true - - - id: 2.6 - text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--peer-auto-tls" - env: "ETCD_PEER_AUTO_TLS" - set: false - - flag: "--peer-auto-tls" - env: "ETCD_PEER_AUTO_TLS" - compare: - op: eq - value: false - remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and either remove the --peer-auto-tls parameter or set it to false. - --peer-auto-tls=false - scored: true - - - id: 2.7 - text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - test_items: - - flag: "--trusted-ca-file" - env: "ETCD_TRUSTED_CA_FILE" - remediation: | - [Manual test] - Follow the etcd documentation and create a dedicated certificate authority setup for the - etcd service. - Then, edit the etcd pod specification file $etcdconf on the - master node and set the below parameter. - --trusted-ca-file= - scored: false diff --git a/package/cfg/cis-1.24/master.yaml b/package/cfg/cis-1.24/master.yaml deleted file mode 100644 index d6fe54a0..00000000 --- a/package/cfg/cis-1.24/master.yaml +++ /dev/null @@ -1,951 +0,0 @@ ---- -controls: -version: "cis-1.24" -id: 1 -text: "Control Plane Security Configuration" -type: "master" -groups: - - id: 1.1 - text: "Control Plane Node Configuration Files" - checks: - - id: 1.1.1 - text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "644" - remediation: | - Run the below command (based on the file location on your system) on the - control plane node.
- For example, chmod 644 $apiserverconf - scored: true - - - id: 1.1.2 - text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root $apiserverconf - scored: true - - - id: 1.1.3 - text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chmod 600 $controllermanagerconf - scored: true - - - id: 1.1.4 - text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root $controllermanagerconf - scored: true - - - id: 1.1.5 - text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chmod 600 $schedulerconf - scored: true - - - id: 1.1.6 - text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root $schedulerconf - scored: true - - - id: 1.1.7 - text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chmod 600 $etcdconf - scored: true - - - id: 1.1.8 - text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. 
- For example, - chown root:root $etcdconf - scored: true - - - id: 1.1.9 - text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" - audit: | - ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a - find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chmod 600 - scored: false - - - id: 1.1.10 - text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" - audit: | - ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G - find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chown root:root - scored: false - - - id: 1.1.11 - text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" - audit: | - DATA_DIR='' - for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do - if test -d "$d"; then DATA_DIR="$d"; fi - done - if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi - stat -c permissions=%a "$DATA_DIR" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "700" - remediation: | - On the etcd server node, get the etcd data directory, passed as an argument --data-dir, - from the command 'ps -ef | grep etcd'. - Run the below command (based on the etcd data directory found above). For example, - chmod 700 /var/lib/etcd - scored: true - - - id: 1.1.12 - text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" - audit: | - DATA_DIR='' - for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do - if test -d "$d"; then DATA_DIR="$d"; fi - done - if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi - stat -c %U:%G "$DATA_DIR" - tests: - test_items: - - flag: "etcd:etcd" - remediation: | - On the etcd server node, get the etcd data directory, passed as an argument --data-dir, - from the command 'ps -ef | grep etcd'. - Run the below command (based on the etcd data directory found above). - For example, chown etcd:etcd /var/lib/etcd - scored: true - - - id: 1.1.13 - text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. 
- For example, chmod 600 /etc/kubernetes/admin.conf - scored: true - - - id: 1.1.14 - text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root /etc/kubernetes/admin.conf - scored: true - - - id: 1.1.15 - text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chmod 600 $schedulerkubeconfig - scored: true - - - id: 1.1.16 - text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chown root:root $schedulerkubeconfig - scored: true - - - id: 1.1.17 - text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chmod 600 $controllermanagerkubeconfig - scored: true - - - id: 1.1.18 - text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chown root:root $controllermanagerkubeconfig - scored: true - - - id: 1.1.19 - text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" - audit: "find /etc/kubernetes/pki/ | xargs stat -c %U:%G" - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chown -R root:root /etc/kubernetes/pki/ - scored: true - - - id: 1.1.20 - text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" - audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a" - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. 
- For example, - chmod -R 600 /etc/kubernetes/pki/*.crt - scored: false - - - id: 1.1.21 - text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" - audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a" - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chmod -R 600 /etc/kubernetes/pki/*.key - scored: false - - - id: 1.2 - text: "API Server" - checks: - - id: 1.2.1 - text: "Ensure that the --anonymous-auth argument is set to false (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--anonymous-auth" - compare: - op: eq - value: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the below parameter. - --anonymous-auth=false - scored: false - - - id: 1.2.2 - text: "Ensure that the --token-auth-file parameter is not set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--token-auth-file" - set: false - remediation: | - Follow the documentation and configure alternate mechanisms for authentication. Then, - edit the API server pod specification file $apiserverconf - on the control plane node and remove the --token-auth-file= parameter. - scored: true - - - id: 1.2.3 - text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--enable-admission-plugins" - compare: - op: nothave - value: "DenyServiceExternalIPs" - set: true - - flag: "--enable-admission-plugins" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and remove the `DenyServiceExternalIPs` - from enabled admission plugins. - scored: true - - - id: 1.2.4 - text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--kubelet-client-certificate" - - flag: "--kubelet-client-key" - remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the - apiserver and kubelets. Then, edit API server pod specification file - $apiserverconf on the control plane node and set the - kubelet client certificate and key parameters as below. - --kubelet-client-certificate= - --kubelet-client-key= - scored: true - - - id: 1.2.5 - text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--kubelet-certificate-authority" - remediation: | - Follow the Kubernetes documentation and setup the TLS connection between - the apiserver and kubelets. Then, edit the API server pod specification file - $apiserverconf on the control plane node and set the - --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. 
- --kubelet-certificate-authority= - scored: true - - - id: 1.2.6 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--authorization-mode" - compare: - op: nothave - value: "AlwaysAllow" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. - One such example could be as below. - --authorization-mode=RBAC - scored: true - - - id: 1.2.7 - text: "Ensure that the --authorization-mode argument includes Node (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--authorization-mode" - compare: - op: has - value: "Node" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --authorization-mode parameter to a value that includes Node. - --authorization-mode=Node,RBAC - scored: true - - - id: 1.2.8 - text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--authorization-mode" - compare: - op: has - value: "RBAC" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, - for example `--authorization-mode=Node,RBAC`. - scored: true - - - id: 1.2.9 - text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "EventRateLimit" - remediation: | - Follow the Kubernetes documentation and set the desired limits in a configuration file. - Then, edit the API server pod specification file $apiserverconf - and set the below parameters. - --enable-admission-plugins=...,EventRateLimit,... - --admission-control-config-file= - scored: false - - - id: 1.2.10 - text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--enable-admission-plugins" - compare: - op: nothave - value: AlwaysAdmit - - flag: "--enable-admission-plugins" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a - value that does not include AlwaysAdmit. - scored: true - - - id: 1.2.11 - text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "AlwaysPullImages" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --enable-admission-plugins parameter to include - AlwaysPullImages. - --enable-admission-plugins=...,AlwaysPullImages,... 
- scored: false - - - id: 1.2.12 - text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "SecurityContextDeny" - - flag: "--enable-admission-plugins" - compare: - op: has - value: "PodSecurityPolicy" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --enable-admission-plugins parameter to include - SecurityContextDeny, unless PodSecurityPolicy is already in place. - --enable-admission-plugins=...,SecurityContextDeny,... - scored: false - - - id: 1.2.13 - text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--disable-admission-plugins" - compare: - op: nothave - value: "ServiceAccount" - - flag: "--disable-admission-plugins" - set: false - remediation: | - Follow the documentation and create ServiceAccount objects as per your environment. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and ensure that the --disable-admission-plugins parameter is set to a - value that does not include ServiceAccount. - scored: true - - - id: 1.2.14 - text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--disable-admission-plugins" - compare: - op: nothave - value: "NamespaceLifecycle" - - flag: "--disable-admission-plugins" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --disable-admission-plugins parameter to - ensure it does not include NamespaceLifecycle. - scored: true - - - id: 1.2.15 - text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "NodeRestriction" - remediation: | - Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the --enable-admission-plugins parameter to a - value that includes NodeRestriction. - --enable-admission-plugins=...,NodeRestriction,... - scored: true - - - id: 1.2.16 - text: "Ensure that the --secure-port argument is not set to 0 (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--secure-port" - compare: - op: gt - value: 0 - - flag: "--secure-port" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and either remove the --secure-port parameter or - set it to a different (non-zero) desired port. - scored: true - - - id: 1.2.17 - text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--profiling" - compare: - op: eq - value: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the below parameter. 
- --profiling=false - scored: true - - - id: 1.2.18 - text: "Ensure that the --audit-log-path argument is set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-path" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-path parameter to a suitable path and - file where you would like audit logs to be written, for example, - --audit-log-path=/var/log/apiserver/audit.log - scored: true - - - id: 1.2.19 - text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-maxage" - compare: - op: gte - value: 30 - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-maxage parameter to 30 - or as an appropriate number of days, for example, - --audit-log-maxage=30 - scored: true - - - id: 1.2.20 - text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-maxbackup" - compare: - op: gte - value: 10 - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate - value. For example, - --audit-log-maxbackup=10 - scored: true - - - id: 1.2.21 - text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-maxsize" - compare: - op: gte - value: 100 - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. - For example, to set it as 100 MB, --audit-log-maxsize=100 - scored: true - - - id: 1.2.22 - text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - type: manual - remediation: | - Edit the API server pod specification file $apiserverconf - and set the below parameter as appropriate and if needed. - For example, --request-timeout=300s - scored: false - - - id: 1.2.23 - text: "Ensure that the --service-account-lookup argument is set to true (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--service-account-lookup" - set: false - - flag: "--service-account-lookup" - compare: - op: eq - value: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the below parameter. - --service-account-lookup=true - Alternatively, you can delete the --service-account-lookup parameter from this file so - that the default takes effect. - scored: true - - - id: 1.2.24 - text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--service-account-key-file" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --service-account-key-file parameter - to the public key file for service accounts. 
For example, - --service-account-key-file= - scored: true - - - id: 1.2.25 - text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--etcd-certfile" - - flag: "--etcd-keyfile" - remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the etcd certificate and key file parameters. - --etcd-certfile= - --etcd-keyfile= - scored: true - - - id: 1.2.26 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--tls-cert-file" - - flag: "--tls-private-key-file" - remediation: | - Follow the Kubernetes documentation and set up the TLS connection on the apiserver. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the TLS certificate and private key file parameters. - --tls-cert-file= - --tls-private-key-file= - scored: true - - - id: 1.2.27 - text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--client-ca-file" - remediation: | - Follow the Kubernetes documentation and set up the TLS connection on the apiserver. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the client certificate authority file. - --client-ca-file= - scored: true - - - id: 1.2.28 - text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--etcd-cafile" - remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the etcd certificate authority file parameter. - --etcd-cafile= - scored: true - - - id: 1.2.29 - text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--encryption-provider-config" - remediation: | - Follow the Kubernetes documentation and configure a EncryptionConfig file. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the --encryption-provider-config parameter to the path of that file. - For example, --encryption-provider-config= - scored: false - - - id: 1.2.30 - text: "Ensure that encryption providers are appropriately configured (Manual)" - type: manual - audit: | - ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') - if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi - tests: - test_items: - - flag: "provider" - compare: - op: valid_elements - value: "aescbc,kms,secretbox" - remediation: | - Follow the Kubernetes documentation and configure a EncryptionConfig file. - In this file, choose aescbc, kms or secretbox as the encryption provider. 
- scored: false - - - id: 1.2.31 - text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--tls-cipher-suites" - compare: - op: valid_elements - value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" - remediation: | - Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml - on the control plane node and set the below parameter. - --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, - TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 - scored: false - - - id: 1.3 - text: "Controller Manager" - checks: - - id: 1.3.1 - text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--terminated-pod-gc-threshold" - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, - for example, --terminated-pod-gc-threshold=10 - scored: false - - - id: 1.3.2 - text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--profiling" - compare: - op: eq - value: false - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the below parameter. - --profiling=false - scored: true - - - id: 1.3.3 - text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--use-service-account-credentials" - compare: - op: noteq - value: false - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node to set the below parameter. 
- --use-service-account-credentials=true - scored: true - - - id: 1.3.4 - text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--service-account-private-key-file" - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --service-account-private-key-file parameter - to the private key file for service accounts. - --service-account-private-key-file= - scored: true - - - id: 1.3.5 - text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--root-ca-file" - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --root-ca-file parameter to the certificate bundle file. - --root-ca-file= - scored: true - - - id: 1.3.6 - text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--feature-gates" - compare: - op: nothave - value: "RotateKubeletServerCertificate=false" - set: true - - flag: "--feature-gates" - set: false - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. - --feature-gates=RotateKubeletServerCertificate=true - scored: true - - - id: 1.3.7 - text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--bind-address" - compare: - op: eq - value: "127.0.0.1" - - flag: "--bind-address" - set: false - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and ensure the correct value for the --bind-address parameter - scored: true - - - id: 1.4 - text: "Scheduler" - checks: - - id: 1.4.1 - text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" - tests: - test_items: - - flag: "--profiling" - compare: - op: eq - value: false - remediation: | - Edit the Scheduler pod specification file $schedulerconf - on the control plane node and set the below parameter.
- --profiling=false - scored: true - - - id: 1.4.2 - text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" - audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--bind-address" - compare: - op: eq - value: "127.0.0.1" - - flag: "--bind-address" - set: false - remediation: | - Edit the Scheduler pod specification file $schedulerconf - on the control plane node and ensure the correct value for the --bind-address parameter - scored: true diff --git a/package/cfg/cis-1.24/node.yaml b/package/cfg/cis-1.24/node.yaml deleted file mode 100644 index 10c47580..00000000 --- a/package/cfg/cis-1.24/node.yaml +++ /dev/null @@ -1,452 +0,0 @@ ---- -controls: -version: "cis-1.24" -id: 4 -text: "Worker Node Security Configuration" -type: "node" -groups: - - id: 4.1 - text: "Worker Node Configuration Files" - checks: - - id: 4.1.1 - text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on each worker node. - For example, chmod 600 $kubeletsvc - scored: true - - - id: 4.1.2 - text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' - tests: - test_items: - - flag: root:root - remediation: | - Run the below command (based on the file location on your system) on each worker node. - For example, - chown root:root $kubeletsvc - scored: true - - - id: 4.1.3 - text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)" - audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' - tests: - bin_op: or - test_items: - - flag: "permissions" - set: true - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on each worker node. - For example, - chmod 600 $proxykubeconfig - scored: false - - - id: 4.1.4 - text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" - audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' - tests: - bin_op: or - test_items: - - flag: root:root - remediation: | - Run the below command (based on the file location on your system) on each worker node. - For example, chown root:root $proxykubeconfig - scored: false - - - id: 4.1.5 - text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on each worker node.
- For example, - chmod 600 $kubeletkubeconfig - scored: true - - - id: 4.1.6 - text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' - tests: - test_items: - - flag: root:root - remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, - chown root:root $kubeletkubeconfig - scored: true - - - id: 4.1.7 - text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)" - audit: "check_cafile_permissions.sh" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the following command to modify the file permissions of the - --client-ca-file chmod 600 - scored: false - - - id: 4.1.8 - text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" - audit: "check_cafile_ownership.sh" - tests: - test_items: - - flag: root:root - compare: - op: eq - value: root:root - remediation: | - Run the following command to modify the ownership of the --client-ca-file. - chown root:root - scored: false - - - id: 4.1.9 - text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Manual)" - audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the following command (using the config file location identified in the Audit step) - chmod 600 $kubeletconf - scored: false - - - id: 4.1.10 - text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Manual)" - audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' - tests: - test_items: - - flag: root:root - remediation: | - Run the following command (using the config file location identified in the Audit step) - chown root:root $kubeletconf - scored: false - - - id: 4.2 - text: "Kubelet" - checks: - - id: 4.2.1 - text: "Ensure that the --anonymous-auth argument is set to false (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: "--anonymous-auth" - path: '{.authentication.anonymous.enabled}' - compare: - op: eq - value: false - remediation: | - If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to - `false`. - If using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - `--anonymous-auth=false` - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 4.2.2 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --authorization-mode - path: '{.authorization.mode}' - compare: - op: nothave - value: AlwaysAllow - remediation: | - If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If - using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. 
- --authorization-mode=Webhook - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 4.2.3 - text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --client-ca-file - path: '{.authentication.x509.clientCAFile}' - remediation: | - If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to - the location of the client CA file. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. - --client-ca-file= - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 4.2.4 - text: "Verify that the --read-only-port argument is set to 0 (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - bin_op: or - test_items: - - flag: "--read-only-port" - path: '{.readOnlyPort}' - compare: - op: eq - value: 0 - - flag: "--read-only-port" - path: '{.readOnlyPort}' - set: false - remediation: | - If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --read-only-port=0 - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 4.2.5 - text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --streaming-connection-idle-timeout - path: '{.streamingConnectionIdleTimeout}' - compare: - op: noteq - value: 0 - - flag: --streaming-connection-idle-timeout - path: '{.streamingConnectionIdleTimeout}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a - value other than 0. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --streaming-connection-idle-timeout=5m - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 4.2.6 - text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --protect-kernel-defaults - path: '{.protectKernelDefaults}' - compare: - op: eq - value: true - remediation: | - If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --protect-kernel-defaults=true - Based on your system, restart the kubelet service. 
For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 4.2.7 - text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --make-iptables-util-chains - path: '{.makeIPTablesUtilChains}' - compare: - op: eq - value: true - - flag: --make-iptables-util-chains - path: '{.makeIPTablesUtilChains}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - remove the --make-iptables-util-chains argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 4.2.8 - text: "Ensure that the --hostname-override argument is not set (Manual)" - # This is one of those properties that can only be set as a command line argument. - # To check if the property is set as expected, we need to parse the kubelet command - # instead reading the Kubelet Configuration file. - audit: "/bin/ps -fC $kubeletbin " - tests: - test_items: - - flag: --hostname-override - set: false - remediation: | - Edit the kubelet service file $kubeletsvc - on each worker node and remove the --hostname-override argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 4.2.9 - text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --event-qps - path: '{.eventRecordQPS}' - compare: - op: eq - value: 0 - remediation: | - If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 4.2.10 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --tls-cert-file - path: '{.tlsCertFile}' - - flag: --tls-private-key-file - path: '{.tlsPrivateKeyFile}' - remediation: | - If using a Kubelet config file, edit the file to set `tlsCertFile` to the location - of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` - to the location of the corresponding private key file. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameters in KUBELET_CERTIFICATE_ARGS variable. - --tls-cert-file= - --tls-private-key-file= - Based on your system, restart the kubelet service. 
For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 4.2.11 - text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --rotate-certificates - path: '{.rotateCertificates}' - compare: - op: eq - value: true - - flag: --rotate-certificates - path: '{.rotateCertificates}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or - remove it altogether to use the default value. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS - variable. - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 4.2.12 - text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - bin_op: or - test_items: - - flag: RotateKubeletServerCertificate - path: '{.featureGates.RotateKubeletServerCertificate}' - compare: - op: nothave - value: false - - flag: RotateKubeletServerCertificate - path: '{.featureGates.RotateKubeletServerCertificate}' - set: false - remediation: | - Edit the kubelet service file $kubeletsvc - on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. - --feature-gates=RotateKubeletServerCertificate=true - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 4.2.13 - text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --tls-cipher-suites - path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' - compare: - op: valid_elements - value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - remediation: | - If using a Kubelet config file, edit the file to set `TLSCipherSuites` to - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - or to a subset of these values. - If using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the --tls-cipher-suites parameter as follows, or to a subset of these values. - --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - Based on your system, restart the kubelet service. 
For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false diff --git a/package/cfg/cis-1.24/policies.yaml b/package/cfg/cis-1.24/policies.yaml deleted file mode 100644 index 605eef1e..00000000 --- a/package/cfg/cis-1.24/policies.yaml +++ /dev/null @@ -1,269 +0,0 @@ ---- -controls: -version: "cis-1.24" -id: 5 -text: "Kubernetes Policies" -type: "policies" -groups: - - id: 5.1 - text: "RBAC and Service Accounts" - checks: - - id: 5.1.1 - text: "Ensure that the cluster-admin role is only used where required (Manual)" - type: "manual" - remediation: | - Identify all clusterrolebindings to the cluster-admin role. Check if they are used and - if they need this role or if they could use a role with fewer privileges. - Where possible, first bind users to a lower privileged role and then remove the - clusterrolebinding to the cluster-admin role : - kubectl delete clusterrolebinding [name] - scored: false - - - id: 5.1.2 - text: "Minimize access to secrets (Manual)" - type: "manual" - remediation: | - Where possible, remove get, list and watch access to Secret objects in the cluster. - scored: false - - - id: 5.1.3 - text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" - type: "manual" - remediation: | - Where possible replace any use of wildcards in clusterroles and roles with specific - objects or actions. - scored: false - - - id: 5.1.4 - text: "Minimize access to create pods (Manual)" - type: "manual" - remediation: | - Where possible, remove create access to pod objects in the cluster. - scored: false - - - id: 5.1.5 - text: "Ensure that default service accounts are not actively used. (Manual)" - type: "manual" - remediation: | - Create explicit service accounts wherever a Kubernetes workload requires specific access - to the Kubernetes API server. - Modify the configuration of each default service account to include this value - automountServiceAccountToken: false - scored: false - - - id: 5.1.6 - text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" - type: "manual" - remediation: | - Modify the definition of pods and service accounts which do not need to mount service - account tokens to disable it. - scored: false - - - id: 5.1.7 - text: "Avoid use of system:masters group (Manual)" - type: "manual" - remediation: | - Remove the system:masters group from all users in the cluster. - scored: false - - - id: 5.1.8 - text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" - type: "manual" - remediation: | - Where possible, remove the impersonate, bind and escalate rights from subjects. - scored: false - - - id: 5.2 - text: "Pod Security Standards" - checks: - - id: 5.2.1 - text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" - type: "manual" - remediation: | - Ensure that either Pod Security Admission or an external policy control system is in place - for every namespace which contains user workloads. - scored: false - - - id: 5.2.2 - text: "Minimize the admission of privileged containers (Manual)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of privileged containers. 
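(Illustration only, not taken from the removed benchmark files: the 5.2.x remediations above repeatedly say "add policies to each namespace which has user workloads", and 5.2.1 points at Pod Security Admission as one acceptable policy control mechanism. A minimal sketch of a namespace enforcing the restricted Pod Security Standard; the namespace name is hypothetical:)

    apiVersion: v1
    kind: Namespace
    metadata:
      name: team-workloads        # hypothetical namespace holding user workloads
      labels:
        # The "restricted" profile rejects privileged containers, hostPID/hostIPC/
        # hostNetwork sharing, and privilege escalation by default.
        pod-security.kubernetes.io/enforce: restricted
        pod-security.kubernetes.io/warn: restricted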
- scored: false - - - id: 5.2.3 - text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of `hostPID` containers. - scored: true - - - id: 5.2.4 - text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of `hostIPC` containers. - scored: true - - - id: 5.2.5 - text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of `hostNetwork` containers. - scored: true - - - id: 5.2.6 - text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. - scored: true - - - id: 5.2.7 - text: "Minimize the admission of root containers (Automated)" - type: "manual" - remediation: | - Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` - or `MustRunAs` with the range of UIDs not including 0, is set. - scored: true - - - id: 5.2.8 - text: "Minimize the admission of containers with the NET_RAW capability (Automated)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of containers with the `NET_RAW` capability. - scored: true - - - id: 5.2.9 - text: "Minimize the admission of containers with added capabilities (Automated)" - type: "manual" - remediation: | - Ensure that `allowedCapabilities` is not present in policies for the cluster unless - it is set to an empty array. - scored: true - - - id: 5.2.10 - text: "Minimize the admission of containers with capabilities assigned (Manual)" - type: "manual" - remediation: | - Review the use of capabilites in applications running on your cluster. Where a namespace - contains applicaions which do not require any Linux capabities to operate consider adding - a PSP which forbids the admission of containers which do not drop all capabilities. - scored: false - - - id: 5.2.11 - text: "Minimize the admission of Windows HostProcess containers (Manual)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. - scored: false - - - id: 5.2.12 - text: "Minimize the admission of HostPath volumes (Manual)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of containers with `hostPath` volumes. - scored: false - - - id: 5.2.13 - text: "Minimize the admission of containers which use HostPorts (Manual)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of containers which use `hostPort` sections. 
- scored: false - - - id: 5.3 - text: "Network Policies and CNI" - checks: - - id: 5.3.1 - text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" - type: "manual" - remediation: | - If the CNI plugin in use does not support network policies, consideration should be given to - making use of a different plugin, or finding an alternate mechanism for restricting traffic - in the Kubernetes cluster. - scored: false - - - id: 5.3.2 - text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" - type: "manual" - remediation: | - Follow the documentation and create NetworkPolicy objects as you need them. - scored: false - - - id: 5.4 - text: "Secrets Management" - checks: - - id: 5.4.1 - text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" - type: "manual" - remediation: | - If possible, rewrite application code to read Secrets from mounted secret files, rather than - from environment variables. - scored: false - - - id: 5.4.2 - text: "Consider external secret storage (Manual)" - type: "manual" - remediation: | - Refer to the Secrets management options offered by your cloud provider or a third-party - secrets management solution. - scored: false - - - id: 5.5 - text: "Extensible Admission Control" - checks: - - id: 5.5.1 - text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" - type: "manual" - remediation: | - Follow the Kubernetes documentation and setup image provenance. - scored: false - - - id: 5.7 - text: "General Policies" - checks: - - id: 5.7.1 - text: "Create administrative boundaries between resources using namespaces (Manual)" - type: "manual" - remediation: | - Follow the documentation and create namespaces for objects in your deployment as you need - them. - scored: false - - - id: 5.7.2 - text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" - type: "manual" - remediation: | - Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. - An example is as below: - securityContext: - seccompProfile: - type: RuntimeDefault - scored: false - - - id: 5.7.3 - text: "Apply SecurityContext to your Pods and Containers (Manual)" - type: "manual" - remediation: | - Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a - suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker - Containers. - scored: false - - - id: 5.7.4 - text: "The default namespace should not be used (Manual)" - type: "manual" - remediation: | - Ensure that namespaces are created to allow for appropriate segregation of Kubernetes - resources and that all new resources are created in a specific namespace. 
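(Likewise for 5.3.2 above, "Ensure that all Namespaces have NetworkPolicies defined": a common starting point is a per-namespace default-deny policy. Illustrative sketch only, not part of the removed file; the namespace name is hypothetical:)

    apiVersion: networking.k8s.io/v1
    kind: NetworkPolicy
    metadata:
      name: default-deny-all
      namespace: team-workloads    # hypothetical namespace
    spec:
      podSelector: {}              # selects every pod in the namespace
      policyTypes:
        - Ingress
        - Egress                   # no rules listed, so all ingress and egress is denied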
- scored: false diff --git a/package/cfg/cis-1.7/config.yaml b/package/cfg/cis-1.7/config.yaml deleted file mode 100644 index b7839455..00000000 --- a/package/cfg/cis-1.7/config.yaml +++ /dev/null @@ -1,2 +0,0 @@ ---- -## Version-specific settings that override the values in cfg/config.yaml diff --git a/package/cfg/cis-1.7/controlplane.yaml b/package/cfg/cis-1.7/controlplane.yaml deleted file mode 100644 index eca90eb1..00000000 --- a/package/cfg/cis-1.7/controlplane.yaml +++ /dev/null @@ -1,60 +0,0 @@ ---- -controls: -version: "cis-1.7" -id: 3 -text: "Control Plane Configuration" -type: "controlplane" -groups: - - id: 3.1 - text: "Authentication and Authorization" - checks: - - id: 3.1.1 - text: "Client certificate authentication should not be used for users (Manual)" - type: "manual" - remediation: | - Alternative mechanisms provided by Kubernetes such as the use of OIDC should be - implemented in place of client certificates. - scored: false - - id: 3.1.2 - text: "Service account token authentication should not be used for users (Manual)" - type: "manual" - remediation: | - Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented - in place of service account tokens. - scored: false - - id: 3.1.3 - text: "Bootstrap token authentication should not be used for users (Manual)" - type: "manual" - remediation: | - Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented - in place of bootstrap tokens. - scored: false - - - id: 3.2 - text: "Logging" - checks: - - id: 3.2.1 - text: "Ensure that a minimal audit policy is created (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-policy-file" - set: true - remediation: | - Create an audit policy file for your cluster. - scored: false - - - id: 3.2.2 - text: "Ensure that the audit policy covers key security concerns (Manual)" - type: "manual" - remediation: | - Review the audit policy provided for the cluster and ensure that it covers - at least the following areas, - - Access to Secrets managed by the cluster. Care should be taken to only - log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in - order to avoid risk of logging sensitive data. - - Modification of Pod and Deployment objects. - - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. - For most requests, minimally logging at the Metadata level is recommended - (the most basic level of logging). - scored: false diff --git a/package/cfg/cis-1.7/etcd.yaml b/package/cfg/cis-1.7/etcd.yaml deleted file mode 100644 index 5fbc8ca0..00000000 --- a/package/cfg/cis-1.7/etcd.yaml +++ /dev/null @@ -1,135 +0,0 @@ ---- -controls: -version: "cis-1.7" -id: 2 -text: "Etcd Node Configuration" -type: "etcd" -groups: - - id: 2 - text: "Etcd Node Configuration" - checks: - - id: 2.1 - text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--cert-file" - env: "ETCD_CERT_FILE" - - flag: "--key-file" - env: "ETCD_KEY_FILE" - remediation: | - Follow the etcd service documentation and configure TLS encryption. - Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml - on the master node and set the below parameters. 
- --cert-file= - --key-file= - scored: true - - - id: 2.2 - text: "Ensure that the --client-cert-auth argument is set to true (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - test_items: - - flag: "--client-cert-auth" - env: "ETCD_CLIENT_CERT_AUTH" - compare: - op: eq - value: true - remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and set the below parameter. - --client-cert-auth="true" - scored: true - - - id: 2.3 - text: "Ensure that the --auto-tls argument is not set to true (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--auto-tls" - env: "ETCD_AUTO_TLS" - set: false - - flag: "--auto-tls" - env: "ETCD_AUTO_TLS" - compare: - op: eq - value: false - remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and either remove the --auto-tls parameter or set it to false. - --auto-tls=false - scored: true - - - id: 2.4 - text: "Ensure that the --peer-cert-file and --peer-key-file arguments are - set as appropriate (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--peer-cert-file" - env: "ETCD_PEER_CERT_FILE" - - flag: "--peer-key-file" - env: "ETCD_PEER_KEY_FILE" - remediation: | - Follow the etcd service documentation and configure peer TLS encryption as appropriate - for your etcd cluster. - Then, edit the etcd pod specification file $etcdconf on the - master node and set the below parameters. - --peer-client-file= - --peer-key-file= - scored: true - - - id: 2.5 - text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - test_items: - - flag: "--peer-client-cert-auth" - env: "ETCD_PEER_CLIENT_CERT_AUTH" - compare: - op: eq - value: true - remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and set the below parameter. - --peer-client-cert-auth=true - scored: true - - - id: 2.6 - text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--peer-auto-tls" - env: "ETCD_PEER_AUTO_TLS" - set: false - - flag: "--peer-auto-tls" - env: "ETCD_PEER_AUTO_TLS" - compare: - op: eq - value: false - remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and either remove the --peer-auto-tls parameter or set it to false. - --peer-auto-tls=false - scored: true - - - id: 2.7 - text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - test_items: - - flag: "--trusted-ca-file" - env: "ETCD_TRUSTED_CA_FILE" - remediation: | - [Manual test] - Follow the etcd documentation and create a dedicated certificate authority setup for the - etcd service. - Then, edit the etcd pod specification file $etcdconf on the - master node and set the below parameter. 
- --trusted-ca-file= - scored: false diff --git a/package/cfg/cis-1.7/master.yaml b/package/cfg/cis-1.7/master.yaml deleted file mode 100644 index 59fe83fb..00000000 --- a/package/cfg/cis-1.7/master.yaml +++ /dev/null @@ -1,947 +0,0 @@ ---- -controls: -version: "cis-1.7" -id: 1 -text: "Control Plane Security Configuration" -type: "master" -groups: - - id: 1.1 - text: "Control Plane Node Configuration Files" - checks: - - id: 1.1.1 - text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the - control plane node. - For example, chmod 600 $apiserverconf - scored: true - - - id: 1.1.2 - text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root $apiserverconf - scored: true - - - id: 1.1.3 - text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chmod 600 $controllermanagerconf - scored: true - - - id: 1.1.4 - text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root $controllermanagerconf - scored: true - - - id: 1.1.5 - text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chmod 600 $schedulerconf - scored: true - - - id: 1.1.6 - text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. 
- For example, chown root:root $schedulerconf - scored: true - - - id: 1.1.7 - text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chmod 600 $etcdconf - scored: true - - - id: 1.1.8 - text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chown root:root $etcdconf - scored: true - - - id: 1.1.9 - text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" - audit: | - ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a - find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chmod 600 - scored: false - - - id: 1.1.10 - text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" - audit: | - ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G - find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chown root:root - scored: false - - - id: 1.1.11 - text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" - audit: | - DATA_DIR='' - for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do - if test -d "$d"; then DATA_DIR="$d"; fi - done - if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi - stat -c permissions=%a "$DATA_DIR" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "700" - remediation: | - On the etcd server node, get the etcd data directory, passed as an argument --data-dir, - from the command 'ps -ef | grep etcd'. - Run the below command (based on the etcd data directory found above). For example, - chmod 700 /var/lib/etcd - scored: true - - - id: 1.1.12 - text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" - audit: | - DATA_DIR='' - for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do - if test -d "$d"; then DATA_DIR="$d"; fi - done - if ! 
test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi - stat -c %U:%G "$DATA_DIR" - tests: - test_items: - - flag: "etcd:etcd" - remediation: | - On the etcd server node, get the etcd data directory, passed as an argument --data-dir, - from the command 'ps -ef | grep etcd'. - Run the below command (based on the etcd data directory found above). - For example, chown etcd:etcd /var/lib/etcd - scored: true - - - id: 1.1.13 - text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chmod 600 /etc/kubernetes/admin.conf - scored: true - - - id: 1.1.14 - text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root /etc/kubernetes/admin.conf - scored: true - - - id: 1.1.15 - text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chmod 600 $schedulerkubeconfig - scored: true - - - id: 1.1.16 - text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chown root:root $schedulerkubeconfig - scored: true - - - id: 1.1.17 - text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chmod 600 $controllermanagerkubeconfig - scored: true - - - id: 1.1.18 - text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. 
- For example, - chown root:root $controllermanagerkubeconfig - scored: true - - - id: 1.1.19 - text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" - audit: "find /etc/kubernetes/pki/ | xargs stat -c %U:%G" - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chown -R root:root /etc/kubernetes/pki/ - scored: true - - - id: 1.1.20 - text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" - audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a" - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chmod -R 600 /etc/kubernetes/pki/*.crt - scored: false - - - id: 1.1.21 - text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" - audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a" - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chmod -R 600 /etc/kubernetes/pki/*.key - scored: false - - - id: 1.2 - text: "API Server" - checks: - - id: 1.2.1 - text: "Ensure that the --anonymous-auth argument is set to false (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--anonymous-auth" - compare: - op: eq - value: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the below parameter. - --anonymous-auth=false - scored: false - - - id: 1.2.2 - text: "Ensure that the --token-auth-file parameter is not set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--token-auth-file" - set: false - remediation: | - Follow the documentation and configure alternate mechanisms for authentication. Then, - edit the API server pod specification file $apiserverconf - on the control plane node and remove the --token-auth-file= parameter. - scored: true - - - id: 1.2.3 - text: "Ensure that the --DenyServiceExternalIPs is set (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "DenyServiceExternalIPs" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and add the `DenyServiceExternalIPs` plugin - to the enabled admission plugins, as such --enable-admission-plugin=DenyServiceExternalIPs. - scored: false - - - id: 1.2.4 - text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--kubelet-client-certificate" - - flag: "--kubelet-client-key" - remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the - apiserver and kubelets. 
Then, edit API server pod specification file - $apiserverconf on the control plane node and set the - kubelet client certificate and key parameters as below. - --kubelet-client-certificate= - --kubelet-client-key= - scored: true - - - id: 1.2.5 - text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--kubelet-certificate-authority" - remediation: | - Follow the Kubernetes documentation and setup the TLS connection between - the apiserver and kubelets. Then, edit the API server pod specification file - $apiserverconf on the control plane node and set the - --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. - --kubelet-certificate-authority= - scored: true - - - id: 1.2.6 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--authorization-mode" - compare: - op: nothave - value: "AlwaysAllow" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. - One such example could be as below. - --authorization-mode=RBAC - scored: true - - - id: 1.2.7 - text: "Ensure that the --authorization-mode argument includes Node (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--authorization-mode" - compare: - op: has - value: "Node" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --authorization-mode parameter to a value that includes Node. - --authorization-mode=Node,RBAC - scored: true - - - id: 1.2.8 - text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--authorization-mode" - compare: - op: has - value: "RBAC" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, - for example `--authorization-mode=Node,RBAC`. - scored: true - - - id: 1.2.9 - text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "EventRateLimit" - remediation: | - Follow the Kubernetes documentation and set the desired limits in a configuration file. - Then, edit the API server pod specification file $apiserverconf - and set the below parameters. - --enable-admission-plugins=...,EventRateLimit,... - --admission-control-config-file= - scored: false - - - id: 1.2.10 - text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--enable-admission-plugins" - compare: - op: nothave - value: AlwaysAdmit - - flag: "--enable-admission-plugins" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a - value that does not include AlwaysAdmit. 
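(For orientation on 1.2.9 above: the remediation mentions an --admission-control-config-file without showing one. A minimal sketch of such a configuration, not taken from the removed files; both file paths are hypothetical:)

    # /etc/kubernetes/admission-control-config.yaml (hypothetical path)
    apiVersion: apiserver.config.k8s.io/v1
    kind: AdmissionConfiguration
    plugins:
      - name: EventRateLimit
        path: /etc/kubernetes/event-rate-limit.yaml
    ---
    # /etc/kubernetes/event-rate-limit.yaml (hypothetical path)
    apiVersion: eventratelimit.admission.k8s.io/v1alpha1
    kind: Configuration
    limits:
      - type: Server    # a single shared budget for the whole API server
        qps: 50
        burst: 100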
- scored: true - - - id: 1.2.11 - text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "AlwaysPullImages" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --enable-admission-plugins parameter to include - AlwaysPullImages. - --enable-admission-plugins=...,AlwaysPullImages,... - scored: false - - - id: 1.2.12 - text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "SecurityContextDeny" - - flag: "--enable-admission-plugins" - compare: - op: has - value: "PodSecurityPolicy" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --enable-admission-plugins parameter to include - SecurityContextDeny, unless PodSecurityPolicy is already in place. - --enable-admission-plugins=...,SecurityContextDeny,... - scored: false - - - id: 1.2.13 - text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--disable-admission-plugins" - compare: - op: nothave - value: "ServiceAccount" - - flag: "--disable-admission-plugins" - set: false - remediation: | - Follow the documentation and create ServiceAccount objects as per your environment. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and ensure that the --disable-admission-plugins parameter is set to a - value that does not include ServiceAccount. - scored: true - - - id: 1.2.14 - text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--disable-admission-plugins" - compare: - op: nothave - value: "NamespaceLifecycle" - - flag: "--disable-admission-plugins" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --disable-admission-plugins parameter to - ensure it does not include NamespaceLifecycle. - scored: true - - - id: 1.2.15 - text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "NodeRestriction" - remediation: | - Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the --enable-admission-plugins parameter to a - value that includes NodeRestriction. - --enable-admission-plugins=...,NodeRestriction,... 
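(Several 1.2.x remediations above say to edit the API server pod specification file, $apiserverconf. As a hedged, abbreviated illustration only, a static pod manifest carrying the authorization and admission flags discussed in 1.2.6 through 1.2.15 could look roughly like this; the image tag is illustrative and most flags are omitted:)

    apiVersion: v1
    kind: Pod
    metadata:
      name: kube-apiserver
      namespace: kube-system
    spec:
      containers:
        - name: kube-apiserver
          image: registry.k8s.io/kube-apiserver:v1.26.0   # illustrative tag
          command:
            - kube-apiserver
            - --authorization-mode=Node,RBAC
            - --enable-admission-plugins=NodeRestriction,AlwaysPullImages
            # ...remaining flags omitted for brevity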
- scored: true - - - id: 1.2.16 - text: "Ensure that the --secure-port argument is not set to 0 - NoteThis recommendation is obsolete and will be deleted per the consensus process (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--secure-port" - compare: - op: gt - value: 0 - - flag: "--secure-port" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and either remove the --secure-port parameter or - set it to a different (non-zero) desired port. - scored: false - - - id: 1.2.17 - text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--profiling" - compare: - op: eq - value: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the below parameter. - --profiling=false - scored: true - - - id: 1.2.18 - text: "Ensure that the --audit-log-path argument is set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-path" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-path parameter to a suitable path and - file where you would like audit logs to be written, for example, - --audit-log-path=/var/log/apiserver/audit.log - scored: true - - - id: 1.2.19 - text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-maxage" - compare: - op: gte - value: 30 - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-maxage parameter to 30 - or as an appropriate number of days, for example, - --audit-log-maxage=30 - scored: true - - - id: 1.2.20 - text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-maxbackup" - compare: - op: gte - value: 10 - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate - value. For example, - --audit-log-maxbackup=10 - scored: true - - - id: 1.2.21 - text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-maxsize" - compare: - op: gte - value: 100 - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. - For example, to set it as 100 MB, --audit-log-maxsize=100 - scored: true - - - id: 1.2.22 - text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - type: manual - remediation: | - Edit the API server pod specification file $apiserverconf - and set the below parameter as appropriate and if needed. 
- For example, --request-timeout=300s - scored: false - - - id: 1.2.23 - text: "Ensure that the --service-account-lookup argument is set to true (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--service-account-lookup" - set: false - - flag: "--service-account-lookup" - compare: - op: eq - value: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the below parameter. - --service-account-lookup=true - Alternatively, you can delete the --service-account-lookup parameter from this file so - that the default takes effect. - scored: true - - - id: 1.2.24 - text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--service-account-key-file" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --service-account-key-file parameter - to the public key file for service accounts. For example, - --service-account-key-file= - scored: true - - - id: 1.2.25 - text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--etcd-certfile" - - flag: "--etcd-keyfile" - remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the etcd certificate and key file parameters. - --etcd-certfile= - --etcd-keyfile= - scored: true - - - id: 1.2.26 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--tls-cert-file" - - flag: "--tls-private-key-file" - remediation: | - Follow the Kubernetes documentation and set up the TLS connection on the apiserver. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the TLS certificate and private key file parameters. - --tls-cert-file= - --tls-private-key-file= - scored: true - - - id: 1.2.27 - text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--client-ca-file" - remediation: | - Follow the Kubernetes documentation and set up the TLS connection on the apiserver. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the client certificate authority file. - --client-ca-file= - scored: true - - - id: 1.2.28 - text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--etcd-cafile" - remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the etcd certificate authority file parameter. 
- --etcd-cafile= - scored: true - - - id: 1.2.29 - text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--encryption-provider-config" - remediation: | - Follow the Kubernetes documentation and configure a EncryptionConfig file. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the --encryption-provider-config parameter to the path of that file. - For example, --encryption-provider-config= - scored: false - - - id: 1.2.30 - text: "Ensure that encryption providers are appropriately configured (Manual)" - audit: | - ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') - if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi - tests: - test_items: - - flag: "provider" - compare: - op: valid_elements - value: "aescbc,kms,secretbox" - remediation: | - Follow the Kubernetes documentation and configure a EncryptionConfig file. - In this file, choose aescbc, kms or secretbox as the encryption provider. - scored: false - - - id: 1.2.31 - text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--tls-cipher-suites" - compare: - op: valid_elements - value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" - remediation: | - Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml - on the control plane node and set the below parameter. 
- --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, - TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 - scored: false - - - id: 1.3 - text: "Controller Manager" - checks: - - id: 1.3.1 - text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--terminated-pod-gc-threshold" - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, - for example, --terminated-pod-gc-threshold=10 - scored: false - - - id: 1.3.2 - text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--profiling" - compare: - op: eq - value: false - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the below parameter. - --profiling=false - scored: true - - - id: 1.3.3 - text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--use-service-account-credentials" - compare: - op: noteq - value: false - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node to set the below parameter. - --use-service-account-credentials=true - scored: true - - - id: 1.3.4 - text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--service-account-private-key-file" - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --service-account-private-key-file parameter - to the private key file for service accounts. - --service-account-private-key-file= - scored: true - - - id: 1.3.5 - text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--root-ca-file" - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --root-ca-file parameter to the certificate bundle file`. 
- --root-ca-file= - scored: true - - - id: 1.3.6 - text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--feature-gates" - compare: - op: nothave - value: "RotateKubeletServerCertificate=false" - set: true - - flag: "--feature-gates" - set: false - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. - --feature-gates=RotateKubeletServerCertificate=true - scored: true - - - id: 1.3.7 - text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--bind-address" - compare: - op: eq - value: "127.0.0.1" - - flag: "--bind-address" - set: false - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and ensure the correct value for the --bind-address parameter - scored: true - - - id: 1.4 - text: "Scheduler" - checks: - - id: 1.4.1 - text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" - tests: - test_items: - - flag: "--profiling" - compare: - op: eq - value: false - remediation: | - Edit the Scheduler pod specification file $schedulerconf file - on the control plane node and set the below parameter. - --profiling=false - scored: true - - - id: 1.4.2 - text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" - audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--bind-address" - compare: - op: eq - value: "127.0.0.1" - - flag: "--bind-address" - set: false - remediation: | - Edit the Scheduler pod specification file $schedulerconf - on the control plane node and ensure the correct value for the --bind-address parameter - scored: true diff --git a/package/cfg/cis-1.7/node.yaml b/package/cfg/cis-1.7/node.yaml deleted file mode 100644 index 6ab83020..00000000 --- a/package/cfg/cis-1.7/node.yaml +++ /dev/null @@ -1,451 +0,0 @@ ---- -controls: -version: "cis-1.7" -id: 4 -text: "Worker Node Security Configuration" -type: "node" -groups: - - id: 4.1 - text: "Worker Node Configuration Files" - checks: - - id: 4.1.1 - text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, chmod 600 $kubeletsvc - scored: true - - - id: 4.1.2 - text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' - tests: - test_items: - - flag: root:root - remediation: | - Run the below command (based on the file location on your system) on the each worker node. 
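As an aside on the audit-and-remediate pattern the 4.1.x file checks follow, the commands below sketch it for the kubelet service file; the drop-in path shown is an assumed kubeadm default, so substitute whatever $kubeletsvc resolves to in your environment.

  # Audit: report the current mode and owner of the kubelet service file
  KUBELET_SVC=/etc/systemd/system/kubelet.service.d/10-kubeadm.conf   # assumed default path
  stat -c 'permissions=%a owner=%U:%G' "$KUBELET_SVC"
  # Remediate on each worker node: tighten permissions and ownership
  chmod 600 "$KUBELET_SVC"
  chown root:root "$KUBELET_SVC"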
- For example, - chown root:root $kubeletsvc - scored: true - - - id: 4.1.3 - text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)" - audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' - tests: - bin_op: or - test_items: - - flag: "permissions" - set: true - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, - chmod 600 $proxykubeconfig - scored: false - - - id: 4.1.4 - text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" - audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' - tests: - bin_op: or - test_items: - - flag: root:root - remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, chown root:root $proxykubeconfig - scored: false - - - id: 4.1.5 - text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, - chmod 600 $kubeletkubeconfig - scored: true - - - id: 4.1.6 - text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' - tests: - test_items: - - flag: root:root - remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, - chown root:root $kubeletkubeconfig - scored: true - - - id: 4.1.7 - text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)" - audit: "check_cafile_permissions.sh" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the following command to modify the file permissions of the - --client-ca-file chmod 600 - scored: false - - - id: 4.1.8 - text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" - audit: "check_cafile_ownership.sh" - tests: - test_items: - - flag: root:root - compare: - op: eq - value: root:root - remediation: | - Run the following command to modify the ownership of the --client-ca-file. 
- chown root:root - scored: false - - - id: 4.1.9 - text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Manual)" - audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the following command (using the config file location identified in the Audit step) - chmod 600 $kubeletconf - scored: false - - - id: 4.1.10 - text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Manual)" - audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' - tests: - test_items: - - flag: root:root - remediation: | - Run the following command (using the config file location identified in the Audit step) - chown root:root $kubeletconf - scored: false - - - id: 4.2 - text: "Kubelet" - checks: - - id: 4.2.1 - text: "Ensure that the --anonymous-auth argument is set to false (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: "--anonymous-auth" - path: '{.authentication.anonymous.enabled}' - compare: - op: eq - value: false - remediation: | - If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to - `false`. - If using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - `--anonymous-auth=false` - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 4.2.2 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --authorization-mode - path: '{.authorization.mode}' - compare: - op: nothave - value: AlwaysAllow - remediation: | - If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If - using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. - --authorization-mode=Webhook - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 4.2.3 - text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --client-ca-file - path: '{.authentication.x509.clientCAFile}' - remediation: | - If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to - the location of the client CA file. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. - --client-ca-file= - Based on your system, restart the kubelet service. 
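For reference, a kubelet config file that satisfies checks 4.2.1 to 4.2.3 above could look like the fragment below; this is a sketch only, and the client CA path is an example rather than something the benchmark prescribes.

  # Illustrative $kubeletconf (for example /var/lib/kubelet/config.yaml)
  apiVersion: kubelet.config.k8s.io/v1beta1
  kind: KubeletConfiguration
  authentication:
    anonymous:
      enabled: false                              # 4.2.1: {.authentication.anonymous.enabled}
    x509:
      clientCAFile: /etc/kubernetes/pki/ca.crt    # 4.2.3: {.authentication.x509.clientCAFile}
  authorization:
    mode: Webhook                                 # 4.2.2: {.authorization.mode}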
For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 4.2.4 - text: "Verify that the --read-only-port argument is set to 0 (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - bin_op: or - test_items: - - flag: "--read-only-port" - path: '{.readOnlyPort}' - compare: - op: eq - value: 0 - - flag: "--read-only-port" - path: '{.readOnlyPort}' - set: false - remediation: | - If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --read-only-port=0 - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 4.2.5 - text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --streaming-connection-idle-timeout - path: '{.streamingConnectionIdleTimeout}' - compare: - op: noteq - value: 0 - - flag: --streaming-connection-idle-timeout - path: '{.streamingConnectionIdleTimeout}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a - value other than 0. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --streaming-connection-idle-timeout=5m - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 4.2.6 - text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --make-iptables-util-chains - path: '{.makeIPTablesUtilChains}' - compare: - op: eq - value: true - - flag: --make-iptables-util-chains - path: '{.makeIPTablesUtilChains}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - remove the --make-iptables-util-chains argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 4.2.7 - text: "Ensure that the --hostname-override argument is not set (Manual)" - # This is one of those properties that can only be set as a command line argument. - # To check if the property is set as expected, we need to parse the kubelet command - # instead reading the Kubelet Configuration file. - audit: "/bin/ps -fC $kubeletbin " - tests: - test_items: - - flag: --hostname-override - set: false - remediation: | - Edit the kubelet service file $kubeletsvc - on each worker node and remove the --hostname-override argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. 
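For checks 4.2.4 to 4.2.6 above, the equivalent kubelet config file entries would look roughly as follows (a sketch; the values simply mirror the remediations):

  apiVersion: kubelet.config.k8s.io/v1beta1
  kind: KubeletConfiguration
  readOnlyPort: 0                         # 4.2.4: disable the read-only port
  streamingConnectionIdleTimeout: 5m      # 4.2.5: any value other than 0
  makeIPTablesUtilChains: true            # 4.2.6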
For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 4.2.8 - text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --event-qps - path: '{.eventRecordQPS}' - compare: - op: gte - value: 0 - - flag: --event-qps - path: '{.eventRecordQPS}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 4.2.9 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --tls-cert-file - path: '{.tlsCertFile}' - - flag: --tls-private-key-file - path: '{.tlsPrivateKeyFile}' - remediation: | - If using a Kubelet config file, edit the file to set `tlsCertFile` to the location - of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` - to the location of the corresponding private key file. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameters in KUBELET_CERTIFICATE_ARGS variable. - --tls-cert-file= - --tls-private-key-file= - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 4.2.10 - text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --rotate-certificates - path: '{.rotateCertificates}' - compare: - op: eq - value: true - - flag: --rotate-certificates - path: '{.rotateCertificates}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or - remove it altogether to use the default value. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS - variable. - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - - id: 4.2.11 - text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - bin_op: or - test_items: - - flag: RotateKubeletServerCertificate - path: '{.featureGates.RotateKubeletServerCertificate}' - compare: - op: nothave - value: false - - flag: RotateKubeletServerCertificate - path: '{.featureGates.RotateKubeletServerCertificate}' - set: false - remediation: | - Edit the kubelet service file $kubeletsvc - on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. - --feature-gates=RotateKubeletServerCertificate=true - Based on your system, restart the kubelet service. 
For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 4.2.12 - text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - bin_op: or - test_items: - - flag: RotateKubeletServerCertificate - path: '{.featureGates.RotateKubeletServerCertificate}' - compare: - op: nothave - value: false - - flag: RotateKubeletServerCertificate - path: '{.featureGates.RotateKubeletServerCertificate}' - set: false - remediation: | - If using a Kubelet config file, edit the file to set `TLSCipherSuites` to - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - or to a subset of these values. - If using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the --tls-cipher-suites parameter as follows, or to a subset of these values. - --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - - id: 4.2.13 - text: "Ensure that a limit is set on pod PIDs (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --pod-max-pids - path: '{.podPidsLimit}' - remediation: | - Decide on an appropriate level for this parameter and set it, - either via the --pod-max-pids command line parameter or the PodPidsLimit configuration file setting. - scored: false diff --git a/package/cfg/cis-1.7/policies.yaml b/package/cfg/cis-1.7/policies.yaml deleted file mode 100644 index 88d6695e..00000000 --- a/package/cfg/cis-1.7/policies.yaml +++ /dev/null @@ -1,304 +0,0 @@ ---- -controls: -version: "cis-1.7" -id: 5 -text: "Kubernetes Policies" -type: "policies" -groups: - - id: 5.1 - text: "RBAC and Service Accounts" - checks: - - id: 5.1.1 - text: "Ensure that the cluster-admin role is only used where required (Manual)" - type: "manual" - remediation: | - Identify all clusterrolebindings to the cluster-admin role. Check if they are used and - if they need this role or if they could use a role with fewer privileges. - Where possible, first bind users to a lower privileged role and then remove the - clusterrolebinding to the cluster-admin role : - kubectl delete clusterrolebinding [name] - scored: false - - - id: 5.1.2 - text: "Minimize access to secrets (Manual)" - type: "manual" - remediation: | - Where possible, remove get, list and watch access to Secret objects in the cluster. - scored: false - - - id: 5.1.3 - text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" - type: "manual" - remediation: | - Where possible replace any use of wildcards in clusterroles and roles with specific - objects or actions. - scored: false - - - id: 5.1.4 - text: "Minimize access to create pods (Manual)" - type: "manual" - remediation: | - Where possible, remove create access to pod objects in the cluster. 
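One way to carry out the cluster-admin review in check 5.1.1 above, sketched with standard kubectl output rather than anything mandated by the benchmark:

  # List every ClusterRoleBinding that grants cluster-admin, with its subjects
  kubectl get clusterrolebindings -o custom-columns='NAME:.metadata.name,ROLE:.roleRef.name,SUBJECTS:.subjects[*].name' | grep cluster-admin
  # After re-binding users to a lower-privileged role, remove the binding
  kubectl delete clusterrolebinding [name]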
- scored: false - - - id: 5.1.5 - text: "Ensure that default service accounts are not actively used. (Manual)" - type: "manual" - remediation: | - Create explicit service accounts wherever a Kubernetes workload requires specific access - to the Kubernetes API server. - Modify the configuration of each default service account to include this value - automountServiceAccountToken: false - scored: false - - - id: 5.1.6 - text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" - type: "manual" - remediation: | - Modify the definition of pods and service accounts which do not need to mount service - account tokens to disable it. - scored: false - - - id: 5.1.7 - text: "Avoid use of system:masters group (Manual)" - type: "manual" - remediation: | - Remove the system:masters group from all users in the cluster. - scored: false - - - id: 5.1.8 - text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" - type: "manual" - remediation: | - Where possible, remove the impersonate, bind and escalate rights from subjects. - scored: false - - - id: 5.1.9 - text: "Minimize access to create persistent volumes (Manual)" - type: "manual" - remediation: | - Where possible, remove create access to PersistentVolume objects in the cluster. - scored: false - - - id: 5.1.10 - text: "Minimize access to the proxy sub-resource of nodes (Manual)" - type: "manual" - remediation: | - Where possible, remove access to the proxy sub-resource of node objects. - scored: false - - - id: 5.1.11 - text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)" - type: "manual" - remediation: | - Where possible, remove access to the approval sub-resource of certificatesigningrequest objects. - scored: false - - - id: 5.1.12 - text: "Minimize access to webhook configuration objects (Manual)" - type: "manual" - remediation: | - Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects - scored: false - - - id: 5.1.13 - text: "Minimize access to the service account token creation (Manual)" - type: "manual" - remediation: | - Where possible, remove access to the token sub-resource of serviceaccount objects. - scored: false - - - id: 5.2 - text: "Pod Security Standards" - checks: - - id: 5.2.1 - text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" - type: "manual" - remediation: | - Ensure that either Pod Security Admission or an external policy control system is in place - for every namespace which contains user workloads. - scored: false - - - id: 5.2.2 - text: "Minimize the admission of privileged containers (Manual)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of privileged containers. - scored: false - - - id: 5.2.3 - text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of `hostPID` containers. - scored: true - - - id: 5.2.4 - text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of `hostIPC` containers. 
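An illustrative way to satisfy the Pod Security Standards checks above is to enforce the built-in Pod Security Admission profiles per namespace; the namespace name below is an example.

  apiVersion: v1
  kind: Namespace
  metadata:
    name: app-team-1                                    # example namespace
    labels:
      pod-security.kubernetes.io/enforce: restricted    # rejects privileged, hostPID/hostIPC/hostNetwork pods, etc.
      pod-security.kubernetes.io/audit: restricted
      pod-security.kubernetes.io/warn: restricted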
- scored: true - - - id: 5.2.5 - text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of `hostNetwork` containers. - scored: true - - - id: 5.2.6 - text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. - scored: true - - - id: 5.2.7 - text: "Minimize the admission of root containers (Automated)" - type: "manual" - remediation: | - Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` - or `MustRunAs` with the range of UIDs not including 0, is set. - scored: true - - - id: 5.2.8 - text: "Minimize the admission of containers with the NET_RAW capability (Automated)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of containers with the `NET_RAW` capability. - scored: true - - - id: 5.2.9 - text: "Minimize the admission of containers with added capabilities (Automated)" - type: "manual" - remediation: | - Ensure that `allowedCapabilities` is not present in policies for the cluster unless - it is set to an empty array. - scored: true - - - id: 5.2.10 - text: "Minimize the admission of containers with capabilities assigned (Manual)" - type: "manual" - remediation: | - Review the use of capabilites in applications running on your cluster. Where a namespace - contains applicaions which do not require any Linux capabities to operate consider adding - a PSP which forbids the admission of containers which do not drop all capabilities. - scored: false - - - id: 5.2.11 - text: "Minimize the admission of Windows HostProcess containers (Manual)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. - scored: false - - - id: 5.2.12 - text: "Minimize the admission of HostPath volumes (Manual)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of containers with `hostPath` volumes. - scored: false - - - id: 5.2.13 - text: "Minimize the admission of containers which use HostPorts (Manual)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of containers which use `hostPort` sections. - scored: false - - - id: 5.3 - text: "Network Policies and CNI" - checks: - - id: 5.3.1 - text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" - type: "manual" - remediation: | - If the CNI plugin in use does not support network policies, consideration should be given to - making use of a different plugin, or finding an alternate mechanism for restricting traffic - in the Kubernetes cluster. - scored: false - - - id: 5.3.2 - text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" - type: "manual" - remediation: | - Follow the documentation and create NetworkPolicy objects as you need them. 
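For check 5.3.2 above, a common starting point is a default-deny policy in each namespace, for example:

  apiVersion: networking.k8s.io/v1
  kind: NetworkPolicy
  metadata:
    name: default-deny-all        # example name
    namespace: app-team-1         # create one per namespace
  spec:
    podSelector: {}               # selects every pod in the namespace
    policyTypes:
      - Ingress
      - Egress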
- scored: false - - - id: 5.4 - text: "Secrets Management" - checks: - - id: 5.4.1 - text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" - type: "manual" - remediation: | - If possible, rewrite application code to read Secrets from mounted secret files, rather than - from environment variables. - scored: false - - - id: 5.4.2 - text: "Consider external secret storage (Manual)" - type: "manual" - remediation: | - Refer to the Secrets management options offered by your cloud provider or a third-party - secrets management solution. - scored: false - - - id: 5.5 - text: "Extensible Admission Control" - checks: - - id: 5.5.1 - text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" - type: "manual" - remediation: | - Follow the Kubernetes documentation and setup image provenance. - scored: false - - - id: 5.7 - text: "General Policies" - checks: - - id: 5.7.1 - text: "Create administrative boundaries between resources using namespaces (Manual)" - type: "manual" - remediation: | - Follow the documentation and create namespaces for objects in your deployment as you need - them. - scored: false - - - id: 5.7.2 - text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" - type: "manual" - remediation: | - Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. - An example is as below: - securityContext: - seccompProfile: - type: RuntimeDefault - scored: false - - - id: 5.7.3 - text: "Apply SecurityContext to your Pods and Containers (Manual)" - type: "manual" - remediation: | - Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a - suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker - Containers. - scored: false - - - id: 5.7.4 - text: "The default namespace should not be used (Manual)" - type: "manual" - remediation: | - Ensure that namespaces are created to allow for appropriate segregation of Kubernetes - resources and that all new resources are created in a specific namespace. - scored: false diff --git a/package/cfg/cis-1.8/config.yaml b/package/cfg/cis-1.8/config.yaml deleted file mode 100644 index b7839455..00000000 --- a/package/cfg/cis-1.8/config.yaml +++ /dev/null @@ -1,2 +0,0 @@ ---- -## Version-specific settings that override the values in cfg/config.yaml diff --git a/package/cfg/cis-1.8/controlplane.yaml b/package/cfg/cis-1.8/controlplane.yaml deleted file mode 100644 index 1ea821da..00000000 --- a/package/cfg/cis-1.8/controlplane.yaml +++ /dev/null @@ -1,58 +0,0 @@ ---- -controls: -version: "cis-1.8" -id: 3 -text: "Control Plane Configuration" -type: "controlplane" -groups: - - id: 3.1 - text: "Authentication and Authorization" - checks: - - id: 3.1.1 - text: "Client certificate authentication should not be used for users (Manual)" - type: "manual" - remediation: | - Alternative mechanisms provided by Kubernetes such as the use of OIDC should be - implemented in place of client certificates. - scored: false - - id: 3.1.2 - text: "Service account token authentication should not be used for users (Manual)" - type: "manual" - remediation: | - Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented - in place of service account tokens. 
- scored: false - - id: 3.1.3 - text: "Bootstrap token authentication should not be used for users (Manual)" - type: "manual" - remediation: | - Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented - in place of bootstrap tokens. - scored: false - - id: 3.2 - text: "Logging" - checks: - - id: 3.2.1 - text: "Ensure that a minimal audit policy is created (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-policy-file" - set: true - remediation: | - Create an audit policy file for your cluster. - scored: false - - id: 3.2.2 - text: "Ensure that the audit policy covers key security concerns (Manual)" - type: "manual" - remediation: | - Review the audit policy provided for the cluster and ensure that it covers - at least the following areas, - - Access to Secrets managed by the cluster. Care should be taken to only - log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in - order to avoid risk of logging sensitive data. - - Modification of Pod and Deployment objects. - - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. - For most requests, minimally logging at the Metadata level is recommended - (the most basic level of logging). - scored: false diff --git a/package/cfg/cis-1.8/etcd.yaml b/package/cfg/cis-1.8/etcd.yaml deleted file mode 100644 index dbb82358..00000000 --- a/package/cfg/cis-1.8/etcd.yaml +++ /dev/null @@ -1,128 +0,0 @@ ---- -controls: -version: "cis-1.8" -id: 2 -text: "Etcd Node Configuration" -type: "etcd" -groups: - - id: 2 - text: "Etcd Node Configuration" - checks: - - id: 2.1 - text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--cert-file" - env: "ETCD_CERT_FILE" - - flag: "--key-file" - env: "ETCD_KEY_FILE" - remediation: | - Follow the etcd service documentation and configure TLS encryption. - Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml - on the master node and set the below parameters. - --cert-file= - --key-file= - scored: true - - id: 2.2 - text: "Ensure that the --client-cert-auth argument is set to true (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - test_items: - - flag: "--client-cert-auth" - env: "ETCD_CLIENT_CERT_AUTH" - compare: - op: eq - value: true - remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and set the below parameter. - --client-cert-auth="true" - scored: true - - id: 2.3 - text: "Ensure that the --auto-tls argument is not set to true (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--auto-tls" - env: "ETCD_AUTO_TLS" - set: false - - flag: "--auto-tls" - env: "ETCD_AUTO_TLS" - compare: - op: eq - value: false - remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and either remove the --auto-tls parameter or set it to false. 
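As a companion to checks 3.2.1 and 3.2.2 above, a minimal audit policy along the lines the benchmark describes might look like this (a sketch only; tune the rules to your cluster):

  apiVersion: audit.k8s.io/v1
  kind: Policy
  rules:
    # Log Secret, ConfigMap and TokenReview access at Metadata level only,
    # so sensitive request/response bodies are never recorded.
    - level: Metadata
      resources:
        - group: ""
          resources: ["secrets", "configmaps"]
        - group: "authentication.k8s.io"
          resources: ["tokenreviews"]
    # Minimal baseline for everything else.
    - level: Metadata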
- --auto-tls=false - scored: true - - id: 2.4 - text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--peer-cert-file" - env: "ETCD_PEER_CERT_FILE" - - flag: "--peer-key-file" - env: "ETCD_PEER_KEY_FILE" - remediation: | - Follow the etcd service documentation and configure peer TLS encryption as appropriate - for your etcd cluster. - Then, edit the etcd pod specification file $etcdconf on the - master node and set the below parameters. - --peer-client-file= - --peer-key-file= - scored: true - - id: 2.5 - text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - test_items: - - flag: "--peer-client-cert-auth" - env: "ETCD_PEER_CLIENT_CERT_AUTH" - compare: - op: eq - value: true - remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and set the below parameter. - --peer-client-cert-auth=true - scored: true - - id: 2.6 - text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--peer-auto-tls" - env: "ETCD_PEER_AUTO_TLS" - set: false - - flag: "--peer-auto-tls" - env: "ETCD_PEER_AUTO_TLS" - compare: - op: eq - value: false - remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and either remove the --peer-auto-tls parameter or set it to false. - --peer-auto-tls=false - scored: true - - id: 2.7 - text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" - audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" - tests: - test_items: - - flag: "--trusted-ca-file" - env: "ETCD_TRUSTED_CA_FILE" - remediation: | - [Manual test] - Follow the etcd documentation and create a dedicated certificate authority setup for the - etcd service. - Then, edit the etcd pod specification file $etcdconf on the - master node and set the below parameter. - --trusted-ca-file= - scored: false diff --git a/package/cfg/cis-1.8/master.yaml b/package/cfg/cis-1.8/master.yaml deleted file mode 100644 index 1796f6a1..00000000 --- a/package/cfg/cis-1.8/master.yaml +++ /dev/null @@ -1,870 +0,0 @@ ---- -controls: -version: "cis-1.8" -id: 1 -text: "Control Plane Security Configuration" -type: "master" -groups: - - id: 1.1 - text: "Control Plane Node Configuration Files" - checks: - - id: 1.1.1 - text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the - control plane node. - For example, chmod 600 $apiserverconf - scored: true - - id: 1.1.2 - text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. 
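Taken together, the etcd TLS settings inspected by checks 2.1 to 2.7 above typically appear in the etcd pod specification as flags like the following; the file paths are illustrative kubeadm-style defaults, not values the benchmark requires.

  --cert-file=/etc/kubernetes/pki/etcd/server.crt
  --key-file=/etc/kubernetes/pki/etcd/server.key
  --client-cert-auth=true
  --auto-tls=false
  --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt
  --peer-key-file=/etc/kubernetes/pki/etcd/peer.key
  --peer-client-cert-auth=true
  --peer-auto-tls=false
  --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt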
- For example, chown root:root $apiserverconf - scored: true - - id: 1.1.3 - text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chmod 600 $controllermanagerconf - scored: true - - id: 1.1.4 - text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root $controllermanagerconf - scored: true - - id: 1.1.5 - text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chmod 600 $schedulerconf - scored: true - - id: 1.1.6 - text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root $schedulerconf - scored: true - - id: 1.1.7 - text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chmod 600 $etcdconf - scored: true - - id: 1.1.8 - text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. 
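The stat/chmod/chown pattern in checks 1.1.1 to 1.1.8 repeats per manifest; a compact way to review them all at once on a control plane node, assuming the default static pod directory, is:

  # Audit every static pod manifest in one pass
  for f in /etc/kubernetes/manifests/*.yaml; do
    stat -c '%n permissions=%a owner=%U:%G' "$f"
  done
  # Remediate anything that is too open
  chmod 600 /etc/kubernetes/manifests/*.yaml
  chown root:root /etc/kubernetes/manifests/*.yaml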
- For example, - chown root:root $etcdconf - scored: true - - id: 1.1.9 - text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" - audit: | - ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a - find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chmod 600 - scored: false - - id: 1.1.10 - text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" - audit: | - ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G - find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chown root:root - scored: false - - id: 1.1.11 - text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" - audit: | - DATA_DIR='' - for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do - if test -d "$d"; then DATA_DIR="$d"; fi - done - if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi - stat -c permissions=%a "$DATA_DIR" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "700" - remediation: | - On the etcd server node, get the etcd data directory, passed as an argument --data-dir, - from the command 'ps -ef | grep etcd'. - Run the below command (based on the etcd data directory found above). For example, - chmod 700 /var/lib/etcd - scored: true - - id: 1.1.12 - text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" - audit: | - DATA_DIR='' - for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do - if test -d "$d"; then DATA_DIR="$d"; fi - done - if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi - stat -c %U:%G "$DATA_DIR" - tests: - test_items: - - flag: "etcd:etcd" - remediation: | - On the etcd server node, get the etcd data directory, passed as an argument --data-dir, - from the command 'ps -ef | grep etcd'. - Run the below command (based on the etcd data directory found above). - For example, chown etcd:etcd /var/lib/etcd - scored: true - - id: 1.1.13 - text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. 
- For example, chmod 600 /etc/kubernetes/admin.conf - scored: true - - id: 1.1.14 - text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root /etc/kubernetes/admin.conf - scored: true - - id: 1.1.15 - text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chmod 600 $schedulerkubeconfig - scored: true - - id: 1.1.16 - text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chown root:root $schedulerkubeconfig - scored: true - - id: 1.1.17 - text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chmod 600 $controllermanagerkubeconfig - scored: true - - id: 1.1.18 - text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chown root:root $controllermanagerkubeconfig - scored: true - - id: 1.1.19 - text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" - audit: "find /etc/kubernetes/pki/ | xargs stat -c %U:%G" - use_multiple_values: true - tests: - test_items: - - flag: "root:root" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chown -R root:root /etc/kubernetes/pki/ - scored: true - - id: 1.1.20 - text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" - audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a" - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. 
- For example, - chmod -R 600 /etc/kubernetes/pki/*.crt - scored: false - - id: 1.1.21 - text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" - audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a" - use_multiple_values: true - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chmod -R 600 /etc/kubernetes/pki/*.key - scored: false - - id: 1.2 - text: "API Server" - checks: - - id: 1.2.1 - text: "Ensure that the --anonymous-auth argument is set to false (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--anonymous-auth" - compare: - op: eq - value: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the below parameter. - --anonymous-auth=false - scored: false - - id: 1.2.2 - text: "Ensure that the --token-auth-file parameter is not set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--token-auth-file" - set: false - remediation: | - Follow the documentation and configure alternate mechanisms for authentication. Then, - edit the API server pod specification file $apiserverconf - on the control plane node and remove the --token-auth-file= parameter. - scored: true - - id: 1.2.3 - text: "Ensure that the --DenyServiceExternalIPs is set (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "DenyServiceExternalIPs" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and add the `DenyServiceExternalIPs` plugin - to the enabled admission plugins, as such --enable-admission-plugin=DenyServiceExternalIPs. - scored: false - - id: 1.2.4 - text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--kubelet-client-certificate" - - flag: "--kubelet-client-key" - remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the - apiserver and kubelets. Then, edit API server pod specification file - $apiserverconf on the control plane node and set the - kubelet client certificate and key parameters as below. - --kubelet-client-certificate= - --kubelet-client-key= - scored: true - - id: 1.2.5 - text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--kubelet-certificate-authority" - remediation: | - Follow the Kubernetes documentation and setup the TLS connection between - the apiserver and kubelets. Then, edit the API server pod specification file - $apiserverconf on the control plane node and set the - --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. 
- --kubelet-certificate-authority= - scored: true - - id: 1.2.6 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--authorization-mode" - compare: - op: nothave - value: "AlwaysAllow" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. - One such example could be as below. - --authorization-mode=RBAC - scored: true - - id: 1.2.7 - text: "Ensure that the --authorization-mode argument includes Node (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--authorization-mode" - compare: - op: has - value: "Node" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --authorization-mode parameter to a value that includes Node. - --authorization-mode=Node,RBAC - scored: true - - id: 1.2.8 - text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--authorization-mode" - compare: - op: has - value: "RBAC" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, - for example `--authorization-mode=Node,RBAC`. - scored: true - - id: 1.2.9 - text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "EventRateLimit" - remediation: | - Follow the Kubernetes documentation and set the desired limits in a configuration file. - Then, edit the API server pod specification file $apiserverconf - and set the below parameters. - --enable-admission-plugins=...,EventRateLimit,... - --admission-control-config-file= - scored: false - - id: 1.2.10 - text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--enable-admission-plugins" - compare: - op: nothave - value: AlwaysAdmit - - flag: "--enable-admission-plugins" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a - value that does not include AlwaysAdmit. - scored: true - - id: 1.2.11 - text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "AlwaysPullImages" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --enable-admission-plugins parameter to include - AlwaysPullImages. - --enable-admission-plugins=...,AlwaysPullImages,... 
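For check 1.2.9 above, the admission control configuration referenced by --admission-control-config-file could look roughly like this; the limits and file names are placeholders, not benchmark values.

  # admission-control.yaml (passed via --admission-control-config-file)
  apiVersion: apiserver.config.k8s.io/v1
  kind: AdmissionConfiguration
  plugins:
    - name: EventRateLimit
      path: eventconfig.yaml          # resolved relative to this file

  # eventconfig.yaml (referenced above)
  apiVersion: eventratelimit.admission.k8s.io/v1alpha1
  kind: Configuration
  limits:
    - type: Server
      qps: 50                         # placeholder limits
      burst: 100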
- scored: false - - id: 1.2.12 - text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "SecurityContextDeny" - - flag: "--enable-admission-plugins" - compare: - op: has - value: "PodSecurityPolicy" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --enable-admission-plugins parameter to include - SecurityContextDeny, unless PodSecurityPolicy is already in place. - --enable-admission-plugins=...,SecurityContextDeny,... - scored: false - - id: 1.2.13 - text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--disable-admission-plugins" - compare: - op: nothave - value: "ServiceAccount" - - flag: "--disable-admission-plugins" - set: false - remediation: | - Follow the documentation and create ServiceAccount objects as per your environment. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and ensure that the --disable-admission-plugins parameter is set to a - value that does not include ServiceAccount. - scored: true - - id: 1.2.14 - text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--disable-admission-plugins" - compare: - op: nothave - value: "NamespaceLifecycle" - - flag: "--disable-admission-plugins" - set: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --disable-admission-plugins parameter to - ensure it does not include NamespaceLifecycle. - scored: true - - id: 1.2.15 - text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--enable-admission-plugins" - compare: - op: has - value: "NodeRestriction" - remediation: | - Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the --enable-admission-plugins parameter to a - value that includes NodeRestriction. - --enable-admission-plugins=...,NodeRestriction,... - scored: true - - id: 1.2.16 - text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--profiling" - compare: - op: eq - value: false - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the below parameter. 
- --profiling=false - scored: true - - id: 1.2.17 - text: "Ensure that the --audit-log-path argument is set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-path" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-path parameter to a suitable path and - file where you would like audit logs to be written, for example, - --audit-log-path=/var/log/apiserver/audit.log - scored: true - - id: 1.2.18 - text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-maxage" - compare: - op: gte - value: 30 - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-maxage parameter to 30 - or as an appropriate number of days, for example, - --audit-log-maxage=30 - scored: true - - id: 1.2.19 - text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-maxbackup" - compare: - op: gte - value: 10 - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate - value. For example, - --audit-log-maxbackup=10 - scored: true - - id: 1.2.20 - text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--audit-log-maxsize" - compare: - op: gte - value: 100 - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. - For example, to set it as 100 MB, --audit-log-maxsize=100 - scored: true - - id: 1.2.21 - text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - type: manual - remediation: | - Edit the API server pod specification file $apiserverconf - and set the below parameter as appropriate and if needed. - For example, --request-timeout=300s - scored: false - - id: 1.2.22 - text: "Ensure that the --service-account-lookup argument is set to true (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--service-account-lookup" - set: false - - flag: "--service-account-lookup" - compare: - op: eq - value: true - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the below parameter. - --service-account-lookup=true - Alternatively, you can delete the --service-account-lookup parameter from this file so - that the default takes effect. - scored: true - - id: 1.2.23 - text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--service-account-key-file" - remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --service-account-key-file parameter - to the public key file for service accounts. 
For example, - --service-account-key-file= - scored: true - - id: 1.2.24 - text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--etcd-certfile" - - flag: "--etcd-keyfile" - remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the etcd certificate and key file parameters. - --etcd-certfile= - --etcd-keyfile= - scored: true - - id: 1.2.25 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - bin_op: and - test_items: - - flag: "--tls-cert-file" - - flag: "--tls-private-key-file" - remediation: | - Follow the Kubernetes documentation and set up the TLS connection on the apiserver. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the TLS certificate and private key file parameters. - --tls-cert-file= - --tls-private-key-file= - scored: true - - id: 1.2.26 - text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--client-ca-file" - remediation: | - Follow the Kubernetes documentation and set up the TLS connection on the apiserver. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the client certificate authority file. - --client-ca-file= - scored: true - - id: 1.2.27 - text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--etcd-cafile" - remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the etcd certificate authority file parameter. - --etcd-cafile= - scored: true - - id: 1.2.28 - text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--encryption-provider-config" - remediation: | - Follow the Kubernetes documentation and configure an EncryptionConfig file. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the --encryption-provider-config parameter to the path of that file. - For example, --encryption-provider-config= - scored: false - - id: 1.2.29 - text: "Ensure that encryption providers are appropriately configured (Manual)" - audit: | - ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') - if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi - tests: - test_items: - - flag: "provider" - compare: - op: valid_elements - value: "aescbc,kms,secretbox" - remediation: | - Follow the Kubernetes documentation and configure an EncryptionConfig file. - In this file, choose aescbc, kms or secretbox as the encryption provider.
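For reference, a minimal sketch of the EncryptionConfiguration file that checks 1.2.28 and 1.2.29 expect to find behind --encryption-provider-config, assuming the aescbc provider; the key name and secret value are placeholders:

  apiVersion: apiserver.config.k8s.io/v1
  kind: EncryptionConfiguration
  resources:
    - resources:
        - secrets
      providers:
        - aescbc:                    # listed first, so it is used to encrypt new writes
            keys:
              - name: key1           # placeholder key name
                secret: <base64-encoded 32-byte key>
        - identity: {}               # lets the apiserver read data written before encryption was enabled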
- scored: false - - id: 1.2.30 - text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" - tests: - test_items: - - flag: "--tls-cipher-suites" - compare: - op: valid_elements - value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" - remediation: | - Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml - on the control plane node and set the below parameter. - --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, - TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 - scored: false - - id: 1.3 - text: "Controller Manager" - checks: - - id: 1.3.1 - text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--terminated-pod-gc-threshold" - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, - for example, --terminated-pod-gc-threshold=10 - scored: false - - id: 1.3.2 - text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--profiling" - compare: - op: eq - value: false - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the below parameter. - --profiling=false - scored: true - - id: 1.3.3 - text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--use-service-account-credentials" - compare: - op: noteq - value: false - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node to set the below parameter. 
- --use-service-account-credentials=true - scored: true - - id: 1.3.4 - text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--service-account-private-key-file" - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --service-account-private-key-file parameter - to the private key file for service accounts. - --service-account-private-key-file= - scored: true - - id: 1.3.5 - text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - test_items: - - flag: "--root-ca-file" - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --root-ca-file parameter to the certificate bundle file. - --root-ca-file= - scored: true - - id: 1.3.6 - text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--feature-gates" - compare: - op: nothave - value: "RotateKubeletServerCertificate=false" - set: true - - flag: "--feature-gates" - set: false - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. - --feature-gates=RotateKubeletServerCertificate=true - scored: true - - id: 1.3.7 - text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--bind-address" - compare: - op: eq - value: "127.0.0.1" - - flag: "--bind-address" - set: false - remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and ensure the correct value for the --bind-address parameter - scored: true - - id: 1.4 - text: "Scheduler" - checks: - - id: 1.4.1 - text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" - tests: - test_items: - - flag: "--profiling" - compare: - op: eq - value: false - remediation: | - Edit the Scheduler pod specification file $schedulerconf - on the control plane node and set the below parameter.
- --profiling=false - scored: true - - id: 1.4.2 - text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" - audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" - tests: - bin_op: or - test_items: - - flag: "--bind-address" - compare: - op: eq - value: "127.0.0.1" - - flag: "--bind-address" - set: false - remediation: | - Edit the Scheduler pod specification file $schedulerconf - on the control plane node and ensure the correct value for the --bind-address parameter - scored: true diff --git a/package/cfg/cis-1.8/node.yaml b/package/cfg/cis-1.8/node.yaml deleted file mode 100644 index 7fbc2484..00000000 --- a/package/cfg/cis-1.8/node.yaml +++ /dev/null @@ -1,431 +0,0 @@ ---- -controls: -version: "cis-1.8" -id: 4 -text: "Worker Node Security Configuration" -type: "node" -groups: - - id: 4.1 - text: "Worker Node Configuration Files" - checks: - - id: 4.1.1 - text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, chmod 600 $kubeletsvc - scored: true - - id: 4.1.2 - text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" - audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"' - tests: - test_items: - - flag: root:root - remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, - chown root:root $kubeletsvc - scored: true - - id: 4.1.3 - text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)" - audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' - tests: - bin_op: or - test_items: - - flag: "permissions" - set: true - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, - chmod 600 $proxykubeconfig - scored: false - - id: 4.1.4 - text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" - audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' - tests: - bin_op: or - test_items: - - flag: root:root - remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, chown root:root $proxykubeconfig - scored: false - - id: 4.1.5 - text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the below command (based on the file location on your system) on the each worker node. 
- For example, - chmod 600 $kubeletkubeconfig - scored: true - - id: 4.1.6 - text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' - tests: - test_items: - - flag: root:root - remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, - chown root:root $kubeletkubeconfig - scored: true - - id: 4.1.7 - text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)" - audit: | - CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) - if test -z $CAFILE; then CAFILE=$kubeletcafile; fi - if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the following command to modify the file permissions of the - --client-ca-file chmod 600 - scored: false - - id: 4.1.8 - text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" - audit: | - CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) - if test -z $CAFILE; then CAFILE=$kubeletcafile; fi - if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi - tests: - test_items: - - flag: root:root - compare: - op: eq - value: root:root - remediation: | - Run the following command to modify the ownership of the --client-ca-file. - chown root:root - scored: false - - id: 4.1.9 - text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' - tests: - test_items: - - flag: "permissions" - compare: - op: bitmask - value: "600" - remediation: | - Run the following command (using the config file location identified in the Audit step) - chmod 600 $kubeletconf - scored: true - - id: 4.1.10 - text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' - tests: - test_items: - - flag: root:root - remediation: | - Run the following command (using the config file location identified in the Audit step) - chown root:root $kubeletconf - scored: true - - id: 4.2 - text: "Kubelet" - checks: - - id: 4.2.1 - text: "Ensure that the --anonymous-auth argument is set to false (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: "--anonymous-auth" - path: '{.authentication.anonymous.enabled}' - compare: - op: eq - value: false - remediation: | - If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to - `false`. - If using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - `--anonymous-auth=false` - Based on your system, restart the kubelet service. 
For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - id: 4.2.2 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --authorization-mode - path: '{.authorization.mode}' - compare: - op: nothave - value: AlwaysAllow - remediation: | - If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If - using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. - --authorization-mode=Webhook - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - id: 4.2.3 - text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --client-ca-file - path: '{.authentication.x509.clientCAFile}' - remediation: | - If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to - the location of the client CA file. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. - --client-ca-file= - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - id: 4.2.4 - text: "Verify that the --read-only-port argument is set to 0 (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - bin_op: or - test_items: - - flag: "--read-only-port" - path: '{.readOnlyPort}' - compare: - op: eq - value: 0 - - flag: "--read-only-port" - path: '{.readOnlyPort}' - set: false - remediation: | - If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --read-only-port=0 - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - id: 4.2.5 - text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --streaming-connection-idle-timeout - path: '{.streamingConnectionIdleTimeout}' - compare: - op: noteq - value: 0 - - flag: --streaming-connection-idle-timeout - path: '{.streamingConnectionIdleTimeout}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a - value other than 0. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --streaming-connection-idle-timeout=5m - Based on your system, restart the kubelet service. 
For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - id: 4.2.6 - text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --make-iptables-util-chains - path: '{.makeIPTablesUtilChains}' - compare: - op: eq - value: true - - flag: --make-iptables-util-chains - path: '{.makeIPTablesUtilChains}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - remove the --make-iptables-util-chains argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - id: 4.2.7 - text: "Ensure that the --hostname-override argument is not set (Manual)" - # This is one of those properties that can only be set as a command line argument. - # To check if the property is set as expected, we need to parse the kubelet command - # instead reading the Kubelet Configuration file. - audit: "/bin/ps -fC $kubeletbin " - tests: - test_items: - - flag: --hostname-override - set: false - remediation: | - Edit the kubelet service file $kubeletsvc - on each worker node and remove the --hostname-override argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - id: 4.2.8 - text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --event-qps - path: '{.eventRecordQPS}' - compare: - op: gte - value: 0 - - flag: --event-qps - path: '{.eventRecordQPS}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - id: 4.2.9 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --tls-cert-file - path: '{.tlsCertFile}' - - flag: --tls-private-key-file - path: '{.tlsPrivateKeyFile}' - remediation: | - If using a Kubelet config file, edit the file to set `tlsCertFile` to the location - of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` - to the location of the corresponding private key file. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameters in KUBELET_CERTIFICATE_ARGS variable. - --tls-cert-file= - --tls-private-key-file= - Based on your system, restart the kubelet service. 
For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - id: 4.2.10 - text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --rotate-certificates - path: '{.rotateCertificates}' - compare: - op: eq - value: true - - flag: --rotate-certificates - path: '{.rotateCertificates}' - set: false - bin_op: or - remediation: | - If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or - remove it altogether to use the default value. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS - variable. - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: true - - id: 4.2.11 - text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - bin_op: or - test_items: - - flag: RotateKubeletServerCertificate - path: '{.featureGates.RotateKubeletServerCertificate}' - compare: - op: nothave - value: false - - flag: RotateKubeletServerCertificate - path: '{.featureGates.RotateKubeletServerCertificate}' - set: false - remediation: | - Edit the kubelet service file $kubeletsvc - on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. - --feature-gates=RotateKubeletServerCertificate=true - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - id: 4.2.12 - text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --tls-cipher-suites - path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' - compare: - op: valid_elements - value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - remediation: | - If using a Kubelet config file, edit the file to set `TLSCipherSuites` to - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - or to a subset of these values. - If using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the --tls-cipher-suites parameter as follows, or to a subset of these values. - --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - Based on your system, restart the kubelet service. 
For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false - - id: 4.2.13 - text: "Ensure that a limit is set on pod PIDs (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" - tests: - test_items: - - flag: --pod-max-pids - path: '{.podPidsLimit}' - remediation: | - Decide on an appropriate level for this parameter and set it, - either via the --pod-max-pids command line parameter or the PodPidsLimit configuration file setting. - scored: false diff --git a/package/cfg/cis-1.8/policies.yaml b/package/cfg/cis-1.8/policies.yaml deleted file mode 100644 index bb5e3f80..00000000 --- a/package/cfg/cis-1.8/policies.yaml +++ /dev/null @@ -1,270 +0,0 @@ ---- -controls: -version: "cis-1.8" -id: 5 -text: "Kubernetes Policies" -type: "policies" -groups: - - id: 5.1 - text: "RBAC and Service Accounts" - checks: - - id: 5.1.1 - text: "Ensure that the cluster-admin role is only used where required (Manual)" - type: "manual" - remediation: | - Identify all clusterrolebindings to the cluster-admin role. Check if they are used and - if they need this role or if they could use a role with fewer privileges. - Where possible, first bind users to a lower privileged role and then remove the - clusterrolebinding to the cluster-admin role : - kubectl delete clusterrolebinding [name] - scored: false - - id: 5.1.2 - text: "Minimize access to secrets (Manual)" - type: "manual" - remediation: | - Where possible, remove get, list and watch access to Secret objects in the cluster. - scored: false - - id: 5.1.3 - text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" - type: "manual" - remediation: | - Where possible replace any use of wildcards in clusterroles and roles with specific - objects or actions. - scored: false - - id: 5.1.4 - text: "Minimize access to create pods (Manual)" - type: "manual" - remediation: | - Where possible, remove create access to pod objects in the cluster. - scored: false - - id: 5.1.5 - text: "Ensure that default service accounts are not actively used. (Manual)" - type: "manual" - remediation: | - Create explicit service accounts wherever a Kubernetes workload requires specific access - to the Kubernetes API server. - Modify the configuration of each default service account to include this value - automountServiceAccountToken: false - scored: false - - id: 5.1.6 - text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" - type: "manual" - remediation: | - Modify the definition of pods and service accounts which do not need to mount service - account tokens to disable it. - scored: false - - id: 5.1.7 - text: "Avoid use of system:masters group (Manual)" - type: "manual" - remediation: | - Remove the system:masters group from all users in the cluster. - scored: false - - id: 5.1.8 - text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" - type: "manual" - remediation: | - Where possible, remove the impersonate, bind and escalate rights from subjects. - scored: false - - id: 5.1.9 - text: "Minimize access to create persistent volumes (Manual)" - type: "manual" - remediation: | - Where possible, remove create access to PersistentVolume objects in the cluster. - scored: false - - id: 5.1.10 - text: "Minimize access to the proxy sub-resource of nodes (Manual)" - type: "manual" - remediation: | - Where possible, remove access to the proxy sub-resource of node objects. 
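To make the intent of checks 5.1.2-5.1.4 concrete, a narrowly scoped Role could look like the sketch below; the name, namespace and resource list are illustrative, not prescribed by the benchmark:

  apiVersion: rbac.authorization.k8s.io/v1
  kind: Role
  metadata:
    name: pod-reader          # illustrative name
    namespace: app-team       # illustrative namespace
  rules:
    - apiGroups: [""]
      resources: ["pods"]               # no secrets, no wildcards
      verbs: ["get", "list", "watch"]   # read-only, no create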
- scored: false - - id: 5.1.11 - text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)" - type: "manual" - remediation: | - Where possible, remove access to the approval sub-resource of certificatesigningrequest objects. - scored: false - - id: 5.1.12 - text: "Minimize access to webhook configuration objects (Manual)" - type: "manual" - remediation: | - Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects - scored: false - - id: 5.1.13 - text: "Minimize access to the service account token creation (Manual)" - type: "manual" - remediation: | - Where possible, remove access to the token sub-resource of serviceaccount objects. - scored: false - - id: 5.2 - text: "Pod Security Standards" - checks: - - id: 5.2.1 - text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" - type: "manual" - remediation: | - Ensure that either Pod Security Admission or an external policy control system is in place - for every namespace which contains user workloads. - scored: false - - id: 5.2.2 - text: "Minimize the admission of privileged containers (Manual)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of privileged containers. - scored: false - - id: 5.2.3 - text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of `hostPID` containers. - scored: true - - id: 5.2.4 - text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of `hostIPC` containers. - scored: true - - id: 5.2.5 - text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of `hostNetwork` containers. - scored: true - - id: 5.2.6 - text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. - scored: true - - id: 5.2.7 - text: "Minimize the admission of root containers (Automated)" - type: "manual" - remediation: | - Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` - or `MustRunAs` with the range of UIDs not including 0, is set. - scored: true - - id: 5.2.8 - text: "Minimize the admission of containers with the NET_RAW capability (Automated)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of containers with the `NET_RAW` capability. - scored: true - - id: 5.2.9 - text: "Minimize the admission of containers with added capabilities (Automated)" - type: "manual" - remediation: | - Ensure that `allowedCapabilities` is not present in policies for the cluster unless - it is set to an empty array. 
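One way to put a policy control mechanism in place for the 5.2.x checks is the built-in Pod Security Admission controller; labelling a namespace as sketched below enforces the restricted profile, which rejects privileged containers, host namespaces, allowPrivilegeEscalation and containers running as root (the namespace name is illustrative):

  apiVersion: v1
  kind: Namespace
  metadata:
    name: app-team                                       # illustrative namespace
    labels:
      pod-security.kubernetes.io/enforce: restricted
      pod-security.kubernetes.io/enforce-version: latest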
- scored: true - - id: 5.2.10 - text: "Minimize the admission of containers with capabilities assigned (Manual)" - type: "manual" - remediation: | - Review the use of capabilities in applications running on your cluster. Where a namespace - contains applications which do not require any Linux capabilities to operate, consider adding - a PSP which forbids the admission of containers which do not drop all capabilities. - scored: false - - id: 5.2.11 - text: "Minimize the admission of Windows HostProcess containers (Manual)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. - scored: false - - id: 5.2.12 - text: "Minimize the admission of HostPath volumes (Manual)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of containers with `hostPath` volumes. - scored: false - - id: 5.2.13 - text: "Minimize the admission of containers which use HostPorts (Manual)" - type: "manual" - remediation: | - Add policies to each namespace in the cluster which has user workloads to restrict the - admission of containers which use `hostPort` sections. - scored: false - - id: 5.3 - text: "Network Policies and CNI" - checks: - - id: 5.3.1 - text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" - type: "manual" - remediation: | - If the CNI plugin in use does not support network policies, consideration should be given to - making use of a different plugin, or finding an alternate mechanism for restricting traffic - in the Kubernetes cluster. - scored: false - - id: 5.3.2 - text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" - type: "manual" - remediation: | - Follow the documentation and create NetworkPolicy objects as you need them. - scored: false - - id: 5.4 - text: "Secrets Management" - checks: - - id: 5.4.1 - text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" - type: "manual" - remediation: | - If possible, rewrite application code to read Secrets from mounted secret files, rather than - from environment variables. - scored: false - - id: 5.4.2 - text: "Consider external secret storage (Manual)" - type: "manual" - remediation: | - Refer to the Secrets management options offered by your cloud provider or a third-party - secrets management solution. - scored: false - - id: 5.5 - text: "Extensible Admission Control" - checks: - - id: 5.5.1 - text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" - type: "manual" - remediation: | - Follow the Kubernetes documentation and set up image provenance. - scored: false - - id: 5.7 - text: "General Policies" - checks: - - id: 5.7.1 - text: "Create administrative boundaries between resources using namespaces (Manual)" - type: "manual" - remediation: | - Follow the documentation and create namespaces for objects in your deployment as you need - them. - scored: false - - id: 5.7.2 - text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" - type: "manual" - remediation: | - Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
- An example is as below: - securityContext: - seccompProfile: - type: RuntimeDefault - scored: false - - id: 5.7.3 - text: "Apply SecurityContext to your Pods and Containers (Manual)" - type: "manual" - remediation: | - Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a - suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker - Containers. - scored: false - - id: 5.7.4 - text: "The default namespace should not be used (Manual)" - type: "manual" - remediation: | - Ensure that namespaces are created to allow for appropriate segregation of Kubernetes - resources and that all new resources are created in a specific namespace. - scored: false From 276e8aaf3b80953dc56671c58696b7d4c170cc0a Mon Sep 17 00:00:00 2001 From: Andy Pitcher Date: Wed, 27 Nov 2024 09:53:10 +0100 Subject: [PATCH 4/4] Add comments for version_mapping and target_mapping --- package/cfg/config.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/package/cfg/config.yaml b/package/cfg/config.yaml index 7bc63291..db4e7f11 100644 --- a/package/cfg/config.yaml +++ b/package/cfg/config.yaml @@ -209,7 +209,7 @@ policies: managedservices: components: [] -# TODO: Clean up in the next refactor +# Version mapping: Maps k8s versions to a CIS version/profile. version_mapping: "1.23": "cis-1.23" "1.24": "cis-1.24" @@ -231,6 +231,7 @@ version_mapping: "v1.26.15+k3s1": "k3s-cis-1.8-hardened" "v1.27.16+k3s1": "k3s-cis-1.9" +# Target mapping: Defines which components (eg. master, node, etcd) should be evaluated for a given CIS profile. target_mapping: # EKS "eks-1.2.0":
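For later additions, the two maps are meant to stay in sync: a profile referenced from version_mapping also needs its own target_mapping entry listing the components to evaluate. A hypothetical sketch (the profile name and target list are assumptions, not part of this change):

  version_mapping:
    "1.29": "cis-1.9"            # hypothetical future mapping
  target_mapping:
    "cis-1.9":                   # must match the profile name used above
      - "master"
      - "node"
      - "controlplane"
      - "etcd"
      - "policies"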