diff --git a/package/cfg/rke2-cis-1.9/config.yaml b/package/cfg/rke2-cis-1.9/config.yaml
new file mode 100644
index 00000000..c8174f02
--- /dev/null
+++ b/package/cfg/rke2-cis-1.9/config.yaml
@@ -0,0 +1,61 @@
+---
+## Version-specific settings that override the values in cfg/config.yaml
+
+master:
+  components:
+    - apiserver
+    - scheduler
+    - controllermanager
+    - etcd
+    - policies
+  apiserver:
+    bins:
+      - kube-apiserver
+    confs:
+      - /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml
+    defaultconf: /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml
+  scheduler:
+    bins:
+      - kube-scheduler
+    confs:
+      - /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml
+    kubeconfig:
+      - /var/lib/rancher/rke2/server/cred/scheduler.kubeconfig
+    defaultconf: /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml
+  controllermanager:
+    bins:
+      - kube-controller-manager
+    confs:
+      - /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml
+    kubeconfig:
+      - /var/lib/rancher/rke2/server/cred/controller.kubeconfig
+    defaultconf: /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml
+  etcd:
+    bins:
+      - etcd
+    datadirs:
+      - /var/lib/rancher/rke2/server/db/etcd
+    defaultconf: /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml
+
+etcd:
+  components:
+    - etcd
+
+  etcd:
+    bins:
+      - etcd
+    defaultconf: /var/lib/rancher/rke2/server/db/etcd/config
+
+node:
+  components:
+    - kubelet
+    - proxy
+  kubelet:
+    defaultkubeconfig: /var/lib/rancher/rke2/agent/kubelet.kubeconfig
+    defaultcafile: /var/lib/rancher/rke2/agent/client-ca.crt
+  proxy:
+    defaultkubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig
+
+policies:
+  components:
+    - policies
diff --git a/package/cfg/rke2-cis-1.9/controlplane.yaml b/package/cfg/rke2-cis-1.9/controlplane.yaml
new file mode 100644
index 00000000..61801ffb
--- /dev/null
+++ b/package/cfg/rke2-cis-1.9/controlplane.yaml
@@ -0,0 +1,62 @@
+---
+controls:
+version: "cis-1.9"
+id: 3
+text: "Control Plane Configuration"
+type: "controlplane"
+groups:
+  - id: 3.1
+    text: "Authentication and Authorization"
+    checks:
+      - id: 3.1.1
+        text: "Client certificate authentication should not be used for users (Manual)"
+        type: "manual"
+        remediation: |
+          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be
+          implemented in place of client certificates.
+        scored: false
+
+      - id: 3.1.2
+        text: "Service account token authentication should not be used for users (Manual)"
+        type: "manual"
+        remediation: |
+          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented
+          in place of service account tokens.
+        scored: false
+
+      - id: 3.1.3
+        text: "Bootstrap token authentication should not be used for users (Manual)"
+        type: "manual"
+        remediation: |
+          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented
+          in place of bootstrap tokens.
+        scored: false
+
+  - id: 3.2
+    text: "Logging"
+    checks:
+      - id: 3.2.1
+        text: "Ensure that a minimal audit policy is created (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-policy-file"
+              set: true
+        remediation: |
+          Create an audit policy file for your cluster.
+        scored: true
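+      # A minimal sketch of an audit policy, assuming Metadata-level logging is
+      # sufficient for your environment; the policy file path below is
+      # illustrative. Save the policy on the server node and reference it from
+      # /etc/rancher/rke2/config.yaml:
+      #   kube-apiserver-arg:
+      #     - "audit-policy-file=/etc/rancher/rke2/audit-policy.yaml"
+      # where /etc/rancher/rke2/audit-policy.yaml contains:
+      #   apiVersion: audit.k8s.io/v1
+      #   kind: Policy
+      #   rules:
+      #     - level: Metadata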
+
+      - id: 3.2.2
+        text: "Ensure that the audit policy covers key security concerns (Manual)"
+        type: "manual"
+        remediation: |
+          Review the audit policy provided for the cluster and ensure that it covers
+          at least the following areas,
+          - Access to Secrets managed by the cluster. Care should be taken to only
+            log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in
+            order to avoid risk of logging sensitive data.
+          - Modification of Pod and Deployment objects.
+          - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.
+          For most requests, minimally logging at the Metadata level is recommended
+          (the most basic level of logging).
+        scored: false
diff --git a/package/cfg/rke2-cis-1.9/etcd.yaml b/package/cfg/rke2-cis-1.9/etcd.yaml
new file mode 100644
index 00000000..5b20c2d8
--- /dev/null
+++ b/package/cfg/rke2-cis-1.9/etcd.yaml
@@ -0,0 +1,174 @@
+---
+controls:
+version: "cis-1.9"
+id: 2
+text: "Etcd Node Configuration"
+type: "etcd"
+groups:
+  # When possible, we check the flag, the environment variable, and the configuration file.
+  # kube-bench does not allow nested bin_ops, so when multiple flags are being checked in a single test,
+  # we only check the config path.
+  - id: 2
+    text: "Etcd Node Configuration"
+    checks:
+      - id: 2.1
+        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
+        audit_config: "cat $etcdconf"
+        tests:
+          bin_op: and
+          test_items:
+            - path: "{.client-transport-security.cert-file}"
+              compare:
+                op: eq
+                value: "/var/lib/rancher/rke2/server/tls/etcd/server-client.crt"
+            - path: "{.client-transport-security.key-file}"
+              compare:
+                op: eq
+                value: "/var/lib/rancher/rke2/server/tls/etcd/server-client.key"
+        remediation: |
+          By default, RKE2 generates cert and key files for etcd.
+          These are located in /var/lib/rancher/rke2/server/tls/etcd/.
+          If this check fails, ensure that the configuration file $etcdconf
+          has not been modified to use custom cert and key files.
+        scored: true
+
+      - id: 2.2
+        text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
+        audit: "/bin/ps -fC $etcdbin"
+        audit_config: "cat $etcdconf"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--client-cert-auth"
+              env: "ETCD_CLIENT_CERT_AUTH"
+              compare:
+                op: eq
+                value: true
+            - path: "{.client-transport-security.client-cert-auth}"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          By default, RKE2 sets the --client-cert-auth parameter to true.
+          If this check fails, ensure that the configuration file $etcdconf
+          has not been modified to disable client certificate authentication.
+        scored: true
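+
+      # Illustrative spot-check on an etcd node, assuming the default config
+      # location from this package's config.yaml:
+      #   grep -A3 client-transport-security /var/lib/rancher/rke2/server/db/etcd/config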
+
+      - id: 2.3
+        text: "Ensure that the --auto-tls argument is not set to true (Automated)"
+        audit: "/bin/ps -fC $etcdbin"
+        audit_config: "cat $etcdconf"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--auto-tls"
+              env: "ETCD_AUTO_TLS"
+              set: false
+            - flag: "--auto-tls"
+              env: "ETCD_AUTO_TLS"
+              compare:
+                op: eq
+                value: false
+            - path: "{.client-transport-security.auto-tls}"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          By default, RKE2 does not set the --auto-tls parameter.
+          If this check fails, edit the etcd pod specification file $etcdconf on the master
+          node and either remove the --auto-tls parameter or set it to false.
+            client-transport-security:
+              auto-tls: false
+        scored: true
+
+      - id: 2.4
+        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)"
+        audit_config: "cat $etcdconf"
+        tests:
+          bin_op: and
+          test_items:
+            - path: "{.peer-transport-security.cert-file}"
+              compare:
+                op: eq
+                value: "/var/lib/rancher/rke2/server/tls/etcd/peer-server-client.crt"
+            - path: "{.peer-transport-security.key-file}"
+              compare:
+                op: eq
+                value: "/var/lib/rancher/rke2/server/tls/etcd/peer-server-client.key"
+        remediation: |
+          By default, RKE2 generates peer cert and key files for etcd.
+          These are located in /var/lib/rancher/rke2/server/tls/etcd/.
+          If this check fails, ensure that the configuration file $etcdconf
+          has not been modified to use custom peer cert and key files.
+        scored: true
+
+      - id: 2.5
+        text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
+        audit: "/bin/ps -fC $etcdbin"
+        audit_config: "cat $etcdconf"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--peer-client-cert-auth"
+              env: "ETCD_PEER_CLIENT_CERT_AUTH"
+              compare:
+                op: eq
+                value: true
+            - path: "{.peer-transport-security.client-cert-auth}"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          By default, RKE2 sets the --peer-client-cert-auth parameter to true.
+          If this check fails, ensure that the configuration file $etcdconf
+          has not been modified to disable peer client certificate authentication.
+        scored: true
+
+      - id: 2.6
+        text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
+        audit: "/bin/ps -fC $etcdbin"
+        audit_config: "cat $etcdconf"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--peer-auto-tls"
+              env: "ETCD_PEER_AUTO_TLS"
+              set: false
+            - flag: "--peer-auto-tls"
+              env: "ETCD_PEER_AUTO_TLS"
+              compare:
+                op: eq
+                value: false
+              set: true
+            - path: "{.peer-transport-security.auto-tls}"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          By default, RKE2 does not set the --peer-auto-tls parameter.
+          If this check fails, edit the etcd pod specification file $etcdconf on the master
+          node and either remove the --peer-auto-tls parameter or set it to false.
+            peer-transport-security:
+              auto-tls: false
+        scored: true
+
+      - id: 2.7
+        text: "Ensure that a unique Certificate Authority is used for etcd (Automated)"
+        audit: "/bin/ps -fC $etcdbin"
+        audit_config: "cat $etcdconf"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--trusted-ca-file"
+              env: "ETCD_TRUSTED_CA_FILE"
+            - path: "{.peer-transport-security.trusted-ca-file}"
+              compare:
+                op: eq
+                value: "/var/lib/rancher/rke2/server/tls/etcd/peer-ca.crt"
+              set: true
+        remediation: |
+          By default, RKE2 generates a unique certificate authority for etcd.
+          This is located at /var/lib/rancher/rke2/server/tls/etcd/peer-ca.crt.
+          If this check fails, ensure that the configuration file $etcdconf
+          has not been modified to use a shared certificate authority.
+        scored: true
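+
+      # A quick illustrative comparison, assuming default RKE2 paths: the etcd
+      # peer CA and the cluster server CA should be different certificates.
+      #   diff /var/lib/rancher/rke2/server/tls/etcd/peer-ca.crt \
+      #     /var/lib/rancher/rke2/server/tls/server-ca.crt && echo "WARN: CAs are identical"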
diff --git a/package/cfg/rke2-cis-1.9/master.yaml b/package/cfg/rke2-cis-1.9/master.yaml
new file mode 100644
index 00000000..d80512a3
--- /dev/null
+++ b/package/cfg/rke2-cis-1.9/master.yaml
@@ -0,0 +1,1037 @@
+---
+controls:
+version: "cis-1.9"
+id: 1
+text: "Control Plane Security Configuration"
+type: "master"
+groups:
+  - id: 1.1
+    text: "Control Plane Node Configuration Files"
+    checks:
+      - id: 1.1.1
+        text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)"
+        audit: "stat -c permissions=%a $apiserverconf"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the
+          control plane node.
+          For example, chmod 600 $apiserverconf
+        scored: true
+
+      - id: 1.1.2
+        text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chown root:root $apiserverconf
+        scored: true
+
+      - id: 1.1.3
+        text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chmod 600 $controllermanagerconf
+        scored: true
+
+      - id: 1.1.4
+        text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chown root:root $controllermanagerconf
+        scored: true
+
+      - id: 1.1.5
+        text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chmod 600 $schedulerconf
+        scored: true
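+
+      # Note on op: bitmask — as implemented by kube-bench, the check passes when
+      # the observed mode contains no permission bits outside the stated value,
+      # so a value of 600 also accepts more restrictive modes such as 400.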
+
+      - id: 1.1.6
+        text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chown root:root $schedulerconf
+        scored: true
+
+      - id: 1.1.7
+        text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Manual)"
+        audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c permissions=%a $etcdconf; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          If running master only with no etcd role, this check is Not Applicable.
+          If the controlplane and etcd roles are present on the same node and this check warns,
+          run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chmod 600 $etcdconf
+        scored: false
+
+      - id: 1.1.8
+        text: "Ensure that the etcd pod specification file ownership is set to root:root (Manual)"
+        audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          If running master only with no etcd role, this check is Not Applicable.
+          If the controlplane and etcd roles are present on the same node and this check warns,
+          run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chown root:root $etcdconf
+        scored: false
+
+      - id: 1.1.9
+        text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)"
+        audit: |
+          ps -fC ${kubeletbin:-kubelet} | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a
+          find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Note that for many CNIs, a lock file is created with permissions 750. This is expected and can be ignored.
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chmod 600 /var/lib/cni/networks/ and chmod 600 /etc/cni/net.d/
+        scored: false
+
+      - id: 1.1.10
+        text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)"
+        audit: |
+          ps -fC ${kubeletbin:-kubelet} | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G
+          find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chown root:root
+        scored: false
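+
+      # Illustrative one-liner to review CNI file modes and owners on a node
+      # (paths are the defaults assumed by the audits above):
+      #   stat -c '%a %U:%G %n' /etc/cni/net.d/* /var/lib/cni/networks/* 2>/dev/null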
+
+      - id: 1.1.11
+        text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Manual)"
+        audit: "stat -c permissions=%a $etcddatadir"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "700"
+              set: true
+        remediation: |
+          If running master only with no etcd role, this check is Not Applicable.
+          If the controlplane and etcd roles are present on the same node and this check warns,
+          on the etcd server node get the etcd data directory, passed as an argument --data-dir,
+          from the command 'ps -ef | grep etcd'.
+          Run the below command (based on the etcd data directory found above). For example,
+          chmod 700 $etcddatadir
+        scored: false
+
+      - id: 1.1.12
+        text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Manual)"
+        audit: "stat -c %U:%G $etcddatadir"
+        tests:
+          test_items:
+            - flag: "etcd:etcd"
+        remediation: |
+          If running master only with no etcd role, this check is Not Applicable.
+          If the controlplane and etcd roles are present on the same node and this check warns,
+          on the etcd server node get the etcd data directory, passed as an argument --data-dir,
+          from the command 'ps -ef | grep etcd'.
+          Run the below command (based on the etcd data directory found above).
+          For example, chown etcd:etcd $etcddatadir
+        scored: false
+
+      - id: 1.1.13
+        text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/cred/admin.kubeconfig"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chmod 600 /var/lib/rancher/rke2/server/cred/admin.kubeconfig
+        scored: true
+
+      - id: 1.1.14
+        text: "Ensure that the admin.conf file ownership is set to root:root (Automated)"
+        audit: "stat -c %U:%G /var/lib/rancher/rke2/server/cred/admin.kubeconfig"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chown root:root /var/lib/rancher/rke2/server/cred/admin.kubeconfig
+        scored: true
+
+      - id: 1.1.15
+        text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chmod 600 $schedulerkubeconfig
+        scored: true
+
+      - id: 1.1.16
+        text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)"
+        audit: "stat -c %U:%G $schedulerkubeconfig"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chown root:root $schedulerkubeconfig
+        scored: true
+
+      - id: 1.1.17
+        text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chmod 600 $controllermanagerkubeconfig
+        scored: true
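+
+      # Illustrative sweep of the generated kubeconfigs, assuming the default
+      # RKE2 credentials directory used throughout this file:
+      #   stat -c '%a %U:%G %n' /var/lib/rancher/rke2/server/cred/*.kubeconfig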
+
+      - id: 1.1.18
+        text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)"
+        audit: "stat -c %U:%G $controllermanagerkubeconfig"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chown root:root $controllermanagerkubeconfig
+        scored: true
+
+      - id: 1.1.19
+        text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)"
+        audit: "stat -c %U:%G /var/lib/rancher/rke2/server/tls"
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chown -R root:root /var/lib/rancher/rke2/server/tls
+        scored: true
+
+      - id: 1.1.20
+        text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)"
+        audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/tls/*.crt"
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chmod -R 600 /var/lib/rancher/rke2/server/tls/*.crt
+        scored: false
+
+      - id: 1.1.21
+        text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated)"
+        audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/tls/*.key"
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chmod -R 600 /var/lib/rancher/rke2/server/tls/*.key
+        scored: true
+
+  - id: 1.2
+    text: "API Server"
+    checks:
+      - id: 1.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          By default, RKE2 sets the --anonymous-auth argument to false.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml and remove anything similar to below.
+          kube-apiserver-arg:
+            - "anonymous-auth=true"
+        scored: true
+
+      - id: 1.2.2
+        text: "Ensure that the --token-auth-file parameter is not set (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          test_items:
+            - flag: "--token-auth-file"
+              set: false
+        remediation: |
+          Follow the documentation and configure alternate mechanisms for authentication.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml and remove anything similar to below.
+          kube-apiserver-arg:
+            - "token-auth-file="
+        scored: true
+
+      - id: 1.2.3
+        text: "Ensure that the --DenyServiceExternalIPs is set (Manual)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "DenyServiceExternalIPs"
+        remediation: |
+          By default, RKE2 does not set DenyServiceExternalIPs.
+          To enable this flag, edit the RKE2 config file /etc/rancher/rke2/config.yaml like below.
+          kube-apiserver-arg:
+            - "enable-admission-plugins=DenyServiceExternalIPs"
+        scored: false
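+
+      # Illustrative way to list the admission plugins on a running apiserver:
+      #   ps -ef | grep kube-apiserver | tr ' ' '\n' | grep -- --enable-admission-plugins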
+
+      - id: 1.2.4
+        text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--kubelet-client-certificate"
+              set: true
+            - flag: "--kubelet-client-key"
+              set: true
+        remediation: |
+          By default, RKE2 automatically provides the kubelet client certificate and key.
+          They are generated and located at /var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt and /var/lib/rancher/rke2/server/tls/client-kube-apiserver.key
+          If for some reason you need to provide your own certificate and key, you can set the
+          below parameters in the RKE2 config file /etc/rancher/rke2/config.yaml.
+          kube-apiserver-arg:
+            - "kubelet-client-certificate="
+            - "kubelet-client-key="
+        scored: true
+
+      - id: 1.2.5
+        text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          test_items:
+            - flag: "--kubelet-certificate-authority"
+              set: true
+        remediation: |
+          By default, RKE2 automatically provides the kubelet CA cert file, at /var/lib/rancher/rke2/server/tls/server-ca.crt.
+          If for some reason you need to provide your own ca certificate, look at using the rke2 certificate command line tool.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "kubelet-certificate-authority="
+        scored: true
+
+      - id: 1.2.6
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: nothave
+                value: "AlwaysAllow"
+              set: true
+        remediation: |
+          By default, RKE2 does not set the --authorization-mode to AlwaysAllow.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml, remove any lines like below.
+          kube-apiserver-arg:
+            - "authorization-mode=AlwaysAllow"
+        scored: true
+
+      - id: 1.2.7
+        text: "Ensure that the --authorization-mode argument includes Node (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: has
+                value: "Node"
+              set: true
+        remediation: |
+          By default, RKE2 sets the --authorization-mode to Node and RBAC.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml,
+          ensure that you are not overriding authorization-mode.
+        scored: true
+
+      - id: 1.2.8
+        text: "Ensure that the --authorization-mode argument includes RBAC (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: has
+                value: "RBAC"
+              set: true
+        remediation: |
+          By default, RKE2 sets the --authorization-mode to Node and RBAC.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml,
+          ensure that you are not overriding authorization-mode.
+        scored: true
+
+      - id: 1.2.9
+        text: "Ensure that the admission control plugin EventRateLimit is set (Manual)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "EventRateLimit"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set the desired limits in a configuration file.
+          Then, edit the RKE2 config file /etc/rancher/rke2/config.yaml and set the below parameters.
+          kube-apiserver-arg:
+            - "enable-admission-plugins=...,EventRateLimit,..."
+            - "admission-control-config-file="
+        scored: false
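+
+      # A sketch of the admission configuration referenced above; the file paths
+      # and limit values are illustrative only:
+      #   apiVersion: apiserver.config.k8s.io/v1
+      #   kind: AdmissionConfiguration
+      #   plugins:
+      #     - name: EventRateLimit
+      #       path: /etc/rancher/rke2/eventconfig.yaml
+      # with /etc/rancher/rke2/eventconfig.yaml containing:
+      #   apiVersion: eventratelimit.admission.k8s.io/v1alpha1
+      #   kind: Configuration
+      #   limits:
+      #     - type: Server
+      #       qps: 5000
+      #       burst: 20000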
+
+      - id: 1.2.10
+        text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: nothave
+                value: AlwaysAdmit
+              set: true
+            - flag: "--enable-admission-plugins"
+              set: false
+        remediation: |
+          By default, RKE2 does not set the --enable-admission-plugins to AlwaysAdmit.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml, remove any lines like below.
+          kube-apiserver-arg:
+            - "enable-admission-plugins=AlwaysAdmit"
+        scored: true
+
+      - id: 1.2.11
+        text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "AlwaysPullImages"
+              set: true
+        remediation: |
+          Permissive, per CIS guidelines,
+          "This setting could impact offline or isolated clusters, which have images pre-loaded and
+          do not have access to a registry to pull in-use images. This setting is not appropriate for
+          clusters which use this configuration."
+          Edit the RKE2 config file /etc/rancher/rke2/config.yaml
+          on the control plane node and set the --enable-admission-plugins parameter to include
+          AlwaysPullImages.
+          --enable-admission-plugins=...,AlwaysPullImages,...
+        scored: false
+
+      - id: 1.2.12
+        text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)"
+        audit: "/bin/ps -fC $apiserverbin"
+        type: "skip"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "SecurityContextDeny"
+              set: true
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "PodSecurityPolicy"
+              set: true
+        remediation: |
+          Not Applicable.
+          Enabling Pod Security Policy is no longer supported on RKE2 v1.25+ and will cause applications to unexpectedly fail.
+        scored: false
+
+      - id: 1.2.13
+        text: "Ensure that the admission control plugin ServiceAccount is set (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--disable-admission-plugins"
+              compare:
+                op: nothave
+                value: "ServiceAccount"
+            - flag: "--disable-admission-plugins"
+              set: false
+        remediation: |
+          By default, RKE2 does not set the --disable-admission-plugins to anything.
+          Follow the documentation and create ServiceAccount objects as per your environment.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "disable-admission-plugins=ServiceAccount"
+        scored: true
+
+      - id: 1.2.14
+        text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--disable-admission-plugins"
+              compare:
+                op: nothave
+                value: "NamespaceLifecycle"
+            - flag: "--disable-admission-plugins"
+              set: false
+        remediation: |
+          By default, RKE2 does not set the --disable-admission-plugins to anything.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "disable-admission-plugins=...,NamespaceLifecycle,..."
+        scored: true
+
+      - id: 1.2.15
+        text: "Ensure that the admission control plugin NodeRestriction is set (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "NodeRestriction"
+              set: true
+        remediation: |
+          By default, RKE2 sets the --enable-admission-plugins to NodeRestriction.
+          Check the RKE2 config file /etc/rancher/rke2/config.yaml, and ensure that you are not overriding the admission plugins.
+          If you are, include NodeRestriction in the list.
+          kube-apiserver-arg:
+            - "enable-admission-plugins=...,NodeRestriction,..."
+        scored: true
+
+      - id: 1.2.16
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          By default, RKE2 sets the --profiling argument to false.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "profiling=true"
+        scored: true
+
+      - id: 1.2.17
+        text: "Ensure that the --audit-log-path argument is set (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          test_items:
+            - flag: "--audit-log-path"
+              set: true
+        remediation: |
+          By default, RKE2 sets the --audit-log-path argument to /var/lib/rancher/rke2/server/logs/audit.log
+          If you want to change this, edit the RKE2 config file /etc/rancher/rke2/config.yaml
+          on the control plane node and set the --audit-log-path parameter to a suitable path and
+          file where you would like audit logs to be written, for example,
+          kube-apiserver-arg:
+            - "audit-log-path=/var/log/rke2/audit.log"
+        scored: true
+
+      - id: 1.2.18
+        text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          test_items:
+            - flag: "--audit-log-maxage"
+              compare:
+                op: gte
+                value: 30
+              set: true
+        remediation: |
+          By default, RKE2 sets the --audit-log-maxage argument to 30 days.
+          If you want to change this, edit the RKE2 config file /etc/rancher/rke2/config.yaml
+          on the control plane node and set the --audit-log-maxage parameter to an appropriate number of days, for example,
+          kube-apiserver-arg:
+            - "audit-log-maxage=40"
+        scored: true
+
+      - id: 1.2.19
+        text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          test_items:
+            - flag: "--audit-log-maxbackup"
+              compare:
+                op: gte
+                value: 10
+              set: true
+        remediation: |
+          By default, RKE2 sets the --audit-log-maxbackup argument to 10.
+          If you want to change this, edit the RKE2 config file /etc/rancher/rke2/config.yaml
+          on the control plane node and set the --audit-log-maxbackup parameter to an appropriate value.
+          For example,
+          kube-apiserver-arg:
+            - "audit-log-maxbackup=15"
+        scored: true
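+
+      # The audit-log flags checked in 1.2.17-1.2.20 can be set together in
+      # /etc/rancher/rke2/config.yaml; the values below restate the RKE2
+      # defaults described in the remediations and are illustrative:
+      #   kube-apiserver-arg:
+      #     - "audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log"
+      #     - "audit-log-maxage=30"
+      #     - "audit-log-maxbackup=10"
+      #     - "audit-log-maxsize=100"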
+
+      - id: 1.2.20
+        text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          test_items:
+            - flag: "--audit-log-maxsize"
+              compare:
+                op: gte
+                value: 100
+              set: true
+        remediation: |
+          By default, RKE2 sets the --audit-log-maxsize argument to 100 MB.
+          If you want to change this, edit the RKE2 config file /etc/rancher/rke2/config.yaml
+          on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB.
+          For example,
+          kube-apiserver-arg:
+            - "audit-log-maxsize=150"
+        scored: true
+
+      - id: 1.2.21
+        text: "Ensure that the --request-timeout argument is set as appropriate (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--request-timeout"
+              set: false
+            - flag: "--request-timeout"
+        remediation: |
+          Permissive, per CIS guidelines,
+          "it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed".
+          Edit the RKE2 config file /etc/rancher/rke2/config.yaml
+          and set the below parameter if needed. For example,
+          kube-apiserver-arg:
+            - "request-timeout=300s"
+        scored: true
+
+      - id: 1.2.22
+        text: "Ensure that the --service-account-lookup argument is set to true (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--service-account-lookup"
+              set: false
+            - flag: "--service-account-lookup"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          By default, RKE2 does not set the --service-account-lookup argument.
+          Edit the RKE2 config file /etc/rancher/rke2/config.yaml and set the service-account-lookup. For example,
+          kube-apiserver-arg:
+            - "service-account-lookup=true"
+          Alternatively, you can delete the service-account-lookup parameter from this file so
+          that the default takes effect.
+        scored: true
+
+      - id: 1.2.23
+        text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          test_items:
+            - flag: "--service-account-key-file"
+              set: true
+        remediation: |
+          RKE2 automatically generates and sets the service account key file.
+          It is located at /var/lib/rancher/rke2/server/tls/service.key.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "service-account-key-file="
+        scored: true
+
+      - id: 1.2.24
+        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--etcd-certfile"
+              set: true
+            - flag: "--etcd-keyfile"
+              set: true
+        remediation: |
+          RKE2 automatically generates and sets the etcd certificate and key files.
+          They are located at /var/lib/rancher/rke2/server/tls/etcd/client.crt and /var/lib/rancher/rke2/server/tls/etcd/client.key.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "etcd-certfile="
+            - "etcd-keyfile="
+        scored: true
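+
+      # Illustrative verification of the etcd client certificate that RKE2
+      # generates at the default path named above:
+      #   openssl x509 -noout -subject -in /var/lib/rancher/rke2/server/tls/etcd/client.crt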
+
+      - id: 1.2.25
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--tls-cert-file"
+              set: true
+            - flag: "--tls-private-key-file"
+              set: true
+        remediation: |
+          By default, RKE2 automatically generates and provides the TLS certificate and private key for the apiserver.
+          They are generated and located at /var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt and /var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "tls-cert-file="
+            - "tls-private-key-file="
+        scored: true
+
+      - id: 1.2.26
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          test_items:
+            - flag: "--client-ca-file"
+              set: true
+        remediation: |
+          By default, RKE2 automatically provides the client certificate authority file.
+          It is generated and located at /var/lib/rancher/rke2/server/tls/client-ca.crt.
+          If for some reason you need to provide your own ca certificate, look at using the rke2 certificate command line tool.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "client-ca-file="
+        scored: true
+
+      - id: 1.2.27
+        text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          test_items:
+            - flag: "--etcd-cafile"
+              set: true
+        remediation: |
+          By default, RKE2 automatically provides the etcd certificate authority file.
+          It is generated and located at /var/lib/rancher/rke2/server/tls/etcd/server-ca.crt.
+          If for some reason you need to provide your own ca certificate, look at using the rke2 certificate command line tool.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "etcd-cafile="
+        scored: true
+
+      - id: 1.2.28
+        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          test_items:
+            - flag: "--encryption-provider-config"
+        remediation: |
+          RKE2 is always configured to encrypt secrets.
+          Secrets encryption is managed with the rke2 secrets-encrypt command line tool.
+          If needed, you can find the generated encryption config at /var/lib/rancher/rke2/server/cred/encryption-config.json
+        scored: true
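+
+      # Illustrative status check using RKE2's built-in secrets-encryption tool:
+      #   rke2 secrets-encrypt status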
+
+      - id: 1.2.29
+        text: "Ensure that encryption providers are appropriately configured (Automated)"
+        audit: |
+          ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%')
+          if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -o 'providers\"\:\[.*\]' $ENCRYPTION_PROVIDER_CONFIG | grep -o "[A-Za-z]*" | head -2 | tail -1 | sed 's/^/provider=/'; fi
+        tests:
+          test_items:
+            - flag: "provider"
+              compare:
+                op: valid_elements
+                value: "aescbc,kms,secretbox"
+        remediation: |
+          RKE2 is always configured to use the aescbc encryption provider to encrypt secrets.
+          Secrets encryption is managed with the rke2 secrets-encrypt command line tool.
+          If needed, you can find the generated encryption config at /var/lib/rancher/rke2/server/cred/encryption-config.json
+        scored: true
+
+      - id: 1.2.30
+        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated)"
+        audit: "/bin/ps -fC $apiserverbin"
+        tests:
+          test_items:
+            - flag: "--tls-cipher-suites"
+              compare:
+                op: valid_elements
+                value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384"
+        remediation: |
+          By default, the RKE2 kube-apiserver complies with this test. Changes to these values may cause regression, therefore ensure that all apiserver clients support the new TLS configuration before applying it in production deployments.
+          If a custom TLS configuration is required, consider also creating a custom version of this rule that aligns with your requirements.
+          If this check fails, remove any custom configuration around `tls-cipher-suites` or update the /etc/rancher/rke2/config.yaml file to match the default by adding the following:
+          kube-apiserver-arg:
+            - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
+        scored: true
+
+  - id: 1.3
+    text: "Controller Manager"
+    checks:
+      - id: 1.3.1
+        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)"
+        audit: "/bin/ps -fC $controllermanagerbin"
+        tests:
+          test_items:
+            - flag: "--terminated-pod-gc-threshold"
+        remediation: |
+          By default, RKE2 sets a terminated-pod-gc-threshold of 1000.
+          If you need to change this value, edit the RKE2 config file /etc/rancher/rke2/config.yaml on the control plane node
+          and set the --terminated-pod-gc-threshold to an appropriate threshold,
+          kube-controller-manager-arg:
+            - "terminated-pod-gc-threshold=10"
+        scored: false
+
+      - id: 1.3.2
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "/bin/ps -fC $controllermanagerbin"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          By default, RKE2 sets the --profiling argument to false.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "profiling=true"
+        scored: true
+
+      - id: 1.3.3
+        text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)"
+        audit: "/bin/ps -fC $controllermanagerbin"
+        tests:
+          test_items:
+            - flag: "--use-service-account-credentials"
+              compare:
+                op: noteq
+                value: false
+              set: true
+        remediation: |
+          By default, RKE2 sets the --use-service-account-credentials argument to true.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "use-service-account-credentials=false"
+        scored: true
+
+      - id: 1.3.4
+        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -fC $controllermanagerbin"
+        tests:
+          test_items:
+            - flag: "--service-account-private-key-file"
+              set: true
+        remediation: |
+          By default, RKE2 automatically provides the service account private key file.
+          It is generated and located at /var/lib/rancher/rke2/server/tls/service.current.key.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "service-account-private-key-file="
+        scored: true
+
+      - id: 1.3.5
+        text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -fC $controllermanagerbin"
+        tests:
+          test_items:
+            - flag: "--root-ca-file"
+              set: true
+        remediation: |
+          By default, RKE2 automatically provides the root CA file.
+          It is generated and located at /var/lib/rancher/rke2/server/tls/server-ca.crt.
+          If for some reason you need to provide your own ca certificate, look at using the rke2 certificate command line tool.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "root-ca-file="
+        scored: true
+
+      - id: 1.3.6
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
+        audit: "/bin/ps -fC $controllermanagerbin"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--feature-gates"
+              compare:
+                op: nothave
+                value: "RotateKubeletServerCertificate=false"
+              set: true
+            - flag: "--feature-gates"
+              set: false
+        remediation: |
+          By default, RKE2 does not set the RotateKubeletServerCertificate feature gate.
+          If you have disabled this feature gate, you should remove that setting.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml, remove any lines like below.
+          kube-controller-manager-arg:
+            - "feature-gates=RotateKubeletServerCertificate=false"
+        scored: true
+
+      - id: 1.3.7
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "/bin/ps -fC $controllermanagerbin"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+              set: true
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          By default, RKE2 sets the --bind-address argument to 127.0.0.1
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "bind-address="
+        scored: true
+
+  - id: 1.4
+    text: "Scheduler"
+    checks:
+      - id: 1.4.1
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "/bin/ps -fC $schedulerbin"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          By default, RKE2 sets the --profiling argument to false.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml and remove any lines like below.
+          kube-scheduler-arg:
+            - "profiling=true"
+        scored: true
+
+      - id: 1.4.2
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "/bin/ps -fC $schedulerbin"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+              set: true
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          By default, RKE2 sets the --bind-address argument to 127.0.0.1
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml and remove any lines like below.
+          kube-scheduler-arg:
+            - "bind-address="
+        scored: true
diff --git a/package/cfg/rke2-cis-1.9/node.yaml b/package/cfg/rke2-cis-1.9/node.yaml
new file mode 100644
index 00000000..4c33a6bf
--- /dev/null
+++ b/package/cfg/rke2-cis-1.9/node.yaml
@@ -0,0 +1,445 @@
+---
+controls:
+version: "cis-1.9"
+id: 4
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 4.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
+        type: "skip"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Not Applicable.
+          The kubelet is managed by the RKE2 process. There is no kubelet service file, all configuration is passed in as arguments at runtime.
+        scored: true
+
+      - id: 4.1.2
+        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"'
+        type: "skip"
+        tests:
+          test_items:
+            - flag: "root:root"
+              set: true
+        remediation: |
+          Not Applicable.
+          The kubelet is managed by the RKE2 process. There is no kubelet service file, all configuration is passed in as arguments at runtime.
+        scored: true
+
+      - id: 4.1.3
+        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $proxykubeconfig
+        scored: true
+
+      - id: 4.1.4
+        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chown root:root $proxykubeconfig
+        scored: true
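+
+      # Illustrative sweep of the agent kubeconfigs, assuming the default paths
+      # from this package's config.yaml:
+      #   stat -c '%a %U:%G %n' /var/lib/rancher/rke2/agent/*.kubeconfig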
+
+      - id: 4.1.5
+        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.6
+        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+              set: true
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.7
+        text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletcafile; then stat -c permissions=%a $kubeletcafile; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $kubeletcafile
+        scored: true
+
+      - id: 4.1.8
+        text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletcafile; then stat -c %U:%G $kubeletcafile; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          Run the following command to modify the ownership of the --client-ca-file.
+          chown root:root $kubeletcafile
+        scored: true
+
+      - id: 4.1.9
+        text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
+        type: "skip"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+              set: true
+        remediation: |
+          Not Applicable.
+          The kubelet is managed by the RKE2 process. There is no kubelet config file, all configuration is passed in as arguments at runtime.
+        scored: true
+
+      - id: 4.1.10
+        text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+        type: "skip"
+        tests:
+          test_items:
+            - flag: root:root
+              set: true
+        remediation: |
+          Not Applicable.
+          The kubelet is managed by the RKE2 process. There is no kubelet config file, all configuration is passed in as arguments at runtime.
+        scored: true
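+
+      # Because RKE2 passes kubelet settings as command-line flags at runtime,
+      # an illustrative spot-check on a node is:
+      #   ps -ef | grep kubelet | tr ' ' '\n' | grep -E 'anonymous-auth|authorization-mode'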
+
+  - id: 4.2
+    text: "Kubelet"
+    checks:
+      - id: 4.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              path: '{.authentication.anonymous.enabled}'
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          By default, RKE2 sets the --anonymous-auth to false.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml, remove any lines similar to below.
+          kubelet-arg:
+            - "anonymous-auth=true"
+          Based on your system, restart the RKE2 service. For example,
+          systemctl restart rke2-server.service
+        scored: true
+
+      - id: 4.2.2
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --authorization-mode
+              path: '{.authorization.mode}'
+              compare:
+                op: nothave
+                value: AlwaysAllow
+        remediation: |
+          By default, RKE2 does not set the --authorization-mode to AlwaysAllow.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml, remove any lines similar to below.
+          kubelet-arg:
+            - "authorization-mode=AlwaysAllow"
+          Based on your system, restart the RKE2 service. For example,
+          systemctl restart rke2-server.service
+        scored: true
+
+      - id: 4.2.3
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --client-ca-file
+              path: '{.authentication.x509.clientCAFile}'
+        remediation: |
+          By default, RKE2 automatically provides the client ca certificate for the Kubelet.
+          It is generated and located at /var/lib/rancher/rke2/agent/client-ca.crt
+        scored: true
+
+      - id: 4.2.4
+        text: "Verify that the --read-only-port argument is set to 0 (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--read-only-port"
+              path: '{.readOnlyPort}'
+              compare:
+                op: eq
+                value: 0
+            - flag: "--read-only-port"
+              path: '{.readOnlyPort}'
+              set: false
+        remediation: |
+          By default, RKE2 sets the --read-only-port to 0. If you have set this to a different value, you
+          should set it back to 0. Edit the RKE2 config file /etc/rancher/rke2/config.yaml, remove any lines similar to below.
+          kubelet-arg:
+            - "read-only-port=XXXX"
+          Based on your system, restart the RKE2 service. For example,
+          systemctl restart rke2-server.service
+        scored: true
+
+      - id: 4.2.5
+        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              compare:
+                op: noteq
+                value: 0
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              set: false
+          bin_op: or
+        remediation: |
+          Edit the RKE2 config file /etc/rancher/rke2/config.yaml, set the following parameter to an appropriate value.
+          kubelet-arg:
+            - "streaming-connection-idle-timeout=5m"
+          Based on your system, restart the RKE2 service. For example,
+          systemctl restart rke2-server.service
+        scored: false
+
+      - id: 4.2.6
+        text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              compare:
+                op: eq
+                value: true
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              set: false
+          bin_op: or
+        remediation: |
+          Edit the RKE2 config file /etc/rancher/rke2/config.yaml, set the following parameter.
+          kubelet-arg:
+            - "make-iptables-util-chains=true"
+          Or, remove the --make-iptables-util-chains argument to let RKE2 use the default value.
+          Based on your system, restart the RKE2 service. For example,
+          systemctl restart rke2-server.service
+        scored: true
+
+      - id: 4.2.7
+        text: "Ensure that the --hostname-override argument is not set (Automated)"
+        # This is one of those properties that can only be set as a command line argument.
+        # To check if the property is set as expected, we need to parse the kubelet command
+        # instead of reading the Kubelet Configuration file.
+        audit: "/bin/ps -fC $kubeletbin"
+        type: skip
+        tests:
+          test_items:
+            - flag: --hostname-override
+              set: false
+        remediation: |
+          Not Applicable.
+          By default, RKE2 does set the --hostname-override argument. Per CIS guidelines, this is to comply
+          with cloud providers that require this flag to ensure that hostname matches node names.
+        scored: true
+
+      - id: 4.2.8
+        text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --event-qps
+              path: '{.eventRecordQPS}'
+              compare:
+                op: gte
+                value: 0
+            - flag: --event-qps
+              path: '{.eventRecordQPS}'
+              set: false
+          bin_op: or
+        remediation: |
+          Edit the RKE2 config file /etc/rancher/rke2/config.yaml, set the following parameter to an appropriate value.
+          kubelet-arg:
+            - "event-qps="
+          Based on your system, restart the RKE2 service. For example,
+          systemctl restart rke2-server.service
+        scored: false
+
+      - id: 4.2.9
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --tls-cert-file
+              path: '{.tlsCertFile}'
+            - flag: --tls-private-key-file
+              path: '{.tlsPrivateKeyFile}'
+        remediation: |
+          By default, RKE2 automatically provides the TLS certificate and private key for the Kubelet.
+          They are generated and located at /var/lib/rancher/rke2/agent/serving-kubelet.crt and /var/lib/rancher/rke2/agent/serving-kubelet.key
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml and remove any lines similar to below.
+          kubelet-arg:
+            - "tls-cert-file="
+            - "tls-private-key-file="
+        scored: true
+
+      - id: 4.2.10
+        text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              compare:
+                op: eq
+                value: true
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: false
+          bin_op: or
+        remediation: |
+          By default, RKE2 does not set the --rotate-certificates argument.
+          If this check fails, edit the RKE2 config file /etc/rancher/rke2/config.yaml, remove any rotate-certificates parameter.
+          Based on your system, restart the RKE2 service. For example,
+          systemctl restart rke2-server.service
+        scored: true
+
+ - id: 4.2.12
+ text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: --tls-cipher-suites
+ path: '{range .tlsCipherSuites[:]}{}{'',''}{end}'
+ compare:
+ op: valid_elements
+ value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+ remediation: |
+ Edit the RKE2 config file /etc/rancher/rke2/config.yaml and set the following parameter,
+ kubelet-arg:
+ - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
+ or to a subset of these values.
+ Based on your system, restart the RKE2 service. For example,
+ systemctl restart rke2-server.service
+ scored: false
+
+ - id: 4.2.13
+ text: "Ensure that a limit is set on pod PIDs (Manual)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: --pod-max-pids
+ path: '{.podPidsLimit}'
+ remediation: |
+ Edit the RKE2 config file /etc/rancher/rke2/config.yaml and set the following parameter to an appropriate value.
+ kubelet-arg:
+ - "pod-max-pids="
+ Based on your system, restart the RKE2 service. For example,
+ systemctl restart rke2-server.service
+ scored: false
+
+ - id: 4.3
+ text: "kube-proxy"
+ checks:
+ - id: 4.3.1
+ text: "Ensure that the kube-proxy metrics service is bound to localhost (Automated)"
+ audit: "/bin/ps -fC $proxybin"
+ audit_config: "/bin/sh -c 'if test -e $proxyconf; then cat $proxyconf; fi'"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "--metrics-bind-address"
+ path: '{.metricsBindAddress}'
+ compare:
+ op: has
+ value: "127.0.0.1"
+ - flag: "--metrics-bind-address"
+ path: '{.metricsBindAddress}'
+ set: false
+ remediation: |
+ Modify or remove any values which bind the metrics service to a non-localhost address.
+ The default value is 127.0.0.1:10249.
+ scored: true
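+
+ # A quick way to confirm 4.3.1 on a node (assuming the iproute2 `ss` tool is
+ # available) is to check which address the kube-proxy metrics port is bound to;
+ # with the default of 127.0.0.1:10249 the listener should only appear on localhost:
+ #
+ #   ss -tlnp | grep 10249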
diff --git a/package/cfg/rke2-cis-1.9/policies.yaml b/package/cfg/rke2-cis-1.9/policies.yaml
new file mode 100644
index 00000000..521718c9
--- /dev/null
+++ b/package/cfg/rke2-cis-1.9/policies.yaml
@@ -0,0 +1,422 @@
+---
+controls:
+version: "cis-1.8"
+id: 5
+text: "Kubernetes Policies"
+type: "policies"
+groups:
+ - id: 5.1
+ text: "RBAC and Service Accounts"
+ checks:
+ - id: 5.1.1
+ text: "Ensure that the cluster-admin role is only used where required (Automated)"
+ audit: |
+ kubectl get clusterrolebindings -o=custom-columns=ROLE:.roleRef.name,NAME:.metadata.name,SUBJECT:.subjects[*].name --no-headers | grep cluster-admin
+ use_multiple_values: true
+ tests:
+ test_items:
+ - flag: "cluster-admin"
+ compare:
+ op: valid_elements
+ value: cluster-admin, helm-kube-system-traefik, helm-kube-system-traefik-crd
+ remediation: |
+ Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+ if they need this role or if they could use a role with fewer privileges. RKE2 gives exceptions
+ to the helm-kube-system-traefik and helm-kube-system-traefik-crd clusterrolebindings,
+ as these are required for traefik installation into the kube-system namespace for regular operations.
+ Where possible, first bind users to a lower privileged role and then remove the
+ clusterrolebinding to the cluster-admin role:
+ ```
+ kubectl delete clusterrolebinding [name]
+ ```
+ scored: true
+
+ - id: 5.1.2
+ text: "Minimize access to secrets (Automated)"
+ audit: "echo \"canGetListWatchSecretsAsSystemAuthenticated: $(kubectl auth can-i get,list,watch secrets --all-namespaces --as=system:authenticated)\""
+ tests:
+ test_items:
+ - flag: "canGetListWatchSecretsAsSystemAuthenticated"
+ compare:
+ op: eq
+ value: no
+ remediation: |
+ Where possible, remove get, list and watch access to Secret objects in the cluster.
+ scored: true
+
+ - id: 5.1.3
+ text: "Minimize wildcard use in Roles and ClusterRoles (Automated)"
+ audit: |
+ # Check Roles
+ kubectl get roles --all-namespaces -o custom-columns=ROLE_NAMESPACE:.metadata.namespace,ROLE_NAME:.metadata.name --no-headers | while read -r role_namespace role_name
+ do
+ role_rules=$(kubectl get role -n "${role_namespace}" "${role_name}" -o=json | jq -c '.rules')
+ if echo "${role_rules}" | grep -q "\[\"\*\"\]"; then
+ printf "**role_name: %-50s role_namespace: %-25s role_rules: %s is_compliant: false\n" "${role_name}" "${role_namespace}" "${role_rules}"
+ else
+ printf "**role_name: %-50s role_namespace: %-25s is_compliant: true\n" "${role_name}" "${role_namespace}"
+ fi;
+ done
+
+ cr_whitelist="cluster-admin k3s-cloud-controller-manager local-path-provisioner-role"
+ cr_whitelist="$cr_whitelist system:kube-controller-manager system:kubelet-api-admin system:controller:namespace-controller"
+ cr_whitelist="$cr_whitelist system:controller:disruption-controller system:controller:generic-garbage-collector"
+ cr_whitelist="$cr_whitelist system:controller:horizontal-pod-autoscaler system:controller:resourcequota-controller"
+ # Check ClusterRoles
+ kubectl get clusterroles -o custom-columns=CLUSTERROLE_NAME:.metadata.name --no-headers | while read -r clusterrole_name
+ do
+ clusterrole_rules=$(kubectl get clusterrole "${clusterrole_name}" -o=json | jq -c '.rules')
+ if echo "${cr_whitelist}" | grep -q "${clusterrole_name}"; then
+ printf "**clusterrole_name: %-50s is_whitelist: true is_compliant: true\n" "${clusterrole_name}"
+ elif echo "${clusterrole_rules}" | grep -q "\[\"\*\"\]"; then
+ echo "**clusterrole_name: ${clusterrole_name} clusterrole_rules: ${clusterrole_rules} is_compliant: false"
+ else
+ printf "**clusterrole_name: %-50s is_whitelist: false is_compliant: true\n" "${clusterrole_name}"
+ fi;
+ done
+ use_multiple_values: true
+ tests:
+ test_items:
+ - flag: "is_compliant"
+ compare:
+ op: eq
+ value: true
+ remediation: |
+ Where possible, replace any use of wildcards in clusterroles and roles with specific objects or actions.
+ RKE2 gives exceptions for the following cluster roles, which are required for regular operations:
+ - k3s-cloud-controller-manager, local-path-provisioner-role, cluster-admin
+ - system:kube-controller-manager, system:kubelet-api-admin, system:controller:namespace-controller
+ - system:controller:disruption-controller, system:controller:generic-garbage-collector
+ - system:controller:horizontal-pod-autoscaler, system:controller:resourcequota-controller
+ scored: true
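+
+ # For 5.1.3, remediation means enumerating verbs and resources explicitly instead
+ # of "*". A sketch of a narrowly scoped Role (all names here are illustrative):
+ #
+ #   apiVersion: rbac.authorization.k8s.io/v1
+ #   kind: Role
+ #   metadata:
+ #     name: pod-reader
+ #     namespace: my-app
+ #   rules:
+ #     - apiGroups: [""]
+ #       resources: ["pods"]
+ #       verbs: ["get", "list", "watch"]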
+
+ - id: 5.1.4
+ text: "Minimize access to create pods (Automated)"
+ audit: |
+ echo "canCreatePodsAsSystemAuthenticated: $(kubectl auth can-i create pods --all-namespaces --as=system:authenticated)"
+ tests:
+ test_items:
+ - flag: "canCreatePodsAsSystemAuthenticated"
+ compare:
+ op: eq
+ value: no
+ remediation: |
+ Where possible, remove create access to pod objects in the cluster.
+ scored: true
+
+ - id: 5.1.5
+ text: "Ensure that default service accounts are not actively used. (Automated)"
+ audit: |
+ kubectl get serviceaccounts --all-namespaces --field-selector metadata.name=default \
+ -o custom-columns=N:.metadata.namespace,SA:.metadata.name,ASA:.automountServiceAccountToken --no-headers \
+ | while read -r namespace serviceaccount automountserviceaccounttoken
+ do
+ if [ "${automountserviceaccounttoken}" = "<none>" ]; then
+ automountserviceaccounttoken="notset"
+ fi
+ if [ "${namespace}" != "kube-system" ] && [ "${automountserviceaccounttoken}" != "false" ]; then
+ printf "**namespace: %-20s service_account: %-10s automountServiceAccountToken: %-6s is_compliant: false\n" "${namespace}" "${serviceaccount}" "${automountserviceaccounttoken}"
+ else
+ printf "**namespace: %-20s service_account: %-10s automountServiceAccountToken: %-6s is_compliant: true\n" "${namespace}" "${serviceaccount}" "${automountserviceaccounttoken}"
+ fi
+ done
+ use_multiple_values: true
+ tests:
+ test_items:
+ - flag: "is_compliant"
+ compare:
+ op: eq
+ value: true
+ remediation: |
+ Create explicit service accounts wherever a Kubernetes workload requires specific access
+ to the Kubernetes API server.
+ RKE2 makes an exception for the default service account in the kube-system namespace.
+ Modify the configuration of each default service account to include this value:
+ automountServiceAccountToken: false
+ Or using kubectl:
+ kubectl patch serviceaccount default --namespace <namespace> --patch '{"automountServiceAccountToken": false}'
+ scored: true
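+
+ # The same setting can also be applied declaratively; an illustrative
+ # ServiceAccount manifest for 5.1.5 (the namespace name is an example):
+ #
+ #   apiVersion: v1
+ #   kind: ServiceAccount
+ #   metadata:
+ #     name: default
+ #     namespace: my-app
+ #   automountServiceAccountToken: false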
+
+ - id: 5.1.6
+ text: "Ensure that Service Account Tokens are only mounted where necessary (Automated)"
+ audit: |
+ kubectl get pods --all-namespaces -o custom-columns=POD_NAMESPACE:.metadata.namespace,POD_NAME:.metadata.name,POD_SERVICE_ACCOUNT:.spec.serviceAccount,POD_IS_AUTOMOUNTSERVICEACCOUNTTOKEN:.spec.automountServiceAccountToken --no-headers | while read -r pod_namespace pod_name pod_service_account pod_is_automountserviceaccounttoken
+ do
+ # Retrieve automountServiceAccountToken's value for ServiceAccount and Pod, set to notset if null or <none>.
+ svacc_is_automountserviceaccounttoken=$(kubectl get serviceaccount -n "${pod_namespace}" "${pod_service_account}" -o json | jq -r '.automountServiceAccountToken' | sed -e 's/<none>/notset/g' -e 's/null/notset/g')
+ pod_is_automountserviceaccounttoken=$(echo "${pod_is_automountserviceaccounttoken}" | sed -e 's/<none>/notset/g' -e 's/null/notset/g')
+ if [ "${svacc_is_automountserviceaccounttoken}" = "false" ] && ( [ "${pod_is_automountserviceaccounttoken}" = "false" ] || [ "${pod_is_automountserviceaccounttoken}" = "notset" ] ); then
+ is_compliant="true"
+ elif [ "${svacc_is_automountserviceaccounttoken}" = "true" ] && [ "${pod_is_automountserviceaccounttoken}" = "false" ]; then
+ is_compliant="true"
+ else
+ is_compliant="false"
+ fi
+ echo "**namespace: ${pod_namespace} pod_name: ${pod_name} service_account: ${pod_service_account} pod_is_automountserviceaccounttoken: ${pod_is_automountserviceaccounttoken} svacc_is_automountServiceAccountToken: ${svacc_is_automountserviceaccounttoken} is_compliant: ${is_compliant}"
+ done
+ use_multiple_values: true
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "is_compliant"
+ compare:
+ op: eq
+ value: true
+ - flag: "service_account"
+ compare:
+ op: valid_elements
+ value: coredns, helm-traefik, helm-traefik-crd, traefik, metrics-server, svclb, local-path-provisioner-service-account
+ remediation: |
+ Modify the definition of ServiceAccounts and Pods which do not need to mount service
+ account tokens to disable it, with `automountServiceAccountToken: false`.
+ If both the ServiceAccount and the Pod's .spec specify a value for automountServiceAccountToken, the Pod spec takes precedence.
+ Condition: a Pod is marked is_compliant: true when
+ - the ServiceAccount sets automountServiceAccountToken: false and the Pod sets automountServiceAccountToken: false or leaves it notset, or
+ - the ServiceAccount sets automountServiceAccountToken: true or leaves it notset and the Pod sets automountServiceAccountToken: false.
+ RKE2 gives exceptions to the following service accounts, which are required for regular operations:
+ - coredns, helm-traefik, helm-traefik-crd, traefik, metrics-server, svclb, local-path-provisioner-service-account
+ scored: true
+
+ - id: 5.1.7
+ text: "Avoid use of system:masters group (Manual)"
+ type: "manual"
+ remediation: |
+ Remove the system:masters group from all users in the cluster.
+ scored: false
+
+ - id: 5.1.8
+ text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
+ type: "manual"
+ remediation: |
+ Where possible, remove the impersonate, bind and escalate rights from subjects.
+ scored: false
+
+ - id: 5.1.9
+ text: "Minimize access to create persistent volumes (Manual)"
+ type: "manual"
+ remediation: |
+ Where possible, remove create access to PersistentVolume objects in the cluster.
+ scored: false
+
+ - id: 5.1.10
+ text: "Minimize access to the proxy sub-resource of nodes (Manual)"
+ type: "manual"
+ remediation: |
+ Where possible, remove access to the proxy sub-resource of node objects.
+ scored: false
+
+ - id: 5.1.11
+ text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)"
+ type: "manual"
+ remediation: |
+ Where possible, remove access to the approval sub-resource of certificatesigningrequest objects.
+ scored: false
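+
+ # Checks 5.1.9 to 5.1.11 can be reviewed with `kubectl auth can-i`, which accepts
+ # sub-resources in resource/subresource form. Illustrative audit commands (the
+ # subject passed to --as is only an example):
+ #
+ #   kubectl auth can-i create persistentvolumes --as=system:authenticated
+ #   kubectl auth can-i get nodes/proxy --as=system:authenticated
+ #   kubectl auth can-i update certificatesigningrequests/approval --as=system:authenticated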
+
+ - id: 5.1.12
+ text: "Minimize access to webhook configuration objects (Manual)"
+ type: "manual"
+ remediation: |
+ Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects.
+ scored: false
+
+ - id: 5.1.13
+ text: "Minimize access to the service account token creation (Manual)"
+ type: "manual"
+ remediation: |
+ Where possible, remove access to the token sub-resource of serviceaccount objects.
+ scored: false
+
+ - id: 5.2
+ text: "Pod Security Standards"
+ checks:
+ - id: 5.2.1
+ text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)"
+ type: "manual"
+ remediation: |
+ Ensure that either Pod Security Admission or an external policy control system is in place
+ for every namespace which contains user workloads.
+ scored: false
+
+ - id: 5.2.2
+ text: "Minimize the admission of privileged containers (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of privileged containers.
+ scored: false
+
+ - id: 5.2.3
+ text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of `hostPID` containers.
+ scored: false
+
+ - id: 5.2.4
+ text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of `hostIPC` containers.
+ scored: false
+
+ - id: 5.2.5
+ text: "Minimize the admission of containers wishing to share the host network namespace (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of `hostNetwork` containers.
+ scored: false
+
+ - id: 5.2.6
+ text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
+ scored: false
+
+ - id: 5.2.7
+ text: "Minimize the admission of root containers (Manual)"
+ type: "manual"
+ remediation: |
+ Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
+ or `MustRunAs` with the range of UIDs not including 0, is set.
+ scored: false
+
+ - id: 5.2.8
+ text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers with the `NET_RAW` capability.
+ scored: false
+
+ - id: 5.2.9
+ text: "Minimize the admission of containers with added capabilities (Automated)"
+ type: "manual"
+ remediation: |
+ Ensure that `allowedCapabilities` is not present in policies for the cluster unless
+ it is set to an empty array.
+ scored: true
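+
+ # One built-in way to address the 5.2.x checks is Pod Security Admission: label
+ # each workload namespace so the restricted profile is enforced. An illustrative
+ # manifest (the namespace name is an example):
+ #
+ #   apiVersion: v1
+ #   kind: Namespace
+ #   metadata:
+ #     name: my-app
+ #     labels:
+ #       pod-security.kubernetes.io/enforce: restricted
+ #       pod-security.kubernetes.io/warn: restricted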
+
+ - id: 5.2.10
+ text: "Minimize the admission of containers with capabilities assigned (Manual)"
+ type: "manual"
+ remediation: |
+ Review the use of capabilities in applications running on your cluster. Where a namespace
+ contains applications which do not require any Linux capabilities to operate, consider adding
+ a policy which forbids the admission of containers which do not drop all capabilities.
+ scored: false
+
+ - id: 5.2.11
+ text: "Minimize the admission of Windows HostProcess containers (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+ scored: false
+
+ - id: 5.2.12
+ text: "Minimize the admission of HostPath volumes (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers with `hostPath` volumes.
+ scored: false
+
+ - id: 5.2.13
+ text: "Minimize the admission of containers which use HostPorts (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers which use `hostPort` sections.
+ scored: false
+
+ - id: 5.3
+ text: "Network Policies and CNI"
+ checks:
+ - id: 5.3.1
+ text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
+ type: "manual"
+ remediation: |
+ If the CNI plugin in use does not support network policies, consideration should be given to
+ making use of a different plugin, or finding an alternate mechanism for restricting traffic
+ in the Kubernetes cluster.
+ scored: false
+
+ - id: 5.3.2
+ text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
+ type: "manual"
+ remediation: |
+ Follow the documentation and create NetworkPolicy objects as you need them.
+ scored: false
+
+ - id: 5.4
+ text: "Secrets Management"
+ checks:
+ - id: 5.4.1
+ text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
+ type: "manual"
+ remediation: |
+ If possible, rewrite application code to read Secrets from mounted secret files, rather than
+ from environment variables.
+ scored: false
+
+ - id: 5.4.2
+ text: "Consider external secret storage (Manual)"
+ type: "manual"
+ remediation: |
+ Refer to the Secrets management options offered by your cloud provider or a third-party
+ secrets management solution.
+ scored: false
+
+ - id: 5.5
+ text: "Extensible Admission Control"
+ checks:
+ - id: 5.5.1
+ text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
+ type: "manual"
+ remediation: |
+ Follow the Kubernetes documentation and set up image provenance.
+ scored: false
+
+ - id: 5.7
+ text: "General Policies"
+ checks:
+ - id: 5.7.1
+ text: "Create administrative boundaries between resources using namespaces (Manual)"
+ type: "manual"
+ remediation: |
+ Follow the documentation and create namespaces for objects in your deployment as you need
+ them.
+ scored: false
+
+ - id: 5.7.2
+ text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)"
+ type: "manual"
+ remediation: |
+ Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
+ An example is shown below:
+ securityContext:
+ seccompProfile:
+ type: RuntimeDefault
+ scored: false
+
+ - id: 5.7.3
+ text: "Apply SecurityContext to your Pods and Containers (Manual)"
+ type: "manual"
+ remediation: |
+ Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a
+ suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker
+ Containers.
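+ As an illustrative starting point (an example, not a prescriptive list), a
+ container-level securityContext could include:
+ securityContext:
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL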
+ scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false