From 2b36590a3b01db0e7de6bf99cc4532fdbac1c5a7 Mon Sep 17 00:00:00 2001 From: Mikolaj Stefaniak Date: Fri, 12 Apr 2024 11:00:30 +0200 Subject: [PATCH] feat: unified policies metadata and new console output --- gke-policies-v2/README.md | 70 ++++++------ gke-policies-v2/policy/autopilot_cluster.rego | 4 +- .../policy/cluster_binary_authorization.rego | 2 +- .../cluster_enable_security_posture.rego | 2 +- .../cluster_enable_workload_scanning.rego | 2 +- .../policy/cluster_gce_csi_driver.rego | 4 +- .../policy/cluster_maintenance_window.rego | 6 +- .../policy/cluster_receive_updates.rego | 6 +- .../policy/cluster_release_channels.rego | 4 +- .../policy/control_plane_access.rego | 8 +- ...rol_plane_disable_cert_authentication.rego | 6 +- ...ol_plane_disable_legacy_authorization.rego | 4 +- ...plane_disable_password_authentication.rego | 6 +- .../policy/control_plane_endpoint.rego | 4 +- .../policy/control_plane_redundancy.rego | 6 +- gke-policies-v2/policy/ilb_subsetting.rego | 6 +- .../policy/intranode_visibility.rego | 4 +- .../policy/monitoring_and_logging.rego | 10 +- .../policy/nap_forbid_default_sa.rego | 4 +- .../policy/nap_forbid_single_zone.rego | 4 +- .../policy/nap_integrity_monitoring.rego | 5 +- gke-policies-v2/policy/nap_use_cos.rego | 5 +- gke-policies-v2/policy/network_policies.rego | 10 +- .../policy/node_local_dns_cache.rego | 4 +- .../policy/node_pool_autorepair.rego | 4 +- .../policy/node_pool_autoscaling.rego | 4 +- .../policy/node_pool_autoupgrade.rego | 4 +- .../policy/node_pool_disk_encryption.rego | 2 +- .../policy/node_pool_forbid_default_sa.rego | 4 +- .../node_pool_integrity_monitoring.rego | 4 +- .../policy/node_pool_multi_zone.rego | 4 +- .../policy/node_pool_secure_boot.rego | 6 +- gke-policies-v2/policy/node_pool_use_cos.rego | 4 +- .../policy/node_pool_version_skew.rego | 14 +-- .../policy/node_rbac_security_group.rego | 6 +- gke-policies-v2/policy/private_cluster.rego | 4 +- gke-policies-v2/policy/secret_encryption.rego | 4 +- gke-policies-v2/policy/shielded_nodes.rego | 5 +- .../policy/vpc_native_cluster.rego | 8 +- gke-policies-v2/policy/workload_identity.rego | 4 +- go.mod | 4 +- go.sum | 10 +- internal/app/app.go | 31 ++++- internal/app/app_check_clusters.go | 51 ++++++--- internal/app/app_scc.go | 21 +++- internal/gke/gke.go | 8 ++ internal/gke/gke_test.go | 5 + internal/outputs/console_collector.go | 106 +++++++++++++++--- internal/outputs/console_collector_test.go | 6 +- internal/outputs/icons.go | 1 + internal/outputs/output.go | 35 ++---- internal/outputs/validation_report.go | 57 +++++++++- internal/outputs/validation_report_test.go | 11 ++ 53 files changed, 402 insertions(+), 211 deletions(-) diff --git a/gke-policies-v2/README.md b/gke-policies-v2/README.md index a26fa237..8c708190 100644 --- a/gke-policies-v2/README.md +++ b/gke-policies-v2/README.md @@ -10,20 +10,21 @@ of our policy files. 
|Name|Group|Description|CIS Benchmark| |-|-|-|-| -|[Control Plane redundancy](../gke-policies-v2/policy/control_plane_redundancy.rego)|Availability|GKE cluster should be regional for maximum availability of control plane during upgrades and zonal outages|| -|[Multi-zone node pools](../gke-policies-v2/policy/node_pool_multi_zone.rego)|Availability|GKE node pools should be regional (multiple zones) for maximum nodes availability during zonal outages|| -|[Use Node Auto-Repair](../gke-policies-v2/policy/node_pool_autorepair.rego)|Availability|GKE node pools should have Node Auto-Repair enabled to configure Kubernetes Engine|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.5.2| -|[Cloud Monitoring and Logging](../gke-policies-v2/policy/monitoring_and_logging.rego)|Maintenance|GKE cluster should use Cloud Logging and Monitoring|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.7.1| +|[Enable node auto-repair](../gke-policies-v2/policy/node_pool_autorepair.rego)|Availability|GKE node pools should have Node Auto-Repair enabled to configure Kubernetes Engine|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.5.2| +|[Ensure redundancy of the node pools](../gke-policies-v2/policy/node_pool_multi_zone.rego)|Availability|GKE node pools should be regional (multiple zones) for maximum nodes availability during zonal outages|| +|[Ensure redundancy of the Control Plane](../gke-policies-v2/policy/control_plane_redundancy.rego)|Availability|GKE cluster should be regional for maximum availability of control plane during upgrades and zonal outages|| +|[Enable Cloud Monitoring and Logging](../gke-policies-v2/policy/monitoring_and_logging.rego)|Maintenance|GKE cluster should use Cloud Logging and Monitoring|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.7.1| +|[Enable Compute Engine persistent disk CSI driver](../gke-policies-v2/policy/cluster_gce_csi_driver.rego)|Management|Automatic deployment and management of the Compute Engine persistent disk CSI driver. 
The driver provides support for features like customer managed encryption keys or volume snapshots.|| +|[Enable GKE upgrade notifications](../gke-policies-v2/policy/cluster_receive_updates.rego)|Management|GKE cluster should proactively receive updates about GKE upgrades and GKE versions|| |[Enable binary authorization in the cluster](../gke-policies-v2/policy/cluster_binary_authorization.rego)|Management|GKE cluster should enable for deploy-time security control that ensures only trusted container images are deployed to gain tighter control over your container environment.|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.10.5| -|[GKE Autopilot mode](../gke-policies-v2/policy/autopilot_cluster.rego)|Management|GKE Autopilot mode is the recommended way to operate a GKE cluster|| -|[GKE VPC-native cluster](../gke-policies-v2/policy/vpc_native_cluster.rego)|Management|GKE cluster nodepool should be VPC-native as per our best-practices|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.6.2| -|[Receive updates about new GKE versions](../gke-policies-v2/policy/cluster_receive_updates.rego)|Management|GKE cluster should be proactively receive updates about GKE upgrades and GKE versions|| -|[Schedule maintenance windows and exclusions](../gke-policies-v2/policy/cluster_maintenance_window.rego)|Management|GKE cluster should schedule maintenance windows and exclusions to upgrade predictability and to align updates with off-peak business hours.|| -|[Use Compute Engine persistent disk CSI driver](../gke-policies-v2/policy/cluster_gce_csi_driver.rego)|Management|Automatic deployment and management of the Compute Engine persistent disk CSI driver. 
The driver provides support for features like customer managed encryption keys or volume snapshots.|| -|[Version skew between node pools and control plane](../gke-policies-v2/policy/node_pool_version_skew.rego)|Management|Difference between cluster control plane version and node pools version should be no more than 2 minor versions.|| -|[GKE L4 ILB Subsetting](../gke-policies-v2/policy/ilb_subsetting.rego)|Scalability|GKE cluster should use GKE L4 ILB Subsetting if nodes > 250|| +|[Enable maintenance windows](../gke-policies-v2/policy/cluster_maintenance_window.rego)|Management|GKE cluster should use maintenance windows and exclusions to improve upgrade predictability and to align updates with off-peak business hours.|| +|[Ensure acceptable version skew in a cluster](../gke-policies-v2/policy/node_pool_version_skew.rego)|Management|Difference between cluster control plane version and node pools version should be no more than 2 minor versions.|| +|[Use GKE Autopilot mode](../gke-policies-v2/policy/autopilot_cluster.rego)|Management|GKE Autopilot mode is the recommended way to operate a GKE cluster|| +|[Use VPC-native cluster](../gke-policies-v2/policy/vpc_native_cluster.rego)|Management|GKE cluster nodepool should be VPC-native as per our best-practices|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.6.2| +|[Enable GKE L4 ILB Subsetting](../gke-policies-v2/policy/ilb_subsetting.rego)|Scalability|GKE cluster should use GKE L4 ILB Subsetting if nodes > 250|| +|[Enable GKE node local DNS cache](../gke-policies-v2/policy/node_local_dns_cache.rego)|Scalability|GKE cluster should use node local DNS cache|| +|[Enable node pool auto-scaling](../gke-policies-v2/policy/node_pool_autoscaling.rego)|Scalability|GKE node pools should have autoscaling configured to properly resize nodes according to traffic|| |[GKE Nodes Limit](../gke-policies-v2/scalability/limits_nodes.rego)|Scalability|GKE Nodes Limit|| -|[GKE node local DNS cache](../gke-policies-v2/policy/node_local_dns_cache.rego)|Scalability|GKE cluster should use node local DNS cache|| |[Number of HPAs in a cluster](../gke-policies-v2/scalability/limits_hpas.rego)|Scalability|The optimal number of Horizontal Pod Autoscalers in a cluster|| |[Number of PODs in a cluster](../gke-policies-v2/scalability/limits_pods.rego)|Scalability|The total number of PODs running in a cluster|| |[Number of PODs per node](../gke-policies-v2/scalability/limits_pods_per_node.rego)|Scalability|The total number of PODs running on a single node|| @@ -33,29 +34,28 @@ of our policy files. 
|[Number of secrets with KMS encryption](../gke-policies-v2/scalability/limit_secrets_encryption.rego)|Scalability|The total number of secrets when KMS secret encryption is enabled|| |[Number of services in a cluster](../gke-policies-v2/scalability/limit_services.rego)|Scalability|The total number of services running in a cluster|| |[Number of services per namespace](../gke-policies-v2/scalability/limit_services_per_ns.rego)|Scalability|The total number of services running in single namespace|| -|[Use node pool autoscaling](../gke-policies-v2/policy/node_pool_autoscaling.rego)|Scalability|GKE node pools should have autoscaling configured to proper resize nodes according to traffic|| -|[Control Plane endpoint access](../gke-policies-v2/policy/control_plane_access.rego)|Security|Control Plane endpoint access should be limited to authorized networks only|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.6.3| -|[Control Plane endpoint visibility](../gke-policies-v2/policy/control_plane_endpoint.rego)|Security|Control Plane endpoint should be locked from external access|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.6.4| -|[Control plane user basic authentication](../gke-policies-v2/policy/control_plane_disable_password_authentication.rego)|Security|Disable Basic Authentication (basic auth) for API server authentication as it uses static passwords which need to be rotated.|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.8.1| -|[Control plane user certificate authentication](../gke-policies-v2/policy/control_plane_disable_cert_authentication.rego)|Security|Disable Client Certificates, which require certificate rotation, for authentication. 
Instead, use another authentication method like OpenID Connect.|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.8.2| +|[Change default Service Accounts in Node Auto-Provisioning](../gke-policies-v2/policy/nap_forbid_default_sa.rego)|Security|Node Auto-Provisioning configuration should not allow default Service Accounts|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.2.1| +|[Change default Service Accounts in node pools](../gke-policies-v2/policy/node_pool_forbid_default_sa.rego)|Security|GKE node pools should have a dedicated sa with a restricted set of permissions|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.2.1| +|[Configure Container-Optimized OS for Node Auto-Provisioning node pools](../gke-policies-v2/policy/nap_use_cos.rego)|Security|Nodes in Node Auto-Provisioning should use Container-Optimized OS|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.5.1| +|[Configure Container-Optimized OS for node pools](../gke-policies-v2/policy/node_pool_use_cos.rego)|Security|GKE node pools should use Container-Optimized OS which is maintained by Google and optimized for running Docker containers with security and efficiency.|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.5.1| +|[Disable control plane certificate authentication](../gke-policies-v2/policy/control_plane_disable_cert_authentication.rego)|Security|Disable Client Certificates, which require certificate rotation, for authentication. Instead, use another authentication method like OpenID Connect.|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.8.2| +|[Disable legacy ABAC authorization](../gke-policies-v2/policy/control_plane_disable_legacy_authorization.rego)|Security|GKE cluster should use RBAC instead of legacy ABAC authorization|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.8.4| +|[Disable control plane basic authentication](../gke-policies-v2/policy/control_plane_disable_password_authentication.rego)|Security|Disable Basic Authentication (basic auth) for API server authentication as it uses static passwords which need to be rotated.|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.8.1| |[Enable Customer-Managed Encryption Keys for persistent disks](../gke-policies-v2/policy/node_pool_disk_encryption.rego)|Security|Use Customer-Managed Encryption Keys (CMEK) to encrypt node boot and dynamically-provisioned attached Google Compute Engine Persistent Disks (PDs) using keys managed within Cloud Key Management Service (Cloud KMS).|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.9.1| +|[Enable GKE intranode visibility](../gke-policies-v2/policy/intranode_visibility.rego)|Security|GKE cluster should have intranode visibility enabled|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.6.1| +|[Enable Google Groups for RBAC](../gke-policies-v2/policy/node_rbac_security_group.rego)|Security|GKE cluster should have RBAC security Google group enabled|[CIS 
GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.8.3| +|[Enable Kubernetes Network Policies](../gke-policies-v2/policy/network_policies.rego)|Security|GKE cluster should have Network Policies or Dataplane V2 enabled|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.6.7| +|[Enable Kubernetes secrets encryption](../gke-policies-v2/policy/secret_encryption.rego)|Security|GKE cluster should use encryption for kubernetes application secrets|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.3.1| +|[Enable Secure boot for node pools](../gke-policies-v2/policy/node_pool_secure_boot.rego)|Security|Secure Boot helps ensure that the system only runs authentic software by verifying the digital signature of all boot components, and halting the boot process if signature verification fails|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.5.7| |[Enable Security Posture dashboard](../gke-policies-v2/policy/cluster_enable_security_posture.rego)|Security|The Security Posture feature enables scanning of clusters and running workloads against standards and industry best practices. The dashboard displays the scan results and provides actionable recommendations for concerns. || +|[Enable Shielded Nodes](../gke-policies-v2/policy/shielded_nodes.rego)|Security|GKE cluster should use shielded nodes|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.5.5| |[Enable Workload vulnerability scanning](../gke-policies-v2/policy/cluster_enable_workload_scanning.rego)|Security|The Workload vulnerability scanning is a set of capabilities in the security posture dashboard that automatically scans for known vulnerabilities in your container images and in specific language packages during the runtime phase of software delivery lifecycle.|| -|[Enrollment in Release Channels](../gke-policies-v2/policy/cluster_release_channels.rego)|Security|GKE cluster should be enrolled in release channels|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.5.4| -|[Ensure that node pool locations within Node Auto-Provisioning are covering more than one zone (or not enforced at all)](../gke-policies-v2/policy/nap_forbid_single_zone.rego)|Security|Node Auto-Provisioning configuration should cover more than one zone|| -|[Ensure that nodes in Node Auto-Provisioning node pools will use Container-Optimized OS](../gke-policies-v2/policy/nap_use_cos.rego)|Security|Nodes in Node Auto-Provisioning should use Container-Optimized OS|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.5.1| -|[Ensure that nodes in Node Auto-Provisioning node pools will use integrity monitoring](../gke-policies-v2/policy/nap_integrity_monitoring.rego)|Security|Nodes in Node Auto-Provisioning should use integrity monitoring|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.5.6| -|[Forbid default Service Accounts in Node Auto-Provisioning](../gke-policies-v2/policy/nap_forbid_default_sa.rego)|Security|Node Auto-Provisioning configuration should not allow default Service Accounts|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 
1.4: 5.2.1| -|[Forbid default compute SA on node_pool](../gke-policies-v2/policy/node_pool_forbid_default_sa.rego)|Security|GKE node pools should have a dedicated sa with a restricted set of permissions|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.2.1| -|[GKE Network Policies engine](../gke-policies-v2/policy/network_policies.rego)|Security|GKE cluster should have Network Policies or Dataplane V2 enabled|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.6.7| -|[GKE RBAC authorization](../gke-policies-v2/policy/control_plane_disable_legacy_authorization.rego)|Security|GKE cluster should use RBAC instead of legacy ABAC authorization|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.8.4| -|[GKE Shielded Nodes](../gke-policies-v2/policy/shielded_nodes.rego)|Security|GKE cluster should use shielded nodes|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.5.5| -|[GKE Workload Identity](../gke-policies-v2/policy/workload_identity.rego)|Security|GKE cluster should have Workload Identity enabled|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.2.2| -|[GKE intranode visibility](../gke-policies-v2/policy/intranode_visibility.rego)|Security|GKE cluster should have intranode visibility enabled|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.6.1| -|[GKE private cluster](../gke-policies-v2/policy/private_cluster.rego)|Security|GKE cluster should be private to ensure network isolation|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.6.5| -|[Integrity monitoring on the nodes](../gke-policies-v2/policy/node_pool_integrity_monitoring.rego)|Security|GKE node pools should have integrity monitoring feature enabled to detect changes in a VM boot measurements|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.5.6| -|[Kubernetes secrets encryption](../gke-policies-v2/policy/secret_encryption.rego)|Security|GKE cluster should use encryption for kubernetes application secrets|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.3.1| -|[Secure boot on the nodes](../gke-policies-v2/policy/node_pool_secure_boot.rego)|Security|Secure Boot helps ensure that the system only runs authentic software by verifying the digital signature of all boot components, and halting the boot process if signature verification fails|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.5.7| -|[Use Container-Optimized OS](../gke-policies-v2/policy/node_pool_use_cos.rego)|Security|GKE node pools should use Container-Optimized OS which is maintained by Google and optimized for running Docker containers with security and efficiency.|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.5.1| -|[Use Node Auto-Upgrade](../gke-policies-v2/policy/node_pool_autoupgrade.rego)|Security|GKE node pools should have Node Auto-Upgrade enabled to configure Kubernetes Engine|[CIS 
GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.5.3| -|[Use RBAC Google group](../gke-policies-v2/policy/node_rbac_security_group.rego)|Security|GKE cluster should have RBAC security Google group enabled|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.8.3| +|[Enable control plane private endpoint](../gke-policies-v2/policy/control_plane_endpoint.rego)|Security|Control Plane endpoint should be locked from external access|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.6.4| +|[Enable integrity monitoring for Node Auto-Provisioning node pools](../gke-policies-v2/policy/nap_integrity_monitoring.rego)|Security|Nodes in Node Auto-Provisioning should use integrity monitoring|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.5.6| +|[Enable integrity monitoring for node pools](../gke-policies-v2/policy/node_pool_integrity_monitoring.rego)|Security|GKE node pools should have integrity monitoring feature enabled to detect changes in a VM boot measurements|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.5.6| +|[Enable node auto-upgrade](../gke-policies-v2/policy/node_pool_autoupgrade.rego)|Security|GKE node pools should have Node Auto-Upgrade enabled to configure Kubernetes Engine|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.5.3| +|[Enroll cluster in Release Channels](../gke-policies-v2/policy/cluster_release_channels.rego)|Security|GKE cluster should be enrolled in release channels|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.5.4| +|[Ensure redundancy of Node Auto-provisioning node pools](../gke-policies-v2/policy/nap_forbid_single_zone.rego)|Security|Node Auto-Provisioning configuration should cover more than one zone|| +|[Limit Control Plane endpoint access](../gke-policies-v2/policy/control_plane_access.rego)|Security|Control Plane endpoint access should be limited to authorized networks only|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.6.3| +|[Use GKE Workload Identity](../gke-policies-v2/policy/workload_identity.rego)|Security|GKE cluster should have Workload Identity enabled|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.2.2| +|[Use private nodes](../gke-policies-v2/policy/private_cluster.rego)|Security|GKE cluster should be private to ensure network isolation|[CIS GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/cis-benchmarks#accessing-gke-benchmark) 1.4: 5.6.5| diff --git a/gke-policies-v2/policy/autopilot_cluster.rego b/gke-policies-v2/policy/autopilot_cluster.rego index a4e759f7..197d6bbe 100644 --- a/gke-policies-v2/policy/autopilot_cluster.rego +++ b/gke-policies-v2/policy/autopilot_cluster.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA -# title: GKE Autopilot mode +# title: Use GKE Autopilot mode # description: GKE Autopilot mode is the recommended way to operate a GKE cluster # custom: # group: Management @@ -35,5 +35,5 @@ valid { violation[msg] { not input.data.gke.autopilot.enabled - msg := "GKE Autopilot mode is the recommended way to operate a GKE cluster" + msg := "Cluster is not using Autopilot mode" } diff --git a/gke-policies-v2/policy/cluster_binary_authorization.rego b/gke-policies-v2/policy/cluster_binary_authorization.rego index 90a149c9..aa2595ce 100644 --- a/gke-policies-v2/policy/cluster_binary_authorization.rego +++ b/gke-policies-v2/policy/cluster_binary_authorization.rego @@ -39,5 +39,5 @@ valid { violation[msg] { not input.data.gke.binary_authorization.enabled - msg := "GKE cluster has not configured binary authorization policies" + msg := "Cluster is not configured with binary authorization" } diff --git a/gke-policies-v2/policy/cluster_enable_security_posture.rego b/gke-policies-v2/policy/cluster_enable_security_posture.rego index e6b1d60a..5656feed 100644 --- a/gke-policies-v2/policy/cluster_enable_security_posture.rego +++ b/gke-policies-v2/policy/cluster_enable_security_posture.rego @@ -38,5 +38,5 @@ valid { violation[msg] { not input.data.gke.security_posture_config.mode == 2 - msg := "GKE cluster has not enabled Security Posture" + msg := "Cluster is not configured with Security Posture" } diff --git a/gke-policies-v2/policy/cluster_enable_workload_scanning.rego b/gke-policies-v2/policy/cluster_enable_workload_scanning.rego index 057cda7c..b9ace876 100644 --- a/gke-policies-v2/policy/cluster_enable_workload_scanning.rego +++ b/gke-policies-v2/policy/cluster_enable_workload_scanning.rego @@ -40,5 +40,5 @@ valid { violation[msg] { not input.data.gke.security_posture_config.vulnerability_mode == 2 - msg := "GKE cluster has not configured workload vulnerability scanning" + msg := "Cluster is not configured with workload vulnerability scanning" } diff --git a/gke-policies-v2/policy/cluster_gce_csi_driver.rego b/gke-policies-v2/policy/cluster_gce_csi_driver.rego index 0e70d2c8..6b505ea3 100644 --- a/gke-policies-v2/policy/cluster_gce_csi_driver.rego +++ b/gke-policies-v2/policy/cluster_gce_csi_driver.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Use Compute Engine persistent disk CSI driver +# title: Enable Compute Engine persistent disk CSI driver # description: Automatic deployment and management of the Compute Engine persistent disk CSI driver. The driver provides support for features like customer managed encryption keys or volume snapshots. # custom: # group: Management @@ -36,5 +36,5 @@ valid { violation[msg] { not input.data.gke.addons_config.gce_persistent_disk_csi_driver_config.enabled - msg := "GKE cluster has not configured GCE persistent disk CSI driver" + msg := "Cluster is not configured with GCE persistent disk CSI driver" } diff --git a/gke-policies-v2/policy/cluster_maintenance_window.rego b/gke-policies-v2/policy/cluster_maintenance_window.rego index 5f970e1d..a40e5285 100644 --- a/gke-policies-v2/policy/cluster_maintenance_window.rego +++ b/gke-policies-v2/policy/cluster_maintenance_window.rego @@ -13,8 +13,8 @@ # limitations under the License. # METADATA -# title: Schedule maintenance windows and exclusions -# description: GKE cluster should schedule maintenance windows and exclusions to upgrade predictability and to align updates with off-peak business hours. 
+# title: Enable maintenance windows +# description: GKE cluster should use maintenance windows and exclusions to improve upgrade predictability and to align updates with off-peak business hours. # custom: # group: Management # severity: Medium @@ -38,5 +38,5 @@ valid { violation[msg] { not input.data.gke.maintenance_policy.window.Policy - msg := "GKE cluster has not configured maintenance window" + msg := "Cluster is not configured with a maintenance window" } diff --git a/gke-policies-v2/policy/cluster_receive_updates.rego b/gke-policies-v2/policy/cluster_receive_updates.rego index 86327044..3ac1e9d0 100644 --- a/gke-policies-v2/policy/cluster_receive_updates.rego +++ b/gke-policies-v2/policy/cluster_receive_updates.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Receive updates about new GKE versions +# title: Enable GKE upgrade notifications # description: GKE cluster should be proactively receive updates about GKE upgrades and GKE versions # custom: # group: Management @@ -38,10 +38,10 @@ valid { violation[msg] { not input.data.gke.notification_config.pubsub.enabled - msg := "Pub/Sub notifications are not enabled" + msg := "Cluster is not configured with upgrade notifications" } violation[msg] { not input.data.gke.notification_config.pubsub.topic - msg := "Pub/Sub topic is not configured" + msg := "Cluster is not configured with upgrade notifications topic" } diff --git a/gke-policies-v2/policy/cluster_release_channels.rego b/gke-policies-v2/policy/cluster_release_channels.rego index 7457e463..ea366599 100644 --- a/gke-policies-v2/policy/cluster_release_channels.rego +++ b/gke-policies-v2/policy/cluster_release_channels.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Enrollment in Release Channels +# title: Enroll cluster in Release Channels # description: GKE cluster should be enrolled in release channels # custom: # group: Security @@ -40,5 +40,5 @@ valid { violation[msg] { not input.data.gke.release_channel.channel - msg := "GKE cluster is not enrolled in release channel" + msg := "Cluster is not enrolled in any release channel" } diff --git a/gke-policies-v2/policy/control_plane_access.rego b/gke-policies-v2/policy/control_plane_access.rego index 5c9cdc74..3d3f5fef 100644 --- a/gke-policies-v2/policy/control_plane_access.rego +++ b/gke-policies-v2/policy/control_plane_access.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA -# title: Control Plane endpoint access +# title: Limit Control Plane endpoint access # description: Control Plane endpoint access should be limited to authorized networks only # custom: # group: Security @@ -41,15 +41,15 @@ valid { violation[msg] { not input.data.gke.master_authorized_networks_config.enabled - msg := "GKE cluster has not enabled master authorized networks configuration" + msg := "Cluster is not configured with master authorized networks" } violation[msg] { not input.data.gke.master_authorized_networks_config.cidr_blocks - msg := "GKE cluster's master authorized networks has no CIDR blocks element" + msg := "Cluster is not configured with master authorized networks CIDRs" } violation[msg] { count(input.data.gke.master_authorized_networks_config.cidr_blocks) < 1 - msg := "GKE cluster's master authorized networks has no CIDR blocks defined" + msg := "Cluster is not configured with master authorized networks CIDRs" } diff --git a/gke-policies-v2/policy/control_plane_disable_cert_authentication.rego b/gke-policies-v2/policy/control_plane_disable_cert_authentication.rego index b02c9ef8..d11affaa 100644 --- a/gke-policies-v2/policy/control_plane_disable_cert_authentication.rego +++ b/gke-policies-v2/policy/control_plane_disable_cert_authentication.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Control plane user certificate authentication +# title: Disable control plane certificate authentication # description: >- # Disable Client Certificates, which require certificate rotation, for authentication. Instead, # use another authentication method like OpenID Connect. @@ -40,10 +40,10 @@ valid { violation[msg] { input.data.gke.master_auth.client_certificate - msg := "The GKE cluster authentication should not be configured with a client certificate" + msg := "Cluster authentication is configured with a client certificate" } violation[msg] { input.data.gke.master_auth.client_key - msg := "The GKE cluster authentication should not be configured with a client key" + msg := "Cluster authentication is configured with a client key" } diff --git a/gke-policies-v2/policy/control_plane_disable_legacy_authorization.rego b/gke-policies-v2/policy/control_plane_disable_legacy_authorization.rego index d7265a54..852633ba 100644 --- a/gke-policies-v2/policy/control_plane_disable_legacy_authorization.rego +++ b/gke-policies-v2/policy/control_plane_disable_legacy_authorization.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: GKE RBAC authorization +# title: Disable legacy ABAC authorization # description: GKE cluster should use RBAC instead of legacy ABAC authorization # custom: # group: Security @@ -39,5 +39,5 @@ valid { violation[msg] { input.data.gke.legacy_abac.enabled - msg := "The GKE cluster is configured to use legacy ABAC authorization mechanism" + msg := "Cluster authorization is configured with legacy ABAC" } diff --git a/gke-policies-v2/policy/control_plane_disable_password_authentication.rego b/gke-policies-v2/policy/control_plane_disable_password_authentication.rego index a4e0eccc..9334da29 100644 --- a/gke-policies-v2/policy/control_plane_disable_password_authentication.rego +++ b/gke-policies-v2/policy/control_plane_disable_password_authentication.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA -# title: Control plane user basic authentication +# title: Disable control plane basic authentication # description: >- # Disable Basic Authentication (basic auth) for API server authentication as it uses static # passwords which need to be rotated. @@ -41,10 +41,10 @@ valid { violation[msg] { input.data.gke.master_auth.password - msg := "The GKE cluster authentication should not be configured with a client password" + msg := "Cluster authentication is configured with a client password" } violation[msg] { input.data.gke.master_auth.username - msg := "The GKE cluster authentication should not be configured with a client username" + msg := "Cluster authentication is configured with a client username" } diff --git a/gke-policies-v2/policy/control_plane_endpoint.rego b/gke-policies-v2/policy/control_plane_endpoint.rego index bd6b95e7..38931df4 100644 --- a/gke-policies-v2/policy/control_plane_endpoint.rego +++ b/gke-policies-v2/policy/control_plane_endpoint.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Control Plane endpoint visibility +# title: Enable control plane private endpoint # description: Control Plane endpoint should be locked from external access # custom: # group: Security @@ -39,5 +39,5 @@ valid { violation[msg] { not input.data.gke.private_cluster_config.enable_private_endpoint - msg := "GKE cluster has not enabled private endpoint" + msg := "Cluster is not configured with private endpoint" } diff --git a/gke-policies-v2/policy/control_plane_redundancy.rego b/gke-policies-v2/policy/control_plane_redundancy.rego index 5489a19b..3a14239b 100644 --- a/gke-policies-v2/policy/control_plane_redundancy.rego +++ b/gke-policies-v2/policy/control_plane_redundancy.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Control Plane redundancy +# title: Ensure redundancy of the Control Plane # description: GKE cluster should be regional for maximum availability of control plane during upgrades and zonal outages # custom: # group: Availability @@ -37,10 +37,10 @@ valid { violation[msg] { not input.data.gke.location - msg := "Missing GKE cluster location object" + msg := "Cluster location information is missing" } violation[msg] { not location.regional(input.data.gke.location) - msg := sprintf("Invalid GKE Control plane location %q (not regional)", [input.data.gke.location]) + msg := sprintf("Cluster location %q is not regional", [input.data.gke.location]) } diff --git a/gke-policies-v2/policy/ilb_subsetting.rego b/gke-policies-v2/policy/ilb_subsetting.rego index f1a9c171..7e469252 100644 --- a/gke-policies-v2/policy/ilb_subsetting.rego +++ b/gke-policies-v2/policy/ilb_subsetting.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA -# title: GKE L4 ILB Subsetting +# title: Enable GKE L4 ILB Subsetting # description: GKE cluster should use GKE L4 ILB Subsetting if nodes > 250 # custom: # group: Scalability @@ -37,7 +37,5 @@ valid { violation[msg] { input.data.gke.current_node_count > 250 not input.data.gke.network_config.enable_l4ilb_subsetting = true - - msg := sprintf("The GKE cluster has %v nodes but is not configured to use L4 ILB Subsetting", [input.data.gke.current_node_count]) - + msg := sprintf("Cluster has %v nodes and is not configured with L4 ILB Subsetting", [input.data.gke.current_node_count]) } diff --git a/gke-policies-v2/policy/intranode_visibility.rego b/gke-policies-v2/policy/intranode_visibility.rego index d9b0370d..59b87449 100644 --- a/gke-policies-v2/policy/intranode_visibility.rego +++ b/gke-policies-v2/policy/intranode_visibility.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: GKE intranode visibility +# title: Enable GKE intranode visibility # description: GKE cluster should have intranode visibility enabled # custom: # group: Security @@ -39,5 +39,5 @@ valid { violation[msg] { not input.data.gke.networkConfig.enableIntraNodeVisibility = true - msg := "The GKE cluster does not have Intranode Visibility enabled" + msg := "Cluster is not configured with Intranode Visibility" } diff --git a/gke-policies-v2/policy/monitoring_and_logging.rego b/gke-policies-v2/policy/monitoring_and_logging.rego index 09134a05..a49bcaed 100644 --- a/gke-policies-v2/policy/monitoring_and_logging.rego +++ b/gke-policies-v2/policy/monitoring_and_logging.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Cloud Monitoring and Logging +# title: Enable Cloud Monitoring and Logging # description: GKE cluster should use Cloud Logging and Monitoring # custom: # group: Maintenance @@ -43,12 +43,10 @@ valid { violation[msg] { not input.data.gke.logging_config.component_config.enable_components - - msg := "The GKE cluster does not have Cloud Logging enabled" + msg := "Cluster is not configured with Cloud Logging" } violation[msg] { not input.data.gke.monitoring_config.component_config.enable_components - - msg := "The GKE cluster does not have Cloud Monitoring enabled" -} \ No newline at end of file + msg := "Cluster is not configured with Cloud Monitoring" +} diff --git a/gke-policies-v2/policy/nap_forbid_default_sa.rego b/gke-policies-v2/policy/nap_forbid_default_sa.rego index 3b6c89c8..620811c7 100644 --- a/gke-policies-v2/policy/nap_forbid_default_sa.rego +++ b/gke-policies-v2/policy/nap_forbid_default_sa.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA -# title: Forbid default Service Accounts in Node Auto-Provisioning +# title: Change default Service Accounts in Node Auto-Provisioning # description: Node Auto-Provisioning configuration should not allow default Service Accounts # custom: # group: Security @@ -42,5 +42,5 @@ violation[msg] { not input.data.gke.autopilot.enabled input.data.gke.autoscaling.enable_node_autoprovisioning == true input.data.gke.autoscaling.autoprovisioning_node_pool_defaults.service_account == "default" - msg := "GKE cluster Node Auto-Provisioning should have a dedicated Service Account configured" + msg := "Cluster is configured with default service account for Node Auto-Provisioning" } diff --git a/gke-policies-v2/policy/nap_forbid_single_zone.rego b/gke-policies-v2/policy/nap_forbid_single_zone.rego index c74cf35b..1651b608 100644 --- a/gke-policies-v2/policy/nap_forbid_single_zone.rego +++ b/gke-policies-v2/policy/nap_forbid_single_zone.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Ensure that node pool locations within Node Auto-Provisioning are covering more than one zone (or not enforced at all) +# title: Ensure redundancy of Node Auto-provisioning node pools # description: Node Auto-Provisioning configuration should cover more than one zone # custom: # group: Security @@ -37,5 +37,5 @@ valid { violation[msg] { input.data.gke.autoscaling.enable_node_autoprovisioning == true count(input.data.gke.autoscaling.autoprovisioning_locations) == 1 - msg := "GKE cluster Node Auto-Provisioning configuration should cover more than one zone" + msg := "Cluster is not configured with multiple zones for NAP node pools" } diff --git a/gke-policies-v2/policy/nap_integrity_monitoring.rego b/gke-policies-v2/policy/nap_integrity_monitoring.rego index aa37b9fe..94ec5a24 100644 --- a/gke-policies-v2/policy/nap_integrity_monitoring.rego +++ b/gke-policies-v2/policy/nap_integrity_monitoring.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Ensure that nodes in Node Auto-Provisioning node pools will use integrity monitoring +# title: Enable integrity monitoring for Node Auto-Provisioning node pools # description: Nodes in Node Auto-Provisioning should use integrity monitoring # custom: # group: Security @@ -45,6 +45,5 @@ valid { violation[msg] { input.data.gke.autoscaling.enable_node_autoprovisioning == true input.data.gke.autoscaling.autoprovisioning_node_pool_defaults.shielded_instance_config.enable_integrity_monitoring == false - - msg := "GKE cluster Node Auto-Provisioning configuration use integrity monitoring" + msg := "Cluster is not configured with integrity monitoring for NAP node pools" } diff --git a/gke-policies-v2/policy/nap_use_cos.rego b/gke-policies-v2/policy/nap_use_cos.rego index 89ceb467..d7d813b3 100644 --- a/gke-policies-v2/policy/nap_use_cos.rego +++ b/gke-policies-v2/policy/nap_use_cos.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA -# title: Ensure that nodes in Node Auto-Provisioning node pools will use Container-Optimized OS +# title: Configure Container-Optimized OS for Node Auto-Provisioning node pools # description: Nodes in Node Auto-Provisioning should use Container-Optimized OS # custom: # group: Security @@ -42,6 +42,5 @@ valid { violation[msg] { input.data.gke.autoscaling.enable_node_autoprovisioning == true not lower(input.data.gke.autoscaling.autoprovisioning_node_pool_defaults.image_type) in { "cos", "cos_containerd"} - - msg := "GKE cluster Node Auto-Provisioning configuration use Container-Optimized OS" + msg := "Cluster is not configured with COS for NAP node pools" } diff --git a/gke-policies-v2/policy/network_policies.rego b/gke-policies-v2/policy/network_policies.rego index cea084b3..98f65b5b 100644 --- a/gke-policies-v2/policy/network_policies.rego +++ b/gke-policies-v2/policy/network_policies.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: GKE Network Policies engine +# title: Enable Kubernetes Network Policies # description: GKE cluster should have Network Policies or Dataplane V2 enabled # custom: # group: Security @@ -41,21 +41,19 @@ violation[msg] { input.data.gke.addons_config.network_policy_config.disabled not input.data.gke.network_policy not input.data.gke.network_config.datapath_provider == 2 - - msg := "No Network Policies Engines enabled" + msg := "Cluster is not configured with Kubernetes Network Policies" } violation[msg] { count(input.data.gke.addons_config.network_policy_config) == 0 not input.data.gke.network_policy.enabled not input.data.gke.network_config.datapath_provider == 2 - msg := "Network Policies enabled but without configuration" + msg := "Cluster is configured with Kubernetes Network Policies without configuration" } violation[msg] { input.data.gke.addons_config.network_policy_config.disabled count(input.data.gke.network_policy) == 0 not input.data.gke.network_config.datapath_provider == 2 - - msg := "Not DPv2 nor Network Policies are enabled onto the cluster" + msg := "Cluster is not using Dataplane V2 and is not configured with Kubernetes Network Policies" } diff --git a/gke-policies-v2/policy/node_local_dns_cache.rego b/gke-policies-v2/policy/node_local_dns_cache.rego index 6a0bf18d..9bc533a5 100644 --- a/gke-policies-v2/policy/node_local_dns_cache.rego +++ b/gke-policies-v2/policy/node_local_dns_cache.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: GKE node local DNS cache +# title: Enable GKE node local DNS cache # description: GKE cluster should use node local DNS cache # custom: # group: Scalability @@ -36,5 +36,5 @@ valid { violation[msg] { not input.data.gke.addons_config.dns_cache_config.enabled = true - msg := "The GKE cluster does not have node local DNS cache enabled" + msg := "Cluster is not configured with node local DNS cache" } diff --git a/gke-policies-v2/policy/node_pool_autorepair.rego b/gke-policies-v2/policy/node_pool_autorepair.rego index 80dba5ef..f39e8037 100644 --- a/gke-policies-v2/policy/node_pool_autorepair.rego +++ b/gke-policies-v2/policy/node_pool_autorepair.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA -# title: Use Node Auto-Repair +# title: Enable node auto-repair # description: GKE node pools should have Node Auto-Repair enabled to configure Kubernetes Engine # custom: # group: Availability @@ -41,5 +41,5 @@ valid { violation[msg] { some pool not input.data.gke.node_pools[pool].management.auto_repair - msg := sprintf("autorepair not set for GKE node pool %q", [input.data.gke.node_pools[pool].name]) + msg := sprintf("Node pool %q is not configured with auto-repair", [input.data.gke.node_pools[pool].name]) } diff --git a/gke-policies-v2/policy/node_pool_autoscaling.rego b/gke-policies-v2/policy/node_pool_autoscaling.rego index 2cf10414..e9423c92 100644 --- a/gke-policies-v2/policy/node_pool_autoscaling.rego +++ b/gke-policies-v2/policy/node_pool_autoscaling.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Use node pool autoscaling +# title: Enable node pool auto-scaling # description: GKE node pools should have autoscaling configured to proper resize nodes according to traffic # custom: # group: Scalability @@ -39,5 +39,5 @@ valid { violation[msg] { some pool not input.data.gke.node_pools[pool].autoscaling.enabled - msg := sprintf("Node pool %q does not have autoscaling configured.", [input.data.gke.node_pools[pool].name]) + msg := sprintf("Node pool %q is not configured with autoscaling", [input.data.gke.node_pools[pool].name]) } diff --git a/gke-policies-v2/policy/node_pool_autoupgrade.rego b/gke-policies-v2/policy/node_pool_autoupgrade.rego index 4bcadd12..5d2aaed8 100644 --- a/gke-policies-v2/policy/node_pool_autoupgrade.rego +++ b/gke-policies-v2/policy/node_pool_autoupgrade.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Use Node Auto-Upgrade +# title: Enable node auto-upgrade # description: GKE node pools should have Node Auto-Upgrade enabled to configure Kubernetes Engine # custom: # group: Security @@ -41,5 +41,5 @@ valid { violation[msg] { some pool not input.data.gke.node_pools[pool].management.auto_upgrade - msg := sprintf("autoupgrade not set for GKE node pool %q", [input.data.gke.node_pools[pool].name]) + msg := sprintf("Node pool %q is not configured with auto-upgrade", [input.data.gke.node_pools[pool].name]) } diff --git a/gke-policies-v2/policy/node_pool_disk_encryption.rego b/gke-policies-v2/policy/node_pool_disk_encryption.rego index 00730cc1..984f9fd8 100644 --- a/gke-policies-v2/policy/node_pool_disk_encryption.rego +++ b/gke-policies-v2/policy/node_pool_disk_encryption.rego @@ -42,5 +42,5 @@ valid { violation[msg] { some pool not input.data.gke.node_pools[pool].config.boot_disk_kms_key - msg := sprintf("GKE cluster node_pool %q has no CMEK configured for the boot disks", [input.data.gke.node_pools[pool].name]) + msg := sprintf("Node pool %q is not configured with CMEK for the boot disk", [input.data.gke.node_pools[pool].name]) } diff --git a/gke-policies-v2/policy/node_pool_forbid_default_sa.rego b/gke-policies-v2/policy/node_pool_forbid_default_sa.rego index e358d36e..211c9587 100644 --- a/gke-policies-v2/policy/node_pool_forbid_default_sa.rego +++ b/gke-policies-v2/policy/node_pool_forbid_default_sa.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA -# title: Forbid default compute SA on node_pool +# title: Change default Service Accounts in node pools # description: GKE node pools should have a dedicated sa with a restricted set of permissions # custom: # group: Security @@ -42,5 +42,5 @@ violation[msg] { not input.data.gke.autopilot.enabled some pool input.data.gke.node_pools[pool].config.service_account == "default" - msg := sprintf("GKE cluster node_pool %q should have a dedicated SA", [input.data.gke.node_pools[pool].name]) + msg := sprintf("Node pool %q is configured with default SA", [input.data.gke.node_pools[pool].name]) } diff --git a/gke-policies-v2/policy/node_pool_integrity_monitoring.rego b/gke-policies-v2/policy/node_pool_integrity_monitoring.rego index 2a022330..bfe8101b 100644 --- a/gke-policies-v2/policy/node_pool_integrity_monitoring.rego +++ b/gke-policies-v2/policy/node_pool_integrity_monitoring.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Integrity monitoring on the nodes +# title: Enable integrity monitoring for node pools # description: GKE node pools should have integrity monitoring feature enabled to detect changes in a VM boot measurements # custom: # group: Security @@ -40,5 +40,5 @@ valid { violation[msg] { some pool not input.data.gke.node_pools[pool].config.shielded_instance_config.enable_integrity_monitoring - msg := sprintf("Node pool %q has disabled integrity monitoring feature.", [input.data.gke.node_pools[pool].name]) + msg := sprintf("Node pool %q is not configured with integrity monitoring", [input.data.gke.node_pools[pool].name]) } diff --git a/gke-policies-v2/policy/node_pool_multi_zone.rego b/gke-policies-v2/policy/node_pool_multi_zone.rego index e998b0e9..752a59fe 100644 --- a/gke-policies-v2/policy/node_pool_multi_zone.rego +++ b/gke-policies-v2/policy/node_pool_multi_zone.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Multi-zone node pools +# title: Ensure redundancy of the node pools # description: GKE node pools should be regional (multiple zones) for maximum nodes availability during zonal outages # custom: # group: Availability @@ -38,5 +38,5 @@ valid { violation[msg] { some pool count(input.data.gke.node_pools[pool].locations) < 2 - msg := sprintf("Node pool %q is not on multiple zones.", [input.data.gke.node_pools[pool].name]) + msg := sprintf("Node pool %q is not configured with multiple zones", [input.data.gke.node_pools[pool].name]) } \ No newline at end of file diff --git a/gke-policies-v2/policy/node_pool_secure_boot.rego b/gke-policies-v2/policy/node_pool_secure_boot.rego index ffec4dcf..1969c7b4 100644 --- a/gke-policies-v2/policy/node_pool_secure_boot.rego +++ b/gke-policies-v2/policy/node_pool_secure_boot.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA -# title: Secure boot on the nodes +# title: Enable Secure boot for node pools # description: Secure Boot helps ensure that the system only runs authentic software by verifying the digital signature of all boot components, and halting the boot process if signature verification fails # custom: # group: Security @@ -40,5 +40,5 @@ valid { violation[msg] { some pool not input.data.gke.node_pools[pool].config.shielded_instance_config.enable_secure_boot - msg := sprintf("Node pool %q has disabled secure boot.", [input.data.gke.node_pools[pool].name]) -} + msg := sprintf("Node pool %q is not configured with secure boot", [input.data.gke.node_pools[pool].name]) +} \ No newline at end of file diff --git a/gke-policies-v2/policy/node_pool_use_cos.rego b/gke-policies-v2/policy/node_pool_use_cos.rego index ead52d2a..a9674f30 100644 --- a/gke-policies-v2/policy/node_pool_use_cos.rego +++ b/gke-policies-v2/policy/node_pool_use_cos.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Use Container-Optimized OS +# title: Configure Container-Optimized OS for node pools # description: GKE node pools should use Container-Optimized OS which is maintained by Google and optimized for running Docker containers with security and efficiency. # custom: # group: Security @@ -45,5 +45,5 @@ violation[msg] { some pool not lower(input.data.gke.node_pools[pool].config.image_type) in {"cos", "cos_containerd"} not startswith(lower(input.data.gke.node_pools[pool].config.image_type), "windows") - msg := sprintf("Node pool %q does not use Container-Optimized OS.", [input.data.gke.node_pools[pool].name]) + msg := sprintf("Node pool %q is not configured with COS", [input.data.gke.node_pools[pool].name]) } diff --git a/gke-policies-v2/policy/node_pool_version_skew.rego b/gke-policies-v2/policy/node_pool_version_skew.rego index e97264d0..4fee1705 100644 --- a/gke-policies-v2/policy/node_pool_version_skew.rego +++ b/gke-policies-v2/policy/node_pool_version_skew.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Version skew between node pools and control plane +# title: Ensure acceptable version skew in a cluster # description: Difference between cluster control plane version and node pools version should be no more than 2 minor versions. 
# custom: # group: Management @@ -41,26 +41,26 @@ valid { violation[msg] { not input.data.gke.current_master_version - msg := "control plane version is undefined" + msg := "Control plane version is undefined" } violation[msg] { some node_pool not input.data.gke.node_pools[node_pool].version - msg := sprintf("node pool %q control plane version is undefined", [input.data.gke.node_pools[node_pool].name]) + msg := sprintf("Node pool %q version is undefined", [input.data.gke.node_pools[node_pool].name]) } violation[msg] { master_ver := regex.find_all_string_submatch_n(expr, input.data.gke.current_master_version, 1) count(master_ver) == 0 - msg := sprintf("control plane version %q does not match version regex", [input.data.gke.current_master_version]) + msg := sprintf("Control plane version %q does not match version regex", [input.data.gke.current_master_version]) } violation[msg] { some node_pool node_pool_ver := regex.find_all_string_submatch_n(expr, input.data.gke.node_pools[node_pool].version, 1) count(node_pool_ver) == 0 - msg := sprintf("node pool %q version %q does not match version regex", [input.data.gke.node_pools[node_pool].name, input.data.gke.node_pools[node_pool].version]) + msg := sprintf("Node pool %q version %q does not match version regex", [input.data.gke.node_pools[node_pool].name, input.data.gke.node_pools[node_pool].version]) } violation[msg] { @@ -68,7 +68,7 @@ violation[msg] { some node_pool node_pool_ver := regex.find_all_string_submatch_n(expr, input.data.gke.node_pools[node_pool].version, 1) master_ver[0][1] != node_pool_ver[0][1] - msg := sprintf("node pool %q and control plane major versions differ", [input.data.gke.node_pools[node_pool].name]) + msg := sprintf("Node pool %q and control plane major versions differ", [input.data.gke.node_pools[node_pool].name]) } violation[msg] { @@ -77,5 +77,5 @@ violation[msg] { node_pool_ver := regex.find_all_string_submatch_n(expr, input.data.gke.node_pools[node_pool].version, 1) minor_diff := to_number(master_ver[0][2]) - to_number(node_pool_ver[0][2]) abs(minor_diff) > 2 - msg := sprintf("node pool %q and control plane minor versions difference is greater than 2", [input.data.gke.node_pools[node_pool].name]) + msg := sprintf("Node pool %q and control plane minor versions difference is greater than 2", [input.data.gke.node_pools[node_pool].name]) } diff --git a/gke-policies-v2/policy/node_rbac_security_group.rego b/gke-policies-v2/policy/node_rbac_security_group.rego index df27333e..e33f63e4 100644 --- a/gke-policies-v2/policy/node_rbac_security_group.rego +++ b/gke-policies-v2/policy/node_rbac_security_group.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Use RBAC Google group +# title: Enable Google Groups for RBAC # description: GKE cluster should have RBAC security Google group enabled # custom: # group: Security @@ -42,5 +42,5 @@ valid { violation[msg] { not input.data.gke.authenticator_groups_config.enabled - msg := sprintf("RBAC security group not enabled for cluster %q", [input.data.gke.name]) -} \ No newline at end of file + msg := "Cluster is not configured with Google Groups for RBAC" +} diff --git a/gke-policies-v2/policy/private_cluster.rego b/gke-policies-v2/policy/private_cluster.rego index 56c875ce..fd92d0a0 100644 --- a/gke-policies-v2/policy/private_cluster.rego +++ b/gke-policies-v2/policy/private_cluster.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA
-# title: GKE private cluster
+# title: Use private nodes
# description: GKE cluster should be private to ensure network isolation
# custom:
#   group: Security
@@ -38,5 +38,5 @@ valid {

violation[msg] {
	not input.data.gke.private_cluster_config.enable_private_nodes
-	msg := "GKE cluster has not enabled private nodes"
+	msg := "Cluster is not configured with private nodes"
}
diff --git a/gke-policies-v2/policy/secret_encryption.rego b/gke-policies-v2/policy/secret_encryption.rego
index 26c4837c..c0f6bab0 100644
--- a/gke-policies-v2/policy/secret_encryption.rego
+++ b/gke-policies-v2/policy/secret_encryption.rego
@@ -13,7 +13,7 @@
# limitations under the License.

# METADATA
-# title: Kubernetes secrets encryption
+# title: Enable Kubernetes secrets encryption
# description: GKE cluster should use encryption for kubernetes application secrets
# custom:
#   group: Security
@@ -41,5 +41,5 @@ valid {

violation[msg] {
	input.data.gke.database_encryption.state != 1
-	msg := "The GKE cluster is not configured to encrypt kubernetes application secrets"
+	msg := "Cluster is not configured with kubernetes secrets encryption"
}
diff --git a/gke-policies-v2/policy/shielded_nodes.rego b/gke-policies-v2/policy/shielded_nodes.rego
index 208fb088..3a0bfece 100644
--- a/gke-policies-v2/policy/shielded_nodes.rego
+++ b/gke-policies-v2/policy/shielded_nodes.rego
@@ -13,7 +13,7 @@
# limitations under the License.

# METADATA
-# title: GKE Shielded Nodes
+# title: Enable Shielded Nodes
# description: GKE cluster should use shielded nodes
# custom:
#   group: Security
@@ -39,6 +39,5 @@ valid {

violation[msg] {
	not input.data.gke.shielded_nodes.enabled = true
-
-	msg := "The GKE cluster does not have shielded nodes enabled"
+	msg := "Cluster is not configured with shielded nodes"
}
diff --git a/gke-policies-v2/policy/vpc_native_cluster.rego b/gke-policies-v2/policy/vpc_native_cluster.rego
index b3ca7670..f9a6a91d 100644
--- a/gke-policies-v2/policy/vpc_native_cluster.rego
+++ b/gke-policies-v2/policy/vpc_native_cluster.rego
@@ -13,7 +13,7 @@
# limitations under the License.

# METADATA
-# title: GKE VPC-native cluster
+# title: Use VPC-native cluster
# description: GKE cluster nodepool should be VPC-native as per our best-practices
# custom:
#   group: Management
@@ -40,10 +40,10 @@ valid {
violation[msg] {
	some pool
	not input.data.gke.node_pools[pool].network_config.pod_ipv4_cidr_block
-	msg := sprintf("Nodepool %q of the GKE cluster is not configured to use VPC-native routing", [input.data.gke.node_pools[pool].name])
+	msg := sprintf("Nodepool %q is not configured to use VPC-native routing", [input.data.gke.node_pools[pool].name])
}

violation[msg] {
	not input.data.gke.ip_allocation_policy.use_ip_aliases
-	msg := "the GKE cluster is not configured to use VPC-native routing"
-} \ No newline at end of file
+	msg := "Cluster is not configured with VPC-native routing"
+}
diff --git a/gke-policies-v2/policy/workload_identity.rego b/gke-policies-v2/policy/workload_identity.rego
index 59ed0578..34114b37 100644
--- a/gke-policies-v2/policy/workload_identity.rego
+++ b/gke-policies-v2/policy/workload_identity.rego
@@ -13,7 +13,7 @@
# limitations under the License.
# METADATA -# title: GKE Workload Identity +# title: Use GKE Workload Identity # description: GKE cluster should have Workload Identity enabled # custom: # group: Security @@ -41,5 +41,5 @@ valid { violation[msg] { not input.data.gke.workload_identity_config.workload_pool - msg := "The GKE cluster does not have workload identity enabled" + msg := "Cluster is not configured with Workload Identity" } diff --git a/go.mod b/go.mod index 234c6a35..8a19d707 100644 --- a/go.mod +++ b/go.mod @@ -8,10 +8,10 @@ require ( cloud.google.com/go/pubsub v1.33.0 cloud.google.com/go/securitycenter v1.24.3 cloud.google.com/go/storage v1.36.0 + github.com/fatih/color v1.16.0 github.com/go-git/go-billy/v5 v5.5.0 github.com/go-git/go-git/v5 v5.11.0 github.com/googleapis/gax-go/v2 v2.12.0 - github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db github.com/open-policy-agent/opa v0.60.0 github.com/prometheus/client_golang v1.18.0 github.com/prometheus/common v0.46.0 @@ -73,6 +73,8 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect diff --git a/go.sum b/go.sum index 90ae23af..7724a220 100644 --- a/go.sum +++ b/go.sum @@ -91,6 +91,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= @@ -208,10 +210,13 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod 
h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -369,6 +374,7 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/internal/app/app.go b/internal/app/app.go index a06a2c77..622fd89d 100644 --- a/internal/app/app.go +++ b/internal/app/app.go @@ -23,6 +23,7 @@ import ( "os" "reflect" + "github.com/fatih/color" cfg "github.com/google/gke-policy-automation/internal/config" "github.com/google/gke-policy-automation/internal/gke" "github.com/google/gke-policy-automation/internal/inputs" @@ -38,6 +39,8 @@ const ( ) var errNoPolicies = errors.New("no policies to check against") +var consoleInfoColorF = color.New(color.Bold, color.FgHiWhite).Sprintf +var consoleWarnColorF = color.New(color.Bold, color.FgHiYellow).Sprintf type PolicyAutomation interface { LoadCliConfig(cliConfig *CliConfig, defaultsFn setConfigDefaults, validateFn validateConfig) error @@ -127,9 +130,15 @@ func (p *PolicyAutomationApp) CheckBestPractices() error { } func (p *PolicyAutomationApp) CheckScalability() error { - p.out.ColorPrintf("%s [yellow][bold]Running scalability check requires metrics from kube-state-metrics!\n", outputs.IconInfo) + p.out.Printf("%s %s\n", + outputs.IconInfo, + consoleWarnColorF("Running scalability check requires metrics from kube-state-metrics!"), + ) docsTitle := fmt.Sprintf("%s \x1b]8;;%s\x07%s\x1b]8;;\x07", outputs.IconHyperlink, "https://github.com/google/gke-policy-automation/blob/scalability-docs/docs/user-guide.md#checking-scalability-limits", "documentation") - p.out.ColorPrintf("%s [yellow][bold]Check the %s for more details.\n", outputs.IconInfo, docsTitle) + p.out.Printf("%s %s\n", + outputs.IconInfo, + consoleWarnColorF("Check the %s for more details", docsTitle), + ) return p.evaluateClusters([]string{regoPackageBaseScalability}) } @@ -185,7 +194,10 @@ func (p *PolicyAutomationApp) ClusterJSONData() error { for _, dumpCollector := range p.clusterDumpCollectors { colType := reflect.TypeOf(dumpCollector).String() log.Debugf("closing cluster dump collector %s", colType) - p.out.ColorPrintf("%s [light_gray][bold]closing cluster dump collector ...\n", outputs.IconInfo) + p.out.Printf("%s %s\n", + outputs.IconInfo, + consoleInfoColorF("Closing cluster dump collector ..."), + ) if err := dumpCollector.Close(); err != nil { log.Errorf("failed to close cluster dump collector %s due to %s", colType, err) return err @@ -213,7 +225,8 @@ func (p *PolicyAutomationApp) PolicyCheck() error { log.Errorf("could not parse policy files: %s", err) return 
err } - p.out.ColorPrintf("%s [bold][green] All policies validated correctly\n", outputs.IconInfo) + correctF := color.New(color.Bold, color.FgHiGreen).Sprint + p.out.Printf("%s\n", correctF("All policies validated correctly")) log.Info("All policies validated correctly") return nil } @@ -242,7 +255,10 @@ func (p *PolicyAutomationApp) PolicyGenerateDocumentation() error { } documentationGenerator := outputs.NewMarkdownPolicyDocumentation(pa.GetPolicies()) - p.out.ColorPrintf("%s [light_gray][bold]Writing policy documentation ... [%s]\n", outputs.IconInfo, p.policyDocsFile) + p.out.Printf("%s %s\n", + outputs.IconInfo, + consoleInfoColorF("Writing policy documentation ... [%s]", p.policyDocsFile), + ) log.Infof("Writing policy documentation to file %s", p.policyDocsFile) if _, err := w.Write([]byte(documentationGenerator.GenerateDocumentation())); err != nil { p.out.ErrorPrint("could not write documentation file", err) @@ -264,7 +280,10 @@ func (p *PolicyAutomationApp) loadPolicyFiles() ([]*policy.PolicyFile, error) { policyConfig.GitBranch, policyConfig.GitDirectory) } - p.out.ColorPrintf("%s [light_gray][bold]Reading policy files... [%s]\n", outputs.IconInfo, policySrc) + p.out.Printf("%s %s\n", + outputs.IconInfo, + consoleInfoColorF("Reading policy files... [%s]", policySrc), + ) log.Infof("Reading policy files from %s", policySrc) files, err := policySrc.GetPolicyFiles() if err != nil { diff --git a/internal/app/app_check_clusters.go b/internal/app/app_check_clusters.go index 0e940485..07bb7a02 100644 --- a/internal/app/app_check_clusters.go +++ b/internal/app/app_check_clusters.go @@ -66,13 +66,19 @@ func (p *PolicyAutomationApp) getClusters() ([]string, error) { func (p *PolicyAutomationApp) discoverClusters() ([]string, error) { if p.config.ClusterDiscovery.Organization != "" { log.Infof("Discovering clusters in organization %s", p.config.ClusterDiscovery.Organization) - p.out.ColorPrintf("%s [light_gray][bold]Discovering clusters in for organization... [%s]\n", outputs.IconInfo, p.config.ClusterDiscovery.Organization) + p.out.Printf("%s %s\n", + outputs.IconInfo, + consoleInfoColorF("Discovering clusters in for organization... [%s]", p.config.ClusterDiscovery.Organization), + ) return p.discovery.GetClustersInOrg(p.config.ClusterDiscovery.Organization) } clusters := make([]string, 0) for _, folder := range p.config.ClusterDiscovery.Folders { log.Infof("Discovering clusters in folder %s", folder) - p.out.ColorPrintf("%s [light_gray][bold]Discovering clusters in folder... [%s]\n", outputs.IconInfo, folder) + p.out.Printf("%s %s\n", + outputs.IconInfo, + consoleInfoColorF("Discovering clusters in folder... [%s]", folder), + ) results, err := p.discovery.GetClustersInFolder(folder) if err != nil { return nil, err @@ -81,7 +87,10 @@ func (p *PolicyAutomationApp) discoverClusters() ([]string, error) { } for _, project := range p.config.ClusterDiscovery.Projects { log.Infof("Discovering clusters in project %s", project) - p.out.ColorPrintf("%s [light_gray][bold]Discovering clusters in project... [%s]\n", outputs.IconInfo, project) + p.out.Printf("%s %s\n", + outputs.IconInfo, + consoleInfoColorF("Discovering clusters in project... 
[%s]", project), + ) results, err := p.discovery.GetClustersInProject(project) if err != nil { return nil, err @@ -99,13 +108,16 @@ func (p *PolicyAutomationApp) evaluateClusters(regoPackageBases []string) error return err } if len(files) == 0 { - p.out.ColorPrintf("[yellow][bold]No policies to check against\n") + p.out.Printf("%s\n", consoleWarnColorF("No policies to check against")) log.Errorf("No policies to check against") return errNoPolicies } // create a PolicyAgent client instance pa := policy.NewPolicyAgent(p.ctx) - p.out.ColorPrintf("%s [light_gray][bold]Parsing REGO policies...\n", outputs.IconInfo) + p.out.Printf("%s %s\n", + outputs.IconInfo, + consoleInfoColorF("Parsing REGO policies..."), + ) log.Info("Parsing rego policies") // parsing policies before running checks if err := pa.WithFiles(files, p.config.PolicyExclusions); err != nil { @@ -121,13 +133,18 @@ func (p *PolicyAutomationApp) evaluateClusters(regoPackageBases []string) error return err } if len(clusterIds) < 1 { - p.out.ColorPrintf("%s [yellow][bold]No clusters to check, finishing...\n", outputs.IconInfo) + p.out.Printf("%s\n", consoleWarnColorF("No clusters to check, finishing...")) log.Info("Cluster review finished") - p.out.ColorPrintf("%s [light_gray][bold]Cluster review finished\n", outputs.IconInfo) + p.out.Printf("%s %s\n", + outputs.IconInfo, + consoleInfoColorF("Cluster review finished"), + ) return nil } - p.out.ColorPrintf("%s [light_gray][bold]Fetching data from %d input(s) for %d cluster(s)\n", - outputs.IconInfo, len(p.inputs), len(clusterIds)) + p.out.Printf("%s %s\n", + outputs.IconInfo, + consoleInfoColorF("Fetching data from %d input(s) for %d cluster(s)", len(p.inputs), len(clusterIds)), + ) clusterData, errors := inputs.GetAllInputsData(p.inputs, clusterIds) if len(errors) > 0 { p.out.ErrorPrint("could not fetch the cluster details", errors[0]) @@ -139,8 +156,10 @@ func (p *PolicyAutomationApp) evaluateClusters(regoPackageBases []string) error evalResults := &evaluationResults{} for _, cluster := range clusterData { - p.out.ColorPrintf("%s [light_gray][bold]Evaluating policies against GKE cluster... [%s]\n", - outputs.IconInfo, cluster.Name) + p.out.Printf("%s %s\n", + outputs.IconInfo, + consoleInfoColorF("Evaluating policies against GKE cluster... [%s]", cluster.Name), + ) log.Infof("Evaluating policies against GKE cluster %s", cluster.Name) for _, pkgBase := range regoPackageBases { evalResult, err := pa.Evaluate(cluster, pkgBase) @@ -156,7 +175,10 @@ func (p *PolicyAutomationApp) evaluateClusters(regoPackageBases []string) error for _, c := range p.collectors { log.Infof("Collector %s registering the results", c.Name()) - p.out.ColorPrintf("%s [light_gray][bold]Writing evaluation results ... [%s]\n", outputs.IconInfo, c.Name()) + p.out.Printf("%s %s\n", + outputs.IconInfo, + consoleInfoColorF("Writing evaluation results ... 
[%s]", c.Name()), + ) if err = c.RegisterResult(evalResults.List()); err != nil { p.out.ErrorPrint("failed to register evaluation results", err) log.Errorf("could not register evaluation results: %s", err) @@ -170,7 +192,10 @@ func (p *PolicyAutomationApp) evaluateClusters(regoPackageBases []string) error log.Infof("Collector %s processing closed", c.Name()) } log.Info("Cluster review finished") - p.out.ColorPrintf("%s [light_gray][bold]Cluster review finished\n", outputs.IconInfo) + p.out.Printf("%s %s\n", + outputs.IconInfo, + consoleInfoColorF("Cluster review finished"), + ) return nil } diff --git a/internal/app/app_scc.go b/internal/app/app_scc.go index 6391ed82..b2ad3b73 100644 --- a/internal/app/app_scc.go +++ b/internal/app/app_scc.go @@ -18,6 +18,7 @@ import ( "errors" "github.com/google/gke-policy-automation/internal/log" + "github.com/google/gke-policy-automation/internal/outputs" "github.com/google/gke-policy-automation/internal/outputs/scc" ) @@ -29,7 +30,10 @@ func (p *PolicyAutomationApp) ConfigureSCC(orgNumber string) error { if err != nil { return err } - p.out.ColorPrintf("\u2139 [light_gray][bold]Searching for GKE Policy Automation in SCC organization... [%s]\n", orgNumber) + p.out.Printf("%s %s\n", + outputs.IconMagnifier, + consoleInfoColorF("Searching for GKE Policy Automation in SCC organization... [%s]", orgNumber), + ) log.Infof("Searching for GKE Policy Automation in SCC organization %s", orgNumber) id, err := cli.FindSource() if err != nil { @@ -37,18 +41,27 @@ func (p *PolicyAutomationApp) ConfigureSCC(orgNumber string) error { return err } if id != nil { - p.out.ColorPrintf("\u2139 [light_gray][bold]Found GKE Policy Automation in SCC... [%s]\n", *id) + p.out.Printf("%s %s\n", + outputs.IconMagnifier, + consoleInfoColorF("Found GKE Policy Automation in SCC... [%s]", *id), + ) log.Infof("Found GKE Policy Automation in SCC: %s", *id) return nil } - p.out.ColorPrintf("\u2139 [light_gray][bold]GKE Policy Automation was not found in SCC, creating it...\n") + p.out.Printf("%s %s\n", + outputs.IconMagnifier, + consoleInfoColorF("GKE Policy Automation was not found in SCC, creating it..."), + ) log.Info("Creating GKE Policy Automation in SCC") *id, err = cli.CreateSource() if err != nil { p.out.ErrorPrint("could not create GKE Policy Automation source in SCC", err) return err } - p.out.ColorPrintf("\u2139 [light_gray][bold]Created GKE Policy Automation in SCC... [%s]\n", *id) + p.out.Printf("%s %s\n", + outputs.IconMagnifier, + consoleInfoColorF("Created GKE Policy Automation in SCC... 
[%s]", *id), + ) log.Infof("Created GKE Policy Automation in SCC: %s", *id) return nil } diff --git a/internal/gke/gke.go b/internal/gke/gke.go index 6e71f608..81232db3 100644 --- a/internal/gke/gke.go +++ b/internal/gke/gke.go @@ -24,6 +24,14 @@ func GetClusterID(project string, location string, name string) string { return fmt.Sprintf("projects/%s/locations/%s/clusters/%s", project, location, name) } +func MustSliceClusterID(id string) (string, string, string) { + p, l, c, err := SliceAndValidateClusterID(id) + if err != nil { + panic("invalid cluster id: " + err.Error()) + } + return p, l, c +} + func SliceAndValidateClusterID(id string) (string, string, string, error) { r := regexp.MustCompile(`projects/(.+)/(locations|zones)/(.+)/clusters/(.+)`) if !r.MatchString(id) { diff --git a/internal/gke/gke_test.go b/internal/gke/gke_test.go index 48461246..a98ffa03 100644 --- a/internal/gke/gke_test.go +++ b/internal/gke/gke_test.go @@ -69,3 +69,8 @@ func TestSliceAndValidateClusterID_negative(t *testing.T) { t.Fatalf("err = nil; want err") } } + +func TestMustSliceClusterID(t *testing.T) { + input := "projects/demo-project-123/locations/europe-central2/clusters/cluster-waw" + MustSliceClusterID(input) +} diff --git a/internal/outputs/console_collector.go b/internal/outputs/console_collector.go index 93a5a50d..2ec3782b 100644 --- a/internal/outputs/console_collector.go +++ b/internal/outputs/console_collector.go @@ -16,7 +16,10 @@ package outputs import ( "fmt" + "strings" + "github.com/fatih/color" + "github.com/google/gke-policy-automation/internal/gke" "github.com/google/gke-policy-automation/internal/log" "github.com/google/gke-policy-automation/internal/policy" ) @@ -41,23 +44,40 @@ func (p *consoleResultCollector) RegisterResult(results []*policy.PolicyEvaluati func (p *consoleResultCollector) Close() error { report := p.reportMapper.GetReport() p.out.Printf("\n") - p.out.InitTabs(95) - for _, policy := range report.Policies { - policyTitle := policy.PolicyTitle + p.out.InitTabs(0, 4) + for i, policy := range report.Policies { + severityf := severitySprintfFunc(policy.Severity) + ruleTitleF := color.New(color.Bold, color.FgHiWhite).SprintfFunc() + p.out.Printf("%s #%d %s %s", + IconMagnifier, + i+1, + severityf("%s", strings.ToUpper(policy.Severity)), + ruleTitleF("%s", policy.PolicyTitle), + ) if policy.ExternalURI != "" { - policyTitle = fmt.Sprintf("%s \x1b]8;;%s\x07%s\x1b]8;;\x07", IconHyperlink, policy.ExternalURI, policy.PolicyTitle) + extURI := fmt.Sprintf("(\x1b]8;;%s\x07%s\x1b]8;;\x07)", policy.ExternalURI, "documentation") + p.out.Printf(" %s\n", extURI) + } else { + p.out.Printf("\n") } - p.out.ColorPrintf("%s [bold][light_gray][%s][yellow] %s[reset]: %s\n", IconMagnifier, policy.PolicyGroup, policy.PolicyName, policyTitle) - for _, evaluation := range policy.ClusterEvaluations { - statusString := "[ \033[1m\033[32mVALID\033[0m ]" - if !evaluation.Valid { - statusString = "[\033[1m\033[31mINVALID\033[0m]" - } - p.out.TabPrintf(" - %s\t"+statusString+"\n", evaluation.ClusterID) + statusf := evalStatusSprintfFunc(*evaluation) + clusterDataf := color.New(color.FgCyan).Sprintf + project, location, cluster := gke.MustSliceClusterID(evaluation.ClusterID) + + p.out.TabPrintf(" - projects/%s/locations/%s/clusters/%s\t[%s]\n", + clusterDataf("%s", project), + clusterDataf("%s", location), + clusterDataf("%s", cluster), + statusf("%s", evalStatusString(*evaluation)), + ) + if !evaluation.Valid { + violationF := color.New(color.Italic, color.FgRed).Sprintf for _, violation := range 
evaluation.Violations { - p.out.TabPrintf(" \033[1m\033[31m%s\033[0m\t\n", violation) + p.out.TabPrintf(" %s\t\n", + violationF("%s %s", IconMiddleDot, violation), + ) } } log.Infof("Policy: %s, Cluster: %s, Valid: %v", policy.PolicyName, evaluation.ClusterID, evaluation.Valid) @@ -65,10 +85,29 @@ func (p *consoleResultCollector) Close() error { p.out.TabFlush() p.out.Printf("\n") } - p.out.ColorPrintf("%s [light_gray][bold]Evaluated %d policies on %d clusters\n", IconInfo, len(report.Policies), len(report.ClusterStats)) - p.out.InitTabs(0) + summaryf := color.New(color.Bold, color.FgHiWhite).Sprintf + p.out.Printf("%s %s", + IconInfo, + summaryf("Evaluated %d policies on %d clusters\n", len(report.Policies), len(report.ClusterStats)), + ) + p.out.InitTabs(0, 2) for _, stat := range report.ClusterStats { - p.out.TabPrintf(" - %s:\t\033[32m%d valid, \033[31m%d violated, \033[33m%d errored\033[0m\n", stat.ClusterID, stat.ValidPoliciesCount, stat.ViolatedPoliciesCount, stat.ErroredPoliciesCount) + clusterDataf := color.New(color.FgCyan).Sprintf + criticalf := color.New(color.FgHiRed).Sprintf + highf := color.New(color.FgRed).Sprintf + mediumf := color.New(color.FgYellow).Sprintf + lowf := color.New(color.FgHiWhite).Sprintf + project, location, cluster := gke.MustSliceClusterID(stat.ClusterID) + + p.out.TabPrintf(" - projects/%s/locations/%s/clusters/%s\t: %s, %s, %s, %s\n", + clusterDataf("%s", project), + clusterDataf("%s", location), + clusterDataf("%s", cluster), + criticalf("%d Critical", stat.ViolatedCriticalCount), + highf("%d High", stat.ViolatedHighCount), + mediumf("%d Medium", stat.ViolatedMediumCount), + lowf("%d Low", stat.ViolatedLowCount), + ) } p.out.TabFlush() p.out.Printf("\n") @@ -78,3 +117,40 @@ func (p *consoleResultCollector) Close() error { func (p *consoleResultCollector) Name() string { return "console" } + +type sprintfFunc func(format string, a ...interface{}) string + +func severitySprintfFunc(severity string) sprintfFunc { + var sevColor []color.Attribute + switch strings.ToLower(severity) { + case "critical": + sevColor = []color.Attribute{color.Bold, color.FgHiRed} + case "high": + sevColor = []color.Attribute{color.FgHiRed} + case "medium": + sevColor = []color.Attribute{color.Bold, color.FgHiYellow} + default: + sevColor = []color.Attribute{color.FgHiWhite} + } + return color.New(sevColor...).SprintfFunc() +} + +func evalStatusSprintfFunc(e ValidationReportClusterEvaluation) sprintfFunc { + if e.Errored { + return color.New(color.Bold, color.FgHiYellow).Sprintf + } + if e.Valid { + return color.New(color.Bold, color.FgHiGreen).Sprintf + } + return color.New(color.Bold, color.FgHiRed).Sprintf +} + +func evalStatusString(e ValidationReportClusterEvaluation) string { + if e.Errored { + return " ERROR " + } + if e.Valid { + return " VALID " + } + return "INVALID" +} diff --git a/internal/outputs/console_collector_test.go b/internal/outputs/console_collector_test.go index 281232a7..05312bc9 100644 --- a/internal/outputs/console_collector_test.go +++ b/internal/outputs/console_collector_test.go @@ -36,13 +36,13 @@ func TestConsoleResultCollector(t *testing.T) { PolicyTitle: "test-title", PolicyDescription: "test-desc", ClusterEvaluations: []*ValidationReportClusterEvaluation{ - {ClusterID: "cluster-one", Valid: true}, - {ClusterID: "cluster-two", Valid: false, Violations: []string{"violation"}}, + {ClusterID: "projects/test-proj/locations/europe-central2/clusters/cluster-one", Valid: true}, + {ClusterID: 
"projects/test-proj/locations/europe-central2/clusters/cluster-two", Valid: false, Violations: []string{"violation"}}, }, }, }, ClusterStats: []*ValidationReportClusterStats{ - {ClusterID: "cluster-one", ValidPoliciesCount: 1}, + {ClusterID: "projects/test-proj/locations/europe-central2/clusters/cluster-one", ValidPoliciesCount: 1}, }, } }, diff --git a/internal/outputs/icons.go b/internal/outputs/icons.go index 54b7d679..5c586983 100644 --- a/internal/outputs/icons.go +++ b/internal/outputs/icons.go @@ -18,4 +18,5 @@ const ( IconMagnifier = "\U0001f50e" IconInfo = "\u2139" IconHyperlink = "\U0001F517" + IconMiddleDot = "\u00b7" ) diff --git a/internal/outputs/output.go b/internal/outputs/output.go index ed303ca1..4f07af32 100644 --- a/internal/outputs/output.go +++ b/internal/outputs/output.go @@ -21,7 +21,7 @@ import ( "os" "text/tabwriter" - "github.com/mitchellh/colorstring" + "github.com/fatih/color" ) const ( @@ -34,14 +34,12 @@ const ( type Output struct { w io.Writer tabWriter *tabwriter.Writer - colorize *colorstring.Colorize } func NewStdOutOutput() *Output { return &Output{ w: os.Stdout, tabWriter: initTabWriter(os.Stdout, defColWidth, tabWidth, tabPadding, tabChar), - colorize: NewColorize(), } } @@ -60,11 +58,7 @@ func (o *Output) TabPrintf(format string, a ...interface{}) (n int, err error) { return fmt.Fprintf(o.tabWriter, format, a...) } -func (o *Output) ColorPrintf(format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(o.w, o.Color(format), a...) -} - -func (o *Output) InitTabs(minColWidth int) { +func (o *Output) InitTabs(minColWidth int, tabPadding int) { o.tabWriter.Flush() o.tabWriter = initTabWriter(o.w, minColWidth, tabWidth, tabPadding, tabChar) } @@ -74,24 +68,13 @@ func (o *Output) TabFlush() (err error) { } func (o *Output) ErrorPrint(message string, cause error) (n int, err error) { - if o.colorize != nil { - return fmt.Fprint(o.w, o.colorize.Color(fmt.Sprintf("[bold][red]Error: [light_gray]%s: [reset][light_gray]%v\n", message, cause))) - } - return fmt.Fprint(o.w, o.colorize.Color(fmt.Sprintf("Error: %s: %s\n", message, cause))) -} - -func (o *Output) Color(v string) string { - if o.colorize != nil { - return o.colorize.Color(v) - } - return v -} - -func NewColorize() *colorstring.Colorize { - return &colorstring.Colorize{ - Colors: colorstring.DefaultColors, - Reset: true, - } + errF := color.New(color.Bold, color.FgHiRed).Sprint + errTitleF := color.New(color.Bold, color.FgHiWhite).Sprintf + return fmt.Fprintf(o.w, "%s %s %v\n", + errF("Error:"), + errTitleF("%s:", message), + cause, + ) } func initTabWriter(output io.Writer, minWidth, tabWidth, padding int, padChar byte) *tabwriter.Writer { diff --git a/internal/outputs/validation_report.go b/internal/outputs/validation_report.go index 52de0580..568649b7 100644 --- a/internal/outputs/validation_report.go +++ b/internal/outputs/validation_report.go @@ -17,11 +17,20 @@ package outputs import ( "encoding/json" "sort" + "strings" "time" "github.com/google/gke-policy-automation/internal/policy" ) +const ( + SeverityCritical = 4 + SeverityHigh = 3 + SeverityMedium = 2 + SeverityLow = 1 + SeverityUnknown = 0 +) + type ValidationReport struct { ValidationTime time.Time `json:"validationDate"` Policies []*ValidationReportPolicy `json:"policies"` @@ -35,6 +44,8 @@ type ValidationReportPolicy struct { PolicyDescription string `json:"description"` Recommendation string `json:"recommendation,omitempty"` ExternalURI string `json:"externalURI,omitempty"` + Severity string `json:"severity,omitempty"` + 
SeverityNumber int `json:"-"` ClusterEvaluations []*ValidationReportClusterEvaluation `json:"clusters"` } @@ -51,6 +62,10 @@ type ValidationReportClusterStats struct { ValidPoliciesCount int `json:"validPoliciesCount"` ViolatedPoliciesCount int `json:"violatedPoliciesCount"` ErroredPoliciesCount int `json:"erroredPoliciesCount"` + ViolatedCriticalCount int `json:"violatedCriticalCount"` + ViolatedHighCount int `json:"violatedHighCount"` + ViolatedMediumCount int `json:"violatedMediumCount"` + ViolatedLowCount int `json:"violatedLowCount"` } type ValidationReportMapper interface { @@ -97,6 +112,16 @@ func (m *validationReportMapperImpl) AddResult(result *policy.PolicyEvaluationRe clusterStat.ValidPoliciesCount++ } else { clusterStat.ViolatedPoliciesCount++ + switch strings.ToLower(resultPolicy.Severity) { + case "critical": + clusterStat.ViolatedCriticalCount++ + case "high": + clusterStat.ViolatedHighCount++ + case "medium": + clusterStat.ViolatedMediumCount++ + default: + clusterStat.ViolatedLowCount++ + } } } } @@ -114,10 +139,19 @@ func (m *validationReportMapperImpl) GetReport() *ValidationReport { policies = append(policies, policy) } sort.SliceStable(policies, func(i, j int) bool { - if policies[i].PolicyGroup == policies[j].PolicyGroup { - return policies[i].PolicyName < policies[j].PolicyName + /* + if policies[i].PolicyGroup == policies[j].PolicyGroup { + return policies[i].PolicyName < policies[j].PolicyName + } + return policies[i].PolicyGroup < policies[j].PolicyGroup + */ + if policies[i].SeverityNumber == policies[j].SeverityNumber { + if policies[i].PolicyGroup == policies[j].PolicyGroup { + return policies[i].PolicyName < policies[j].PolicyName + } + return policies[i].PolicyGroup < policies[j].PolicyGroup } - return policies[i].PolicyGroup < policies[j].PolicyGroup + return policies[i].SeverityNumber > policies[j].SeverityNumber }) stats := make([]*ValidationReportClusterStats, 0, len(m.clusterStats)) for _, stat := range m.clusterStats { @@ -143,6 +177,8 @@ func mapResultPolicyToReportPolicy(policy *policy.Policy) *ValidationReportPolic PolicyGroup: policy.Group, Recommendation: policy.Recommendation, ExternalURI: policy.ExternalURI, + Severity: policy.Severity, + SeverityNumber: mapSeverityToNumber(policy.Severity), } return reportPolicy } @@ -168,3 +204,18 @@ func mapErrorSliceToStringSlice(errors []error) []string { } return strings } + +func mapSeverityToNumber(severity string) int { + switch strings.ToLower(severity) { + case "critical": + return SeverityCritical + case "high": + return SeverityHigh + case "medium": + return SeverityMedium + case "low": + return SeverityLow + default: + return SeverityUnknown + } +} diff --git a/internal/outputs/validation_report_test.go b/internal/outputs/validation_report_test.go index 78d3ebb5..083576b8 100644 --- a/internal/outputs/validation_report_test.go +++ b/internal/outputs/validation_report_test.go @@ -56,6 +56,7 @@ func TestGetReport(t *testing.T) { Group: "group", Recommendation: "do this and that", ExternalURI: "https://cloud.google.com/kubernetes-engine", + Severity: "Medium", }, { Name: "policy-two", @@ -64,6 +65,7 @@ func TestGetReport(t *testing.T) { Group: "group", Recommendation: "delete your cluster", ExternalURI: "https://cloud.google.com/kubernetes-engine/docs/concepts/kubernetes-engine-overview", + Severity: "Critical", }, } expectedClusterEvaluations := [][]*ValidationReportClusterEvaluation{ @@ -89,6 +91,7 @@ func TestGetReport(t *testing.T) { Valid: true, Recommendation: policies[0].Recommendation, 
ExternalURI: policies[0].ExternalURI, + Severity: policies[0].Severity, }, { Name: policies[1].Name, Title: policies[1].Title, @@ -98,6 +101,7 @@ func TestGetReport(t *testing.T) { Violations: []string{"violation"}, Recommendation: policies[1].Recommendation, ExternalURI: policies[1].ExternalURI, + Severity: policies[1].Severity, }, }, }, @@ -113,6 +117,7 @@ func TestGetReport(t *testing.T) { Violations: []string{"violation"}, Recommendation: policies[0].Recommendation, ExternalURI: policies[0].ExternalURI, + Severity: policies[0].Severity, }, { Name: policies[1].Name, Title: policies[1].Title, @@ -122,6 +127,7 @@ func TestGetReport(t *testing.T) { Violations: []string{"violation"}, Recommendation: policies[1].Recommendation, ExternalURI: policies[1].ExternalURI, + Severity: policies[1].Severity, }, }, }, @@ -141,6 +147,8 @@ func TestGetReport(t *testing.T) { PolicyDescription: policies[i].Description, Recommendation: policies[i].Recommendation, ExternalURI: policies[i].ExternalURI, + Severity: policies[i].Severity, + SeverityNumber: mapSeverityToNumber(policies[i].Severity), ClusterEvaluations: expectedClusterEvaluations[i], }, "report policies contains valid policy %v", policies[0].Name) } @@ -148,10 +156,13 @@ func TestGetReport(t *testing.T) { ClusterID: clusterOneName, ValidPoliciesCount: 1, ViolatedPoliciesCount: 1, + ViolatedCriticalCount: 1, }, "report cluster stats contains valid stats for cluster %v", clusterOneName) assert.Contains(t, report.ClusterStats, &ValidationReportClusterStats{ ClusterID: clusterTwoName, ViolatedPoliciesCount: 2, + ViolatedCriticalCount: 1, + ViolatedMediumCount: 1, }, "report cluster stats contains valid stats for cluster %v", clusterTwoName) }