diff --git a/dev/404.html b/dev/404.html index 07f6b7a8..0a5e35bc 100644 --- a/dev/404.html +++ b/dev/404.html @@ -12,7 +12,7 @@ - + diff --git a/dev/configuration/authenticated-registries/index.html b/dev/configuration/authenticated-registries/index.html index 517cc683..ccd3296c 100644 --- a/dev/configuration/authenticated-registries/index.html +++ b/dev/configuration/authenticated-registries/index.html @@ -18,7 +18,7 @@ - + diff --git a/dev/configuration/convert-to-csv/index.html b/dev/configuration/convert-to-csv/index.html index 0723db9f..b0961979 100644 --- a/dev/configuration/convert-to-csv/index.html +++ b/dev/configuration/convert-to-csv/index.html @@ -18,7 +18,7 @@ - + diff --git a/dev/configuration/custom-checks/index.html b/dev/configuration/custom-checks/index.html index 472f5c92..04e2bcad 100644 --- a/dev/configuration/custom-checks/index.html +++ b/dev/configuration/custom-checks/index.html @@ -18,7 +18,7 @@ - + diff --git a/dev/configuration/https-proxy/index.html b/dev/configuration/https-proxy/index.html index 962f7213..9902ffe8 100644 --- a/dev/configuration/https-proxy/index.html +++ b/dev/configuration/https-proxy/index.html @@ -18,7 +18,7 @@ - + diff --git a/dev/configuration/private-registries/acr/index.html b/dev/configuration/private-registries/acr/index.html index 49bc640b..04562eb4 100644 --- a/dev/configuration/private-registries/acr/index.html +++ b/dev/configuration/private-registries/acr/index.html @@ -18,7 +18,7 @@ - + diff --git a/dev/configuration/private-registries/ecr/index.html b/dev/configuration/private-registries/ecr/index.html index 90fdaba3..0b971eb0 100644 --- a/dev/configuration/private-registries/ecr/index.html +++ b/dev/configuration/private-registries/ecr/index.html @@ -18,7 +18,7 @@ - + diff --git a/dev/configuration/resources/index.html b/dev/configuration/resources/index.html index 06c4f44d..858687fc 100644 --- a/dev/configuration/resources/index.html +++ b/dev/configuration/resources/index.html @@ -18,7 +18,7 @@ - + diff --git a/dev/configuration/retain-issues/index.html b/dev/configuration/retain-issues/index.html index f629dbf5..ede21c1a 100644 --- a/dev/configuration/retain-issues/index.html +++ b/dev/configuration/retain-issues/index.html @@ -18,7 +18,7 @@ - + diff --git a/dev/configuration/scan-schedule/index.html b/dev/configuration/scan-schedule/index.html index 08f363e1..f77f2da9 100644 --- a/dev/configuration/scan-schedule/index.html +++ b/dev/configuration/scan-schedule/index.html @@ -18,7 +18,7 @@ - + diff --git a/dev/configuration/suspend-scan/index.html b/dev/configuration/suspend-scan/index.html index e82fd146..4f0d12cf 100644 --- a/dev/configuration/suspend-scan/index.html +++ b/dev/configuration/suspend-scan/index.html @@ -18,7 +18,7 @@ - + diff --git a/dev/configuration/vulnerability-database-persistence/index.html b/dev/configuration/vulnerability-database-persistence/index.html index f6d9234c..7809b6d0 100644 --- a/dev/configuration/vulnerability-database-persistence/index.html +++ b/dev/configuration/vulnerability-database-persistence/index.html @@ -18,7 +18,7 @@ - + diff --git a/dev/dashboard/index.html b/dev/dashboard/index.html index e62f92be..f0fb37a1 100644 --- a/dev/dashboard/index.html +++ b/dev/dashboard/index.html @@ -18,7 +18,7 @@ - + diff --git a/dev/faq/index.html b/dev/faq/index.html index 33cb8e83..c55d5ca4 100644 --- a/dev/faq/index.html +++ b/dev/faq/index.html @@ -18,7 +18,7 @@ - + diff --git a/dev/getting-started/installation/index.html b/dev/getting-started/installation/index.html index 
90d43508..340f64c2 100644 --- a/dev/getting-started/installation/index.html +++ b/dev/getting-started/installation/index.html @@ -18,7 +18,7 @@ - + diff --git a/dev/helm-chart/index.html b/dev/helm-chart/index.html index e72c8634..19beb81e 100644 --- a/dev/helm-chart/index.html +++ b/dev/helm-chart/index.html @@ -16,7 +16,7 @@ - + @@ -1809,7 +1809,7 @@

Parameters +"40m" Trivy timeout diff --git a/dev/index.html b/dev/index.html index 24e72cdd..2d2b959e 100644 --- a/dev/index.html +++ b/dev/index.html @@ -16,7 +16,7 @@ - + diff --git a/dev/plugins/index.html b/dev/plugins/index.html index ac69ae8c..7d292134 100644 --- a/dev/plugins/index.html +++ b/dev/plugins/index.html @@ -18,7 +18,7 @@ - + diff --git a/dev/plugins/marvin/index.html b/dev/plugins/marvin/index.html index fd35acc6..1914dab3 100644 --- a/dev/plugins/marvin/index.html +++ b/dev/plugins/marvin/index.html @@ -18,7 +18,7 @@ - + diff --git a/dev/plugins/popeye/index.html b/dev/plugins/popeye/index.html index 1f1643f3..7f0376e8 100644 --- a/dev/plugins/popeye/index.html +++ b/dev/plugins/popeye/index.html @@ -18,7 +18,7 @@ - + diff --git a/dev/plugins/trivy/index.html b/dev/plugins/trivy/index.html index 2fb86293..1e703ada 100644 --- a/dev/plugins/trivy/index.html +++ b/dev/plugins/trivy/index.html @@ -18,7 +18,7 @@ - + @@ -1366,9 +1366,9 @@

Large vulnerability reports / Scan timeout

Trivy's scan duration may vary depending on the total images in your cluster and the time to download the vulnerability database when needed.

-

By default, Zora sets a timeout of 10 minutes for Trivy scan completion.

+

By default, Zora sets a timeout of 40 minutes for Trivy scan completion.

To adjust this timeout, use the following Helm parameter:

-
--set scan.plugins.trivy.timeout=15m
+
--set scan.plugins.trivy.timeout=60m
 

Once this parameter is updated, the next scan will use the specified value.

diff --git a/dev/search/search_index.json b/dev/search/search_index.json index cd145021..857890df 100644 --- a/dev/search/search_index.json +++ b/dev/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Welcome to the Zora documentation","text":"

This documentation will help you install, explore, and configure Zora!

"},{"location":"#what-is-zora","title":"What is Zora?","text":"

Zora is an open-source solution that helps you achieve compliance with Kubernetes best practices recommended by industry-leading frameworks.

By scanning your cluster with multiple plugins at scheduled times, Zora identifies potential issues, misconfigurations, and vulnerabilities.

"},{"location":"#zora-oss-vs-zora-dashboard","title":"Zora OSS vs Zora Dashboard","text":"

Zora OSS is open-source, available under the Apache 2.0 license, and can be used either as a standalone tool or integrated with Zora Dashboard, a SaaS platform that centralizes all your clusters and provides a full experience. Please refer to the Zora Dashboard page for more details.

"},{"location":"#key-features","title":"Key features","text":""},{"location":"#multi-plugin-architecture","title":"Multi-plugin architecture","text":"

Zora seamlessly integrates open-source tools like Popeye, Marvin, and Trivy as scanners. These tools' capabilities are combined to provide you with a unified view of your cluster's security posture, addressing potential issues, misconfigurations, and vulnerabilities.

"},{"location":"#kubernetes-compliance","title":"Kubernetes compliance","text":"

Zora and its plugins provide actionable insights, guiding you to align your cluster with industry-recognized frameworks such as NSA-CISA, MITRE ATT&CK, CIS Benchmark, and Pod Security Standards.

"},{"location":"#custom-checks","title":"Custom checks","text":"

Enabled by the Marvin plugin, Zora offers a declarative way to create your own checks by using Common Expression Language (CEL) expressions to define validation rules.

"},{"location":"#kubernetes-native","title":"Kubernetes-native","text":"

All scan configurations and plugin reports, including misconfigurations and vulnerabilities, are securely stored as CRDs (Custom Resource Definitions) within your Kubernetes cluster, making them easily accessible through the Kubernetes API and the kubectl command.
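
For example, a minimal sketch of how these objects can be listed, assuming the default zora-system namespace (resource names as used elsewhere in this documentation):

kubectl get clusterissues -n zora-system\nkubectl get vulnerabilityreports -n zora-system\n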

"},{"location":"#architecture","title":"Architecture","text":"

Zora works as a Kubernetes Operator, where both scan and plugin configurations, as well as the results (misconfigurations and vulnerabilities), are managed in CRDs (Custom Resource Definitions).
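
As a quick sketch, the CRDs managed by the operator can be discovered via their API group (zora.undistro.io, as seen in the CustomCheck example later in this documentation):

kubectl api-resources --api-group=zora.undistro.io\n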

Zora Dashboard

When a Zora OSS installation is integrated with Zora Dashboard, scan results are automatically sent to Zora Dashboard SaaS by zora-operator.

Check out Zora Dashboard architecture for more details.

"},{"location":"#zora-origins","title":"Zora origins","text":"

In the early days of the cloud native era, Borg dominated the container-oriented cluster management scene. The name Borg refers to the cybernetic life form from the Star Trek series, which worked as a collective of individuals with a single mind and a shared purpose, much like a \"cluster\".

Being the good nerds we are, and wishing to honor Kubernetes' predecessor (Borg), we named our project Zora.

In Star Trek, Zora is the artificial intelligence that controls the ship U.S.S. Discovery. After merging with a collective of other intelligences, Zora became sentient and joined the crew, bringing insights and making the ship more efficient.

Like Star Trek's Zora, our goal is to help manage your Kubernetes environment by combining multiple plugin capabilities to scan your clusters looking for misconfigurations and vulnerabilities.

"},{"location":"dashboard/","title":"Zora Dashboard","text":"

Zora Dashboard is a SaaS platform designed to seamlessly centralize the security posture management of all your Kubernetes clusters, providing a full experience powered by Zora OSS.

It features a powerful UI that allows you to navigate, filter and explore details of issues and affected resources across all your clusters. You can also invite users to your workspace.

Try Zora Dashboard

Zora Dashboard offers a 14-day starter plan, after which it reverts to the free plan, which provides access for 2 clusters with up to 10 nodes per cluster. Please contact us if you need to discuss a tailored solution.

"},{"location":"dashboard/#getting-started","title":"Getting started","text":"

To integrate your Zora OSS installation with Zora Dashboard, you first need to authenticate with the authorization server and then provide the saas.workspaceID parameter in the Zora OSS installation command.

"},{"location":"dashboard/#authenticating-with-the-authorization-server","title":"Authenticating with the Authorization server","text":"

Authenticating with the authorization server is simplified through the use of a helm plugin, zoraauth, which can be installed by executing

helm plugin install https://github.com/undistro/helm-zoraauth\n
and updated by executing
helm plugin update zoraauth\n
The authentication process occurs when the plugin is executed and you visit the authorization server to confirm the request. The instructions within the Zora Dashboard console will include the appropriate parameters for the plugin; these can be obtained through the Connect cluster option once you have signed in to the Zora Dashboard.

To authenticate with the authorization server, copy and run the helm zoraauth command and then follow the instructions within your terminal:

helm zoraauth --audience=\"zora_prod\" \\\n  --client-id=\"<client id>\" \\\n  --domain=\"login.undistro.io\"\nInitiating Device Authorization Flow...\nPlease visit https://login.undistro.io/activate and enter code: BFNS-NWFF, or visit: https://login.undistro.io/activate?user_code=BFNS-NWFF\n
Entering the login URL within your browser will present you with a screen similar to the following.

Once you have confirmed the request, you should see the following message on your terminal:

Tokens saved to tokens.yaml\n

You can then install Zora OSS by providing the saas.workspaceID parameter in the Zora OSS installation command:

HTTP chart repositoryOCI registry
helm repo add undistro https://charts.undistro.io --force-update\nhelm repo update undistro\nhelm upgrade --install zora undistro/zora \\\n  -n zora-system --create-namespace --wait \\\n  --set clusterName=\"$(kubectl config current-context)\" \\\n  --set saas.workspaceID=<YOUR WORKSPACE ID HERE> \\\n  --values tokens.yaml\n
helm upgrade --install zora oci://ghcr.io/undistro/helm-charts/zora \\\n  -n zora-system --create-namespace --wait \\\n  --set clusterName=\"$(kubectl config current-context)\" \\\n  --set saas.workspaceID=<YOUR WORKSPACE ID HERE> \\\n  --values tokens.yaml\n
"},{"location":"dashboard/#architecture","title":"Architecture","text":"

Zora OSS acts as the engine of Zora Dashboard, meaning that once scans are completed, only the results are sent to Zora Dashboard, where they are accessible to you and those you have invited to your workspace.

Note that these results do not contain sensitive information or specific data about your cluster configuration.

"},{"location":"faq/","title":"Frequently Asked Questions","text":"

Do you have questions about Zora? We do our best to answer all of them on this page. If you can't find yours below, ask it on our discussion board!

"},{"location":"faq/#is-zora-open-source","title":"Is Zora open source?","text":"

There are two Zora tools: Zora OSS and Zora Dashboard.

Zora OSS is open-source, available under the Apache 2.0 license, and can be used either as a standalone tool or integrated with Zora Dashboard.

On the other hand, Zora Dashboard is a SaaS platform that provides a full experience, centralizing the security posture management of all your clusters. It's free for up to 3 clusters. Visit the Zora Dashboard page for more information.

"},{"location":"faq/#can-i-use-zora-oss-standalone-without-zora-dashboard","title":"Can I use Zora OSS standalone without Zora Dashboard?","text":"

Yes, you can use Zora OSS as a standalone tool and access scan results (misconfigurations and vulnerabilities) via kubectl, one cluster at a time.

"},{"location":"faq/#can-i-install-zora-in-a-different-namespace","title":"Can I install Zora in a different namespace?","text":"

Yes, Zora can be installed in any namespace. Simply provide the namespace name using the -n flag in the Helm installation command.

The Cluster, ClusterScan, Plugin, ClusterIssue, and VulnerabilityReport objects will be created in the specified namespace.

If you already have Zora installed and want to change the namespace, you will need to reinstall it.
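
For example, a sketch of an installation into a custom namespace, where <your-namespace> is a placeholder (other flags as in the installation page):

helm upgrade --install zora undistro/zora \\\n  -n <your-namespace> \\\n  --create-namespace \\\n  --set clusterName=\"$(kubectl config current-context)\"\n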

"},{"location":"faq/#can-i-integrate-my-own-plugins-with-zora-and-how","title":"Can I integrate my own plugins with Zora, and how?","text":"

Currently, integrating a new plugin into Zora requires modifying the source code of Worker, a Zora component. The parsing of plugin results into ClusterIssue or VulnerabilityReport is directly handled by Worker, which is written in Go. A fully declarative approach is not yet supported.

Refer to the plugins page to learn more about how plugins work.

Feel free to open an issue or start a discussion with any suggestions regarding this process.

"},{"location":"faq/#which-data-is-sent-to-zora-dashboard-saas","title":"Which data is sent to Zora Dashboard (SaaS)?","text":"

When integrated with Zora Dashboard, only scan results are sent to the SaaS platform.

No sensitive information is collected or exposed.

Scans are performed in your cluster and the results are securely sent via HTTPS to Zora Dashboard, where only you and the users you've invited to your workspace will have access.

"},{"location":"faq/#can-i-host-zora-dashboard-on-premise","title":"Can I host Zora Dashboard on-premise?","text":"

Currently, Zora Dashboard is available as a SaaS platform. While we do not offer an on-premise version of Zora Dashboard at this time, we're continuously working to enhance and expand our offerings. If you have specific requirements or are interested in on-premise solutions, please contact us, and we'll be happy to discuss potential options and explore how we can meet your needs.

"},{"location":"helm-chart/","title":"Zora Helm Chart","text":"

A multi-plugin solution that reports misconfigurations and vulnerabilities by scanning your cluster at scheduled times.

"},{"location":"helm-chart/#installing-the-chart","title":"Installing the Chart","text":"

To install the chart with the release name zora in the zora-system namespace:

helm repo add undistro https://charts.undistro.io --force-update\nhelm repo update undistro\nhelm upgrade --install zora undistro/zora \\\n  -n zora-system \\\n  --version 0.10.2 \\\n  --create-namespace \\\n  --wait \\\n  --set clusterName=\"$(kubectl config current-context)\"\n

These commands deploy Zora on the Kubernetes cluster with the default configuration.

The Parameters section lists the available parameters that can be configured during installation.

Tips:

  • List all charts available in undistro repo using helm search repo undistro

  • Update undistro chart repository using helm repo update undistro

  • List all versions available of undistro/zora chart using helm search repo undistro/zora --versions

  • List all releases in a specific namespace using helm list -n zora-system

  • Get the notes provided by zora release using helm get notes zora -n zora-system

"},{"location":"helm-chart/#uninstalling-the-chart","title":"Uninstalling the Chart","text":"

To uninstall/delete the zora release:

helm uninstall zora -n zora-system\n

The command removes all the Kubernetes components associated with the chart and deletes the release.

"},{"location":"helm-chart/#parameters","title":"Parameters","text":"

The following table lists the configurable parameters of the Zora chart and their default values.

Key Type Default Description nameOverride string \"\" String to partially override fullname template with a string (will prepend the release name) fullnameOverride string \"\" String to fully override fullname template with a string clusterName string \"\" Cluster name. Should be set by kubectl config current-context. saas.workspaceID string \"\" Your SaaS workspace ID saas.server string \"https://zora-dashboard.undistro.io\" SaaS server URL saas.installURL string \"{{.Values.saas.server}}/zora/api/v1alpha1/workspaces/{{.Values.saas.workspaceID}}/helmreleases\" SaaS URL template to notify installation hooks.install.image.repository string \"curlimages/curl\" Post-install hook image repository hooks.install.image.tag string \"8.7.1\" Post-install hook image tag hooks.delete.image.repository string \"rancher/kubectl\" Pre-delete hook image repository hooks.delete.image.tag string \"v1.29.2\" Pre-delete hook image tag imageCredentials.create bool false Specifies whether the secret should be created by providing credentials imageCredentials.registry string \"ghcr.io\" Docker registry host imageCredentials.username string \"\" Docker registry username imageCredentials.password string \"\" Docker registry password imagePullSecrets list [] Specify docker-registry secret names as an array to be used when imageCredentials.create is false operator.replicaCount int 1 Number of replicas desired of Zora operator operator.image.repository string \"ghcr.io/undistro/zora/operator\" Zora operator image repository operator.image.tag string \"\" Overrides the image tag whose default is the chart appVersion operator.image.pullPolicy string \"IfNotPresent\" Image pull policy operator.rbac.create bool true Specifies whether ClusterRoles and ClusterRoleBindings should be created operator.rbac.serviceAccount.create bool true Specifies whether a service account should be created operator.rbac.serviceAccount.annotations object {} Annotations to be added to service account operator.rbac.serviceAccount.name string \"\" The name of the service account to use. 
If not set and create is true, a name is generated using the fullname template operator.podAnnotations object {\"kubectl.kubernetes.io/default-container\":\"manager\"} Annotations to be added to pods operator.podSecurityContext object {\"runAsNonRoot\":true} Security Context to add to the pod operator.securityContext object {\"allowPrivilegeEscalation\":false,\"readOnlyRootFilesystem\":true} Security Context to add to manager container operator.metricsService.type string \"ClusterIP\" Type of metrics service operator.metricsService.port int 8443 Port of metrics service operator.serviceMonitor.enabled bool false Specifies whether a Prometheus ServiceMonitor should be enabled operator.resources object {\"limits\":{\"cpu\":\"500m\",\"memory\":\"128Mi\"},\"requests\":{\"cpu\":\"10m\",\"memory\":\"64Mi\"}} Resources to add to manager container operator.rbacProxy.image.repository string \"gcr.io/kubebuilder/kube-rbac-proxy\" kube-rbac-proxy image repository operator.rbacProxy.image.tag string \"v0.15.0\" kube-rbac-proxy image tag operator.rbacProxy.image.pullPolicy string \"IfNotPresent\" Image pull policy operator.rbacProxy.securityContext object {\"allowPrivilegeEscalation\":false,\"capabilities\":{\"drop\":[\"ALL\"]},\"readOnlyRootFilesystem\":true} Security Context to add to kube-rbac-proxy container operator.rbacProxy.resources object {\"limits\":{\"cpu\":\"500m\",\"memory\":\"128Mi\"},\"requests\":{\"cpu\":\"5m\",\"memory\":\"64Mi\"}} Resources to add to kube-rbac-proxy container operator.nodeSelector object {} Node selection to constrain a Pod to only be able to run on particular Node(s) operator.tolerations list [] Tolerations for pod assignment operator.affinity object {} Map of node/pod affinities operator.log.encoding string \"json\" Log encoding (one of 'json' or 'console') operator.log.level string \"info\" Log level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity operator.log.stacktraceLevel string \"error\" Log level at and above which stacktraces are captured (one of 'info', 'error' or 'panic') operator.log.timeEncoding string \"rfc3339\" Log time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano') operator.webhook.enabled bool true Specifies whether webhook server is enabled scan.misconfiguration.enabled bool true Specifies whether misconfiguration scan is enabled scan.misconfiguration.schedule string Cron expression for every hour at the current minute + 5 minutes Cluster scan schedule in Cron format for misconfiguration scan scan.misconfiguration.successfulScansHistoryLimit int 1 The number of successful finished scans and their issues to retain. scan.misconfiguration.plugins list [\"marvin\",\"popeye\"] Misconfiguration scanners plugins scan.vulnerability.enabled bool true Specifies whether vulnerability scan is enabled scan.vulnerability.schedule string Cron expression for every day at the current hour and minute + 5 minutes Cluster scan schedule in Cron format for vulnerability scan scan.vulnerability.successfulScansHistoryLimit int 1 The number of successful finished scans and their issues to retain. 
scan.vulnerability.plugins list [\"trivy\"] Vulnerability scanners plugins scan.worker.image.repository string \"ghcr.io/undistro/zora/worker\" worker image repository scan.worker.image.tag string \"\" Overrides the image tag whose default is the chart appVersion scan.plugins.annotations object {} Annotations added to the plugin service account scan.plugins.marvin.resources object {\"limits\":{\"cpu\":\"500m\",\"memory\":\"500Mi\"},\"requests\":{\"cpu\":\"250m\",\"memory\":\"256Mi\"}} Resources to add to marvin container scan.plugins.marvin.podAnnotations object {} Annotations added to the marvin pods scan.plugins.marvin.image.repository string \"ghcr.io/undistro/marvin\" marvin plugin image repository scan.plugins.marvin.image.tag string \"v0.2\" marvin plugin image tag scan.plugins.marvin.image.pullPolicy string \"Always\" Image pull policy scan.plugins.marvin.env list [] List of environment variables to set in marvin container. scan.plugins.marvin.envFrom list [] List of sources to populate environment variables in marvin container. scan.plugins.trivy.ignoreUnfixed bool false Specifies whether only fixed vulnerabilities should be reported scan.plugins.trivy.ignoreDescriptions bool false Specifies whether vulnerability descriptions should be ignored scan.plugins.trivy.resources object {\"limits\":{\"cpu\":\"1500m\",\"memory\":\"4096Mi\"},\"requests\":{\"cpu\":\"500m\",\"memory\":\"2048Mi\"}} Resources to add to trivy container scan.plugins.trivy.podAnnotations object {} Annotations added to the trivy pods scan.plugins.trivy.image.repository string \"ghcr.io/undistro/trivy\" trivy plugin image repository scan.plugins.trivy.image.tag float 0.53 trivy plugin image tag scan.plugins.trivy.image.pullPolicy string \"Always\" Image pull policy scan.plugins.trivy.env list [] List of environment variables to set in trivy container. scan.plugins.trivy.envFrom list [] List of sources to populate environment variables in trivy container. scan.plugins.trivy.timeout string \"10m\" Trivy timeout scan.plugins.trivy.insecure bool false Allow insecure server connections for Trivy scan.plugins.trivy.fsGroup int nil Trivy fsGroup. Should be greater than 0. scan.plugins.trivy.persistence.enabled bool true Specifies whether Trivy vulnerabilities database should be persisted between the scans, using PersistentVolumeClaim scan.plugins.trivy.persistence.accessMode string \"ReadWriteOnce\" Persistence access mode scan.plugins.trivy.persistence.storageClass string \"\" Persistence storage class. Set to empty for default storage class scan.plugins.trivy.persistence.storageRequest string \"2Gi\" Persistence storage size scan.plugins.trivy.persistence.downloadJavaDB bool false Specifies whether Java vulnerability database should be downloaded on helm install/upgrade scan.plugins.popeye.skipInternalResources bool false Specifies whether the following resources should be skipped by popeye scans. 1. resources from kube-system, kube-public and kube-node-lease namespaces; 2. kubernetes system reserved RBAC (prefixed with system:); 3. kube-root-ca.crt configmaps; 4. default namespace; 5. default serviceaccounts; 6. Helm secrets (prefixed with sh.helm.release); 7. Zora components. 
See popeye configuration file that is used for this case: https://github.com/undistro/zora/blob/main/charts/zora/templates/plugins/popeye-config.yaml scan.plugins.popeye.resources object {\"limits\":{\"cpu\":\"500m\",\"memory\":\"500Mi\"},\"requests\":{\"cpu\":\"250m\",\"memory\":\"256Mi\"}} Resources to add to popeye container scan.plugins.popeye.podAnnotations object {} Annotations added to the popeye pods scan.plugins.popeye.image.repository string \"ghcr.io/undistro/popeye\" popeye plugin image repository scan.plugins.popeye.image.tag float 0.21 popeye plugin image tag scan.plugins.popeye.image.pullPolicy string \"Always\" Image pull policy scan.plugins.popeye.env list [] List of environment variables to set in popeye container. scan.plugins.popeye.envFrom list [] List of sources to populate environment variables in popeye container. kubexnsImage.repository string \"ghcr.io/undistro/kubexns\" kubexns image repository kubexnsImage.tag string \"v0.1\" kubexns image tag kubexnsImage.pullPolicy string \"Always\" Image pull policy customChecksConfigMap string \"zora-custom-checks\" Custom checks ConfigMap name httpsProxy string \"\" HTTPS proxy URL noProxy string \"kubernetes.default.svc.*,127.0.0.1,localhost\" Comma-separated list of URL patterns to be excluded from going through the proxy updateCRDs bool true for upgrades Specifies whether CRDs should be updated by operator at startup tokenRefresh.image.repository string \"ghcr.io/undistro/zora/tokenrefresh\" tokenrefresh image repository tokenRefresh.image.tag string \"\" Overrides the image tag whose default is the chart appVersion tokenRefresh.image.pullPolicy string \"IfNotPresent\" Image pull policy tokenRefresh.rbac.create bool true Specifies whether Roles and RoleBindings should be created tokenRefresh.rbac.serviceAccount.create bool true Specifies whether a service account should be created tokenRefresh.rbac.serviceAccount.annotations object {} Annotations to be added to service account tokenRefresh.rbac.serviceAccount.name string \"\" The name of the service account to use. If not set and create is true, a name is generated using the fullname template tokenRefresh.minRefreshTime string \"1m\" Minimum time to wait before checking for token refresh tokenRefresh.refreshThreshold string \"2h\" Threshold relative to the token expiry timestamp, after which a token can be refreshed. tokenRefresh.nodeSelector object {} Node selection to constrain a Pod to only be able to run on particular Node(s) tokenRefresh.tolerations list [] Tolerations for pod assignment tokenRefresh.affinity object {} Map of node/pod affinities tokenRefresh.podAnnotations object {\"kubectl.kubernetes.io/default-container\":\"manager\"} Annotations to be added to pods tokenRefresh.podSecurityContext object {\"runAsNonRoot\":true} Security Context to add to the pod tokenRefresh.securityContext object {\"allowPrivilegeEscalation\":false,\"readOnlyRootFilesystem\":true} Security Context to add to manager container zoraauth.domain string \"\" The domain associated with the tokens zoraauth.clientId string \"\" The client id associated with the tokens zoraauth.accessToken string \"\" The access token authorizing access to the SaaS API server zoraauth.tokenType string \"Bearer\" The type of the access token zoraauth.refreshToken string \"\" The refresh token for obtaining a new access token

Specify each parameter using the --set key=value[,key=value] argument to helm install. For example,

helm install zora \\\n  --set operator.resources.limits.memory=256Mi undistro/zora\n

Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,

helm install zora -f values.yaml undistro/zora\n

Tip: You can use the default values.yaml

"},{"location":"configuration/authenticated-registries/","title":"Authenticated Registries","text":"

The Trivy plugin is able to scan images from registries that require authentication.

It's necessary to create a secret containing the authentication credentials as comma-separated pairs, as in the command below.

Note

For AWS ECR and Azure ACR registries, please refer to the specific pages.

kubectl create secret generic trivy-credentials -n zora-system \\\n  --from-literal=TRIVY_USERNAME=\"<username1>,<username2>\" \\\n  --from-literal=TRIVY_PASSWORD=\"<password1>,<password2>\"\n

Note

Please note that the number of usernames and passwords must be the same.

Once the secret is created, it needs to be referenced in the Helm chart parameters, as in the following values.yaml file:

scan:\n  plugins:\n    trivy:\n      envFrom:\n        - secretRef:\n            name: trivy-credentials\n            optional: true\n

Then provide this file in the helm upgrade --install command with the -f values.yaml flag.

This ensures that Trivy can authenticate with the private registries using the provided credentials.

"},{"location":"configuration/convert-to-csv/","title":"Convert results to CSV","text":"

After a successful scan, the results (vulnerabilities and misconfigurations) are available in your cluster via CRDs, and you can transform them into CSV files using jq.

"},{"location":"configuration/convert-to-csv/#vulnerabilities","title":"Vulnerabilities","text":"

Vulnerability scan results are stored as instances of VulnerabilityReport CRD within your cluster. You can export summaries or detailed reports of these vulnerabilities to CSV format for further analysis.

"},{"location":"configuration/convert-to-csv/#images-summary","title":"Images summary","text":"

To generate a summary report of vulnerabilities by image, run the following command:

kubectl get vulnerabilityreports -n zora-system -o json | jq -r '\n  [\"Image\", \"Image digest\", \"OS\", \"Distro\", \"Distro version\", \"Total\", \"Critical\", \"High\", \"Medium\", \"Low\", \"Unknown\", \"Scanned at\"],\n  (.items[] | [\n    .spec.image, .spec.digest, .spec.os, .spec.distro.name, .spec.distro.version, \n    .spec.summary.total, .spec.summary.critical, .spec.summary.high, .spec.summary.medium, .spec.summary.low, .spec.summary.unknown,\n    .metadata.creationTimestamp\n  ]) | @csv' > images.csv\n

This command will produce a CSV file, images.csv, with the following structure:

Image Image digest OS Distro Distro version Total Critical High Medium Low Unknown Scanned at docker.io/istio/examples-bookinfo-reviews-v1:1.20.1 istio/examples-bookinfo-reviews-v1@sha256:5b3c8ec2cb877b7a3c93fc340bb91633c3e51a6bc43a2da3ae7d72727650ec07 linux ubuntu 22.04 45 0 0 25 20 0 2024-10-31T12:56:51Z nginx nginx@sha256:28402db69fec7c17e179ea87882667f1e054391138f77ffaf0c3eb388efc3ffb linux debian 12.7 95 2 10 24 59 0 2024-10-31T12:56:51Z"},{"location":"configuration/convert-to-csv/#full-report-images-and-vulnerabilities","title":"Full report: images and vulnerabilities","text":"

To create a detailed report of each vulnerability affecting images, use the following command:

kubectl get vulnerabilityreports -n zora-system -o json | jq -r '\n  [\"Image\", \"Image digest\", \"OS\", \"Distro\", \"Distro version\", \"Vulnerability ID\", \"Severity\", \"Score\", \"Title\", \"Package\", \"Type\", \"Version\", \"Status\", \"Fix version\", \"Scanned at\"],\n  (.items[] | . as $i | $i.spec.vulnerabilities[] as $vuln | $vuln.packages[] | [\n    $i.spec.image, $i.spec.digest, $i.spec.os, $i.spec.distro.name, $i.spec.distro.version,\n    $vuln.id, $vuln.severity, $vuln.score, $vuln.title,\n    .package, .type, .version, .status, .fixVersion,\n    $i.metadata.creationTimestamp\n  ]) | @csv' > vulnerabilities.csv\n

This will generate a vulnerabilities.csv file with details for each vulnerability:

Note

A single vulnerability can affect multiple packages within the same image, so you may see repeated entries for the same vulnerability. For instance, in the example below, CVE-2024-7264 affects both curl and libcurl4 packages in the same image.

Image Image digest OS Distro Distro version Vulnerability ID Severity Score Title Package Type Version Status Fix version Scanned at nginx nginx@sha256:28402db69fec7c17e179ea87882667f1e054391138f77ffaf0c3eb388efc3ffb linux debian 12.7 CVE-2023-49462 HIGH 8.8 libheif v1.17.5 was discovered to contain a segmentation violation via ... libheif1 debian 1.15.1-1 fixed 1.15.1-1+deb12u1 2024-10-31T12:56:51Z docker.io/istio/examples-bookinfo-reviews-v1:1.20.1 istio/examples-bookinfo-reviews-v1@sha256:5b3c8ec2cb877b7a3c93fc340bb91633c3e51a6bc43a2da3ae7d72727650ec07 linux ubuntu 22.04 CVE-2024-7264 MEDIUM 6.5 curl: libcurl: ASN.1 date parser overread curl ubuntu 7.81.0-1ubuntu1.15 fixed 7.81.0-1ubuntu1.17 2024-10-31T12:56:51Z docker.io/istio/examples-bookinfo-reviews-v1:1.20.1 istio/examples-bookinfo-reviews-v1@sha256:5b3c8ec2cb877b7a3c93fc340bb91633c3e51a6bc43a2da3ae7d72727650ec07 linux ubuntu 22.04 CVE-2024-7264 MEDIUM 6.5 curl: libcurl: ASN.1 date parser overread libcurl4 ubuntu 7.81.0-1ubuntu1.15 fixed 7.81.0-1ubuntu1.17 2024-10-31T12:56:51Z"},{"location":"configuration/convert-to-csv/#misconfigurations","title":"Misconfigurations","text":"

Misconfiguration scan results are represented as instances of ClusterIssue CRD within your cluster, and can also be parsed to CSV format.

"},{"location":"configuration/convert-to-csv/#misconfigurations-summary","title":"Misconfigurations summary","text":"

To generate a summary report of misconfigurations, you can run the following command:

kubectl get misconfigurations -n zora-system -o json | jq -r '\n  [\"ID\", \"Misconfiguration\", \"Severity\", \"Category\", \"Total resources\", \"Scanned at\"],\n  (.items[] | ([.spec.resources[] | length] | add) as $totalResources | [\n    .spec.id, .spec.message, .spec.severity, .spec.category, $totalResources, .metadata.creationTimestamp\n  ]) | @csv' > misconfigurations.csv\n

This command will create a misconfigurations.csv file with the following structure:

ID Misconfiguration Severity Category Total resources Scanned at M-102 Privileged container High Security 2 2024-10-31T17:45:08Z M-103 Insecure capabilities High Security 2 2024-10-31T17:45:08Z M-112 Allowed privilege escalation Medium Security 14 2024-10-31T17:45:08Z M-113 Container could be running as root user Medium Security 18 2024-10-31T17:45:08Z M-201 Application credentials stored in configuration files High Security 6 2024-10-31T17:45:08Z M-300 Root filesystem write allowed Low Security 29 2024-10-31T17:45:08Z M-400 Image tagged latest Medium Best Practices 2 2024-10-31T17:45:08Z M-403 Liveness probe not configured Medium Reliability 16 2024-10-31T17:45:08Z M-406 Memory not limited Medium Reliability 15 2024-10-31T17:45:08Z"},{"location":"configuration/convert-to-csv/#full-report-misconfigurations-and-affected-resources","title":"Full report: misconfigurations and affected resources","text":"

A detailed CSV file containing the affected resources can be generated with the command below.

kubectl get misconfigurations -n zora-system -o json | jq -r '\n  [\"ID\", \"Misconfiguration\", \"Severity\", \"Category\", \"Resource Type\", \"Resource\", \"Scanned at\"],\n  (.items[] as $i | $i.spec.resources | to_entries[] as $resource | $resource.value[] as $affectedResource | [\n    $i.spec.id, $i.spec.message, $i.spec.severity, $i.spec.category, $resource.key, $affectedResource, $i.metadata.creationTimestamp\n  ]) | @csv' > misconfigurations_full.csv\n

This command will generate the misconfigurations_full.csv file with the following structure:

ID Misconfiguration Severity Category Resource Type Resource Scanned at M-400 Image tagged latest Medium Best Practices v1/pods default/test 2024-10-31T18:45:06Z M-400 Image tagged latest Medium Best Practices v1/pods default/nginx 2024-10-31T18:45:06Z"},{"location":"configuration/custom-checks/","title":"Custom checks","text":"

Zora offers a declarative way to create your own checks using the CustomCheck API, introduced in version 0.6.

Custom checks use the Common Expression Language (CEL) to declare the validation rules and are performed by the Marvin plugin, which should be enabled in your cluster scans.

Info

Marvin has been a default plugin, enabled by default in cluster scans, since Zora 0.5.0.

"},{"location":"configuration/custom-checks/#customcheck-api","title":"CustomCheck API","text":"

The example below demonstrates a custom check that requires the labels mycompany.com/squad and mycompany.com/component to be present on Pods, Deployments and Services.

Example

apiVersion: zora.undistro.io/v1alpha1\nkind: CustomCheck\nmetadata:\n  name: mycheck\nspec:\n  message: \"Required labels\"\n  severity: Low\n  category: Custom\n  match:\n    resources:\n      - group: \"\"\n        version: v1\n        resource: pods\n      - group: apps\n        version: v1\n        resource: deployments\n      - group: \"\"\n        version: v1\n        resource: services\n  params:\n    requiredLabels:\n      - mycompany.com/squad\n      - mycompany.com/component\n  validations:\n    - expression: >\n        has(object.metadata.labels) &&\n        !object.metadata.labels.all(label,\n          params.requiredLabels.all(\n            req, req != label\n          )\n        )\n      message: \"Resource without required labels\"\n

The spec.match.resources defines which resources are checked by the expressions defined in spec.validations.expression using Common Expression Language (CEL).

If an expression evaluates to false, the check fails, and a ClusterIssue is reported.

CEL Playground

To quickly test CEL expressions directly from your browser, check out CEL Playground.

"},{"location":"configuration/custom-checks/#variables","title":"Variables","text":"

The variables available in CEL expressions:

Variable Description object The object being scanned. params The parameters defined in the spec.params field.

If the object matches a PodSpec, the following useful variables are available:

Variable Description allContainers A list of all containers, including initContainers and ephemeralContainers. podMeta The Pod metadata. podSpec The Pod spec.

The following resources match a PodSpec (a short example follows the list below):

  • v1/pods
  • v1/replicationcontrollers
  • apps/v1/replicasets
  • apps/v1/deployments
  • apps/v1/statefulsets
  • apps/v1/daemonsets
  • batch/v1/jobs
  • batch/v1/cronjobs
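
As a quick illustration of these variables, here is a hypothetical CEL expression (not one of the built-in checks) that requires every label listed in params.requiredLabels to be present on the object:

has(object.metadata.labels) &&\nparams.requiredLabels.all(label, label in object.metadata.labels)\n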
"},{"location":"configuration/custom-checks/#applying-custom-checks","title":"Applying custom checks","text":"

Once you have a CustomCheck in a file, you can apply it with the following command.

kubectl apply -f check.yaml -n zora-system\n
"},{"location":"configuration/custom-checks/#listing-custom-checks","title":"Listing custom checks","text":"

Once created, list the custom checks to see if they are ready.

kubectl get customchecks -n zora-system\n
NAME      MESSAGE           SEVERITY   READY\nmycheck   Required labels   Low        True\n

The READY column indicates whether the check has been successfully compiled and is ready to be used in the next Marvin scan.

ClusterIssues reported by a custom check are labeled custom=true and can be filtered by the following command:

kubectl get clusterissues -l custom=true\n
NAME                             CLUSTER     ID        MESSAGE           SEVERITY   CATEGORY   AGE\nmycluster-mycheck-4edd75cb85a4   mycluster   mycheck   Required labels   Low        Custom     25s\n

"},{"location":"configuration/custom-checks/#examples","title":"Examples","text":"

All Marvin built-in checks follow a format similar to the CustomCheck API. You can browse the internal/builtins folder for examples.

Here are some examples of Marvin built-in checks expressions:

  • HostPath volumes must be forbidden
    !has(podSpec.volumes) || podSpec.volumes.all(vol, !has(vol.hostPath))\n
  • Sharing the host namespaces must be disallowed
    (!has(podSpec.hostNetwork) || podSpec.hostNetwork == false) &&\n(!has(podSpec.hostPID) || podSpec.hostPID == false) &&\n(!has(podSpec.hostIPC) || podSpec.hostIPC == false)\n
  • Privileged Pods disable most security mechanisms and must be disallowed
    allContainers.all(container,\n  !has(container.securityContext) ||\n  !has(container.securityContext.privileged) ||\n  container.securityContext.privileged == false)\n
  • HostPorts should be disallowed entirely (recommended) or restricted to a known list
    allContainers.all(container,\n  !has(container.ports) ||\n  container.ports.all(port,\n    !has(port.hostPort) ||\n    port.hostPort == 0 ||\n    port.hostPort in params.allowedHostPorts\n  )\n)\n

Marvin's checks and Zora's CustomCheck API are inspired by the Kubernetes ValidatingAdmissionPolicy API, introduced in Kubernetes 1.26 as an alpha feature. Below is the table of validation expression examples from the Kubernetes documentation.

Expression Purpose object.minReplicas <= object.replicas && object.replicas <= object.maxReplicas Validate that the three fields defining replicas are ordered appropriately 'Available' in object.stateCounts Validate that an entry with the 'Available' key exists in a map (size(object.list1) == 0) != (size(object.list2) == 0) Validate that one of two lists is non-empty, but not both !('MY_KEY' in object.map1) || object['MY_KEY'].matches('^[a-zA-Z]*$') Validate the value of a map for a specific key, if it is in the map object.envars.filter(e, e.name == 'MY_ENV').all(e, e.value.matches('^[a-zA-Z]*$')) Validate the 'value' field of a listMap entry where key field 'name' is 'MY_ENV' has(object.expired) && object.created + object.ttl < object.expired Validate that 'expired' date is after a 'create' date plus a 'ttl' duration object.health.startsWith('ok') Validate a 'health' string field has the prefix 'ok' object.widgets.exists(w, w.key == 'x' && w.foo < 10) Validate that the 'foo' property of a listMap item with a key 'x' is less than 10 type(object) == string ? object == '100%' : object == 1000 Validate an int-or-string field for both the int and string cases object.metadata.name.startsWith(object.prefix) Validate that an object's name has the prefix of another field value object.set1.all(e, !(e in object.set2)) Validate that two listSets are disjoint size(object.names) == size(object.details) && object.names.all(n, n in object.details) Validate the 'details' map is keyed by the items in the 'names' listSet size(object.clusters.filter(c, c.name == object.primary)) == 1 Validate that the 'primary' property has one and only one occurrence in the 'clusters' listMap"},{"location":"configuration/https-proxy/","title":"HTTPS Proxy","text":"

If your network environment requires the use of a proxy, you must ensure proper configuration of the httpsProxy parameter when running the helm upgrade --install command.

# omitted \"helm upgrade --install\" command and parameters\n\n--set httpsProxy=\"https://secure.proxy.tld\"\n

Additionally, you can specify URLs that should bypass the proxy by setting the noProxy parameter as a comma-separated list. Note that this parameter already has a default value: kubernetes.default.svc.*,127.0.0.1,localhost.
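
For example, a sketch that extends the default list with a hypothetical internal host (registry.internal.example.com is a placeholder; commas must be escaped with a backslash when passed via --set):

--set noProxy=\"kubernetes.default.svc.*\\,127.0.0.1\\,localhost\\,registry.internal.example.com\"\n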

Configuring proxy settings enables both the Trivy plugin and zora-operator to use the proxy for external requests.

Zora OSS sends scan results to the following external URL if your installation is integrated with Zora Dashboard:

  • https://zora-dashboard.undistro.io

Trivy, in turn, downloads vulnerability databases during scans from the following external sources:

  • ghcr.io/aquasecurity/trivy-db
  • ghcr.io/aquasecurity/trivy-java-db
"},{"location":"configuration/resources/","title":"Compute resources","text":"

Zora Helm Chart allows you to define resource requests and limits (memory and CPU) for zora-operator and plugins. You can do this by setting specific parameters using the --set argument, as in the example below.

--set operator.resources.limits.memory=256Mi\n

Alternatively, a YAML file can be specified using the -f myvalues.yaml flag.

Tip

Refer to the default values.yaml file for more details.

In a similar way, you can customize the resources for plugins. The following example sets 1Gi as the memory limit for the marvin plugin.

--set scan.plugins.marvin.resources.limits.memory=1Gi\n
"},{"location":"configuration/retain-issues/","title":"Retain issues","text":"

By default, both scans automatically scheduled by Zora upon installation are configured to retain issues/results only from the last scan.

To retain results from the last two scans, for example, you should set the successfulScansHistoryLimit field of ClusterScan to 2.

This can be done either by directly editing the ClusterScan object or by providing a parameter in the Helm installation/upgrade command:

# omitted \"helm upgrade --install\" command and parameters\n\n--set scan.misconfiguration.successfulScansHistoryLimit=2\n

In this case, it may appear that there are duplicate issues when more than one scan completes successfully. However, these issues are actually related to different scans. The identifier of each scan can be found in the scanID label of each issue.

kubectl get issues -n zora-system --show-labels\n
NAME                    CLUSTER     ID      MESSAGE                SEVERITY   CATEGORY   AGE    LABELS\nkind-kind-m-102-4wxvv   kind-kind   M-102   Privileged container   High       Security   43s    scanID=556cc35a-830e-45af-a31c-7130918de262,category=Security,cluster=kind-kind,custom=false,id=M-102,plugin=marvin,severity=High\nkind-kind-m-102-nf5xq   kind-kind   M-102   Privileged container   High       Security   102s   scanID=8464411a-4b9c-456b-a11c-dd3a5ab905f5,category=Security,cluster=kind-kind,custom=false,id=M-102,plugin=marvin,severity=High\n

To list issues from a specific scan, you can use a label selector like this:

kubectl get issues -n zora-system -l scanID=556cc35a-830e-45af-a31c-7130918de262\n

This also applies to vulnerability scans and VulnerabilityReport results.
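
For instance, to list VulnerabilityReports from a specific scan, using a scanID taken from your own results (<scan ID> is a placeholder):

kubectl get vulnerabilityreports -n zora-system -l scanID=<scan ID>\n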

Warning

Note that results are stored as CRDs in your Kubernetes cluster. Be cautious not to set a high value that could affect the performance and storage capacity of your Kubernetes cluster.

Note

This applies only to Zora OSS; Zora Dashboard always shows results from the last scan.

"},{"location":"configuration/scan-schedule/","title":"Scan schedule","text":"

After successfully installing Zora, vulnerability and misconfiguration scans are automatically scheduled for your cluster, with each scan using different plugins.

Scan schedules are defined using Cron expressions. You can view the schedule for your cluster by listing ClusterScan resources:

kubectl get clusterscans -o wide -n zora-system\n

By default, the misconfiguration scan is scheduled to run every hour at the current minute plus 5, while the vulnerability scan is scheduled to run every day at the current hour and the current minute plus 5.

For example, if the installation occurred at 10:00 UTC, the scans will have the following schedules:

Scan Cron Description Misconfigurations 5 * * * * Every hour at minute 5 Vulnerabilities 5 10 * * * Every day at 10:05

However, you can customize the schedule for each scan by directly editing the ClusterScan resource or by providing parameters in the helm upgrade --install command, as shown in the example below:

# omitted \"helm upgrade --install\" command and parameters\n\n--set scan.misconfiguration.schedule=\"0 * * * *\" \\\n--set scan.vulnerability.schedule=\"0 0 * * *\"\n

The recommended approach is to provide parameters through Helm.

Costly scan scheduling

Overly frequent scheduling of scans can increase networking costs significantly, especially for vulnerability scans, which involve downloading a vulnerability database and pulling images.

Warning

If you directly edit the ClusterScan resource, be cautious when running the next update via Helm, as the value may be overwritten.

"},{"location":"configuration/scan-schedule/#cron-schedule-syntax","title":"Cron schedule syntax","text":"

A Cron expression has five fields separated by spaces, and each field represents a time unit.

\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 minute (0 - 59)\n\u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 hour (0 - 23)\n\u2502 \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 day of the month (1 - 31)\n\u2502 \u2502 \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 month (1 - 12)\n\u2502 \u2502 \u2502 \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 day of the week (0 - 6) (Sunday to Saturday;\n\u2502 \u2502 \u2502 \u2502 \u2502                                   7 is also Sunday on some systems)\n\u2502 \u2502 \u2502 \u2502 \u2502                                   OR sun, mon, tue, wed, thu, fri, sat\n\u2502 \u2502 \u2502 \u2502 \u2502\n* * * * *\n
Operator Description Example * Any value 15 * * * * runs at minute 15 of every hour of every day. , Value list separator 2,10 4,5 * * * runs at minutes 2 and 10 of the 4th and 5th hour of every day. - Range of values 30 4-6 * * * runs at minute 30 of the 4th, 5th, and 6th hour. / Step values 20/15 * * * * runs every 15 minutes starting from minute 20 through 59 (minutes 20, 35, and 50)."},{"location":"configuration/suspend-scan/","title":"Suspending scans","text":"

The cluster scans, which are automatically scheduled upon installation, can be suspended by setting spec.suspend to true in a ClusterScan object. This action suspends subsequent scans; it does not apply to scans that have already started.

The command below suspends the mycluster-vuln scan.

kubectl patch scan mycluster-vuln --type='merge' -p '{\"spec\":{\"suspend\":true}}' -n zora-system\n

Note

This way, the scan results remain available, unlike if the ClusterScan had been deleted, in which case the results would also be removed.

Setting spec.suspend back to false resumes the scans:

kubectl patch scan mycluster-vuln --type='merge' -p '{\"spec\":{\"suspend\":false}}' -n zora-system\n
"},{"location":"configuration/vulnerability-database-persistence/","title":"Vulnerability Database Persistence","text":"

Trivy utilizes a database containing vulnerability information during its scans. This database is updated every 6 hours.

When scanning JAR files, Trivy downloads a specific database for Java every 3 days.

Both databases are distributed via GitHub Container Registry (GHCR) and cached by Trivy in the local file system.

Starting with version 0.8.4, Zora persists Trivy databases by default, caching them between the scheduled scans. This means that scheduled scans may not need to download the databases, saving compute resources, time, and networking.

This is done by applying a PersistentVolumeClaim during Zora installation/upgrade through Helm. A Job is also applied, which downloads the vulnerability database so it is ready for the first scheduled scan.

This persistence can be disabled or configured with the following Helm parameters:

Key Type Default Description scan.plugins.trivy.persistence.enabled bool true Specifies whether Trivy vulnerabilities database should be persisted between the scans, using PersistentVolumeClaim scan.plugins.trivy.persistence.accessMode string \"ReadWriteOnce\" Persistence access mode scan.plugins.trivy.persistence.storageClass string \"\" Persistence storage class. Set to empty for default storage class scan.plugins.trivy.persistence.storageRequest string \"2Gi\" Persistence storage size scan.plugins.trivy.persistence.downloadJavaDB bool false Specifies whether Java vulnerability database should be downloaded on helm install/upgrade

These parameters can be specified using the --set key=value argument in the helm upgrade --install command.
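
For example, to disable persistence entirely, or, independently, to also pre-download the Java database on install/upgrade (both parameters from the table above):

--set scan.plugins.trivy.persistence.enabled=false\n--set scan.plugins.trivy.persistence.downloadJavaDB=true\n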

"},{"location":"configuration/private-registries/acr/","title":"Azure Container Registry (ACR)","text":"

If you are running within Azure and making use of a private Azure Container Registry (ACR) to host your application images, the Trivy plugin will be unable to scan those images unless access is granted to the registry through a service principal with the AcrPull role assigned.

"},{"location":"configuration/private-registries/acr/#creating-service-principal","title":"Creating service principal","text":"

The following Azure CLI command creates a service principal with the AcrPull role assigned and stores the output, including the credentials, in the SP_DATA environment variable.

Note

Please replace <SUBSCRIPTION_ID>, <RESOURCE_GROUP>, and <REGISTRY_NAME> before running the command below.

export SP_DATA=$(az ad sp create-for-rbac --name ZoraTrivy --role AcrPull --scope \"/subscriptions/<SUBSCRIPTION_ID>/resourceGroups/<RESOURCE_GROUP>/providers/Microsoft.ContainerRegistry/registries/<REGISTRY_NAME>\")\n
"},{"location":"configuration/private-registries/acr/#usage","title":"Usage","text":"

Once the service principal is created and the credentials are in the SP_DATA environment variable, create a Kubernetes secret to store these credentials by running:

kubectl create secret generic trivy-acr-credentials -n zora-system \\\n  --from-literal=AZURE_CLIENT_ID=$(echo $SP_DATA | jq -r '.appId') \\\n  --from-literal=AZURE_CLIENT_SECRET=$(echo $SP_DATA | jq -r '.password') \\\n  --from-literal=AZURE_TENANT_ID=$(echo $SP_DATA | jq -r '.tenant')\n

Note

If you are running this command before a Zora installation, you may need to create the zora-system namespace.

kubectl create namespace zora-system\n

Now reference the secret name in a values.yaml file:

scan:\n  plugins:\n    trivy:\n      envFrom:\n        - secretRef:\n            name: trivy-acr-credentials\n            optional: true\n

Then provide it in the helm upgrade --install command:

-f values.yaml\n

This will now allow the Trivy plugin to scan your internal images for vulnerabilities.

"},{"location":"configuration/private-registries/ecr/","title":"AWS Elastic Container Registry (ECR)","text":"

If you are running within AWS and making use of a private Elastic Container Registry (ECR) to host your application images, the Trivy plugin will be unable to scan those images unless access is granted to the registry through an Identity and Access Management (IAM) role assigned to the service account running the Trivy plugins.

Once an IAM role granting access to the ECR has been created, it can be assigned to the service account by including the following additional parameter when running the helm upgrade --install command.

--set scan.plugins.annotations.eks\\\\.amazonaws\\\\.com/role-arn=arn:aws:iam::<AWS_ACCOUNT_ID>:role/<ROLE_NAME>\n
where <AWS_ACCOUNT_ID> should be replaced with your AWS account ID, and <ROLE_NAME> should be replaced with the name of the role granting access to the ECR.
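
Equivalently, a sketch of the same parameter in YAML form, set in a values.yaml file and provided with the -f flag:

scan:\n  plugins:\n    annotations:\n      eks.amazonaws.com/role-arn: arn:aws:iam::<AWS_ACCOUNT_ID>:role/<ROLE_NAME>\n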

This will now allow the Trivy plugin to scan your internal images for vulnerabilities.

"},{"location":"getting-started/installation/","title":"Installation","text":"

Zora OSS is installed inside your Kubernetes clusters using Helm, where the zora-operator deployment is created and scans are automatically scheduled for your cluster.

"},{"location":"getting-started/installation/#prerequisites","title":"Prerequisites","text":"
  • Kubernetes cluster 1.21+
  • Kubectl
  • Helm 3.8+
"},{"location":"getting-started/installation/#install-with-helm","title":"Install with Helm","text":"

First, ensure that your current kubectl context refers to the Kubernetes cluster you wish to install Zora into.

Manage kubectl contexts

The following commands can help you manage kubectl contexts:

  • List all contexts: kubectl config get-contexts

  • Display the current-context: kubectl config current-context

  • Use the context for the Kind cluster: kubectl config use-context kind-kind

Then, run the following command to install Zora Helm chart:

HTTP chart repositoryOCI registry
helm repo add undistro https://charts.undistro.io --force-update\nhelm repo update undistro\nhelm upgrade --install zora undistro/zora \\\n  -n zora-system \\\n  --version 0.10.2 \\\n  --create-namespace \\\n  --wait \\\n  --set clusterName=\"$(kubectl config current-context)\"\n
helm upgrade --install zora oci://ghcr.io/undistro/helm-charts/zora \\\n  -n zora-system \\\n  --version 0.10.2 \\\n  --create-namespace \\\n  --wait \\\n  --set clusterName=\"$(kubectl config current-context)\"\n

This command will install Zora in the zora-system namespace, creating the namespace if it doesn't already exist.

Zora OSS + Zora Dashboard

To integrate your Zora OSS installation with Zora Dashboard, you need to authenticate with the authorization server and provide the saas.workspaceID parameter in the installation command. For more information, please refer to this page.
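
For quick reference, the integration boils down to adding a parameter like the following to the installation command (the placeholder is yours to replace; the full commands are shown on the Zora Dashboard page):

--set saas.workspaceID=<YOUR WORKSPACE ID HERE>\n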

With the following commands, you can verify whether Zora has been successfully installed and retrieve the installation notes:

helm list -n zora-system\nhelm get notes zora -n zora-system\n

Zora Helm Chart

To see the full list of available parameters in the Zora Helm chart, please visit this page.

If everything is set up correctly, your cluster should have scheduled scans. Check it by running:

kubectl get cluster,scan -o wide -n zora-system\n

Customize scan schedule

To customize the scan schedule, please refer to the Scan Schedule page.

Once the cluster is successfully scanned, you can check issues by running:

kubectl get misconfigurations -n zora-system\nkubectl get vulnerabilities   -n zora-system\n
"},{"location":"getting-started/installation/#migrating-to-08","title":"Migrating to 0.8","text":""},{"location":"getting-started/installation/#whats-new-in-08","title":"What's new in 0.8","text":""},{"location":"getting-started/installation/#extended-vulnerability-reports-information","title":"Extended Vulnerability Reports Information","text":"

Now, VulnerabilityReports provide more in-depth information about the image, including OS, architecture, distro, and digest. Additionally, details about vulnerabilities, such as publishedDate and lastModifiedDate, have been included to offer a clearer understanding of your cluster's security posture.

"},{"location":"getting-started/installation/#full-integration-with-zora-dashboard","title":"Full Integration with Zora Dashboard","text":"

Zora 0.8 introduces the integration of Vulnerability Reports with the Zora Dashboard. Now, alongside misconfigurations, you can centrally explore images and vulnerabilities across your clusters.

"},{"location":"getting-started/installation/#migration-guide","title":"Migration guide","text":"

Version 0.7 or earlier

If you are currently using a version prior to 0.7, please be aware that the 0.7 release brought about significant architectural changes. Before upgrading to version 0.8, refer to this page for essential information and considerations to ensure a smooth transition.

The recommended way to migrate to version 0.8 is to reinstall Zora, including its CRDs.

"},{"location":"getting-started/installation/#uninstall","title":"Uninstall","text":"

You can uninstall Zora and its components by uninstalling the Helm chart installed above.

helm uninstall zora -n zora-system\n

By design, Helm doesn't upgrade or delete CRDs. You can permanently delete the Zora CRDs and any remaining associated resources from your cluster using the following command.

kubectl get crd -o=name | grep --color=never 'zora.undistro.io' | xargs kubectl delete\n

You can also delete the zora-system namespace using the command below.

kubectl delete namespace zora-system\n
"},{"location":"plugins/","title":"Zora Plugins","text":""},{"location":"plugins/#overview","title":"Overview","text":"

Zora utilizes open-source CLI tools like Marvin, Popeye, and Trivy as plugins to perform scans on Kubernetes clusters.

The currently available plugins of a Zora installation can be listed by running the following command:

kubectl get plugins -n zora-system\n
NAME     IMAGE                               TYPE               AGE\nmarvin   ghcr.io/undistro/marvin:v0.2.1      misconfiguration   14m\npopeye   ghcr.io/undistro/popeye:0.21.3-6    misconfiguration   14m\ntrivy    ghcr.io/undistro/trivy:0.50.1-1     vulnerability      14m\n

Each item listed above is an instance of Plugin CRD and represents the execution configuration of a plugin. More details can be seen by getting the YAML output of a plugin:

kubectl get plugin marvin -o yaml -n zora-system\n
"},{"location":"plugins/#plugin-types","title":"Plugin types","text":"

Currently, Zora has two plugin types: vulnerability and misconfiguration, which determine the focus of plugin scans.

  • vulnerability plugins scan cluster images for vulnerabilities, and their results are stored as instances of VulnerabilityReport CRD.

  • misconfiguration plugins scan cluster resources for potential configuration issues, and their results are available as instances of the ClusterIssue CRD.

Both result types can be listed using kubectl, and some aliases are supported for your convenience, as shown in the following commands:

kubectl get vulnerabilityreports\nkubectl get vuln\nkubectl get vulns\nkubectl get vulnerabilities\n
kubectl get clusterissues\nkubectl get issue\nkubectl get issues\nkubectl get misconfig\nkubectl get misconfigs\nkubectl get misconfigurations\n

Note

The results are only available after a successful scan, in the same namespace as the ClusterScan (default is zora-system).

"},{"location":"plugins/#how-plugins-work","title":"How plugins work","text":"

Starting from a Plugin and a ClusterScan, Zora manages and schedules scans by applying CronJobs, which create Jobs and Pods.

The Pods where the scans run include a \"sidecar\" container called worker alongside the plugin container.

After the plugin completes its scan, it signals Zora's worker by writing the path of the results file into a \"done file\".

The worker container waits for the \"done file\" to be present, then transforms the results and creates ClusterIssues or VulnerabilityReports, depending on the plugin type.
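
As a rough sketch of this contract (the scanner name and file paths below are illustrative, not Zora's actual conventions), a plugin's final steps could look like this:

my-scanner --output /tmp/scan-results.json   # hypothetical plugin writing its report\necho /tmp/scan-results.json > /tmp/zora-done # publish the results path into the \"done file\" watched by the worker\n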

Note

This is the aspect that currently prevents the full declarative integration of new plugins. The code responsible for transforming the output of each plugin into CRDs is written in Go within the worker.

Any contributions or suggestions in this regard are greatly appreciated.

Note

This architecture for supporting plugins is inspired by Sonobuoy, a project used for CNCF conformance certification.

"},{"location":"plugins/marvin/","title":"Marvin Plugin","text":"

Marvin is an open-source CLI tool that scans a Kubernetes cluster by evaluating CEL expressions to report potential issues and misconfigurations.

Marvin enables Zora's custom checks using CEL. For further information, please visit this page.

Type: misconfiguration

Image: ghcr.io/undistro/marvin:v0.2.1

GitHub repository: https://github.com/undistro/marvin

"},{"location":"plugins/popeye/","title":"Popeye Plugin","text":"

Popeye is a utility that scans a live Kubernetes cluster and reports potential issues with deployed resources and configurations.

Type: misconfiguration

Image: ghcr.io/undistro/popeye:0.21.3-6

GitHub repository: https://github.com/derailed/popeye

Info

Currently, Zora does not use the official Popeye image (derailed/popeye) due to its lack of multi-architecture support.

"},{"location":"plugins/trivy/","title":"Trivy Plugin","text":"

Trivy is a versatile security scanner that can find vulnerabilities, misconfigurations, secrets, and SBOMs in different targets such as container images, code repositories, and Kubernetes clusters.

Zora uses Trivy as a plugin exclusively to scan vulnerabilities in a Kubernetes cluster.

Type: vulnerability

Image: ghcr.io/undistro/trivy:0.50.1-1

GitHub repository: https://github.com/aquasecurity/trivy

"},{"location":"plugins/trivy/#vulnerability-database-persistence","title":"Vulnerability Database Persistence","text":"

Trivy utilizes a database containing vulnerability information. This database is updated every 6 hours and persisted by default for caching purposes between the scheduled scans.

Please refer to this page for further details and configuration options regarding vulnerability database persistence.

"},{"location":"plugins/trivy/#large-vulnerability-reports","title":"Large vulnerability reports","text":"

Vulnerability reports can be large. If you encounter issues with the etcd request payload limit, you can exclude unfixed vulnerabilities from reports by providing the following flag to the helm upgrade --install command:

--set 'scan.plugins.trivy.ignoreUnfixed=true'\n

To identify this issue, check the logs of the worker container in the trivy pod. The ClusterScan will have a Failed status. You will see a log entry similar to the following example:

2023-09-26T14:18:02Z    ERROR   worker  failed to run worker    {\"error\": \"failed to create VulnerabilityReport \\\"kind-kind-usdockerpkgdevgooglesamplescontainersgkegbfrontendsha256dc8de8e0d569d2f828b187528c9317bd6b605c273ac5a282aebe471f630420fc-rzntw\\\": etcdserver: request is too large\"}\n
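
To inspect these logs, you can target the worker container of the Trivy scan pod; a command along the following lines can be used (the pod name placeholder is illustrative):

kubectl logs <trivy-scan-pod-name> -c worker -n zora-system\n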
"},{"location":"plugins/trivy/#scan-timeout","title":"Scan timeout","text":"

Trivy's scan duration may vary depending on the total number of images in your cluster and the time needed to download the vulnerability database when required.

By default, Zora sets a timeout of 10 minutes for Trivy scan completion.

To adjust this timeout, use the following Helm parameter:

--set scan.plugins.trivy.timeout=15m\n

Once this parameter is updated, the next scan will use the specified value.

"}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Welcome to the Zora documentation","text":"

This documentation will help you install, explore, and configure Zora!

"},{"location":"#what-is-zora","title":"What is Zora?","text":"

Zora is an open-source solution that helps you achieve compliance with Kubernetes best practices recommended by industry-leading frameworks.

By scanning your cluster with multiple plugins at scheduled times, Zora identifies potential issues, misconfigurations, and vulnerabilities.

"},{"location":"#zora-oss-vs-zora-dashboard","title":"Zora OSS vs Zora Dashboard","text":"

Zora OSS is open-source, available under the Apache 2.0 license, and can be used either as a standalone tool or integrated with Zora Dashboard, a SaaS platform that centralizes all your clusters, providing a full experience. Please refer to the Zora Dashboard page for more details.

"},{"location":"#key-features","title":"Key features","text":""},{"location":"#multi-plugin-architecture","title":"Multi-plugin architecture","text":"

Zora seamlessly integrates open-source tools like Popeye, Marvin, and Trivy as scanners. These tools' capabilities are combined to provide you with a unified view of your cluster's security posture, addressing potential issues, misconfigurations, and vulnerabilities.

"},{"location":"#kubernetes-compliance","title":"Kubernetes compliance","text":"

Zora and its plugins provide actionable insights, guiding you to align your cluster with industry-recognized frameworks such as NSA-CISA, MITRE ATT&CK, CIS Benchmark, and Pod Security Standards.

"},{"location":"#custom-checks","title":"Custom checks","text":"

Enabled by the Marvin plugin, Zora offers a declarative way to create your own checks by using Common Expression Language (CEL) expressions to define validation rules.

"},{"location":"#kubernetes-native","title":"Kubernetes-native","text":"

All scan configurations and plugin reports, including misconfigurations and vulnerabilities, are securely stored as CRDs (Custom Resource Definitions) within your Kubernetes cluster, making them easily accessible through the Kubernetes API and the kubectl command.
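
For instance, since Zora's CRDs belong to the zora.undistro.io API group, you can list all of them with a standard kubectl command:

kubectl api-resources --api-group=zora.undistro.io\n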

"},{"location":"#architecture","title":"Architecture","text":"

Zora works as a Kubernetes Operator, where both scan and plugin configurations, as well as the results (misconfigurations and vulnerabilities), are managed in CRDs (Custom Resource Definitions).

Zora Dashboard

When a Zora OSS installation is integrated with Zora Dashboard, scan results are automatically sent to Zora Dashboard SaaS by zora-operator.

Check out Zora Dashboard architecture for more details.

"},{"location":"#zora-origins","title":"Zora origins","text":"

In the early days of the cloud native era, Borg dominated the container-oriented cluster management scene. The name Borg refers to the cybernetic life form from the Star Trek series that worked as a collective of individuals with a single mind and the same purpose, much like a \"cluster\".

As the good nerds we are, and wishing to honor Kubernetes' predecessor (Borg), we named our project Zora.

In Star Trek, Zora is the artificial intelligence that controls the ship U.S.S. Discovery. After merging with a collective of other intelligences, Zora became sentient and joined the crew, bringing insights and making the ship more efficient.

Like Star Trek's Zora, our goal is to help you manage your Kubernetes environment by combining the capabilities of multiple plugins to scan your clusters for misconfigurations and vulnerabilities.

"},{"location":"dashboard/","title":"Zora Dashboard","text":"

Zora Dashboard is a SaaS platform designed to seamlessly centralize the security posture management of all your Kubernetes clusters, providing a full experience powered by Zora OSS.

It features a powerful UI that allows you to navigate, filter and explore details of issues and affected resources across all your clusters. You can also invite users to your workspace.

Try Zora Dashboard

Zora Dashboard offers a starter plan for 14 days, after which it reverts to the free plan, which provides access for 2 clusters with up to 10 nodes each. Please contact us if you need to discuss a tailored solution.

"},{"location":"dashboard/#getting-started","title":"Getting started","text":"

To integrate your Zora OSS installation with Zora Dashboard, you need to first authenticate with the authorization server and then provide your saas.workspaceID parameter in the Zora OSS installation command.

"},{"location":"dashboard/#authenticating-with-the-authorization-server","title":"Authenticating with the Authorization server","text":"

Authenticating with the authorization server is simplified through the use of a helm plugin, zoraauth, which can be installed by executing

helm plugin install https://github.com/undistro/helm-zoraauth\n
and updated by executing
helm plugin update zoraauth\n
The authentication process will occur when the plugin is executed and you visit the authorization server to confirm the request. The instructions within the Zora Dashboard console will include the appropriate parameters for the plugin; these can be obtained through the Connect cluster option once you have signed in to the Zora Dashboard.

To authenticate with the authorization server, copy and run the helm zoraauth command, then follow the instructions in your terminal:

helm zoraauth --audience=\"zora_prod\" \\\n  --client-id=\"<client id>\" \\\n  --domain=\"login.undistro.io\"\nInitiating Device Authorization Flow...\nPlease visit https://login.undistro.io/activate and enter code: BFNS-NWFF, or visit: https://login.undistro.io/activate?user_code=BFNS-NWFF\n
Entering the login URL in your browser will present you with a confirmation screen.

Once you have confirmed the request, you should see the following message in your terminal:

Tokens saved to tokens.yaml\n

You can then install Zora OSS by providing the saas.workspaceID parameter in the Zora OSS installation command:

HTTP chart repositoryOCI registry
helm repo add undistro https://charts.undistro.io --force-update\nhelm repo update undistro\nhelm upgrade --install zora undistro/zora \\\n  -n zora-system --create-namespace --wait \\\n  --set clusterName=\"$(kubectl config current-context)\" \\\n  --set saas.workspaceID=<YOUR WORKSPACE ID HERE> \\\n  --values tokens.yaml\n
helm upgrade --install zora oci://ghcr.io/undistro/helm-charts/zora \\\n  -n zora-system --create-namespace --wait \\\n  --set clusterName=\"$(kubectl config current-context)\" \\\n  --set saas.workspaceID=<YOUR WORKSPACE ID HERE> \\\n  --values tokens.yaml\n
"},{"location":"dashboard/#architecture","title":"Architecture","text":"

Zora OSS acts as the engine of Zora Dashboard, meaning that once scans are completed, only the results are sent to Zora Dashboard, where they are accessible by you and those you have invited to your workspace.

Note that these results do not contain sensitive information or specific data about your cluster configuration.

"},{"location":"faq/","title":"Frequently Asked Questions","text":"

Do you have any questions about Zora? We do our best to answer all of your questions on this page. If you can't find your question below, ask it on our discussion board!

"},{"location":"faq/#is-zora-open-source","title":"Is Zora open source?","text":"

There are two Zora tools: Zora OSS and Zora Dashboard.

Zora OSS is open-source, available under the Apache 2.0 license, and can be used either as a standalone tool or integrated with Zora Dashboard.

On the other hand, Zora Dashboard is a SaaS platform that provides a full experience, centralizing the security posture management of all your clusters. It's free for up to 3 clusters. Visit the Zora Dashboard page for more information.

"},{"location":"faq/#can-i-use-zora-oss-standalone-without-zora-dashboard","title":"Can I use Zora OSS standalone without Zora Dashboard?","text":"

Yes, you can use Zora OSS as a standalone tool and access scan results (misconfigurations and vulnerabilities) via kubectl one cluster at a time.

"},{"location":"faq/#can-i-install-zora-in-a-different-namespace","title":"Can I install Zora in a different namespace?","text":"

Yes, Zora can be installed in any namespace. Simply provide the namespace name using the -n flag in the Helm installation command.
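
For example, a hypothetical installation into a namespace named security could look like this:

helm upgrade --install zora undistro/zora \\\n  -n security \\\n  --create-namespace \\\n  --set clusterName=\"$(kubectl config current-context)\"\n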

The Cluster, ClusterScan, Plugin, ClusterIssue, and VulnerabilityReport objects will be created in the specified namespace.

If you already have Zora installed and want to change the namespace, you will need to reinstall it.

"},{"location":"faq/#can-i-integrate-my-own-plugins-with-zora-and-how","title":"Can I integrate my own plugins with Zora, and how?","text":"

Currently, integrating a new plugin into Zora requires modifying the source code of Worker, a Zora component. The parsing of plugin results into ClusterIssue or VulnerabilityReport is directly handled by Worker, which is written in Go. A fully declarative approach is not yet supported.

Refer to the plugins page to learn more about how plugins work.

Feel free to open an issue or start a discussion with any suggestions regarding this process.

"},{"location":"faq/#which-data-is-sent-to-zora-dashboard-saas","title":"Which data is sent to Zora Dashboard (SaaS)?","text":"

When integrated with Zora Dashboard, only scan results are sent to the SaaS platform.

No sensitive information is collected or exposed.

Scans are performed in your cluster and the results are securely sent via HTTPS to Zora Dashboard, where only you and the users you've invited to your workspace will have access.

"},{"location":"faq/#can-i-host-zora-dashboard-on-premise","title":"Can I host Zora Dashboard on-premise?","text":"

Currently, Zora Dashboard is available as a SaaS platform. While we do not offer an on-premise version of Zora Dashboard at this time, we're continuously working to enhance and expand our offerings. If you have specific requirements or are interested in on-premise solutions, please contact us, and we'll be happy to discuss potential options and explore how we can meet your needs.

"},{"location":"helm-chart/","title":"Zora Helm Chart","text":"

A multi-plugin solution that reports misconfigurations and vulnerabilities by scanning your cluster at scheduled times.

"},{"location":"helm-chart/#installing-the-chart","title":"Installing the Chart","text":"

To install the chart with the release name zora in the zora-system namespace:

helm repo add undistro https://charts.undistro.io --force-update\nhelm repo update undistro\nhelm upgrade --install zora undistro/zora \\\n  -n zora-system \\\n  --version 0.10.2 \\\n  --create-namespace \\\n  --wait \\\n  --set clusterName=\"$(kubectl config current-context)\"\n

These commands deploy Zora on the Kubernetes cluster with the default configuration.

The Parameters section lists the available parameters that can be configured during installation.

Tips:

  • List all charts available in undistro repo using helm search repo undistro

  • Update undistro chart repository using helm repo update undistro

  • List all versions available of undistro/zora chart using helm search repo undistro/zora --versions

  • List all releases in a specific namespace using helm list -n zora-system

  • Get the notes provided by zora release using helm get notes zora -n zora-system

"},{"location":"helm-chart/#uninstalling-the-chart","title":"Uninstalling the Chart","text":"

To uninstall/delete the zora release:

helm uninstall zora -n zora-system\n

The command removes all the Kubernetes components associated with the chart and deletes the release.

"},{"location":"helm-chart/#parameters","title":"Parameters","text":"

The following table lists the configurable parameters of the Zora chart and their default values.

Key Type Default Description nameOverride string \"\" String to partially override fullname template with a string (will prepend the release name) fullnameOverride string \"\" String to fully override fullname template with a string clusterName string \"\" Cluster name. Should be set by kubectl config current-context. saas.workspaceID string \"\" Your SaaS workspace ID saas.server string \"https://zora-dashboard.undistro.io\" SaaS server URL saas.installURL string \"{{.Values.saas.server}}/zora/api/v1alpha1/workspaces/{{.Values.saas.workspaceID}}/helmreleases\" SaaS URL template to notify installation hooks.install.image.repository string \"curlimages/curl\" Post-install hook image repository hooks.install.image.tag string \"8.7.1\" Post-install hook image tag hooks.delete.image.repository string \"rancher/kubectl\" Pre-delete hook image repository hooks.delete.image.tag string \"v1.29.2\" Pre-delete hook image tag imageCredentials.create bool false Specifies whether the secret should be created by providing credentials imageCredentials.registry string \"ghcr.io\" Docker registry host imageCredentials.username string \"\" Docker registry username imageCredentials.password string \"\" Docker registry password imagePullSecrets list [] Specify docker-registry secret names as an array to be used when imageCredentials.create is false operator.replicaCount int 1 Number of replicas desired of Zora operator operator.image.repository string \"ghcr.io/undistro/zora/operator\" Zora operator image repository operator.image.tag string \"\" Overrides the image tag whose default is the chart appVersion operator.image.pullPolicy string \"IfNotPresent\" Image pull policy operator.rbac.create bool true Specifies whether ClusterRoles and ClusterRoleBindings should be created operator.rbac.serviceAccount.create bool true Specifies whether a service account should be created operator.rbac.serviceAccount.annotations object {} Annotations to be added to service account operator.rbac.serviceAccount.name string \"\" The name of the service account to use. 
If not set and create is true, a name is generated using the fullname template operator.podAnnotations object {\"kubectl.kubernetes.io/default-container\":\"manager\"} Annotations to be added to pods operator.podSecurityContext object {\"runAsNonRoot\":true} Security Context to add to the pod operator.securityContext object {\"allowPrivilegeEscalation\":false,\"readOnlyRootFilesystem\":true} Security Context to add to manager container operator.metricsService.type string \"ClusterIP\" Type of metrics service operator.metricsService.port int 8443 Port of metrics service operator.serviceMonitor.enabled bool false Specifies whether a Prometheus ServiceMonitor should be enabled operator.resources object {\"limits\":{\"cpu\":\"500m\",\"memory\":\"128Mi\"},\"requests\":{\"cpu\":\"10m\",\"memory\":\"64Mi\"}} Resources to add to manager container operator.rbacProxy.image.repository string \"gcr.io/kubebuilder/kube-rbac-proxy\" kube-rbac-proxy image repository operator.rbacProxy.image.tag string \"v0.15.0\" kube-rbac-proxy image tag operator.rbacProxy.image.pullPolicy string \"IfNotPresent\" Image pull policy operator.rbacProxy.securityContext object {\"allowPrivilegeEscalation\":false,\"capabilities\":{\"drop\":[\"ALL\"]},\"readOnlyRootFilesystem\":true} Security Context to add to kube-rbac-proxy container operator.rbacProxy.resources object {\"limits\":{\"cpu\":\"500m\",\"memory\":\"128Mi\"},\"requests\":{\"cpu\":\"5m\",\"memory\":\"64Mi\"}} Resources to add to kube-rbac-proxy container operator.nodeSelector object {} Node selection to constrain a Pod to only be able to run on particular Node(s) operator.tolerations list [] Tolerations for pod assignment operator.affinity object {} Map of node/pod affinities operator.log.encoding string \"json\" Log encoding (one of 'json' or 'console') operator.log.level string \"info\" Log level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity operator.log.stacktraceLevel string \"error\" Log level at and above which stacktraces are captured (one of 'info', 'error' or 'panic') operator.log.timeEncoding string \"rfc3339\" Log time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano') operator.webhook.enabled bool true Specifies whether webhook server is enabled scan.misconfiguration.enabled bool true Specifies whether misconfiguration scan is enabled scan.misconfiguration.schedule string Cron expression for every hour at the current minute + 5 minutes Cluster scan schedule in Cron format for misconfiguration scan scan.misconfiguration.successfulScansHistoryLimit int 1 The number of successful finished scans and their issues to retain. scan.misconfiguration.plugins list [\"marvin\",\"popeye\"] Misconfiguration scanners plugins scan.vulnerability.enabled bool true Specifies whether vulnerability scan is enabled scan.vulnerability.schedule string Cron expression for every day at the current hour and minute + 5 minutes Cluster scan schedule in Cron format for vulnerability scan scan.vulnerability.successfulScansHistoryLimit int 1 The number of successful finished scans and their issues to retain. 
scan.vulnerability.plugins list [\"trivy\"] Vulnerability scanners plugins scan.worker.image.repository string \"ghcr.io/undistro/zora/worker\" worker image repository scan.worker.image.tag string \"\" Overrides the image tag whose default is the chart appVersion scan.plugins.annotations object {} Annotations added to the plugin service account scan.plugins.marvin.resources object {\"limits\":{\"cpu\":\"500m\",\"memory\":\"500Mi\"},\"requests\":{\"cpu\":\"250m\",\"memory\":\"256Mi\"}} Resources to add to marvin container scan.plugins.marvin.podAnnotations object {} Annotations added to the marvin pods scan.plugins.marvin.image.repository string \"ghcr.io/undistro/marvin\" marvin plugin image repository scan.plugins.marvin.image.tag string \"v0.2\" marvin plugin image tag scan.plugins.marvin.image.pullPolicy string \"Always\" Image pull policy scan.plugins.marvin.env list [] List of environment variables to set in marvin container. scan.plugins.marvin.envFrom list [] List of sources to populate environment variables in marvin container. scan.plugins.trivy.ignoreUnfixed bool false Specifies whether only fixed vulnerabilities should be reported scan.plugins.trivy.ignoreDescriptions bool false Specifies whether vulnerability descriptions should be ignored scan.plugins.trivy.resources object {\"limits\":{\"cpu\":\"1500m\",\"memory\":\"4096Mi\"},\"requests\":{\"cpu\":\"500m\",\"memory\":\"2048Mi\"}} Resources to add to trivy container scan.plugins.trivy.podAnnotations object {} Annotations added to the trivy pods scan.plugins.trivy.image.repository string \"ghcr.io/undistro/trivy\" trivy plugin image repository scan.plugins.trivy.image.tag float 0.53 trivy plugin image tag scan.plugins.trivy.image.pullPolicy string \"Always\" Image pull policy scan.plugins.trivy.env list [] List of environment variables to set in trivy container. scan.plugins.trivy.envFrom list [] List of sources to populate environment variables in trivy container. scan.plugins.trivy.timeout string \"40m\" Trivy timeout scan.plugins.trivy.insecure bool false Allow insecure server connections for Trivy scan.plugins.trivy.fsGroup int nil Trivy fsGroup. Should be greater than 0. scan.plugins.trivy.persistence.enabled bool true Specifies whether Trivy vulnerabilities database should be persisted between the scans, using PersistentVolumeClaim scan.plugins.trivy.persistence.accessMode string \"ReadWriteOnce\" Persistence access mode scan.plugins.trivy.persistence.storageClass string \"\" Persistence storage class. Set to empty for default storage class scan.plugins.trivy.persistence.storageRequest string \"2Gi\" Persistence storage size scan.plugins.trivy.persistence.downloadJavaDB bool false Specifies whether Java vulnerability database should be downloaded on helm install/upgrade scan.plugins.popeye.skipInternalResources bool false Specifies whether the following resources should be skipped by popeye scans. 1. resources from kube-system, kube-public and kube-node-lease namespaces; 2. kubernetes system reserved RBAC (prefixed with system:); 3. kube-root-ca.crt configmaps; 4. default namespace; 5. default serviceaccounts; 6. Helm secrets (prefixed with sh.helm.release); 7. Zora components. 
See popeye configuration file that is used for this case: https://github.com/undistro/zora/blob/main/charts/zora/templates/plugins/popeye-config.yaml scan.plugins.popeye.resources object {\"limits\":{\"cpu\":\"500m\",\"memory\":\"500Mi\"},\"requests\":{\"cpu\":\"250m\",\"memory\":\"256Mi\"}} Resources to add to popeye container scan.plugins.popeye.podAnnotations object {} Annotations added to the popeye pods scan.plugins.popeye.image.repository string \"ghcr.io/undistro/popeye\" popeye plugin image repository scan.plugins.popeye.image.tag float 0.21 popeye plugin image tag scan.plugins.popeye.image.pullPolicy string \"Always\" Image pull policy scan.plugins.popeye.env list [] List of environment variables to set in popeye container. scan.plugins.popeye.envFrom list [] List of sources to populate environment variables in popeye container. kubexnsImage.repository string \"ghcr.io/undistro/kubexns\" kubexns image repository kubexnsImage.tag string \"v0.1\" kubexns image tag kubexnsImage.pullPolicy string \"Always\" Image pull policy customChecksConfigMap string \"zora-custom-checks\" Custom checks ConfigMap name httpsProxy string \"\" HTTPS proxy URL noProxy string \"kubernetes.default.svc.*,127.0.0.1,localhost\" Comma-separated list of URL patterns to be excluded from going through the proxy updateCRDs bool true for upgrades Specifies whether CRDs should be updated by operator at startup tokenRefresh.image.repository string \"ghcr.io/undistro/zora/tokenrefresh\" tokenrefresh image repository tokenRefresh.image.tag string \"\" Overrides the image tag whose default is the chart appVersion tokenRefresh.image.pullPolicy string \"IfNotPresent\" Image pull policy tokenRefresh.rbac.create bool true Specifies whether Roles and RoleBindings should be created tokenRefresh.rbac.serviceAccount.create bool true Specifies whether a service account should be created tokenRefresh.rbac.serviceAccount.annotations object {} Annotations to be added to service account tokenRefresh.rbac.serviceAccount.name string \"\" The name of the service account to use. If not set and create is true, a name is generated using the fullname template tokenRefresh.minRefreshTime string \"1m\" Minimum time to wait before checking for token refresh tokenRefresh.refreshThreshold string \"2h\" Threshold relative to the token expiry timestamp, after which a token can be refreshed. tokenRefresh.nodeSelector object {} Node selection to constrain a Pod to only be able to run on particular Node(s) tokenRefresh.tolerations list [] Tolerations for pod assignment tokenRefresh.affinity object {} Map of node/pod affinities tokenRefresh.podAnnotations object {\"kubectl.kubernetes.io/default-container\":\"manager\"} Annotations to be added to pods tokenRefresh.podSecurityContext object {\"runAsNonRoot\":true} Security Context to add to the pod tokenRefresh.securityContext object {\"allowPrivilegeEscalation\":false,\"readOnlyRootFilesystem\":true} Security Context to add to manager container zoraauth.domain string \"\" The domain associated with the tokens zoraauth.clientId string \"\" The client id associated with the tokens zoraauth.accessToken string \"\" The access token authorizing access to the SaaS API server zoraauth.tokenType string \"Bearer\" The type of the access token zoraauth.refreshToken string \"\" The refresh token for obtaining a new access token

Specify each parameter using the --set key=value[,key=value] argument to helm install. For example,

helm install zora \\\n  --set operator.resources.limits.memory=256Mi undistro/zora\n

Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,

helm install zora -f values.yaml undistro/zora\n

Tip: You can use the default values.yaml

"},{"location":"configuration/authenticated-registries/","title":"Authenticated Registries","text":"

The Trivy plugin is able to scan images from registries that require authentication.

To enable this, create a secret containing the authentication credentials as comma-separated pairs, as in the command below.

Note

For AWS ECR and Azure ACR registries, please refer to the specific pages.

kubectl create secret generic trivy-credentials -n zora-system \\\n  --from-literal=TRIVY_USERNAME=\"<username1>,<username2>\" \\\n  --from-literal=TRIVY_PASSWORD=\"<password1>,<password2>\"\n

Note

Please note that the number of usernames and passwords must be the same.

Once the secret is created, it needs to be referenced in the Helm chart parameters, as in the following values.yaml file:

scan:\n  plugins:\n    trivy:\n      envFrom:\n        - secretRef:\n            name: trivy-credentials\n            optional: true\n

Then provide this file to the helm upgrade --install command with the -f values.yaml flag.

This ensures that Trivy can authenticate with the private registries using the provided credentials.

"},{"location":"configuration/convert-to-csv/","title":"Convert results to CSV","text":"

After a successful scan, the results (vulnerabilities and misconfigurations) are available in your cluster via CRDs, and you can transform them into CSV files using jq.

"},{"location":"configuration/convert-to-csv/#vulnerabilities","title":"Vulnerabilities","text":"

Vulnerability scan results are stored as instances of VulnerabilityReport CRD within your cluster. You can export summaries or detailed reports of these vulnerabilities to CSV format for further analysis.

"},{"location":"configuration/convert-to-csv/#images-summary","title":"Images summary","text":"

To generate a summary report of vulnerabilities by image, run the following command:

kubectl get vulnerabilityreports -n zora-system -o json | jq -r '\n  [\"Image\", \"Image digest\", \"OS\", \"Distro\", \"Distro version\", \"Total\", \"Critical\", \"High\", \"Medium\", \"Low\", \"Unknown\", \"Scanned at\"],\n  (.items[] | [\n    .spec.image, .spec.digest, .spec.os, .spec.distro.name, .spec.distro.version, \n    .spec.summary.total, .spec.summary.critical, .spec.summary.high, .spec.summary.medium, .spec.summary.low, .spec.summary.unknown,\n    .metadata.creationTimestamp\n  ]) | @csv' > images.csv\n

This command will produce a CSV file, images.csv, with the following structure:

Image Image digest OS Distro Distro version Total Critical High Medium Low Unknown Scanned at docker.io/istio/examples-bookinfo-reviews-v1:1.20.1 istio/examples-bookinfo-reviews-v1@sha256:5b3c8ec2cb877b7a3c93fc340bb91633c3e51a6bc43a2da3ae7d72727650ec07 linux ubuntu 22.04 45 0 0 25 20 0 2024-10-31T12:56:51Z nginx nginx@sha256:28402db69fec7c17e179ea87882667f1e054391138f77ffaf0c3eb388efc3ffb linux debian 12.7 95 2 10 24 59 0 2024-10-31T12:56:51Z"},{"location":"configuration/convert-to-csv/#full-report-images-and-vulnerabilities","title":"Full report: images and vulnerabilities","text":"

To create a detailed report of each vulnerability affecting images, use the following command:

kubectl get vulnerabilityreports -n zora-system -o json | jq -r '\n  [\"Image\", \"Image digest\", \"OS\", \"Distro\", \"Distro version\", \"Vulnerability ID\", \"Severity\", \"Score\", \"Title\", \"Package\", \"Type\", \"Version\", \"Status\", \"Fix version\", \"Scanned at\"],\n  (.items[] | . as $i | $i.spec.vulnerabilities[] as $vuln | $vuln.packages[] | [\n    $i.spec.image, $i.spec.digest, $i.spec.os, $i.spec.distro.name, $i.spec.distro.version,\n    $vuln.id, $vuln.severity, $vuln.score, $vuln.title,\n    .package, .type, .version, .status, .fixVersion,\n    $i.metadata.creationTimestamp\n  ]) | @csv' > vulnerabilities.csv\n

This will generate a vulnerabilities.csv file with details for each vulnerability:

Note

A single vulnerability can affect multiple packages within the same image, so you may see repeated entries for the same vulnerability. For instance, in the example below, CVE-2024-7264 affects both curl and libcurl4 packages in the same image.

Image Image digest OS Distro Distro version Vulnerability ID Severity Score Title Package Type Version Status Fix version Scanned at nginx nginx@sha256:28402db69fec7c17e179ea87882667f1e054391138f77ffaf0c3eb388efc3ffb linux debian 12.7 CVE-2023-49462 HIGH 8.8 libheif v1.17.5 was discovered to contain a segmentation violation via ... libheif1 debian 1.15.1-1 fixed 1.15.1-1+deb12u1 2024-10-31T12:56:51Z docker.io/istio/examples-bookinfo-reviews-v1:1.20.1 istio/examples-bookinfo-reviews-v1@sha256:5b3c8ec2cb877b7a3c93fc340bb91633c3e51a6bc43a2da3ae7d72727650ec07 linux ubuntu 22.04 CVE-2024-7264 MEDIUM 6.5 curl: libcurl: ASN.1 date parser overread curl ubuntu 7.81.0-1ubuntu1.15 fixed 7.81.0-1ubuntu1.17 2024-10-31T12:56:51Z docker.io/istio/examples-bookinfo-reviews-v1:1.20.1 istio/examples-bookinfo-reviews-v1@sha256:5b3c8ec2cb877b7a3c93fc340bb91633c3e51a6bc43a2da3ae7d72727650ec07 linux ubuntu 22.04 CVE-2024-7264 MEDIUM 6.5 curl: libcurl: ASN.1 date parser overread libcurl4 ubuntu 7.81.0-1ubuntu1.15 fixed 7.81.0-1ubuntu1.17 2024-10-31T12:56:51Z"},{"location":"configuration/convert-to-csv/#misconfigurations","title":"Misconfigurations","text":"

Misconfiguration scan results are represented as instances of ClusterIssue CRD within your cluster, and can also be parsed to CSV format.

"},{"location":"configuration/convert-to-csv/#misconfigurations-summary","title":"Misconfigurations summary","text":"

To generate a summary report of misconfigurations, you can run the following command:

kubectl get misconfigurations -n zora-system -o json | jq -r '\n  [\"ID\", \"Misconfiguration\", \"Severity\", \"Category\", \"Total resources\", \"Scanned at\"],\n  (.items[] | ([.spec.resources[] | length] | add) as $totalResources | [\n    .spec.id, .spec.message, .spec.severity, .spec.category, $totalResources, .metadata.creationTimestamp\n  ]) | @csv' > misconfigurations.csv\n

This command will create a misconfigurations.csv file with the following structure:

ID Misconfiguration Severity Category Total resources Scanned at M-102 Privileged container High Security 2 2024-10-31T17:45:08Z M-103 Insecure capabilities High Security 2 2024-10-31T17:45:08Z M-112 Allowed privilege escalation Medium Security 14 2024-10-31T17:45:08Z M-113 Container could be running as root user Medium Security 18 2024-10-31T17:45:08Z M-201 Application credentials stored in configuration files High Security 6 2024-10-31T17:45:08Z M-300 Root filesystem write allowed Low Security 29 2024-10-31T17:45:08Z M-400 Image tagged latest Medium Best Practices 2 2024-10-31T17:45:08Z M-403 Liveness probe not configured Medium Reliability 16 2024-10-31T17:45:08Z M-406 Memory not limited Medium Reliability 15 2024-10-31T17:45:08Z"},{"location":"configuration/convert-to-csv/#full-report-misconfigurations-and-affected-resources","title":"Full report: misconfigurations and affected resources","text":"

A detailed CSV file containing the affected resources can be generated with the command below.

kubectl get misconfigurations -n zora-system -o json | jq -r '\n  [\"ID\", \"Misconfiguration\", \"Severity\", \"Category\", \"Resource Type\", \"Resource\", \"Scanned at\"],\n  (.items[] as $i | $i.spec.resources | to_entries[] as $resource | $resource.value[] as $affectedResource | [\n    $i.spec.id, $i.spec.message, $i.spec.severity, $i.spec.category, $resource.key, $affectedResource, $i.metadata.creationTimestamp\n  ]) | @csv' > misconfigurations_full.csv\n

This command will generate the misconfigurations_full.csv file with the following structure:

ID Misconfiguration Severity Category Resource Type Resource Scanned at M-400 Image tagged latest Medium Best Practices v1/pods default/test 2024-10-31T18:45:06Z M-400 Image tagged latest Medium Best Practices v1/pods default/nginx 2024-10-31T18:45:06Z"},{"location":"configuration/custom-checks/","title":"Custom checks","text":"

Zora offers a declarative way to create your own checks using the CustomCheck API, introduced in version 0.6.

Custom checks use the Common Expression Language (CEL) to declare the validation rules and are performed by the Marvin plugin, which should be enabled in your cluster scans.

Info

Marvin has been a default plugin, enabled by default in cluster scans, since Zora 0.5.0.

"},{"location":"configuration/custom-checks/#customcheck-api","title":"CustomCheck API","text":"

The example below demonstrates a custom check that requires the labels mycompany.com/squad and mycompany.com/component to be present on Pods, Deployments and Services.

Example

apiVersion: zora.undistro.io/v1alpha1\nkind: CustomCheck\nmetadata:\n  name: mycheck\nspec:\n  message: \"Required labels\"\n  severity: Low\n  category: Custom\n  match:\n    resources:\n      - group: \"\"\n        version: v1\n        resource: pods\n      - group: apps\n        version: v1\n        resource: deployments\n      - group: \"\"\n        version: v1\n        resource: services\n  params:\n    requiredLabels:\n      - mycompany.com/squad\n      - mycompany.com/component\n  validations:\n    - expression: >\n        has(object.metadata.labels) &&\n        !object.metadata.labels.all(label,\n          params.requiredLabels.all(\n            req, req != label\n          )\n        )\n      message: \"Resource without required labels\"\n

The spec.match.resources defines which resources are checked by the expressions defined in spec.validations.expression using Common Expression Language (CEL).

If an expression evaluates to false, the check fails, and a ClusterIssue is reported.

CEL Playground

To quickly test CEL expressions directly from your browser, check out CEL Playground.

"},{"location":"configuration/custom-checks/#variables","title":"Variables","text":"

The following variables are available in CEL expressions:

Variable Description object The object being scanned. params The parameters defined in the spec.params field.

If the object matches a PodSpec, the following useful variables are available:

Variable Description allContainers A list of all containers, including initContainers and ephemeralContainers. podMeta The Pod metadata. podSpec The Pod spec.

The following resources match a PodSpec (a short example using these variables is shown after the list):

  • v1/pods
  • v1/replicationcontrollers
  • apps/v1/replicasets
  • apps/v1/deployments
  • apps/v1/statefulsets
  • apps/v1/daemonsets
  • batch/v1/jobs
  • batch/v1/cronjobs
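
As a small illustration (not one of Zora's built-in checks), a validation expression could use the allContainers variable to forbid the latest image tag on every container:

allContainers.all(container,\n  !container.image.endsWith(\":latest\")\n)\n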
"},{"location":"configuration/custom-checks/#applying-custom-checks","title":"Applying custom checks","text":"

Once you have a CustomCheck in a file, you can apply it with the following command.

kubectl apply -f check.yaml -n zora-system\n
"},{"location":"configuration/custom-checks/#listing-custom-checks","title":"Listing custom checks","text":"

Once created, list the custom checks to see if they are ready.

kubectl get customchecks -n zora-system\n
NAME      MESSAGE           SEVERITY   READY\nmycheck   Required labels   Low        True\n

The READY column indicates whether the check has compiled successfully and is ready to be used in the next Marvin scan.

ClusterIssues reported by a custom check are labeled custom=true and can be filtered by the following command:

kubectl get clusterissues -l custom=true\n
NAME                             CLUSTER     ID        MESSAGE           SEVERITY   CATEGORY   AGE\nmycluster-mycheck-4edd75cb85a4   mycluster   mycheck   Required labels   Low        Custom     25s\n

"},{"location":"configuration/custom-checks/#examples","title":"Examples","text":"

All Marvin built-in checks follow a format similar to the CustomCheck API. You can browse them in the internal/builtins folder for examples.

Here are some examples of Marvin built-in checks expressions:

  • HostPath volumes must be forbidden
    !has(podSpec.volumes) || podSpec.volumes.all(vol, !has(vol.hostPath))\n
  • Sharing the host namespaces must be disallowed
    (!has(podSpec.hostNetwork) || podSpec.hostNetwork == false) &&\n(!has(podSpec.hostPID) || podSpec.hostPID == false) &&\n(!has(podSpec.hostIPC) || podSpec.hostIPC == false)\n
  • Privileged Pods disable most security mechanisms and must be disallowed
    allContainers.all(container,\n  !has(container.securityContext) ||\n  !has(container.securityContext.privileged) ||\n  container.securityContext.privileged == false)\n
  • HostPorts should be disallowed entirely (recommended) or restricted to a known list
    allContainers.all(container,\n  !has(container.ports) ||\n  container.ports.all(port,\n    !has(port.hostPort) ||\n    port.hostPort == 0 ||\n    port.hostPort in params.allowedHostPorts\n  )\n)\n

Marvin's checks and Zora's CustomCheck API are inspired by the Kubernetes ValidatingAdmissionPolicy API, introduced in version 1.26 as an alpha feature. Below is the table of validation expression examples from the Kubernetes documentation.

Expression Purpose object.minReplicas <= object.replicas && object.replicas <= object.maxReplicas Validate that the three fields defining replicas are ordered appropriately 'Available' in object.stateCounts Validate that an entry with the 'Available' key exists in a map (size(object.list1) == 0) != (size(object.list2) == 0) Validate that one of two lists is non-empty, but not both !('MY_KEY' in object.map1) || object['MY_KEY'].matches('^[a-zA-Z]*$') Validate the value of a map for a specific key, if it is in the map object.envars.filter(e, e.name == 'MY_ENV').all(e, e.value.matches('^[a-zA-Z]*$') Validate the 'value' field of a listMap entry where key field 'name' is 'MY_ENV' has(object.expired) && object.created + object.ttl < object.expired Validate that 'expired' date is after a 'create' date plus a 'ttl' duration object.health.startsWith('ok') Validate a 'health' string field has the prefix 'ok' object.widgets.exists(w, w.key == 'x' && w.foo < 10) Validate that the 'foo' property of a listMap item with a key 'x' is less than 10 type(object) == string ? object == '100%' : object == 1000 Validate an int-or-string field for both the int and string cases object.metadata.name.startsWith(object.prefix) Validate that an object's name has the prefix of another field value object.set1.all(e, !(e in object.set2)) Validate that two listSets are disjoint size(object.names) == size(object.details) && object.names.all(n, n in object.details) Validate the 'details' map is keyed by the items in the 'names' listSet size(object.clusters.filter(c, c.name == object.primary)) == 1 Validate that the 'primary' property has one and only one occurrence in the 'clusters' listMap"},{"location":"configuration/https-proxy/","title":"HTTPS Proxy","text":"

If your network environment requires the use of a proxy, you must ensure proper configuration of the httpsProxy parameter when running the helm upgrade --install command.

# omitted \"helm upgrade --install\" command and parameters\n\n--set httpsProxy=\"https://secure.proxy.tld\"\n

Additionally, you can specify URLs that should bypass the proxy by setting the noProxy parameter in comma-separated list format. Note that this parameter already has a default value: kubernetes.default.svc.*,127.0.0.1,localhost.
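
For example, to also bypass the proxy for a hypothetical internal registry, the default list can be extended as follows (note that commas within a --set value must be escaped with backslashes):

--set noProxy=\"kubernetes.default.svc.*\\,127.0.0.1\\,localhost\\,registry.internal.example.com\"\n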

Configuring proxy settings enables both the trivy plugin and the zora-operator to use the proxy for external requests.

Zora OSS sends scan results to the following external URL if your installation is integrated with Zora Dashboard:

  • https://zora-dashboard.undistro.io

Trivy, in turn, downloads vulnerability databases during scans from the following external sources:

  • ghcr.io/aquasecurity/trivy-db
  • ghcr.io/aquasecurity/trivy-java-db
"},{"location":"configuration/resources/","title":"Compute resources","text":"

Zora Helm Chart allows you to define resource requests and limits (memory and CPU) for zora-operator and plugins. You can do this by setting specific parameters using the --set argument, as in the example below.

--set operator.resources.limits.memory=256Mi\n

Alternatively, a YAML file can be specified using the -f myvalues.yaml flag.

Tip

Refer to the default values.yaml file for more details

In a similar way, you can customize the resources for plugins. The following example sets 1Gi as the memory limit for the marvin plugin.

--set scan.plugins.marvin.resources.limits.memory=1Gi\n
"},{"location":"configuration/retain-issues/","title":"Retain issues","text":"

By default, both scans automatically scheduled by Zora upon installation are configured to retain issues/results only from the last scan.

To retain results from the last two scans, for example, you should set the successfulScansHistoryLimit field of ClusterScan to 2.

This can be done either by directly editing the ClusterScan object or by providing a parameter in the Helm installation/upgrade command:

# omitted \"helm upgrade --install\" command and parameters\n\n--set scan.misconfiguration.successfulScansHistoryLimit=2\n

In this case, it may appear that there are duplicate issues when more than one scan completes successfully. However, these issues are actually related to different scans. The identifier of each scan can be found in the scanID label of each issue.

kubectl get issues -n zora-system --show-labels\n
NAME                    CLUSTER     ID      MESSAGE                SEVERITY   CATEGORY   AGE    LABELS\nkind-kind-m-102-4wxvv   kind-kind   M-102   Privileged container   High       Security   43s    scanID=556cc35a-830e-45af-a31c-7130918de262,category=Security,cluster=kind-kind,custom=false,id=M-102,plugin=marvin,severity=High\nkind-kind-m-102-nf5xq   kind-kind   M-102   Privileged container   High       Security   102s   scanID=8464411a-4b9c-456b-a11c-dd3a5ab905f5,category=Security,cluster=kind-kind,custom=false,id=M-102,plugin=marvin,severity=High\n

To list issues from a specific scan, you can use a label selector like this:

kubectl get issues -n zora-system -l scanID=556cc35a-830e-45af-a31c-7130918de262\n

This also applies to vulnerability scans and VulnerabilityReport results.
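
For instance, to also retain the results of the last two vulnerability scans, the equivalent parameter can be set:

--set scan.vulnerability.successfulScansHistoryLimit=2\n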

Warning

Note that results are stored as CRDs in your Kubernetes cluster. Be cautious not to set a high value that could affect the performance and storage capacity of your Kubernetes cluster.

Note

This applies only to Zora OSS; Zora Dashboard always displays results from the last scan.

"},{"location":"configuration/scan-schedule/","title":"Scan schedule","text":"

After successfully installing Zora, vulnerability and misconfiguration scans are automatically scheduled for your cluster, with each scan using different plugins.

Scan schedules are defined using Cron expressions. You can view the schedule for your cluster by listing ClusterScan resources:

kubectl get clusterscans -o wide -n zora-system\n

By default, the misconfiguration scan is scheduled to run every hour at the current minute plus 5, while the vulnerability scan is scheduled to run every day at the current hour and the current minute plus 5.

For example, if the installation occurred at 10:00 UTC, the scans will have the following schedules:

Scan Cron Description Misconfigurations 5 * * * * Every hour at minute 5 Vulnerabilities 5 10 * * * Every day at 10:05

However, you can customize the schedule for each scan by directly editing the ClusterScan resource or by providing parameters in the helm upgrade --install command, as shown in the example below:

# omitted \"helm upgrade --install\" command and parameters\n\n--set scan.misconfiguration.schedule=\"0 * * * *\" \\\n--set scan.vulnerability.schedule=\"0 0 * * *\"\n

The recommended approach is to provide parameters through Helm.

Costly scan scheduling

Overly frequent scheduling of scans can increase networking costs significantly, especially for vulnerability scans, which involve downloading a vulnerability database and pulling images.

Warning

If you directly edit the ClusterScan resource, be cautious when running the next update via Helm, as the value may be overwritten.

"},{"location":"configuration/scan-schedule/#cron-schedule-syntax","title":"Cron schedule syntax","text":"

A Cron expression has five fields separated by spaces, and each field represents a time unit.

\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 minute (0 - 59)\n\u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 hour (0 - 23)\n\u2502 \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 day of the month (1 - 31)\n\u2502 \u2502 \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 month (1 - 12)\n\u2502 \u2502 \u2502 \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 day of the week (0 - 6) (Sunday to Saturday;\n\u2502 \u2502 \u2502 \u2502 \u2502                                   7 is also Sunday on some systems)\n\u2502 \u2502 \u2502 \u2502 \u2502                                   OR sun, mon, tue, wed, thu, fri, sat\n\u2502 \u2502 \u2502 \u2502 \u2502\n* * * * *\n
Operator Descriptor Example * Any value 15 * * * * runs at every minute 15 of every hour of every day. , Value list separator 2,10 4,5 * * * runs at minute 2 and 10 of the 4th and 5th hour of every day. - Range of values 30 4-6 * * * runs at minute 30 of the 4th, 5th, and 6th hour. / Step values 20/15 * * * * runs every 15 minutes starting from minute 20 through 59 (minutes 20, 35, and 50)."},{"location":"configuration/suspend-scan/","title":"Suspending scans","text":"

The cluster scans, which are automatically scheduled upon installation, can be suspended by setting spec.suspend to true in a ClusterScan object. This action suspends subsequent scans; it does not apply to scans that have already started.

The command below suspends the mycluster-vuln scan.

kubectl patch scan mycluster-vuln --type='merge' -p '{\"spec\":{\"suspend\":true}}' -n zora-system\n

Note

This way, the scan results remain available. If the ClusterScan had been deleted instead, the results would be removed as well.

Setting spec.suspend back to false resumes the scans:

kubectl patch scan mycluster-vuln --type='merge' -p '{\"spec\":{\"suspend\":false}}' -n zora-system\n
"},{"location":"configuration/vulnerability-database-persistence/","title":"Vulnerability Database Persistence","text":"

Trivy utilizes a database containing vulnerability information in its scans. This database is updated every 6 hours.

When scanning JAR files, Trivy downloads a specific database for Java every 3 days.

Both databases are distributed via GitHub Container Registry (GHCR) and cached by Trivy in the local file system.

Starting with version 0.8.4, Zora persists Trivy databases by default, caching them between the scheduled scans. This means that scheduled scans may not need to download the databases, saving compute resources, time, and network traffic.

This is done by applying a PersistentVolumeClaim during Zora installation/upgrade through Helm. A Job is also applied, which downloads the vulnerability database so it is ready for the first scheduled scan.
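If you want to confirm that these resources exist after installation, a quick check along the following lines should work (the exact resource names may vary between chart versions):

kubectl get pvc,job -n zora-system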

This persistence can be disabled or configured with the following Helm parameters:

Key                                             Type     Default           Description
scan.plugins.trivy.persistence.enabled          bool     true              Specifies whether the Trivy vulnerability database should be persisted between scans, using a PersistentVolumeClaim
scan.plugins.trivy.persistence.accessMode       string   "ReadWriteOnce"   Persistence access mode
scan.plugins.trivy.persistence.storageClass     string   ""                Persistence storage class. Set to empty for the default storage class
scan.plugins.trivy.persistence.storageRequest   string   "2Gi"             Persistence storage size
scan.plugins.trivy.persistence.downloadJavaDB   bool     false             Specifies whether the Java vulnerability database should be downloaded on helm install/upgrade

These parameters can be specified using the --set key=value argument in the helm upgrade --install command.
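For instance, to disable the persistence entirely, or to pre-download the Java database for JAR scanning, flags like the following (taken from the table above) could be appended to the command:

--set scan.plugins.trivy.persistence.enabled=false
--set scan.plugins.trivy.persistence.downloadJavaDB=true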

"},{"location":"configuration/private-registries/acr/","title":"Azure Container Registry (ACR)","text":"

If you are running within Azure and using a private Azure Container Registry (ACR) to host your application images, the Trivy plugin will be unable to scan those images unless it is granted access to the registry through a service principal with the AcrPull role assigned.

"},{"location":"configuration/private-registries/acr/#creating-service-principal","title":"Creating service principal","text":"

The following Azure CLI command creates a service principal with the AcrPull role assigned and stores the output, including the credentials, in the SP_DATA environment variable.

Note

Please replace <SUBSCRIPTION_ID>, <RESOURCE_GROUP>, and <REGISTRY_NAME> before running the command below.

export SP_DATA=$(az ad sp create-for-rbac --name ZoraTrivy --role AcrPull --scope "/subscriptions/<SUBSCRIPTION_ID>/resourceGroups/<RESOURCE_GROUP>/providers/Microsoft.ContainerRegistry/registries/<REGISTRY_NAME>")
"},{"location":"configuration/private-registries/acr/#usage","title":"Usage","text":"

Once the service principal is created and the credentials are in the SP_DATA environment variable, create a Kubernetes secret to store these credentials by running:

kubectl create secret generic trivy-acr-credentials -n zora-system \
  --from-literal=AZURE_CLIENT_ID=$(echo $SP_DATA | jq -r '.appId') \
  --from-literal=AZURE_CLIENT_SECRET=$(echo $SP_DATA | jq -r '.password') \
  --from-literal=AZURE_TENANT_ID=$(echo $SP_DATA | jq -r '.tenant')

Note

If you are running this command before a Zora installation, you may need to create the zora-system namespace.

kubectl create namespace zora-system

Now set the secret name in a values.yaml file:

scan:
  plugins:
    trivy:
      envFrom:
        - secretRef:
            name: trivy-acr-credentials
            optional: true

Then provide it to the helm upgrade --install command:

-f values.yaml
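Putting it together, the full command might look like this sketch, with the chart reference and namespace taken from the installation instructions:

helm upgrade --install zora undistro/zora \
  -n zora-system \
  --create-namespace \
  -f values.yaml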

This will now allow the Trivy plugin to scan your internal images for vulnerabilities.

"},{"location":"configuration/private-registries/ecr/","title":"AWS Elastic Container Registry (ECR)","text":"

If you are running within AWS and using a private Elastic Container Registry (ECR) to host your application images, the Trivy plugin will be unable to scan those images unless access is granted to the registry through an Identity and Access Management (IAM) role assigned to the service account running the Trivy plugin.

Once an IAM role granting access to the ECR has been created, it can be assigned to the service account by including the following additional parameter when running the helm upgrade --install command:

--set scan.plugins.annotations.eks\\.amazonaws\\.com/role-arn=arn:aws:iam::<AWS_ACCOUNT_ID>:role/<ROLE_NAME>
where <AWS_ACCOUNT_ID> should be replaced with your AWS account ID, and <ROLE_NAME> should be replaced with the name of the role granting access to the ECR.
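As a sketch, the complete upgrade command with this annotation could look like the following, where the chart reference and namespace follow the installation instructions and the role ARN remains a placeholder:

helm upgrade --install zora undistro/zora \
  -n zora-system \
  --set scan.plugins.annotations.eks\\.amazonaws\\.com/role-arn=arn:aws:iam::<AWS_ACCOUNT_ID>:role/<ROLE_NAME>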

This will now allow the Trivy plugin to scan your internal images for vulnerabilities.

"},{"location":"getting-started/installation/","title":"Installation","text":"

Zora OSS is installed inside your Kubernetes cluster using Helm: the zora-operator deployment is created and scans are automatically scheduled for your cluster.

"},{"location":"getting-started/installation/#prerequisites","title":"Prerequisites","text":"
  • Kubernetes cluster 1.21+
  • Kubectl
  • Helm 3.8+
"},{"location":"getting-started/installation/#install-with-helm","title":"Install with Helm","text":"

First, ensure that your current kubectl context refers to the Kubernetes cluster you wish to install Zora into.

Manage kubectl contexts

The following commands can help you manage kubectl contexts:

  • List all contexts: kubectl config get-contexts

  • Display the current-context: kubectl config current-context

  • Use the context for the Kind cluster: kubectl config use-context kind-kind

Then, run the following command to install the Zora Helm chart:

HTTP chart repository:

helm repo add undistro https://charts.undistro.io --force-update
helm repo update undistro
helm upgrade --install zora undistro/zora \
  -n zora-system \
  --version 0.10.2 \
  --create-namespace \
  --wait \
  --set clusterName="$(kubectl config current-context)"

OCI registry:

helm upgrade --install zora oci://ghcr.io/undistro/helm-charts/zora \
  -n zora-system \
  --version 0.10.2 \
  --create-namespace \
  --wait \
  --set clusterName="$(kubectl config current-context)"

This command will install Zora in the zora-system namespace, creating the namespace if it doesn't already exist.

Zora OSS + Zora Dashboard

To integrate your Zora OSS installation with Zora Dashboard, you need to authenticate with the authorization server and provide the saas.workspaceID parameter in the installation command. For more information, please refer to this page.
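In practice, this amounts to one extra flag on the installation command above, along the lines of this sketch (where <WORKSPACE_ID> is a placeholder for your own workspace ID):

--set saas.workspaceID=<WORKSPACE_ID>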

With the following commands, you can verify whether Zora has been successfully installed and retrieve the installation notes:

helm list -n zora-system
helm get notes zora -n zora-system

Zora Helm Chart

To see the full list of available parameters in the Zora Helm chart, please visit this page

If everything is set up correctly, your cluster should have scheduled scans. Check it by running:

kubectl get cluster,scan -o wide -n zora-system

Customize scan schedule

To customize the scan schedule, please refer to the Scan Schedule page.

Once the cluster is successfully scanned, you can check issues by running:

kubectl get misconfigurations -n zora-system
kubectl get vulnerabilities   -n zora-system
"},{"location":"getting-started/installation/#migrating-to-08","title":"Migrating to 0.8","text":""},{"location":"getting-started/installation/#whats-new-in-08","title":"What's new in 0.8","text":""},{"location":"getting-started/installation/#extended-vulnerability-reports-information","title":"Extended Vulnerability Reports Information","text":"

Now, VulnerabilityReports provide more in-depth information about the image, including OS, architecture, distro, and digest. Additionally, details about vulnerabilities, such as publishedDate and lastModifiedDate, have been included to offer a clearer understanding of your cluster's security posture.
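To inspect these fields on a specific report, a command like the following can be used, where <report-name> is a placeholder for one of the names returned by kubectl get vulnerabilityreports:

kubectl get vulnerabilityreport <report-name> -n zora-system -o yaml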

"},{"location":"getting-started/installation/#full-integration-with-zora-dashboard","title":"Full Integration with Zora Dashboard","text":"

Zora 0.8 introduces the integration of Vulnerability Reports with the Zora Dashboard. Now, alongside misconfigurations, you can centrally explore images and vulnerabilities across your clusters.

"},{"location":"getting-started/installation/#migration-guide","title":"Migration guide","text":"

Version 0.7 or earlier

If you are currently using a version prior to 0.7, please be aware that the 0.7 release brought about significant architectural changes. Before upgrading to version 0.8, refer to this page for essential information and considerations to ensure a smooth transition.

The recommended way to migrate to version 0.8 is to reinstall Zora, including its CRDs.

"},{"location":"getting-started/installation/#uninstall","title":"Uninstall","text":"

You can uninstall Zora and its components by uninstalling the Helm chart installed above.

helm uninstall zora -n zora-system

By design, Helm doesn't upgrade or delete CRDs. You can permanently delete Zora CRDs and any remaining associated resources from your cluster using the following command:

kubectl get crd -o=name | grep --color=never 'zora.undistro.io' | xargs kubectl delete

You can also delete the zora-system namespace using the command below.

kubectl delete namespace zora-system
"},{"location":"plugins/","title":"Zora Plugins","text":""},{"location":"plugins/#overview","title":"Overview","text":"

Zora utilizes open-source CLI tools like Marvin, Popeye, and Trivy as plugins to perform scans on Kubernetes clusters.

The currently available plugins of a Zora installation can be listed by running the following command:

kubectl get plugins -n zora-system

NAME     IMAGE                               TYPE               AGE
marvin   ghcr.io/undistro/marvin:v0.2.1      misconfiguration   14m
popeye   ghcr.io/undistro/popeye:0.21.3-6    misconfiguration   14m
trivy    ghcr.io/undistro/trivy:0.50.1-1     vulnerability      14m

Each item listed above is an instance of the Plugin CRD and represents the execution configuration of a plugin. More details can be seen by getting the YAML output of a plugin:

kubectl get plugin marvin -o yaml -n zora-system
"},{"location":"plugins/#plugin-types","title":"Plugin types","text":"

Currently, Zora has two plugin types: vulnerability and misconfiguration, which determine the focus of plugin scans.

  • vulnerability plugins scan cluster images for vulnerabilities, and their results are stored as instances of the VulnerabilityReport CRD.

  • misconfiguration plugins scan cluster resources for potential configuration issues, and their results are available as instances of the ClusterIssue CRD.

Both result types can be listed using kubectl, and some aliases are supported for your convenience, as shown in the following commands:

kubectl get vulnerabilityreports
kubectl get vuln
kubectl get vulns
kubectl get vulnerabilities

kubectl get clusterissues
kubectl get issue
kubectl get issues
kubectl get misconfig
kubectl get misconfigs
kubectl get misconfigurations

Note

The results are only available after a successful scan, in the same namespace as the ClusterScan (default is zora-system).

"},{"location":"plugins/#how-plugins-work","title":"How plugins work","text":"

Starting from a Plugin and a ClusterScan, Zora manages and schedules scans by applying CronJobs, which create Jobs and Pods.

The Pods where the scans run include a "sidecar" container called worker alongside the plugin container.

After the plugin completes its scan, it signals Zora's worker by writing the path of the results file into a "done file".

The worker container waits for the "done file" to be present, then transforms the results and creates ClusterIssues or VulnerabilityReports, depending on the plugin type.
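As a rough illustration of this contract (not the actual implementation; the file locations below are hypothetical, and in practice they are defined by the Plugin spec), a plugin's entrypoint could end like this:

# hypothetical paths, for illustration only
results=/tmp/zora/results/scan.json
my-scanner --output "$results"          # the plugin writes its raw results
echo -n "$results" > /tmp/zora/done     # the "done file" tells the worker where to find them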

Note

This is the aspect that currently prevents the full declarative integration of new plugins. The code responsible for transforming the output of each plugin into CRDs is written in Go within the worker.

Any contributions or suggestions in this regard are greatly appreciated.

Note

This architecture for supporting plugins is inspired by Sonobuoy, a project used for CNCF conformance certification.

"},{"location":"plugins/marvin/","title":"Marvin Plugin","text":"

Marvin is an open-source CLI tool that scans a Kubernetes cluster by evaluating CEL expressions to report potential issues and misconfigurations.

Marvin enables Zora's custom checks using CEL. For further information, please visit this page.

Type: misconfiguration

Image: ghcr.io/undistro/marvin:v0.2.1

GitHub repository: https://github.com/undistro/marvin

"},{"location":"plugins/popeye/","title":"Popeye Plugin","text":"

Popeye is a utility that scans live Kubernetes clusters and reports potential issues with deployed resources and configurations.

Type: misconfiguration

Image: ghcr.io/undistro/popeye:0.21.3-6

GitHub repository: https://github.com/derailed/popeye

Info

Currently, Zora does not use the official Popeye image (derailed/popeye) due to its lack of multi-architecture support.

"},{"location":"plugins/trivy/","title":"Trivy Plugin","text":"

Trivy is a versatile security scanner that can find vulnerabilities, misconfigurations, secrets, and SBOMs in different targets such as container images, code repositories, and Kubernetes clusters.

Zora uses Trivy as a plugin exclusively to scan for vulnerabilities in a Kubernetes cluster.

Type: vulnerability

Image: ghcr.io/undistro/trivy:0.50.1-1

GitHub repository: https://github.com/aquasecurity/trivy

"},{"location":"plugins/trivy/#vulnerability-database-persistence","title":"Vulnerability Database Persistence","text":"

Trivy utilizes a database containing vulnerability information. This database is updated every 6 hours and persisted by default for caching purposes between the scheduled scans.

Please refer to this page for further details and configuration options regarding vulnerability database persistence.

"},{"location":"plugins/trivy/#large-vulnerability-reports","title":"Large vulnerability reports","text":"

Vulnerability reports can be large. If you encounter issues with the etcd request payload limit, you can ignore unfixed vulnerabilities in reports by providing the following flag to the helm upgrade --install command:

--set 'scan.plugins.trivy.ignoreUnfixed=true'

To identify this issue, check the logs of the worker container in the trivy pod. The ClusterScan will have a Failed status, and you will see a log entry similar to the following example:

2023-09-26T14:18:02Z    ERROR   worker  failed to run worker    {"error": "failed to create VulnerabilityReport \"kind-kind-usdockerpkgdevgooglesamplescontainersgkegbfrontendsha256dc8de8e0d569d2f828b187528c9317bd6b605c273ac5a282aebe471f630420fc-rzntw\": etcdserver: request is too large"}
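A command along these lines can be used to pull up those logs, where <trivy-scan-pod> is a placeholder for the scan pod's name, which varies per scan:

kubectl logs <trivy-scan-pod> -c worker -n zora-system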
"},{"location":"plugins/trivy/#scan-timeout","title":"Scan timeout","text":"

Trivy's scan duration may vary depending on the total number of images in your cluster and the time required to download the vulnerability database when needed.

By default, Zora sets a timeout of 40 minutes for Trivy scan completion.

To adjust this timeout, use the following Helm parameter:

--set scan.plugins.trivy.timeout=60m

Once this parameter is updated, the next scan will use the specified value.

"}]} \ No newline at end of file diff --git a/dev/sitemap.xml b/dev/sitemap.xml index 7b847e06..055d85fd 100644 --- a/dev/sitemap.xml +++ b/dev/sitemap.xml @@ -2,82 +2,82 @@ https://zora-docs.undistro.io/dev/ - 2024-11-01 + 2024-11-06 https://zora-docs.undistro.io/dev/dashboard/ - 2024-11-01 + 2024-11-06 https://zora-docs.undistro.io/dev/faq/ - 2024-11-01 + 2024-11-06 https://zora-docs.undistro.io/dev/helm-chart/ - 2024-11-01 + 2024-11-06 https://zora-docs.undistro.io/dev/configuration/authenticated-registries/ - 2024-11-01 + 2024-11-06 https://zora-docs.undistro.io/dev/configuration/convert-to-csv/ - 2024-11-01 + 2024-11-06 https://zora-docs.undistro.io/dev/configuration/custom-checks/ - 2024-11-01 + 2024-11-06 https://zora-docs.undistro.io/dev/configuration/https-proxy/ - 2024-11-01 + 2024-11-06 https://zora-docs.undistro.io/dev/configuration/resources/ - 2024-11-01 + 2024-11-06 https://zora-docs.undistro.io/dev/configuration/retain-issues/ - 2024-11-01 + 2024-11-06 https://zora-docs.undistro.io/dev/configuration/scan-schedule/ - 2024-11-01 + 2024-11-06 https://zora-docs.undistro.io/dev/configuration/suspend-scan/ - 2024-11-01 + 2024-11-06 https://zora-docs.undistro.io/dev/configuration/vulnerability-database-persistence/ - 2024-11-01 + 2024-11-06 https://zora-docs.undistro.io/dev/configuration/private-registries/acr/ - 2024-11-01 + 2024-11-06 https://zora-docs.undistro.io/dev/configuration/private-registries/ecr/ - 2024-11-01 + 2024-11-06 https://zora-docs.undistro.io/dev/getting-started/installation/ - 2024-11-01 + 2024-11-06 https://zora-docs.undistro.io/dev/plugins/ - 2024-11-01 + 2024-11-06 https://zora-docs.undistro.io/dev/plugins/marvin/ - 2024-11-01 + 2024-11-06 https://zora-docs.undistro.io/dev/plugins/popeye/ - 2024-11-01 + 2024-11-06 https://zora-docs.undistro.io/dev/plugins/trivy/ - 2024-11-01 + 2024-11-06 \ No newline at end of file diff --git a/dev/sitemap.xml.gz b/dev/sitemap.xml.gz index 284d4fe0..5fe6b83a 100644 Binary files a/dev/sitemap.xml.gz and b/dev/sitemap.xml.gz differ diff --git a/dev/values.yaml b/dev/values.yaml index 6a5cf90a..17fd5648 100644 --- a/dev/values.yaml +++ b/dev/values.yaml @@ -237,7 +237,7 @@ scan: # - secretRef: # name: trivy-credentials # -- Trivy timeout - timeout: 10m + timeout: 40m # -- Allow insecure server connections for Trivy insecure: false diff --git a/versions.json b/versions.json index 59b09d17..62ff2438 100644 --- a/versions.json +++ b/versions.json @@ -1 +1,49 @@ -[{"version":"dev","title":"dev","aliases":[]},{"version":"v0.10","title":"v0.10 (latest)","aliases":["latest"]},{"version":"v0.9","title":"v0.9","aliases":[]},{"version":"v0.8","title":"v0.8","aliases":[]},{"version":"v0.7","title":"v0.7","aliases":[]},{"version":"v0.6","title":"v0.6","aliases":[]},{"version":"v0.5","title":"v0.5","aliases":[]},{"version":"v0.4","title":"v0.4","aliases":[]},{"version":"v0.3","title":"v0.3","aliases":[]}] \ No newline at end of file +[ + { + "version": "dev", + "title": "dev", + "aliases": [] + }, + { + "version": "v0.10", + "title": "v0.10 (latest)", + "aliases": [ + "latest" + ] + }, + { + "version": "v0.9", + "title": "v0.9", + "aliases": [] + }, + { + "version": "v0.8", + "title": "v0.8", + "aliases": [] + }, + { + "version": "v0.7", + "title": "v0.7", + "aliases": [] + }, + { + "version": "v0.6", + "title": "v0.6", + "aliases": [] + }, + { + "version": "v0.5", + "title": "v0.5", + "aliases": [] + }, + { + "version": "v0.4", + "title": "v0.4", + "aliases": [] + }, + { + "version": "v0.3", + "title": "v0.3", + "aliases": [] + } +]