diff --git a/Makefile b/Makefile index 681caa4f5bd..1f2710743de 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ LOG_LEVEL ?= "INFO" GENERATE_VAP ?= false GENERATE_VAPBINDING ?= false -VERSION := v3.17.0-beta.0 +VERSION := v3.18.0-beta.0 KIND_VERSION ?= 0.17.0 KIND_CLUSTER_FILE ?= test/bats/tests/kindcluster.yml diff --git a/charts/gatekeeper/Chart.yaml b/charts/gatekeeper/Chart.yaml index c5bcc30e89f..e42cec2b576 100644 --- a/charts/gatekeeper/Chart.yaml +++ b/charts/gatekeeper/Chart.yaml @@ -4,8 +4,8 @@ name: gatekeeper icon: https://open-policy-agent.github.io/gatekeeper/website/img/logo.svg keywords: - open policy agent -version: 3.17.0-beta.0 +version: 3.18.0-beta.0 home: https://github.com/open-policy-agent/gatekeeper sources: - https://github.com/open-policy-agent/gatekeeper.git -appVersion: v3.17.0-beta.0 +appVersion: v3.18.0-beta.0 diff --git a/charts/gatekeeper/README.md b/charts/gatekeeper/README.md index 0a69ba9e758..7313421e175 100644 --- a/charts/gatekeeper/README.md +++ b/charts/gatekeeper/README.md @@ -74,7 +74,7 @@ information._ | postInstall.labelNamespace.extraNamespaces | The extra namespaces that need to have the label during post install hooks | `[]` | | postInstall.labelNamespace.extraAnnotations | Extra annotations added to the post install Job | `{}` | | postInstall.labelNamespace.image.repository | Image with kubectl to label the namespace | `openpolicyagent/gatekeeper-crds` | -| postInstall.labelNamespace.image.tag | Image tag | Current release version: `v3.17.0-beta.0` | +| postInstall.labelNamespace.image.tag | Image tag | Current release version: `v3.18.0-beta.0` | | postInstall.labelNamespace.image.pullPolicy | Image pullPolicy | `IfNotPresent` | | postInstall.labelNamespace.image.pullSecrets | Image pullSecrets | `[]` | | postInstall.labelNamespace.extraRules | Extra rules for the gatekeeper-update-namespace-label Role | `[]` | @@ -97,7 +97,7 @@ information._ | postUpgrade.labelNamespace.extraNamespaces | The extra namespaces that need to have the label during post upgrade hooks | `[]` | | postUpgrade.labelNamespace.extraAnnotations | Extra annotations added to the post upgrade Job | `{}` | | postUpgrade.labelNamespace.image.repository | Image with kubectl to label the namespace | `openpolicyagent/gatekeeper-crds` | -| postUpgrade.labelNamespace.image.tag | Image tag | Current release version: `v3.17.0-beta.0` | +| postUpgrade.labelNamespace.image.tag | Image tag | Current release version: `v3.18.0-beta.0` | | postUpgrade.labelNamespace.image.pullPolicy | Image pullPolicy | `IfNotPresent` | | postUpgrade.labelNamespace.image.pullSecrets | Image pullSecrets | `[]` | | postUpgrade.labelNamespace.priorityClassName | Priority class name for gatekeeper-update-namespace-label-post-upgrade Job | `` | @@ -107,10 +107,10 @@ information._ | postUpgrade.resources | The resource request/limits for the container image in postUpgrade hook jobs | `{}` | | postUpgrade.securityContext | Security context applied on the container | `{ "allowPrivilegeEscalation": false, "capabilities": "drop": [all], "readOnlyRootFilesystem": true, "runAsGroup": 999, "runAsNonRoot": true, "runAsUser": 1000 }` | | preInstall.crdRepository.image.repository | Image with kubectl to update the CRDs. If not set, the `image.crdRepository` is used instead. 
| `null` | -| preInstall.crdRepository.image.tag | Image tag | Current release version: `v3.17.0-beta.0` | +| preInstall.crdRepository.image.tag | Image tag | Current release version: `v3.18.0-beta.0` | | preUninstall.deleteWebhookConfigurations.enabled | Delete webhooks before gatekeeper itself is uninstalled | `false` | | preUninstall.deleteWebhookConfigurations.image.repository | Image with kubectl to delete the webhooks | `openpolicyagent/gatekeeper-crds` | -| preUninstall.deleteWebhookConfigurations.image.tag | Image tag | Current release version: `v3.17.0-beta.0` | +| preUninstall.deleteWebhookConfigurations.image.tag | Image tag | Current release version: `v3.18.0-beta.0` | | preUninstall.deleteWebhookConfigurations.image.pullPolicy | Image pullPolicy | `IfNotPresent` | | preUninstall.deleteWebhookConfigurations.image.pullSecrets | Image pullSecrets | `[]` | | preUninstall.deleteWebhookConfigurations.extraRules | Extra rules for the gatekeeper-delete-webhook-configs Role | `[]` | @@ -148,6 +148,7 @@ information._ | validatingWebhookCustomRules | Custom rules for selecting which API resources trigger the webhook. Mutually exclusive with `enableDeleteOperations`. NOTE: If you change this, ensure all your constraints are still being enforced. | `{}` | | validatingWebhookURL | Custom URL for Kubernetes API server to use to reach the validating webhook pod. If not set, the default of connecting via the kubernetes service endpoint is used. | `null` | | enableDeleteOperations | Enable validating webhook for delete operations. Does not work with `validatingWebhookCustomRules` | `false` | +| enableConnectOperations | Enable validating webhook for connect operations. | `false` | | enableExternalData | Enable external data | `true` | | enableGeneratorResourceExpansion | Enable generator resource expansion (beta feature) | `true` | | enableTLSHealthcheck | Enable probing webhook API with certificate stored in certDir | `false` | @@ -165,18 +166,20 @@ information._ | mutatingWebhookURL | Custom URL for Kubernetes API server to use to reach the mutating webhook pod. If not set, the default of connecting via the kubernetes service endpoint is used. | `null` | | emitAdmissionEvents | Emit K8s events in configurable namespace for admission violations (alpha feature) | `false` | | emitAuditEvents | Emit K8s events in configurable namespace for audit violations (alpha feature) | `false` | -| enableK8sNativeValidation | Enable the K8s Native Validating driver to create CEL-based rules (alpha feature) | `false` | -| vapEnforcement | Generate K8s Validating Admission Policy resource. Allowed values are NONE: do not generate, GATEKEEPER_DEFAULT: do not generate unless label gatekeeper.sh/use-vap: yes is added to policy explicitly, VAP_DEFAULT: generate unless label gatekeeper.sh/use-vap: no is added to policy explicitly. (alpha feature) | `GATEKEEPER_DEFAULT` | +| enableK8sNativeValidation | Enable the K8s Native Validating driver to allow constraint templates to use rules written in VAP-style CEL (beta feature) | `true` | +| defaultCreateVAPForTemplates | Create VAP resource for template containing VAP-style CEL source. Allowed values are false: do not create Validating Admission Policy unless generateVAP: true is set on constraint template explicitly, true: create Validating Admission Policy unless generateVAP: false is set on constraint template explicitly. 
(alpha feature) | `false` | +| defaultCreateVAPBindingForConstraints | Create VAPBinding resource for constraint of the template containing VAP-style CEL source. Allowed values are false: do not create Validating Admission Policy Binding, true: create Validating Admission Policy Binding. (alpha feature) | `false` | | auditEventsInvolvedNamespace | Emit audit events for each violation in the involved objects namespace, the default (false) generates events in the namespace Gatekeeper is installed in. Audit events from cluster-scoped resources will continue to generate events in the namespace that Gatekeeper is installed in | `false` | | admissionEventsInvolvedNamespace | Emit admission events for each violation in the involved objects namespace, the default (false) generates events in the namespace Gatekeeper is installed in. Admission events from cluster-scoped resources will continue to generate events in the namespace that Gatekeeper is installed in | `false` | | logDenies | Log detailed info on each deny | `false` | | logLevel | Minimum log level | `INFO` | | image.pullPolicy | The image pull policy | `IfNotPresent` | | image.repository | Image repository | `openpolicyagent/gatekeeper` | -| image.release | The image release tag to use | Current release version: `v3.17.0-beta.0` | +| image.release | The image release tag to use | Current release version: `v3.18.0-beta.0` | | image.pullSecrets | Specify an array of imagePullSecrets | `[]` | | resources | The resource request/limits for the container image | limits: 1 CPU, 512Mi, requests: 100mCPU, 256Mi | | nodeSelector | The node selector to use for pod scheduling | `kubernetes.io/os: linux` | +| controllerManager.podLabels | The labels to add to the controller manager pod | `{}` | | controllerManager.affinity | The node affinity to use for controller manager pod scheduling | `{}` | | controllerManager.topologySpreadConstraints | The topology spread constraints to use for controller manager pod scheduling | `[]` | | controllerManager.tolerations | The tolerations to use for controller manager pod scheduling | `[]` | @@ -198,6 +201,8 @@ information._ | controllerManager.networkPolicy.enabled | Should a network policy for the controller manager be created | `false` | | controllerManager.networkPolicy.ingress | Additional ingress rules to be added to the controller manager network policy | `{}` | | controllerManager.strategyType | The strategy type to use for Controller Manager deployment | `RollingUpdate` | +| controllerManager.strategyRollingUpdate | [RollingUpdate](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment) configuration for Controller Manager deployment | `{}` | +| audit.podLabels | The labels to add to the audit pod | `{}` | | audit.affinity | The node affinity to use for audit pod scheduling | `{}` | | audit.topologySpreadConstraints | The topology spread constraints to use for audit pod scheduling | `[]` | | audit.tolerations | The tolerations to use for audit pod scheduling | `[]` | diff --git a/charts/gatekeeper/crds/assign-customresourcedefinition.yaml b/charts/gatekeeper/crds/assign-customresourcedefinition.yaml index 9cdb88b82db..1b8bc5313b2 100644 --- a/charts/gatekeeper/crds/assign-customresourcedefinition.yaml +++ b/charts/gatekeeper/crds/assign-customresourcedefinition.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -149,11 +150,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - 
key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -200,11 +203,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -498,11 +503,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -549,11 +556,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -847,11 +856,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -898,11 +909,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string diff --git a/charts/gatekeeper/crds/assignimage-customresourcedefinition.yaml b/charts/gatekeeper/crds/assignimage-customresourcedefinition.yaml index 8a206ea3d50..ab9177acf46 100644 --- a/charts/gatekeeper/crds/assignimage-customresourcedefinition.yaml +++ b/charts/gatekeeper/crds/assignimage-customresourcedefinition.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -149,11 +150,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -200,11 +203,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string diff --git a/charts/gatekeeper/crds/assignmetadata-customresourcedefinition.yaml b/charts/gatekeeper/crds/assignmetadata-customresourcedefinition.yaml index 2c28c1c6d02..719b0cd0aae 100644 --- a/charts/gatekeeper/crds/assignmetadata-customresourcedefinition.yaml +++ b/charts/gatekeeper/crds/assignmetadata-customresourcedefinition.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -121,11 +122,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -172,11 +175,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -419,11 +424,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -470,11 +477,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -717,11 +726,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: 
atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -768,11 +779,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string diff --git a/charts/gatekeeper/crds/config-customresourcedefinition.yaml b/charts/gatekeeper/crds/config-customresourcedefinition.yaml index 2842c926d05..11a5d922789 100644 --- a/charts/gatekeeper/crds/config-customresourcedefinition.yaml +++ b/charts/gatekeeper/crds/config-customresourcedefinition.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/charts/gatekeeper/crds/constraintpodstatus-customresourcedefinition.yaml b/charts/gatekeeper/crds/constraintpodstatus-customresourcedefinition.yaml index 566ac21f376..85942c0dbcc 100644 --- a/charts/gatekeeper/crds/constraintpodstatus-customresourcedefinition.yaml +++ b/charts/gatekeeper/crds/constraintpodstatus-customresourcedefinition.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/charts/gatekeeper/crds/constrainttemplate-customresourcedefinition.yaml b/charts/gatekeeper/crds/constrainttemplate-customresourcedefinition.yaml index 91e2f0b8563..2507c159f74 100644 --- a/charts/gatekeeper/crds/constrainttemplate-customresourcedefinition.yaml +++ b/charts/gatekeeper/crds/constrainttemplate-customresourcedefinition.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/charts/gatekeeper/crds/constrainttemplatepodstatus-customresourcedefinition.yaml b/charts/gatekeeper/crds/constrainttemplatepodstatus-customresourcedefinition.yaml index f6290d17f7a..2d4bd1c8bf2 100644 --- a/charts/gatekeeper/crds/constrainttemplatepodstatus-customresourcedefinition.yaml +++ b/charts/gatekeeper/crds/constrainttemplatepodstatus-customresourcedefinition.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/charts/gatekeeper/crds/expansiontemplate-customresourcedefinition.yaml b/charts/gatekeeper/crds/expansiontemplate-customresourcedefinition.yaml index f5838f6e50e..8e84974e398 100644 --- a/charts/gatekeeper/crds/expansiontemplate-customresourcedefinition.yaml +++ b/charts/gatekeeper/crds/expansiontemplate-customresourcedefinition.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/charts/gatekeeper/crds/expansiontemplatepodstatus-customresourcedefinition.yaml b/charts/gatekeeper/crds/expansiontemplatepodstatus-customresourcedefinition.yaml index 004abaf343d..54d9d5de3b3 100644 --- a/charts/gatekeeper/crds/expansiontemplatepodstatus-customresourcedefinition.yaml +++ b/charts/gatekeeper/crds/expansiontemplatepodstatus-customresourcedefinition.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/charts/gatekeeper/crds/modifyset-customresourcedefinition.yaml b/charts/gatekeeper/crds/modifyset-customresourcedefinition.yaml index 188197df541..24c86d4c8bf 100644 --- a/charts/gatekeeper/crds/modifyset-customresourcedefinition.yaml +++ b/charts/gatekeeper/crds/modifyset-customresourcedefinition.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -151,11 +152,13 @@ spec: items: type: string 
type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -202,11 +205,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -471,11 +476,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -522,11 +529,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -791,11 +800,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -842,11 +853,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string diff --git a/charts/gatekeeper/crds/mutatorpodstatus-customresourcedefinition.yaml b/charts/gatekeeper/crds/mutatorpodstatus-customresourcedefinition.yaml index 931e05e147a..37111692e15 100644 --- a/charts/gatekeeper/crds/mutatorpodstatus-customresourcedefinition.yaml +++ b/charts/gatekeeper/crds/mutatorpodstatus-customresourcedefinition.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/charts/gatekeeper/crds/provider-customresourcedefinition.yaml b/charts/gatekeeper/crds/provider-customresourcedefinition.yaml index a43e39173ff..901c9cca045 100644 --- a/charts/gatekeeper/crds/provider-customresourcedefinition.yaml +++ b/charts/gatekeeper/crds/provider-customresourcedefinition.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/charts/gatekeeper/crds/syncset-customresourcedefinition.yaml b/charts/gatekeeper/crds/syncset-customresourcedefinition.yaml index 1ef7ce4154e..6f055bff8f5 100644 --- a/charts/gatekeeper/crds/syncset-customresourcedefinition.yaml +++ b/charts/gatekeeper/crds/syncset-customresourcedefinition.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/charts/gatekeeper/templates/_helpers.tpl b/charts/gatekeeper/templates/_helpers.tpl index 785d9912656..c8ffb260489 100644 --- a/charts/gatekeeper/templates/_helpers.tpl +++ b/charts/gatekeeper/templates/_helpers.tpl @@ -40,6 +40,25 @@ Adds additional pod labels to the common ones {{- end }} {{- end -}} +{{/* +Adds additional controller-manager pod labels to the common ones +*/}} +{{- define "controllerManager.podLabels" -}} +{{- if .Values.controllerManager.podLabels }} +{{- toYaml .Values.controllerManager.podLabels }} +{{- end }} +{{- end -}} + +{{/* +Adds additional audit pod labels to the common ones +*/}} +{{- define "audit.podLabels" -}} +{{- if .Values.audit.podLabels }} +{{- toYaml .Values.audit.podLabels }} +{{- end }} +{{- end -}} + + {{/* Mandatory labels */}} diff --git a/charts/gatekeeper/templates/gatekeeper-admin-serviceaccount.yaml b/charts/gatekeeper/templates/gatekeeper-admin-serviceaccount.yaml index 4b68998cb48..4fd37c89d5e 100644 --- 
a/charts/gatekeeper/templates/gatekeeper-admin-serviceaccount.yaml +++ b/charts/gatekeeper/templates/gatekeeper-admin-serviceaccount.yaml @@ -1,3 +1,5 @@ +--- +{{- if .Values.serviceAccount.gatekeeperAdmin.create }} apiVersion: v1 kind: ServiceAccount metadata: @@ -9,3 +11,4 @@ metadata: release: '{{ .Release.Name }}' name: gatekeeper-admin namespace: '{{ .Release.Namespace }}' +{{- end }} diff --git a/charts/gatekeeper/templates/gatekeeper-audit-deployment.yaml b/charts/gatekeeper/templates/gatekeeper-audit-deployment.yaml index 77da1a4531a..03e2d6b97e5 100644 --- a/charts/gatekeeper/templates/gatekeeper-audit-deployment.yaml +++ b/charts/gatekeeper/templates/gatekeeper-audit-deployment.yaml @@ -1,3 +1,4 @@ +--- {{- if not .Values.disableAudit }} apiVersion: apps/v1 kind: Deployment @@ -35,6 +36,7 @@ spec: {{- end }} labels: {{- include "gatekeeper.podLabels" . | nindent 8 }} + {{- include "audit.podLabels" . | nindent 8 }} {{- include "gatekeeper.commonLabels" . | nindent 8 }} app: '{{ template "gatekeeper.name" . }}' chart: '{{ template "gatekeeper.name" . }}' @@ -87,6 +89,15 @@ spec: {{- end }} - --disable-cert-rotation={{ or .Values.audit.disableCertRotation .Values.externalCertInjection.enabled }} - --external-data-provider-response-cache-ttl={{ .Values.externaldataProviderResponseCacheTTL }} + - --enable-k8s-native-validation={{ .Values.enableK8sNativeValidation }} + + {{- if hasKey .Values "defaultCreateVAPForTemplates"}} + - --default-create-vap-for-templates={{ .Values.defaultCreateVAPForTemplates }} + {{- end }} + + {{- if hasKey .Values "defaultCreateVAPBindingForConstraints"}} + - --default-create-vap-binding-for-constraints={{ .Values.defaultCreateVAPBindingForConstraints }} + {{- end }} command: - /manager env: @@ -152,7 +163,7 @@ spec: {{- end }} securityContext: {{- toYaml .Values.audit.podSecurityContext | nindent 8 }} - serviceAccountName: gatekeeper-admin + serviceAccountName: {{ .Values.audit.serviceAccount.name }} terminationGracePeriodSeconds: 60 tolerations: {{- toYaml .Values.audit.tolerations | nindent 8 }} diff --git a/charts/gatekeeper/templates/gatekeeper-controller-manager-deployment.yaml b/charts/gatekeeper/templates/gatekeeper-controller-manager-deployment.yaml index df9807a6d96..4f2927c1cb5 100644 --- a/charts/gatekeeper/templates/gatekeeper-controller-manager-deployment.yaml +++ b/charts/gatekeeper/templates/gatekeeper-controller-manager-deployment.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -24,6 +25,10 @@ spec: heritage: '{{ .Release.Service }}' release: '{{ .Release.Name }}' strategy: + {{- if .Values.controllerManager.strategyRollingUpdate }} + rollingUpdate: + {{- toYaml .Values.controllerManager.strategyRollingUpdate | nindent 6 }} + {{- end }} type: {{ .Values.controllerManager.strategyType }} template: metadata: @@ -33,6 +38,7 @@ spec: {{- end }} labels: {{- include "gatekeeper.podLabels" . | nindent 8 }} + {{- include "controllerManager.podLabels" . | nindent 8 }} {{- include "gatekeeper.commonLabels" . | nindent 8 }} app: '{{ template "gatekeeper.name" . }}' chart: '{{ template "gatekeeper.name" . 
}}' @@ -72,8 +78,7 @@ spec: - --validating-webhook-configuration-name={{ .Values.validatingWebhookName }} - --mutating-webhook-configuration-name={{ .Values.mutatingWebhookName }} - --external-data-provider-response-cache-ttl={{ .Values.externaldataProviderResponseCacheTTL }} - - --experimental-enable-k8s-native-validation={{ .Values.enableK8sNativeValidation }} - - --vap-enforcement={{ .Values.vapEnforcement }} + - --enable-k8s-native-validation={{ .Values.enableK8sNativeValidation }} {{ if ne .Values.controllerManager.clientCertName "" }}- --client-cert-name={{ .Values.controllerManager.clientCertName }}{{- end }} {{- range .Values.metricsBackends}} @@ -101,6 +106,14 @@ spec: {{- if .Values.controllerManager.logFile}} - --log-file={{ .Values.controllerManager.logFile }} {{- end }} + + {{- if hasKey .Values "defaultCreateVAPForTemplates"}} + - --default-create-vap-for-templates={{ .Values.defaultCreateVAPForTemplates }} + {{- end }} + + {{- if hasKey .Values "defaultCreateVAPBindingForConstraints"}} + - --default-create-vap-binding-for-constraints={{ .Values.defaultCreateVAPBindingForConstraints }} + {{- end }} command: - /manager env: @@ -167,7 +180,7 @@ spec: {{- end }} securityContext: {{- toYaml .Values.controllerManager.podSecurityContext | nindent 8 }} - serviceAccountName: gatekeeper-admin + serviceAccountName: {{ .Values.controllerManager.serviceAccount.name }} terminationGracePeriodSeconds: 60 tolerations: {{- toYaml .Values.controllerManager.tolerations | nindent 8 }} diff --git a/charts/gatekeeper/templates/gatekeeper-controller-manager-poddisruptionbudget.yaml b/charts/gatekeeper/templates/gatekeeper-controller-manager-poddisruptionbudget.yaml index 424f6a67c44..609270a92de 100644 --- a/charts/gatekeeper/templates/gatekeeper-controller-manager-poddisruptionbudget.yaml +++ b/charts/gatekeeper/templates/gatekeeper-controller-manager-poddisruptionbudget.yaml @@ -1,3 +1,4 @@ +--- {{- $v1 := .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" -}} {{- $v1beta1 := .Capabilities.APIVersions.Has "policy/v1beta1/PodDisruptionBudget" -}} apiVersion: policy/v1{{- if and (not $v1) $v1beta1 -}}beta1{{- end }} diff --git a/charts/gatekeeper/templates/gatekeeper-critical-pods-resourcequota.yaml b/charts/gatekeeper/templates/gatekeeper-critical-pods-resourcequota.yaml index 15464636691..356e877bbab 100644 --- a/charts/gatekeeper/templates/gatekeeper-critical-pods-resourcequota.yaml +++ b/charts/gatekeeper/templates/gatekeeper-critical-pods-resourcequota.yaml @@ -1,3 +1,4 @@ +--- {{- if .Values.resourceQuota }} apiVersion: v1 kind: ResourceQuota diff --git a/charts/gatekeeper/templates/gatekeeper-manager-role-clusterrole.yaml b/charts/gatekeeper/templates/gatekeeper-manager-role-clusterrole.yaml index e41f96c9790..a6306b3a285 100644 --- a/charts/gatekeeper/templates/gatekeeper-manager-role-clusterrole.yaml +++ b/charts/gatekeeper/templates/gatekeeper-manager-role-clusterrole.yaml @@ -1,3 +1,4 @@ +--- {{- if .Values.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole diff --git a/charts/gatekeeper/templates/gatekeeper-manager-role-role.yaml b/charts/gatekeeper/templates/gatekeeper-manager-role-role.yaml index 72d7513baaf..0a5eecfa500 100644 --- a/charts/gatekeeper/templates/gatekeeper-manager-role-role.yaml +++ b/charts/gatekeeper/templates/gatekeeper-manager-role-role.yaml @@ -1,3 +1,4 @@ +--- {{- if .Values.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git 
a/charts/gatekeeper/templates/gatekeeper-manager-rolebinding-clusterrolebinding.yaml b/charts/gatekeeper/templates/gatekeeper-manager-rolebinding-clusterrolebinding.yaml index 1fb9f6c87a5..3c876bd1e6d 100644 --- a/charts/gatekeeper/templates/gatekeeper-manager-rolebinding-clusterrolebinding.yaml +++ b/charts/gatekeeper/templates/gatekeeper-manager-rolebinding-clusterrolebinding.yaml @@ -1,3 +1,4 @@ +--- {{- if .Values.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/charts/gatekeeper/templates/gatekeeper-manager-rolebinding-rolebinding.yaml b/charts/gatekeeper/templates/gatekeeper-manager-rolebinding-rolebinding.yaml index fbe9580d57f..a932d59f6f3 100644 --- a/charts/gatekeeper/templates/gatekeeper-manager-rolebinding-rolebinding.yaml +++ b/charts/gatekeeper/templates/gatekeeper-manager-rolebinding-rolebinding.yaml @@ -1,3 +1,4 @@ +--- {{- if .Values.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding diff --git a/charts/gatekeeper/templates/gatekeeper-mutating-webhook-configuration-mutatingwebhookconfiguration.yaml b/charts/gatekeeper/templates/gatekeeper-mutating-webhook-configuration-mutatingwebhookconfiguration.yaml index c3cc122851a..6b4821181f1 100644 --- a/charts/gatekeeper/templates/gatekeeper-mutating-webhook-configuration-mutatingwebhookconfiguration.yaml +++ b/charts/gatekeeper/templates/gatekeeper-mutating-webhook-configuration-mutatingwebhookconfiguration.yaml @@ -1,3 +1,4 @@ +--- {{- if not .Values.disableMutation }} apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration @@ -24,9 +25,11 @@ webhooks: path: /v1/mutate {{- end }} failurePolicy: {{ .Values.mutatingWebhookFailurePolicy }} + {{- if .Values.mutatingWebhookMatchConditions }} {{- if ge (int .Capabilities.KubeVersion.Minor) 28 }} matchConditions: {{ toYaml .Values.mutatingWebhookMatchConditions | nindent 4 }} {{- end }} + {{- end }} matchPolicy: Exact name: mutation.gatekeeper.sh namespaceSelector: @@ -61,6 +64,21 @@ webhooks: - UPDATE resources: - '*' + - pods/ephemeralcontainers + - pods/exec + - pods/log + - pods/eviction + - pods/portforward + - pods/proxy + - pods/attach + - pods/binding + - deployments/scale + - replicasets/scale + - statefulsets/scale + - replicationcontrollers/scale + - services/proxy + - nodes/proxy + - services/status {{- end }} sideEffects: None timeoutSeconds: {{ .Values.mutatingWebhookTimeoutSeconds }} diff --git a/charts/gatekeeper/templates/gatekeeper-validating-webhook-configuration-validatingwebhookconfiguration.yaml b/charts/gatekeeper/templates/gatekeeper-validating-webhook-configuration-validatingwebhookconfiguration.yaml index c8dbc8f0daa..d2cdd775a18 100644 --- a/charts/gatekeeper/templates/gatekeeper-validating-webhook-configuration-validatingwebhookconfiguration.yaml +++ b/charts/gatekeeper/templates/gatekeeper-validating-webhook-configuration-validatingwebhookconfiguration.yaml @@ -1,3 +1,4 @@ +--- {{- if not .Values.disableValidatingWebhook }} apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration @@ -24,9 +25,11 @@ webhooks: path: /v1/admit {{- end }} failurePolicy: {{ .Values.validatingWebhookFailurePolicy }} + {{- if .Values.validatingWebhookMatchConditions }} {{- if ge (int .Capabilities.KubeVersion.Minor) 28 }} matchConditions: {{ toYaml .Values.validatingWebhookMatchConditions | nindent 4 }} {{- end }} + {{- end }} matchPolicy: Exact name: validation.gatekeeper.sh namespaceSelector: @@ -61,6 +64,9 @@ webhooks: {{- if .Values.enableDeleteOperations }} - 
DELETE {{- end }} + {{- if .Values.enableConnectOperations }} + - CONNECT + {{- end }} resources: - '*' # Explicitly list all known subresources except "status" (to avoid destabilizing the cluster and increasing load on gatekeeper). diff --git a/charts/gatekeeper/templates/gatekeeper-webhook-server-cert-secret.yaml b/charts/gatekeeper/templates/gatekeeper-webhook-server-cert-secret.yaml index a841780a555..79d331c97ca 100644 --- a/charts/gatekeeper/templates/gatekeeper-webhook-server-cert-secret.yaml +++ b/charts/gatekeeper/templates/gatekeeper-webhook-server-cert-secret.yaml @@ -1,3 +1,4 @@ +--- {{- if not .Values.externalCertInjection.enabled }} apiVersion: v1 kind: Secret diff --git a/charts/gatekeeper/templates/gatekeeper-webhook-service-service.yaml b/charts/gatekeeper/templates/gatekeeper-webhook-service-service.yaml index 3c0f4453a11..5773f39b3fa 100644 --- a/charts/gatekeeper/templates/gatekeeper-webhook-service-service.yaml +++ b/charts/gatekeeper/templates/gatekeeper-webhook-service-service.yaml @@ -1,3 +1,4 @@ +--- apiVersion: v1 kind: Service metadata: diff --git a/charts/gatekeeper/templates/namespace-post-install.yaml b/charts/gatekeeper/templates/namespace-post-install.yaml index 41232de4267..2586ca2cc33 100644 --- a/charts/gatekeeper/templates/namespace-post-install.yaml +++ b/charts/gatekeeper/templates/namespace-post-install.yaml @@ -32,7 +32,7 @@ spec: imagePullSecrets: {{- .Values.postInstall.labelNamespace.image.pullSecrets | toYaml | nindent 12 }} {{- end }} - serviceAccount: gatekeeper-update-namespace-label + serviceAccount: {{ .Values.postInstall.labelNamespace.serviceAccount.name }} {{- if .Values.postInstall.probeWebhook.enabled }} volumes: {{- include "gatekeeper.postInstallWebhookProbeVolume" . | nindent 8 }} @@ -90,10 +90,11 @@ spec: {{- toYaml .tolerations | nindent 8 }} {{- end }} --- +{{- if .Values.postInstall.labelNamespace.serviceAccount.create }} apiVersion: v1 kind: ServiceAccount metadata: - name: gatekeeper-update-namespace-label + name: {{ .Values.postInstall.labelNamespace.serviceAccount.name }} namespace: {{ .Release.Namespace | quote }} labels: {{- include "gatekeeper.commonLabels" . 
| nindent 4 }} @@ -103,6 +104,7 @@ metadata: "helm.sh/hook": post-install "helm.sh/hook-weight": "-5" "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- end }} --- {{- if .Values.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 @@ -155,7 +157,7 @@ roleRef: name: gatekeeper-update-namespace-label subjects: - kind: ServiceAccount - name: gatekeeper-update-namespace-label + name: {{ .Values.postInstall.labelNamespace.serviceAccount.name }} namespace: {{ .Release.Namespace | quote }} {{- end }} {{- end }} diff --git a/charts/gatekeeper/templates/namespace-post-upgrade.yaml b/charts/gatekeeper/templates/namespace-post-upgrade.yaml index b26abab34e0..c758e402cf7 100644 --- a/charts/gatekeeper/templates/namespace-post-upgrade.yaml +++ b/charts/gatekeeper/templates/namespace-post-upgrade.yaml @@ -27,7 +27,7 @@ spec: imagePullSecrets: {{- .Values.postUpgrade.labelNamespace.image.pullSecrets | toYaml | nindent 12 }} {{- end }} - serviceAccount: gatekeeper-update-namespace-label-post-upgrade + serviceAccount: {{ .Values.postUpgrade.labelNamespace.serviceAccount.name }} {{- if .Values.postUpgrade.labelNamespace.priorityClassName }} priorityClassName: {{ .Values.postUpgrade.labelNamespace.priorityClassName }} {{- end }} @@ -82,10 +82,11 @@ spec: {{- toYaml .nodeSelector | nindent 8 }} {{- end }} --- +{{- if .Values.postUpgrade.labelNamespace.serviceAccount.create }} apiVersion: v1 kind: ServiceAccount metadata: - name: gatekeeper-update-namespace-label-post-upgrade + name: {{ .Values.postUpgrade.labelNamespace.serviceAccount.name }} labels: {{- include "gatekeeper.commonLabels" . | nindent 4 }} release: {{ .Release.Name }} @@ -94,6 +95,7 @@ metadata: "helm.sh/hook": post-upgrade "helm.sh/hook-weight": "-5" "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- end }} --- {{- if .Values.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 @@ -143,7 +145,7 @@ roleRef: name: gatekeeper-update-namespace-label-post-upgrade subjects: - kind: ServiceAccount - name: gatekeeper-update-namespace-label-post-upgrade + name: {{ .Values.postUpgrade.labelNamespace.serviceAccount.name }} namespace: {{ .Release.Namespace | quote }} {{- end }} {{- end }} diff --git a/charts/gatekeeper/templates/upgrade-crds-hook.yaml b/charts/gatekeeper/templates/upgrade-crds-hook.yaml index f9347ad9c7b..a0f1ed466c1 100644 --- a/charts/gatekeeper/templates/upgrade-crds-hook.yaml +++ b/charts/gatekeeper/templates/upgrade-crds-hook.yaml @@ -36,7 +36,7 @@ metadata: helm.sh/hook-weight: "1" subjects: - kind: ServiceAccount - name: gatekeeper-admin-upgrade-crds + name: {{ .Values.upgradeCRDs.serviceAccount.name }} namespace: {{ .Release.Namespace }} roleRef: kind: ClusterRole @@ -44,6 +44,7 @@ roleRef: apiGroup: rbac.authorization.k8s.io {{- end }} --- +{{- if .Values.upgradeCRDs.serviceAccount.create }} apiVersion: v1 kind: ServiceAccount metadata: @@ -51,12 +52,13 @@ metadata: {{- include "gatekeeper.commonLabels" . | nindent 4 }} release: {{ .Release.Name }} heritage: {{ .Release.Service }} - name: gatekeeper-admin-upgrade-crds + name: {{ .Values.upgradeCRDs.serviceAccount.name }} namespace: '{{ .Release.Namespace }}' annotations: helm.sh/hook: pre-install,pre-upgrade helm.sh/hook-delete-policy: "hook-succeeded,before-hook-creation" helm.sh/hook-weight: "1" +{{- end }} --- apiVersion: batch/v1 kind: Job @@ -82,7 +84,7 @@ spec: {{- include "gatekeeper.mandatoryLabels" . | nindent 8 }} {{- include "gatekeeper.commonLabels" . 
| nindent 8 }} spec: - serviceAccountName: gatekeeper-admin-upgrade-crds + serviceAccountName: {{ .Values.upgradeCRDs.serviceAccount.name }} restartPolicy: Never {{- if .Values.image.pullSecrets }} imagePullSecrets: diff --git a/charts/gatekeeper/templates/webhook-configs-pre-delete.yaml b/charts/gatekeeper/templates/webhook-configs-pre-delete.yaml index fb359b87e16..19c6ff0eb25 100644 --- a/charts/gatekeeper/templates/webhook-configs-pre-delete.yaml +++ b/charts/gatekeeper/templates/webhook-configs-pre-delete.yaml @@ -26,7 +26,7 @@ spec: imagePullSecrets: {{- .Values.preUninstall.deleteWebhookConfigurations.image.pullSecrets | toYaml | nindent 12 }} {{- end }} - serviceAccount: gatekeeper-delete-webhook-configs + serviceAccount: {{ .Values.preUninstall.deleteWebhookConfigurations.serviceAccount.name }} {{- if .Values.preUninstall.deleteWebhookConfigurations.priorityClassName }} priorityClassName: {{ .Values.preUninstall.deleteWebhookConfigurations.priorityClassName }} {{- end }} @@ -59,10 +59,11 @@ spec: {{- toYaml .tolerations | nindent 8 }} {{- end }} --- +{{- if .Values.preUninstall.deleteWebhookConfigurations.serviceAccount.create }} apiVersion: v1 kind: ServiceAccount metadata: - name: gatekeeper-delete-webhook-configs + name: {{ .Values.preUninstall.deleteWebhookConfigurations.serviceAccount.name }} namespace: {{ .Release.Namespace | quote }} labels: {{- include "gatekeeper.commonLabels" . | nindent 4 }} @@ -72,6 +73,7 @@ metadata: "helm.sh/hook": pre-delete "helm.sh/hook-weight": "-5" "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- end }} --- {{- if .Values.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 @@ -131,7 +133,7 @@ roleRef: name: gatekeeper-delete-webhook-configs subjects: - kind: ServiceAccount - name: gatekeeper-delete-webhook-configs + name: {{ .Values.preUninstall.deleteWebhookConfigurations.name }} namespace: {{ .Release.Namespace | quote }} {{- end }} {{- end }} diff --git a/charts/gatekeeper/values.yaml b/charts/gatekeeper/values.yaml index ac84eabf1b4..ea81c083063 100644 --- a/charts/gatekeeper/values.yaml +++ b/charts/gatekeeper/values.yaml @@ -19,6 +19,7 @@ validatingWebhookCheckIgnoreFailurePolicy: Fail validatingWebhookCustomRules: {} validatingWebhookURL: null enableDeleteOperations: false +enableConnectOperations: false enableExternalData: true enableGeneratorResourceExpansion: true enableTLSHealthcheck: false @@ -44,25 +45,27 @@ admissionEventsInvolvedNamespace: false auditEventsInvolvedNamespace: false resourceQuota: true externaldataProviderResponseCacheTTL: 3m -enableK8sNativeValidation: false -vapEnforcement: GATEKEEPER_DEFAULT +enableK8sNativeValidation: true image: repository: openpolicyagent/gatekeeper crdRepository: openpolicyagent/gatekeeper-crds - release: v3.17.0-beta.0 + release: v3.18.0-beta.0 pullPolicy: IfNotPresent pullSecrets: [] preInstall: crdRepository: image: repository: null - tag: v3.17.0-beta.0 + tag: v3.18.0-beta.0 postUpgrade: labelNamespace: + serviceAccount: + name: gatekeeper-update-namespace-label-post-upgrade + create: true enabled: false image: repository: openpolicyagent/gatekeeper-crds - tag: v3.17.0-beta.0 + tag: v3.18.0-beta.0 pullPolicy: IfNotPresent pullSecrets: [] extraNamespaces: [] @@ -89,11 +92,14 @@ postUpgrade: runAsUser: 1000 postInstall: labelNamespace: + serviceAccount: + name: gatekeeper-update-namespace-label + create: true enabled: true extraRules: [] image: repository: openpolicyagent/gatekeeper-crds - tag: v3.17.0-beta.0 + tag: v3.18.0-beta.0 pullPolicy: IfNotPresent 
pullSecrets: [] extraNamespaces: [] @@ -130,11 +136,14 @@ postInstall: runAsUser: 1000 preUninstall: deleteWebhookConfigurations: + serviceAccount: + name: gatekeeper-delete-webhook-configs + create: true extraRules: [] enabled: false image: repository: openpolicyagent/gatekeeper-crds - tag: v3.17.0-beta.0 + tag: v3.18.0-beta.0 pullPolicy: IfNotPresent pullSecrets: [] priorityClassName: "" @@ -158,6 +167,8 @@ podCountLimit: "100" secretAnnotations: {} enableRuntimeDefaultSeccompProfile: true controllerManager: + serviceAccount: + name: gatekeeper-admin exemptNamespaces: [] exemptNamespacePrefixes: [] hostNetwork: false @@ -172,6 +183,8 @@ controllerManager: tlsMinVersion: 1.3 clientCertName: "" strategyType: RollingUpdate + strategyRollingUpdate: {} + podLabels: {} affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: @@ -214,6 +227,8 @@ controllerManager: # - ipBlock: # cidr: 0.0.0.0/0 audit: + serviceAccount: + name: gatekeeper-admin enablePubsub: false connection: audit-connection channel: audit-channel @@ -225,6 +240,7 @@ audit: livenessTimeout: 1 priorityClassName: system-cluster-critical disableCertRotation: false + podLabels: {} affinity: {} tolerations: [] nodeSelector: {kubernetes.io/os: linux} @@ -271,6 +287,9 @@ disabledBuiltins: ["{http.send}"] psp: enabled: false upgradeCRDs: + serviceAccount: + create: true + name: gatekeeper-admin-upgrade-crds enabled: true extraRules: [] priorityClassName: "" @@ -279,3 +298,6 @@ rbac: externalCertInjection: enabled: false secretName: gatekeeper-webhook-server-cert +serviceAccount: + gatekeeperAdmin: + create: true diff --git a/cmd/build/helmify/static/Chart.yaml b/cmd/build/helmify/static/Chart.yaml index c5bcc30e89f..e42cec2b576 100644 --- a/cmd/build/helmify/static/Chart.yaml +++ b/cmd/build/helmify/static/Chart.yaml @@ -4,8 +4,8 @@ name: gatekeeper icon: https://open-policy-agent.github.io/gatekeeper/website/img/logo.svg keywords: - open policy agent -version: 3.17.0-beta.0 +version: 3.18.0-beta.0 home: https://github.com/open-policy-agent/gatekeeper sources: - https://github.com/open-policy-agent/gatekeeper.git -appVersion: v3.17.0-beta.0 +appVersion: v3.18.0-beta.0 diff --git a/cmd/build/helmify/static/README.md b/cmd/build/helmify/static/README.md index 54ec0750d3f..7313421e175 100644 --- a/cmd/build/helmify/static/README.md +++ b/cmd/build/helmify/static/README.md @@ -74,7 +74,7 @@ information._ | postInstall.labelNamespace.extraNamespaces | The extra namespaces that need to have the label during post install hooks | `[]` | | postInstall.labelNamespace.extraAnnotations | Extra annotations added to the post install Job | `{}` | | postInstall.labelNamespace.image.repository | Image with kubectl to label the namespace | `openpolicyagent/gatekeeper-crds` | -| postInstall.labelNamespace.image.tag | Image tag | Current release version: `v3.17.0-beta.0` | +| postInstall.labelNamespace.image.tag | Image tag | Current release version: `v3.18.0-beta.0` | | postInstall.labelNamespace.image.pullPolicy | Image pullPolicy | `IfNotPresent` | | postInstall.labelNamespace.image.pullSecrets | Image pullSecrets | `[]` | | postInstall.labelNamespace.extraRules | Extra rules for the gatekeeper-update-namespace-label Role | `[]` | @@ -97,7 +97,7 @@ information._ | postUpgrade.labelNamespace.extraNamespaces | The extra namespaces that need to have the label during post upgrade hooks | `[]` | | postUpgrade.labelNamespace.extraAnnotations | Extra annotations added to the post upgrade Job | `{}` | | 
postUpgrade.labelNamespace.image.repository | Image with kubectl to label the namespace | `openpolicyagent/gatekeeper-crds` | -| postUpgrade.labelNamespace.image.tag | Image tag | Current release version: `v3.17.0-beta.0` | +| postUpgrade.labelNamespace.image.tag | Image tag | Current release version: `v3.18.0-beta.0` | | postUpgrade.labelNamespace.image.pullPolicy | Image pullPolicy | `IfNotPresent` | | postUpgrade.labelNamespace.image.pullSecrets | Image pullSecrets | `[]` | | postUpgrade.labelNamespace.priorityClassName | Priority class name for gatekeeper-update-namespace-label-post-upgrade Job | `` | @@ -107,10 +107,10 @@ information._ | postUpgrade.resources | The resource request/limits for the container image in postUpgrade hook jobs | `{}` | | postUpgrade.securityContext | Security context applied on the container | `{ "allowPrivilegeEscalation": false, "capabilities": "drop": [all], "readOnlyRootFilesystem": true, "runAsGroup": 999, "runAsNonRoot": true, "runAsUser": 1000 }` | | preInstall.crdRepository.image.repository | Image with kubectl to update the CRDs. If not set, the `image.crdRepository` is used instead. | `null` | -| preInstall.crdRepository.image.tag | Image tag | Current release version: `v3.17.0-beta.0` | +| preInstall.crdRepository.image.tag | Image tag | Current release version: `v3.18.0-beta.0` | | preUninstall.deleteWebhookConfigurations.enabled | Delete webhooks before gatekeeper itself is uninstalled | `false` | | preUninstall.deleteWebhookConfigurations.image.repository | Image with kubectl to delete the webhooks | `openpolicyagent/gatekeeper-crds` | -| preUninstall.deleteWebhookConfigurations.image.tag | Image tag | Current release version: `v3.17.0-beta.0` | +| preUninstall.deleteWebhookConfigurations.image.tag | Image tag | Current release version: `v3.18.0-beta.0` | | preUninstall.deleteWebhookConfigurations.image.pullPolicy | Image pullPolicy | `IfNotPresent` | | preUninstall.deleteWebhookConfigurations.image.pullSecrets | Image pullSecrets | `[]` | | preUninstall.deleteWebhookConfigurations.extraRules | Extra rules for the gatekeeper-delete-webhook-configs Role | `[]` | @@ -175,7 +175,7 @@ information._ | logLevel | Minimum log level | `INFO` | | image.pullPolicy | The image pull policy | `IfNotPresent` | | image.repository | Image repository | `openpolicyagent/gatekeeper` | -| image.release | The image release tag to use | Current release version: `v3.17.0-beta.0` | +| image.release | The image release tag to use | Current release version: `v3.18.0-beta.0` | | image.pullSecrets | Specify an array of imagePullSecrets | `[]` | | resources | The resource request/limits for the container image | limits: 1 CPU, 512Mi, requests: 100mCPU, 256Mi | | nodeSelector | The node selector to use for pod scheduling | `kubernetes.io/os: linux` | diff --git a/cmd/build/helmify/static/values.yaml b/cmd/build/helmify/static/values.yaml index af50dc61420..ea81c083063 100644 --- a/cmd/build/helmify/static/values.yaml +++ b/cmd/build/helmify/static/values.yaml @@ -49,14 +49,14 @@ enableK8sNativeValidation: true image: repository: openpolicyagent/gatekeeper crdRepository: openpolicyagent/gatekeeper-crds - release: v3.17.0-beta.0 + release: v3.18.0-beta.0 pullPolicy: IfNotPresent pullSecrets: [] preInstall: crdRepository: image: repository: null - tag: v3.17.0-beta.0 + tag: v3.18.0-beta.0 postUpgrade: labelNamespace: serviceAccount: @@ -65,7 +65,7 @@ postUpgrade: enabled: false image: repository: openpolicyagent/gatekeeper-crds - tag: v3.17.0-beta.0 + tag: v3.18.0-beta.0 
pullPolicy: IfNotPresent pullSecrets: [] extraNamespaces: [] @@ -99,7 +99,7 @@ postInstall: extraRules: [] image: repository: openpolicyagent/gatekeeper-crds - tag: v3.17.0-beta.0 + tag: v3.18.0-beta.0 pullPolicy: IfNotPresent pullSecrets: [] extraNamespaces: [] @@ -143,7 +143,7 @@ preUninstall: enabled: false image: repository: openpolicyagent/gatekeeper-crds - tag: v3.17.0-beta.0 + tag: v3.18.0-beta.0 pullPolicy: IfNotPresent pullSecrets: [] priorityClassName: "" diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index a0ce1be1308..1835e9d2b2a 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -56,7 +56,7 @@ spec: - "--operation=webhook" - "--operation=mutation-webhook" - "--disable-opa-builtin={http.send}" - image: openpolicyagent/gatekeeper:v3.17.0-beta.0 + image: openpolicyagent/gatekeeper:v3.18.0-beta.0 imagePullPolicy: Always name: manager ports: @@ -150,7 +150,7 @@ spec: - --disable-cert-rotation command: - /manager - image: openpolicyagent/gatekeeper:v3.17.0-beta.0 + image: openpolicyagent/gatekeeper:v3.18.0-beta.0 env: # used by Gatekeeper - name: POD_NAMESPACE diff --git a/deploy/gatekeeper.yaml b/deploy/gatekeeper.yaml index 91d607f107a..fe375e4eb43 100644 --- a/deploy/gatekeeper.yaml +++ b/deploy/gatekeeper.yaml @@ -181,11 +181,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -232,11 +234,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -530,11 +534,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -581,11 +587,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -879,11 +887,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -930,11 +940,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -1251,11 +1263,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -1302,11 +1316,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -1563,11 +1579,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -1614,11 +1632,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: 
string @@ -1861,11 +1881,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -1912,11 +1934,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -2159,11 +2183,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -2210,11 +2236,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -3512,11 +3540,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -3563,11 +3593,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -3832,11 +3864,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -3883,11 +3917,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -4152,11 +4188,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -4203,11 +4241,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -4921,7 +4961,7 @@ spec: value: manager - name: OTEL_RESOURCE_ATTRIBUTES value: k8s.pod.name=$(POD_NAME),k8s.namespace.name=$(NAMESPACE),k8s.container.name=$(CONTAINER_NAME) - image: openpolicyagent/gatekeeper:v3.17.0-beta.0 + image: openpolicyagent/gatekeeper:v3.18.0-beta.0 imagePullPolicy: Always livenessProbe: httpGet: @@ -5040,7 +5080,7 @@ spec: value: manager - name: OTEL_RESOURCE_ATTRIBUTES value: k8s.pod.name=$(POD_NAME),k8s.namespace.name=$(NAMESPACE),k8s.container.name=$(CONTAINER_NAME) - image: openpolicyagent/gatekeeper:v3.17.0-beta.0 + image: openpolicyagent/gatekeeper:v3.18.0-beta.0 imagePullPolicy: Always livenessProbe: httpGet: diff --git a/manifest_staging/charts/gatekeeper/Chart.yaml b/manifest_staging/charts/gatekeeper/Chart.yaml index c5bcc30e89f..e42cec2b576 100644 --- a/manifest_staging/charts/gatekeeper/Chart.yaml +++ b/manifest_staging/charts/gatekeeper/Chart.yaml @@ -4,8 +4,8 @@ name: gatekeeper icon: https://open-policy-agent.github.io/gatekeeper/website/img/logo.svg keywords: - open policy agent -version: 3.17.0-beta.0 +version: 3.18.0-beta.0 home: https://github.com/open-policy-agent/gatekeeper sources: - https://github.com/open-policy-agent/gatekeeper.git -appVersion: v3.17.0-beta.0 +appVersion: v3.18.0-beta.0 diff 
--git a/manifest_staging/charts/gatekeeper/README.md b/manifest_staging/charts/gatekeeper/README.md index 54ec0750d3f..7313421e175 100644 --- a/manifest_staging/charts/gatekeeper/README.md +++ b/manifest_staging/charts/gatekeeper/README.md @@ -74,7 +74,7 @@ information._ | postInstall.labelNamespace.extraNamespaces | The extra namespaces that need to have the label during post install hooks | `[]` | | postInstall.labelNamespace.extraAnnotations | Extra annotations added to the post install Job | `{}` | | postInstall.labelNamespace.image.repository | Image with kubectl to label the namespace | `openpolicyagent/gatekeeper-crds` | -| postInstall.labelNamespace.image.tag | Image tag | Current release version: `v3.17.0-beta.0` | +| postInstall.labelNamespace.image.tag | Image tag | Current release version: `v3.18.0-beta.0` | | postInstall.labelNamespace.image.pullPolicy | Image pullPolicy | `IfNotPresent` | | postInstall.labelNamespace.image.pullSecrets | Image pullSecrets | `[]` | | postInstall.labelNamespace.extraRules | Extra rules for the gatekeeper-update-namespace-label Role | `[]` | @@ -97,7 +97,7 @@ information._ | postUpgrade.labelNamespace.extraNamespaces | The extra namespaces that need to have the label during post upgrade hooks | `[]` | | postUpgrade.labelNamespace.extraAnnotations | Extra annotations added to the post upgrade Job | `{}` | | postUpgrade.labelNamespace.image.repository | Image with kubectl to label the namespace | `openpolicyagent/gatekeeper-crds` | -| postUpgrade.labelNamespace.image.tag | Image tag | Current release version: `v3.17.0-beta.0` | +| postUpgrade.labelNamespace.image.tag | Image tag | Current release version: `v3.18.0-beta.0` | | postUpgrade.labelNamespace.image.pullPolicy | Image pullPolicy | `IfNotPresent` | | postUpgrade.labelNamespace.image.pullSecrets | Image pullSecrets | `[]` | | postUpgrade.labelNamespace.priorityClassName | Priority class name for gatekeeper-update-namespace-label-post-upgrade Job | `` | @@ -107,10 +107,10 @@ information._ | postUpgrade.resources | The resource request/limits for the container image in postUpgrade hook jobs | `{}` | | postUpgrade.securityContext | Security context applied on the container | `{ "allowPrivilegeEscalation": false, "capabilities": "drop": [all], "readOnlyRootFilesystem": true, "runAsGroup": 999, "runAsNonRoot": true, "runAsUser": 1000 }` | | preInstall.crdRepository.image.repository | Image with kubectl to update the CRDs. If not set, the `image.crdRepository` is used instead. 
| `null` | -| preInstall.crdRepository.image.tag | Image tag | Current release version: `v3.17.0-beta.0` | +| preInstall.crdRepository.image.tag | Image tag | Current release version: `v3.18.0-beta.0` | | preUninstall.deleteWebhookConfigurations.enabled | Delete webhooks before gatekeeper itself is uninstalled | `false` | | preUninstall.deleteWebhookConfigurations.image.repository | Image with kubectl to delete the webhooks | `openpolicyagent/gatekeeper-crds` | -| preUninstall.deleteWebhookConfigurations.image.tag | Image tag | Current release version: `v3.17.0-beta.0` | +| preUninstall.deleteWebhookConfigurations.image.tag | Image tag | Current release version: `v3.18.0-beta.0` | | preUninstall.deleteWebhookConfigurations.image.pullPolicy | Image pullPolicy | `IfNotPresent` | | preUninstall.deleteWebhookConfigurations.image.pullSecrets | Image pullSecrets | `[]` | | preUninstall.deleteWebhookConfigurations.extraRules | Extra rules for the gatekeeper-delete-webhook-configs Role | `[]` | @@ -175,7 +175,7 @@ information._ | logLevel | Minimum log level | `INFO` | | image.pullPolicy | The image pull policy | `IfNotPresent` | | image.repository | Image repository | `openpolicyagent/gatekeeper` | -| image.release | The image release tag to use | Current release version: `v3.17.0-beta.0` | +| image.release | The image release tag to use | Current release version: `v3.18.0-beta.0` | | image.pullSecrets | Specify an array of imagePullSecrets | `[]` | | resources | The resource request/limits for the container image | limits: 1 CPU, 512Mi, requests: 100mCPU, 256Mi | | nodeSelector | The node selector to use for pod scheduling | `kubernetes.io/os: linux` | diff --git a/manifest_staging/charts/gatekeeper/values.yaml b/manifest_staging/charts/gatekeeper/values.yaml index af50dc61420..ea81c083063 100644 --- a/manifest_staging/charts/gatekeeper/values.yaml +++ b/manifest_staging/charts/gatekeeper/values.yaml @@ -49,14 +49,14 @@ enableK8sNativeValidation: true image: repository: openpolicyagent/gatekeeper crdRepository: openpolicyagent/gatekeeper-crds - release: v3.17.0-beta.0 + release: v3.18.0-beta.0 pullPolicy: IfNotPresent pullSecrets: [] preInstall: crdRepository: image: repository: null - tag: v3.17.0-beta.0 + tag: v3.18.0-beta.0 postUpgrade: labelNamespace: serviceAccount: @@ -65,7 +65,7 @@ postUpgrade: enabled: false image: repository: openpolicyagent/gatekeeper-crds - tag: v3.17.0-beta.0 + tag: v3.18.0-beta.0 pullPolicy: IfNotPresent pullSecrets: [] extraNamespaces: [] @@ -99,7 +99,7 @@ postInstall: extraRules: [] image: repository: openpolicyagent/gatekeeper-crds - tag: v3.17.0-beta.0 + tag: v3.18.0-beta.0 pullPolicy: IfNotPresent pullSecrets: [] extraNamespaces: [] @@ -143,7 +143,7 @@ preUninstall: enabled: false image: repository: openpolicyagent/gatekeeper-crds - tag: v3.17.0-beta.0 + tag: v3.18.0-beta.0 pullPolicy: IfNotPresent pullSecrets: [] priorityClassName: "" diff --git a/manifest_staging/deploy/gatekeeper.yaml b/manifest_staging/deploy/gatekeeper.yaml index 559325e2fe2..fe375e4eb43 100644 --- a/manifest_staging/deploy/gatekeeper.yaml +++ b/manifest_staging/deploy/gatekeeper.yaml @@ -4961,7 +4961,7 @@ spec: value: manager - name: OTEL_RESOURCE_ATTRIBUTES value: k8s.pod.name=$(POD_NAME),k8s.namespace.name=$(NAMESPACE),k8s.container.name=$(CONTAINER_NAME) - image: openpolicyagent/gatekeeper:v3.17.0-beta.0 + image: openpolicyagent/gatekeeper:v3.18.0-beta.0 imagePullPolicy: Always livenessProbe: httpGet: @@ -5080,7 +5080,7 @@ spec: value: manager - name: OTEL_RESOURCE_ATTRIBUTES value: 
k8s.pod.name=$(POD_NAME),k8s.namespace.name=$(NAMESPACE),k8s.container.name=$(CONTAINER_NAME) - image: openpolicyagent/gatekeeper:v3.17.0-beta.0 + image: openpolicyagent/gatekeeper:v3.18.0-beta.0 imagePullPolicy: Always livenessProbe: httpGet: diff --git a/website/versioned_docs/version-v3.17.x/audit.md b/website/versioned_docs/version-v3.17.x/audit.md new file mode 100644 index 00000000000..f4ff7ec2f7b --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/audit.md @@ -0,0 +1,215 @@ +--- +id: audit +title: Audit +--- + +Audit performs periodic evaluations of existing resources against constraints, detecting pre-existing misconfigurations. + +## Reading Audit Results + +There are three ways to gather audit results, depending on the level of detail needed. + +### Prometheus Metrics + +Prometheus metrics provide an aggregated look at the number of audit violations: + +* `gatekeeper_audit_last_run_time` provides the start time timestamp of the most recent audit run +* `gatekeeper_audit_last_run_end_time` provides the end time timestamp of the last completed audit run +* `gatekeeper_violations` provides the total number of audited violations for the last audit run, broken down by violation severity + +### Constraint Status + +Violations of constraints are listed in the `status` field of the corresponding constraint. +Note that only violations from the most recent audit run are reported. Also note that there +is a maximum number of individual violations that will be reported on the constraint +itself. If the number of current violations is greater than this cap, the excess violations +will not be reported (though they will still be included in the `totalViolations` count). +This is because Kubernetes has a cap on how large individual API objects can grow, which makes +unbounded growth a bad idea. This limit can be configured via the `--constraint-violations-limit` flag. + +Here is an example of a constraint with violations: + +```yaml +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: K8sRequiredLabels +metadata: + name: ns-must-have-gk +spec: + match: + kinds: + - apiGroups: [""] + kinds: ["Namespace"] + parameters: + labels: ["gatekeeper"] +status: + auditTimestamp: "2019-05-11T01:46:13Z" + enforced: true + violations: + - enforcementAction: deny + group: "" + version: v1 + kind: Namespace + message: 'you must provide labels: {"gatekeeper"}' + name: default + - enforcementAction: deny + group: "" + version: v1 + kind: Namespace + message: 'you must provide labels: {"gatekeeper"}' + name: gatekeeper-system + - enforcementAction: deny + group: "" + version: v1 + kind: Namespace + message: 'you must provide labels: {"gatekeeper"}' + name: kube-public + - enforcementAction: deny + group: "" + version: v1 + kind: Namespace + message: 'you must provide labels: {"gatekeeper"}' + name: kube-system +``` + +Limitations of getting violations from constraint status: + +- To reduce in-memory consumption of Gatekeeper audit pod and to avoid hitting [default etcd limit](https://etcd.io/docs/v3.5/dev-guide/limit/#request-size-limit) of 1.5MB per resource, gatekeeper recommends configuring a [limit up-to 500 violations](https://open-policy-agent.github.io/gatekeeper/website/docs/audit/#configuring-audit)(by default 20) on constraint. Because of these limitations, users might not get all the violations from a Constraint resource. + +### Audit Logs + +#### Violations + +The audit pod emits JSON-formatted audit logs to stdout. 
The following is an example audit event: + +```json +{ + "level": "info", + "ts": 1632889070.3075402, + "logger": "controller", + "msg": "container has no resource limits", + "process": "audit", + "audit_id": "2021-09-29T04:17:47Z", + "event_type": "violation_audited", + "constraint_group": "constraints.gatekeeper.sh", + "constraint_api_version": "v1beta1", + "constraint_kind": "K8sContainerLimits", + "constraint_name": "container-must-have-limits", + "constraint_namespace": "", + "constraint_action": "deny", + "constraint_enforcement_actions": [], + "constraint_annotations": { + "test-annotation-1": "annotation_1" + }, + "resource_group": "", + "resource_api_version": "v1", + "resource_kind": "Pod", + "resource_namespace": "kube-system", + "resource_name": "kube-scheduler-kind-control-plane", + "resource_labels": { + "env": "prod", + "my-app-system": "true" + } +} +``` + +In addition to information on the violated constraint, violating resource, and violation message, the +audit log entries also contain: + +* An `audit_id` field that uniquely identifies a given audit run. This allows indexing of historical audits +* An `event_type` field with a value of `violation_audited` to make it easy to programmatically identify audit violations + +Limitations of getting violations from audit logs: + +- It can be difficult to parse audit pod logs to look for violation messages, as violation logs are mixed together with other log statements. + +#### Other Event Types + +In addition to violations, these other audit events may be useful (all uniquely identified via the `event_type` field): + +* `audit_started` marks the beginning of a new audit run +* `constraint_audited` marks when a constraint is done being audited for a given run, along with the number of violations found +* `audit_finished` marks the end of the current audit run + +All of these events (including `violation_audited`) are marked +with the same `audit_id` for a given audit run. + +### Pubsub channel + +This feature uses a publish and subscribe (pubsub) model that allows Gatekeeper to export audit violations over a broker that can be consumed by a subscriber independently. Therefore, pubsub violations are not subject to reporting limits. Please refer to [this](pubsub.md) guide to configure audit to push violations over a channel. + +Limitations/drawbacks of getting violations using the pubsub channel: + +- There is an inherent risk of messages getting dropped. You might not receive all the published violations. +- Additional dependency on the pubsub broker. + +## Running Audit +For more details on how to deploy audit and the +number of instances to run, please refer to [operations audit](operations.md#audit). + +## Configuring Audit + +- Audit violations per constraint: set `--constraint-violations-limit=123` (defaults to `20`). NOTE: This flag only applies when gathering audit results using the constraint status model. If you are gathering audit results using the pubsub model, please refer to the [pubsub](pubsub.md) guide. Both approaches for getting audit violations can be configured independently and work simultaneously without any interference. +- Audit chunk size: set `--audit-chunk-size=400` (defaults to `500`, `0` = infinite). A lower chunk size can reduce memory consumption of the auditing `Pod` but can increase the number of requests to the Kubernetes API server. +- Audit interval: set `--audit-interval=123` (defaults to every `60` seconds). Disable the audit interval by setting `--audit-interval=0` +- Audit api server cache write to disk (Gatekeeper v3.7.0+): Starting from v3.7.0, by default, audit writes the API server cache to the disk attached to the node. This reduces the memory consumption of the audit `pod`. If there are concerns with high IOPS, then switch audit to write the cache to a tmpfs ramdisk instead. NOTE: writing to a ramdisk will increase the memory footprint of the audit `pod`. + - helm install `--set audit.writeToRAMDisk=true` + - if not using helm, modify the deployment manifest to mount a ramdisk + ```yaml + - emptyDir: + medium: Memory + ``` + +By default, audit will request each resource from the Kubernetes API during each audit cycle. To rely on the audit informer cache instead, use the flag `--audit-from-cache=true`. Note that this requires replication of Kubernetes resources into the audit cache before they can be evaluated against the enforced policies. Refer to the [Replicating data](sync.md) section for more information.
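As a rough illustration of how the flags discussed in this section are typically wired up, here is a minimal, hypothetical fragment of an audit `Deployment` spec. This is a sketch only: the deployment and container names (`gatekeeper-audit`, `manager`) are assumptions based on the default manifest, the flag values are examples, and required Deployment fields are omitted.

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: gatekeeper-audit        # assumed default name of the audit Deployment
  namespace: gatekeeper-system
spec:
  template:
    spec:
      containers:
        - name: manager         # assumed container name; other default args omitted
          args:
            - --constraint-violations-limit=100
            - --audit-chunk-size=400
            - --audit-interval=120
            - --audit-from-cache=true
```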
+ +### Audit using kinds specified in the constraints only + +By default, Gatekeeper will audit all resources in the cluster. This operation can take some time depending on the number of resources. + +If all of your constraints match against specific kinds (e.g. "match only pods"), then you can speed up audit runs by setting the `--audit-match-kind-only=true` flag. This will only check resources of the kinds specified in all [constraints](howto.md#constraints) defined in the cluster. + +For example, defining this constraint will only audit the `Pod` kind: + +```yaml +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: K8sAllowedRepos +metadata: + name: prod-repo-is-openpolicyagent +spec: + match: + kinds: + - apiGroups: [""] + kinds: ["Pod"] +... +``` + +If any of the [constraints](howto.md#constraints) do not specify `kinds`, it will be equivalent to not setting the `--audit-match-kind-only` flag (`false` by default), and audit will fall back to auditing all resources in the cluster. + +### Opt-out of Audit in constraints + +By default, all constraints are opted into audit. To opt a constraint out of the audit process, you can use `enforcementAction: scoped` and define `scopedEnforcementActions` without including the audit enforcement point. + +For example, defining this constraint will opt it out of audit: + +```yaml +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: K8sAllowedRepos +metadata: + name: prod-repo-is-openpolicyagent +spec: +... + enforcementAction: scoped + scopedEnforcementActions: + - action: warn + enforcementPoints: + - name: "validation.gatekeeper.sh" + - action: deny + enforcementPoints: + - name: "gator.gatekeeper.sh" +... +``` + +Find out more about different [enforcement points](enforcement-points.md) in Gatekeeper. + +## Audit UserInfo + +When using `input.review.userInfo`, *NOTE* that the request's user information, such as `username`, `uid`, `groups`, and `extra`, cannot be populated by Kubernetes for audit reviews, and therefore constraint templates that rely on `userInfo` are not auditable. It is up to the Rego author to handle the case where `userInfo` is unset and empty in order to avoid every matching resource being reported as a violation.
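For instance, here is a minimal, hypothetical sketch of such a guard (the template, package, and user names are illustrative only). Because the rule binds `input.review.userInfo.username` before using it, the binding is undefined during audit when `userInfo` is unset, so audited resources are not all reported as violations:

```yaml
apiVersion: templates.gatekeeper.sh/v1
kind: ConstraintTemplate
metadata:
  name: k8sblockeduser
spec:
  crd:
    spec:
      names:
        kind: K8sBlockedUser
  targets:
    - target: admission.k8s.gatekeeper.sh
      rego: |
        package k8sblockeduser

        violation[{"msg": msg}] {
          # Undefined during audit (userInfo is unset), so the rule
          # simply does not fire for audited resources.
          username := input.review.userInfo.username
          username == "blocked-user@example.com"
          msg := sprintf("user %v may not modify this resource", [username])
        }
```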
diff --git a/website/versioned_docs/version-v3.17.x/cloud-specific.md b/website/versioned_docs/version-v3.17.x/cloud-specific.md new file mode 100644 index 00000000000..d810997034a --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/cloud-specific.md @@ -0,0 +1,49 @@ +--- +id: vendor-specific +title: Cloud and Vendor Specific Fixes +--- + +## Running on private GKE Cluster nodes + +By default, firewall rules restrict the cluster master communication to nodes only on ports 443 (HTTPS) and 10250 (kubelet). Although Gatekeeper exposes its service on port 443, GKE by default enables `--enable-aggregator-routing` option, which makes the master to bypass the service and communicate straight to the POD on port 8443. + +Two ways of working around this: + +- create a new firewall rule from master to private nodes to open port `8443` (or any other custom port) + - https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#add_firewall_rules +- make the pod to run on privileged port 443 (need to run pod as root, or have `NET_BIND_SERVICE` capability) + - update Gatekeeper deployment manifest spec: + - add `NET_BIND_SERVICE` to `securityContext.capabilities.add` to allow binding on privileged ports as non-root + - update port from `8443` to `443` + ```yaml + containers: + - args: + - --port=443 + ports: + - containerPort: 443 + name: webhook-server + protocol: TCP + securityContext: + capabilities: + drop: ["all"] + add: ["NET_BIND_SERVICE"] + ``` + +## Running on OpenShift 4.x + +When running on OpenShift, the `anyuid` scc must be used to keep a restricted profile but being able to set the UserID. + +In order to use it, the following section must be added to the gatekeeper-manager-role Role: + +```yaml +- apiGroups: + - security.openshift.io + resourceNames: + - anyuid + resources: + - securitycontextconstraints + verbs: + - use +``` + +With this restricted profile, it won't be possible to set the `container.seccomp.security.alpha.kubernetes.io/manager: runtime/default` annotation. On the other hand, given the limited amount of privileges provided by the anyuid scc, the annotation can be removed. diff --git a/website/versioned_docs/version-v3.17.x/constrainttemplates.md b/website/versioned_docs/version-v3.17.x/constrainttemplates.md new file mode 100644 index 00000000000..0da80393053 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/constrainttemplates.md @@ -0,0 +1,160 @@ +--- +id: constrainttemplates +title: Constraint Templates +--- + +ConstraintTemplates define a way to validate some set of Kubernetes objects in Gatekeeper's Kubernetes [admission controller](https://kubernetes.io/blog/2019/03/21/a-guide-to-kubernetes-admission-controllers/). They are made of two main elements: + +1. [Rego](https://www.openpolicyagent.org/docs/latest/#rego) code that defines a policy violation +2. The schema of the accompanying `Constraint` object, which represents an instantiation of a `ConstraintTemplate` + + +## `v1` Constraint Template + +In release version 3.6.0, Gatekeeper included the `v1` version of `ConstraintTemplate`. Unlike past versions of `ConstraintTemplate`, `v1` requires the Constraint schema section to be [structural](https://kubernetes.io/blog/2019/06/20/crd-structural-schema/). + +Structural schemas have a variety of [requirements](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#specifying-a-structural-schema). One such requirement is that the `type` field be defined for each level of the schema. 
+ +For example, users of Gatekeeper may recognize the `k8srequiredlabels` ConstraintTemplate, defined here in version `v1beta1`: + +```yaml +apiVersion: templates.gatekeeper.sh/v1beta1 +kind: ConstraintTemplate +metadata: + name: k8srequiredlabels +spec: + crd: + spec: + names: + kind: K8sRequiredLabels + validation: + # Schema for the `parameters` field + openAPIV3Schema: + properties: + labels: + type: array + items: + type: string + targets: + - target: admission.k8s.gatekeeper.sh + rego: | + package k8srequiredlabels + + violation[{"msg": msg, "details": {"missing_labels": missing}}] { + provided := {label | input.review.object.metadata.labels[label]} + required := {label | label := input.parameters.labels[_]} + missing := required - provided + count(missing) > 0 + msg := sprintf("you must provide labels: %v", [missing]) + } +``` + +The `parameters` field schema (`spec.crd.spec.validation.openAPIV3Schema`) is _not_ structural. Notably, it is missing the `type:` declaration: + +```yaml +openAPIV3Schema: + # missing type + properties: + labels: + type: array + items: + type: string +``` + +This schema is _invalid_ by default in a `v1` ConstraintTemplate. Adding the `type` information makes the schema valid: + +```yaml +openAPIV3Schema: + type: object + properties: + labels: + type: array + items: + type: string +``` + +For more information on valid types in JSONSchemas, see the [JSONSchema documentation](https://json-schema.org/understanding-json-schema/reference/type.html). + +## Why implement this change? + +Structural schemas are required in version `v1` of `CustomResourceDefinition` resources, which underlie ConstraintTemplates. Requiring the same in ConstraintTemplates puts Gatekeeper in line with the overall direction of Kubernetes. + +Beyond this alignment, structural schemas yield significant usability improvements. The schema of a ConstraintTemplate's associated Constraint is both more visible and type validated. + +As the data types of Constraint fields are defined in the ConstraintTemplate, the API server will reject a Constraint with an incorrect `parameters` field. Previously, the API server would ingest it and simply not pass those `parameters` to Gatekeeper. This experience was confusing for users, and is noticeably improved by structural schemas. + +For example, see this incorrectly defined `k8srequiredlabels` Constraint: + +```yaml +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: K8sRequiredLabels +metadata: + name: ns-must-have-gk +spec: + match: + kinds: + - apiGroups: [""] + kinds: ["Namespace"] + parameters: + # Note that "labels" is now contained in an array item, rather than an object key under "parameters" + - labels: ["gatekeeper"] +``` + +In a `v1beta1` ConstraintTemplate, this Constraint would be ingested successfully. However, it would not work. The creation of a new namespace, `foobar`, would succeed, even in the absence of the `gatekeeper` label: + +```shell +$ kubectl create ns foobar +namespace/foobar created +``` + +This is incorrect. We'd expect this to fail: + +```shell +$ kubectl create ns foobar +Error from server ([ns-must-have-gk] you must provide labels: {"gatekeeper"}): admission webhook "validation.gatekeeper.sh" denied the request: [ns-must-have-gk] you must provide labels: {"gatekeeper"} +``` + +The structural schema requirement _prevents this mistake_. The aforementioned `type: object` declaration would prevent the API server from accepting the incorrect `k8srequiredlabels` Constraint. 
+ +```shell +# Apply the Constraint with incorrect parameters schema +$ cat << EOF | kubectl apply -f - +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: K8sRequiredLabels +metadata: + name: ns-must-have-gk +spec: + match: + kinds: + - apiGroups: [""] + kinds: ["Namespace"] + parameters: + # Note that "labels" is now an array item, rather than an object + - labels: ["gatekeeper"] +EOF +The K8sRequiredLabels "ns-must-have-gk" is invalid: spec.parameters: Invalid value: "array": spec.parameters in body must be of type object: "array" +``` + +Fixing the incorrect `parameters` section would then yield a successful ingestion and a working Constraint. + +```shell +$ cat << EOF | kubectl apply -f - +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: K8sRequiredLabels +metadata: + name: ns-must-have-gk +spec: + match: + kinds: + - apiGroups: [""] + kinds: ["Namespace"] + parameters: + labels: ["gatekeeper"] +EOF +k8srequiredlabels.constraints.gatekeeper.sh/ns-must-have-gk created +``` + +```shell +$ kubectl create ns foobar +Error from server ([ns-must-have-gk] you must provide labels: {"gatekeeper"}): admission webhook "validation.gatekeeper.sh" denied the request: [ns-must-have-gk] you must provide labels: {"gatekeeper"} +``` diff --git a/website/versioned_docs/version-v3.17.x/customize-admission.md b/website/versioned_docs/version-v3.17.x/customize-admission.md new file mode 100644 index 00000000000..9476559c9d3 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/customize-admission.md @@ -0,0 +1,78 @@ +--- +id: customize-admission +title: Customizing Admission Behavior +--- + +Gatekeeper is a [Kubernetes admission webhook](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#webhook-configuration) +whose default configuration can be found in the `gatekeeper.yaml` manifest file. By default, it is +a `ValidatingWebhookConfiguration` resource named `gatekeeper-validating-webhook-configuration`. + +Currently the configuration specifies two webhooks: one for checking a request against +the installed constraints and a second webhook for checking labels on namespace requests +that would result in bypassing constraints for the namespace. The namespace-label webhook +is necessary to prevent a privilege escalation where the permission to add a label to a +namespace is equivalent to the ability to bypass all constraints for that namespace. +You can read more about the ability to exempt namespaces by label [here](exempt-namespaces.md#exempting-namespaces-from-the-gatekeeper-admission-webhook-using---exempt-namespace-flag). + +Because Kubernetes adds features with each version, if you want to know how the webhook can be configured it +is best to look at the official documentation linked at the top of this section. However, two particularly important +configuration options deserve special mention: [timeouts](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#timeouts) and +[failure policy](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy). + +Timeouts allow you to configure how long the API server will wait for a response from the admission webhook before it +considers the request to have failed. Note that setting the timeout longer than the overall request timeout +means that the main request will time out before the webhook's failure policy is invoked, causing the +request to fail. + +Failure policy controls what happens when a webhook fails for whatever reason. 
Common +failure scenarios include timeouts, a 5xx error from the server, or the webhook being unavailable. +You have the option to ignore errors (allowing the request through) or to fail (rejecting the request). +This results in a direct tradeoff between availability and enforcement. + +Currently, Gatekeeper defaults to `Ignore` for the constraint requests, which means +constraints will not be enforced at admission time if the webhook is down or otherwise inaccessible. +This is because we cannot know the operational details of the cluster Gatekeeper is running on and +how that might affect webhook uptime. For a more detailed treatment of this topic, see our docs +on [failing closed](failing-closed.md). + +The namespace label webhook defaults to `Fail`; this helps ensure that policies preventing +labels that bypass the webhook from being applied are enforced. Because this webhook only gets +called for namespace modification requests, the impact of downtime is mitigated, making the +theoretical maximum availability less of an issue. + +Because the manifest is available for customization, the webhook configuration can +be tuned to meet your specific needs if they differ from the defaults.
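For example, here is a trimmed, illustrative fragment of the webhook configuration showing the two knobs discussed above. It is a sketch, not a complete manifest: the real webhook entry also carries `clientConfig`, `rules`, and other required fields, and the timeout value here is only an example.

```yaml
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  name: gatekeeper-validating-webhook-configuration
webhooks:
  - name: validation.gatekeeper.sh
    # Fail open for constraint checks (the default described above)
    failurePolicy: Ignore
    # Keep this below the API server's overall request timeout
    timeoutSeconds: 3
    # clientConfig, rules, sideEffects, admissionReviewVersions, ... omitted
```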
+ +## Enable Validation of Delete Operations + +### Caveats + +#### Deletes are not Auditable + +Once a resource is deleted, it is gone. This means that non-compliant deletes cannot be +audited via Gatekeeper's audit mechanism, and increases the importance of webhook-based +enforcement. + +#### Policies Against DELETE May Not be Perfectly Enforced + +Since the webhook fails open by default (as described earlier on this page), it is possible +for admission requests to have imperfect enforcement, which means some non-compliant deletes +may still go through despite the policy. Normally such failures of webhook enforcement could +be caught by audit, but deletes are not auditable. + +It is possible to improve the likelihood of enforcement by configuring the webhook to +[fail closed](failing-closed.md). + +### How to Enable Validation of Delete Operations + +To enable Delete operations for the `validation.gatekeeper.sh` admission webhook, add "DELETE" to the list of operations in the `gatekeeper-validating-webhook-configuration` ValidatingWebhookConfiguration, as seen [here](https://github.com/open-policy-agent/gatekeeper/blob/v3.1.0-beta.10/deploy/gatekeeper.yaml#L792-L794) in the Gatekeeper deployment manifest. + + The resulting configuration should include + ```YAML + operations: + - CREATE + - UPDATE + - DELETE +``` + +You can now validate delete operations. diff --git a/website/versioned_docs/version-v3.17.x/customize-startup.md b/website/versioned_docs/version-v3.17.x/customize-startup.md new file mode 100644 index 00000000000..abf2b93b459 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/customize-startup.md @@ -0,0 +1,68 @@ +--- +id: customize-startup +title: Customizing Startup Behavior +--- + +## Allow retries when adding objects to OPA + +Gatekeeper's webhook servers undergo a bootstrapping period during which they are unavailable until the initial set of resources (constraints, templates, synced objects, etc.) have been ingested. This prevents Gatekeeper's webhook from validating based on an incomplete set of policies. This wait-for-bootstrapping behavior can be configured. + +The `--readiness-retries` flag defines the number of retry attempts allowed for an object (a Constraint, for example) to be successfully added to OPA. The default is `0`. A value of `-1` allows for infinite retries, blocking the webhook until all objects have been added to OPA. This guarantees complete enforcement, but has the potential to indefinitely block the webhook from serving requests. + +## Enable profiling using `pprof` + +The `--enable-pprof` flag enables an HTTP server for profiling using the [pprof](https://pkg.go.dev/net/http/pprof) library. By default, it serves on `localhost:6060`, but the port can be customized with the `--pprof-port` flag. + +## Disable certificate generation and rotation for Gatekeeper's webhook + +By default, Gatekeeper uses [`open-policy-agent/cert-controller`](https://github.com/open-policy-agent/cert-controller) to handle the webhook's certificate rotation and generation. If you want to use a third-party solution, you may disable the cert-controller feature using `--disable-cert-rotation`. + +## Disable OPA built-in functions + +The `--disable-opa-builtin` flag disables specific [OPA built-in functions](https://www.openpolicyagent.org/docs/v0.37.2/policy-reference/#built-in-functions). Starting with v3.8.0, Gatekeeper disables the `http.send` built-in function by default. For more information, please see [external data](./externaldata.md#motivation). + +## [Alpha] Emit admission and audit events + +The `--emit-admission-events` flag enables the emission of all admission violations as Kubernetes events. This flag is in the alpha stage and is set to `false` by default. + +The `--emit-audit-events` flag enables the emission of all audit violations as Kubernetes events. This flag is in the alpha stage and is set to `false` by default. + +The `--admission-events-involved-namespace` flag controls which namespace admission events will be created in. When set to `true`, admission events will be created in the namespace of the object violating the constraint. If the object has no namespace (i.e. cluster-scoped resources), they will be created in the namespace Gatekeeper is installed in. Setting to `false` will cause all admission events to be created in the Gatekeeper namespace. + +The `--audit-events-involved-namespace` flag controls which namespace audit events will be created in. When set to `true`, audit events will be created in the namespace of the object violating the constraint. If the object has no namespace (i.e. cluster-scoped resources), they will be created in the namespace Gatekeeper is installed in. Setting to `false` will cause all audit events to be created in the Gatekeeper namespace. + +There are four types of events that are emitted by Gatekeeper when the emit event flags are enabled: + +| Event | Description | +| ------------------ | ----------------------------------------------------------------------- | +| `FailedAdmission` | The Gatekeeper webhook denied the admission request (default behavior). | +| `WarningAdmission` | When `enforcementAction: warn` is specified in the constraint. | +| `DryrunViolation` | When `enforcementAction: dryrun` is specified in the constraint. | +| `AuditViolation` | A violation is detected during an audit. | + +> ❗ Warning: if the same constraint and violating resource tuple is emitted [more than 10 times in a 10-minute rolling interval](https://github.com/kubernetes/kubernetes/blob/v1.23.3/staging/src/k8s.io/client-go/tools/record/events_cache.go#L429-L438), the Kubernetes event recorder will aggregate the events, e.g.
+> ``` +> 39s Warning FailedAdmission namespace/test (combined from similar events): Admission webhook "validation.gatekeeper.sh" denied request, Resource Namespace: , Constraint: ns-must-have-gk, Message: you must provide labels: {"gatekeeper"} +> ``` +> Gatekeeper might burst 25 events about an object, but limit the refill rate to 1 new event every 5 minutes. This will help control the long-tail of events for resources that are always violating the constraint. + +## [Beta] Enable mutation logging and annotations + +The `--log-mutations` flag enables logging of mutation events and errors. + +The `--mutation-annotations` flag adds the following two annotations to mutated objects: + +| Annotation | Value | +| --------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `gatekeeper.sh/mutation-id` | The UUID of the mutation. | +| `gatekeeper.sh/mutations` | A list of comma-separated mutations in the format of `//:` that are applied to the object. | + +> ❗ Note that this will break the idempotence requirement that Kubernetes sets for mutation webhooks. See the [Kubernetes docs here](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#idempotence) for more details + +## Other Configuration Options + +For the complete list of configuration flags for your specific version of Gatekeeper, run the Gatekeeper binary with the `--help` flag. For example: + +`docker run openpolicyagent/gatekeeper:v3.10.0-beta.0 --help` + +To ensure you are seeing all relevant flags, be sure the image tag (`:3.10.0-beta.0` above) corresponds with the version of Gatekeeper you are running. diff --git a/website/versioned_docs/version-v3.17.x/debug.md b/website/versioned_docs/version-v3.17.x/debug.md new file mode 100644 index 00000000000..abc72c38e94 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/debug.md @@ -0,0 +1,103 @@ +--- +id: debug +title: Debugging +--- + +> NOTE: Verbose logging with DEBUG level can be turned on with `--log-level=DEBUG`. By default, the `--log-level` flag is set to minimum log level `INFO`. Acceptable values for minimum log level are [`DEBUG`, `INFO`, `WARNING`, `ERROR`]. In production, this flag should not be set to `DEBUG`. + +## Viewing the Request Object + +A simple way to view the request object is to use a constraint/template that +denies all requests and outputs the request object as its rejection message. + +Example template: + +```yaml +apiVersion: templates.gatekeeper.sh/v1 +kind: ConstraintTemplate +metadata: + name: k8sdenyall +spec: + crd: + spec: + names: + kind: K8sDenyAll + targets: + - target: admission.k8s.gatekeeper.sh + rego: | + package k8sdenyall + + violation[{"msg": msg}] { + msg := sprintf("REVIEW OBJECT: %v", [input.review]) + } +``` + +Example constraint: + +```yaml +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: K8sDenyAll +metadata: + name: deny-all-namespaces +spec: + match: + kinds: + - apiGroups: [""] + kinds: ["Namespace"] +``` + +## Tracing + +In debugging decisions and constraints, a few pieces of information can be helpful: + + * Cached data and existing rules at the time of the request + * A trace of the evaluation + * The input document being evaluated + +Writing out all the information above for every request would be expensive in terms of memory and load on the Gatekeeper server, which may lead to requests timing out. 
It would also be hard +to find the relevant logs for a given request. + +For tracing, Gatekeeper **requires** operators to specify +resources and requesting users for which traces will be logged. They can do so by +configuring the `Config` resource, which lives in the `gatekeeper-system` namespace. + +Below is an example of a config resource: + +```yaml +apiVersion: config.gatekeeper.sh/v1alpha1 +kind: Config +metadata: + name: config + namespace: "gatekeeper-system" +spec: + # Data to be replicated into OPA + sync: + syncOnly: + - group: "" + version: "v1" + kind: "Namespace" + validation: + # Requests for which we want to run traces + traces: + # The requesting user for which traces will be run + # This field is required. + # To trace multiple users, feel free to pass in a list. + # To trace controllers, use the service accounts of those controllers. + - user: "user_to_trace@company.com" + kind: + # The group, version, kind for which we want to run a trace + group: "" + version: "v1" + kind: "Namespace" + # If dump is defined and set to `All`, also dump the state of OPA + dump: "All" +``` + +Traces will be written to the stdout logs of the Gatekeeper controller. + + +If there is an error in the Rego in the ConstraintTemplate, there are cases where it is still created via `kubectl apply -f [CONSTRAINT_TEMPLATE_FILENAME].yaml`. + +When applying the constraint using `kubectl apply -f constraint.yaml` with a ConstraintTemplate that contains incorrect Rego, and error will occur: `error: unable to recognize "[CONSTRAINT_FILENAME].yaml": no matches for kind "[NAME_OF_CONSTRAINT]" in version "constraints.gatekeeper.sh/v1beta1"`. + +To find the error, run `kubectl get -f [CONSTRAINT_TEMPLATE_FILENAME].yaml -o yaml`. Build errors are shown in the `status` field. \ No newline at end of file diff --git a/website/versioned_docs/version-v3.17.x/developers.md b/website/versioned_docs/version-v3.17.x/developers.md new file mode 100644 index 00000000000..442cf4bfdb9 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/developers.md @@ -0,0 +1,160 @@ +--- +id: developers +title: Developers +--- + +This section describes how Gatekeeper developers can leverage [kind](https://kind.sigs.k8s.io/) and [Tilt](https://tilt.dev/) for rapid iterative development. Kind allows developers to quickly provision a conformant Kubernetes cluster using Docker and Tilt enables smart rebuilds and live updates of your Kubernetes workload during development time. + +## Prerequisites + +1. [kind](https://kind.sigs.k8s.io/#installation-and-usage) v0.11.0 or newer +2. [Tilt](https://docs.tilt.dev/install.html) v0.25.0 or newer + +## Getting started + +### Create a kind cluster with a local registry + +Kind cluster with a local registry will enable faster image pushing and pulling: + +```bash +./third_party/github.com/tilt-dev/kind-local/kind-with-registry.sh +``` + +> If you would like to customize the local registry port on your machine (the default port is `5000`), you can run `export KIND_REGISTRY_PORT=` to customize it. + +### Create `tilt-settings.json` + +`tilt-settings.json` contains various settings that developers can customize when deploying gatekeeper to a local kind cluster. 
Developers can create the JSON file under the project root directory: + +```json +{ + "helm_values": { + "controllerManager.metricsPort": 8080, + "enableExternalData": true + }, + "trigger_mode": "manual" +} +``` + +#### `tilt-settings.json` fields + +- `helm_values` (Map, default=`{}`): A map of helm values to be injected when deploying `manifest_staging/charts/gatekeeper` to the kind cluster. + +- `trigger_mode` (String, default=`"auto"`): Optional setting to configure if tilt should automatically rebuild on changes. Set to `manual` to disable auto-rebuilding and require users to trigger rebuilds of individual changed components through the UI. + +### Run `make tilt` + +```bash +make tilt +``` + +
+Output + +``` +make tilt +docker build . \ + -t gatekeeper-tooling \ + -f build/tooling/Dockerfile +[+] Building 1.5s (10/10) FINISHED + => [internal] load build definition from Dockerfile 0.2s + => => transferring dockerfile: 35B 0.1s + => [internal] load .dockerignore 0.2s + => => transferring context: 34B 0.0s + => [internal] load metadata for docker.io/library/golang:1.17 1.0s + => [auth] library/golang:pull token for registry-1.docker.io 0.0s + => [1/5] FROM docker.io/library/golang:1.17@sha256:bd9823cdad5700fb4abe983854488749421d5b4fc84154c30dae474100468b85 0.0s + => CACHED [2/5] RUN GO111MODULE=on go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.8.0 0.0s + => CACHED [3/5] RUN GO111MODULE=on go install k8s.io/code-generator/cmd/conversion-gen@release-1.22 0.0s + => CACHED [4/5] RUN mkdir /gatekeeper 0.0s + => CACHED [5/5] WORKDIR /gatekeeper 0.0s + => exporting to image 0.2s + => => exporting layers 0.0s + => => writing image sha256:7d2fecb230986ffdd78932ad8ff13aa0968c9a9a98bec2fe8ecb21c6e683c730 0.0s + => => naming to docker.io/library/gatekeeper-tooling 0.0s +docker run -v /workspaces/gatekeeper:/gatekeeper gatekeeper-tooling controller-gen object:headerFile=./hack/boilerplate.go.txt paths="./apis/..." paths="./pkg/..." +docker run -v /workspaces/gatekeeper:/gatekeeper gatekeeper-tooling conversion-gen \ + --output-base=/gatekeeper \ + --input-dirs=./apis/mutations/v1beta1,./apis/mutations/v1alpha1 \ + --go-header-file=./hack/boilerplate.go.txt \ + --output-file-base=zz_generated.conversion +docker run -v /workspaces/gatekeeper:/gatekeeper gatekeeper-tooling controller-gen \ + crd \ + rbac:roleName=manager-role \ + webhook \ + paths="./apis/..." \ + paths="./pkg/..." \ + output:crd:artifacts:config=config/crd/bases +rm -rf manifest_staging +mkdir -p manifest_staging/deploy/experimental +mkdir -p manifest_staging/charts/gatekeeper +docker run --rm -v /workspaces/gatekeeper:/gatekeeper \ + registry.k8s.io/kustomize/kustomize:v3.8.9 build \ + /gatekeeper/config/default -o /gatekeeper/manifest_staging/deploy/gatekeeper.yaml +docker run --rm -v /workspaces/gatekeeper:/gatekeeper \ + registry.k8s.io/kustomize/kustomize:v3.8.9 build \ + --load_restrictor LoadRestrictionsNone /gatekeeper/cmd/build/helmify | go run cmd/build/helmify/*.go +Writing manifest_staging/charts/gatekeeper/.helmignore +Writing manifest_staging/charts/gatekeeper/Chart.yaml +Writing manifest_staging/charts/gatekeeper/README.md +Making manifest_staging/charts/gatekeeper/templates +Writing manifest_staging/charts/gatekeeper/templates/_helpers.tpl +Writing manifest_staging/charts/gatekeeper/templates/namespace-post-install.yaml +Writing manifest_staging/charts/gatekeeper/templates/upgrade-crds-hook.yaml +Writing manifest_staging/charts/gatekeeper/templates/webhook-configs-pre-delete.yaml +Writing manifest_staging/charts/gatekeeper/values.yaml +Writing manifest_staging/charts/gatekeeper/templates/gatekeeper-webhook-server-cert-secret.yaml +Writing manifest_staging/charts/gatekeeper/templates/gatekeeper-audit-deployment.yaml +Writing manifest_staging/charts/gatekeeper/templates/gatekeeper-controller-manager-deployment.yaml +Writing manifest_staging/charts/gatekeeper/templates/gatekeeper-validating-webhook-configuration-validatingwebhookconfiguration.yaml +Writing manifest_staging/charts/gatekeeper/templates/gatekeeper-controller-manager-poddisruptionbudget.yaml +Writing manifest_staging/charts/gatekeeper/templates/gatekeeper-admin-serviceaccount.yaml +Writing 
manifest_staging/charts/gatekeeper/templates/gatekeeper-admin-podsecuritypolicy.yaml +Writing manifest_staging/charts/gatekeeper/templates/gatekeeper-webhook-service-service.yaml +Writing manifest_staging/charts/gatekeeper/templates/gatekeeper-manager-role-clusterrole.yaml +Writing manifest_staging/charts/gatekeeper/templates/gatekeeper-manager-rolebinding-rolebinding.yaml +Writing manifest_staging/charts/gatekeeper/templates/gatekeeper-manager-rolebinding-clusterrolebinding.yaml +Writing manifest_staging/charts/gatekeeper/templates/gatekeeper-mutating-webhook-configuration-mutatingwebhookconfiguration.yaml +Writing manifest_staging/charts/gatekeeper/templates/gatekeeper-critical-pods-resourcequota.yaml +Making manifest_staging/charts/gatekeeper/crds +Writing manifest_staging/charts/gatekeeper/crds/assign-customresourcedefinition.yaml +Writing manifest_staging/charts/gatekeeper/crds/assignmetadata-customresourcedefinition.yaml +Writing manifest_staging/charts/gatekeeper/crds/config-customresourcedefinition.yaml +Writing manifest_staging/charts/gatekeeper/crds/constraintpodstatus-customresourcedefinition.yaml +Writing manifest_staging/charts/gatekeeper/crds/constrainttemplatepodstatus-customresourcedefinition.yaml +Writing manifest_staging/charts/gatekeeper/crds/constrainttemplate-customresourcedefinition.yaml +Writing manifest_staging/charts/gatekeeper/crds/modifyset-customresourcedefinition.yaml +Writing manifest_staging/charts/gatekeeper/crds/mutatorpodstatus-customresourcedefinition.yaml +Writing manifest_staging/charts/gatekeeper/crds/provider-customresourcedefinition.yaml +Writing manifest_staging/charts/gatekeeper/templates/gatekeeper-manager-role-role.yaml +mkdir -p .tiltbuild/charts +rm -rf .tiltbuild/charts/gatekeeper +cp -R manifest_staging/charts/gatekeeper .tiltbuild/charts +# disable some configs from the security context so we can perform live update +sed -i "/readOnlyRootFilesystem: true/d" .tiltbuild/charts/gatekeeper/templates/*.yaml +sed -i -e "/run.*: .*/d" .tiltbuild/charts/gatekeeper/templates/*.yaml +tilt up +Tilt started on http://localhost:10350/ +v0.25.2, built 2022-02-25 + +(space) to open the browser +(s) to stream logs (--stream=true) +(t) to open legacy terminal mode (--legacy=true) +(ctrl-c) to exit +``` + +
+ +### Start developing! + +If you have trigger mode set to `auto`, any changes in the source code will trigger a rebuild of the gatekeeper manager binary. The build will subsequently trigger a rebuild of the gatekeeper manager image, load it to your kind cluster, and restart the deployment. + +If you have trigger mode set to `manual`, you can trigger a manager build manually in the local Tilt UI portal. By default, it is located at `http://localhost:10350/` + +### Tear down the kind cluster + +To tear down the kind cluster and its local registry: + +```bash +./third_party/github.com/tilt-dev/kind-local/teardown-kind-with-registry.sh +``` diff --git a/website/versioned_docs/version-v3.17.x/emergency.md b/website/versioned_docs/version-v3.17.x/emergency.md new file mode 100644 index 00000000000..ae703e044aa --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/emergency.md @@ -0,0 +1,12 @@ +--- +id: emergency +title: Emergency Recovery +--- + +If a situation arises where Gatekeeper is preventing the cluster from operating correctly, +the webhook can be disabled. This will remove all Gatekeeper admission checks. Assuming +the default webhook name has been used this can be achieved by running: + +`kubectl delete validatingwebhookconfigurations.admissionregistration.k8s.io gatekeeper-validating-webhook-configuration` + +Redeploying the webhook configuration will re-enable Gatekeeper. \ No newline at end of file diff --git a/website/versioned_docs/version-v3.17.x/enforcement-points.md b/website/versioned_docs/version-v3.17.x/enforcement-points.md new file mode 100644 index 00000000000..0b7f226b83e --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/enforcement-points.md @@ -0,0 +1,106 @@ +--- +id: enforcement-points +title: Enforcement points in Gatekeeper +--- + +## Understanding Enforcement Points + +An enforcement point defines the location where enforcement happens. Below are the different enforcement points available in Gatekeeper: + +- `validation.gatekeeper.sh` indicates that enforcement should be carried out by Gatekeeper's validating webhook for a constraint. Supports templates with CEL and Rego. +- `gator.gatekeeper.sh` indicates that enforcement should be carried out in shift-left via [gator-cli](gator.md) for a constraint. Supports templates with CEL and Rego. +- `audit.gatekeeper.sh` indicates that on-cluster resources should be audited and violations should be reported for the resources that are in violation of constraint. Supports templates with CEL and Rego. +- `vap.k8s.io` indicates that enforcement should be carried out by Validating Admission Policy for a constraint. Supports templates with CEL. + +### How to use different enforcement points in constraint + +By default, a constraint will be enforced at all enforcement points with common enforcement action defined in `spec.enforcementAction`. However, you can choose to enforce a constraint at specific enforcement points with different actions using `enforcementAction: scoped` and `spec.scopedEnforcementActions`. Below are examples and use cases that utilize different enforcement actions for different enforcement points. + +:::note +`spec.enforcementAction: scoped` is needed to customize specific enforcement point/enforcement action behavior. If `spec.enforcementAction: scoped` is not provided, `spec.scopedEnforcementActions` is ignored and the provided `enforcementAction` will be applied across all enforcement points. 
+::: + +###### Deny in shift-left and warn at admission + +You are trying out a new constraint template: you want to deny violating resources in shift-left testing, but you do not want to block resources being admitted to clusters, in order to reduce the impact of faulty rejections. You may want to use the `deny` action for the `gator.gatekeeper.sh` shift-left enforcement point and `warn` for the `validation.gatekeeper.sh` admission webhook enforcement point. The below constraint satisfies this use case. + +```yaml +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: K8sAllowedRepos +metadata: + name: prod-repo-is-openpolicyagent +spec: +... + enforcementAction: scoped + scopedEnforcementActions: + - action: warn + enforcementPoints: + - name: "validation.gatekeeper.sh" + - action: deny + enforcementPoints: + - name: "gator.gatekeeper.sh" +... +``` + +> **Note**: The audit enforcement point is not included unless explicitly added to scopedEnforcementActions.enforcementPoints or if scopedEnforcementActions.enforcementPoints is set to "*". + +###### Only audit + +You are depending on external-data or referential policies for validating resources. These types of validation may be latency sensitive and may take longer to evaluate. To avoid delays at admission time, you may want to use only the `audit.gatekeeper.sh` enforcement point, while still getting information about violating resources from Gatekeeper's audit operation. Here is a constraint that uses only the `audit.gatekeeper.sh` enforcement point. + +```yaml +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: K8sAllowedRepos +metadata: + name: prod-repo-is-openpolicyagent +spec: +... + enforcementAction: scoped + scopedEnforcementActions: + - action: deny + enforcementPoints: + - name: "audit.gatekeeper.sh" +... +``` + +###### Enforcing through Validating Admission Policy and using Gatekeeper as fall-back validation mechanism + +You want to utilize the in-tree Validating Admission Policy for faster turnaround time, but you want to make sure that if the Validating Admission Policy fails open, Gatekeeper still blocks faulty resources from being created. Here is how you can achieve that. + +```yaml +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: K8sAllowedRepos +metadata: + name: prod-repo-is-openpolicyagent +spec: +... + enforcementAction: scoped + scopedEnforcementActions: + - action: deny + enforcementPoints: + - name: "vap.k8s.io" + - name: "validation.gatekeeper.sh" +... +``` + +Please refer to [VAP/VAPB generation behavior](validating-admission-policy.md#policy-updates-to-generate-validating-admission-policy-resources). + +###### Enforcing through Validating Admission Policy and using audit from Gatekeeper + +You want to utilize the in-tree Validating Admission Policy for faster turnaround time and only want to use Gatekeeper's audit operation to get information about violating resources on the cluster. Here is a constraint that uses the `vap.k8s.io` and `audit.gatekeeper.sh` enforcement points. + +```yaml +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: K8sAllowedRepos +metadata: + name: prod-repo-is-openpolicyagent +spec: +... + enforcementAction: scoped + scopedEnforcementActions: + - action: deny + enforcementPoints: + - name: "vap.k8s.io" + - name: "audit.gatekeeper.sh" +...
+``` diff --git a/website/versioned_docs/version-v3.17.x/examples.md b/website/versioned_docs/version-v3.17.x/examples.md new file mode 100644 index 00000000000..24f05dd6638 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/examples.md @@ -0,0 +1,8 @@ +--- +id: examples +title: Examples +--- + +The [demo/basic](https://github.com/open-policy-agent/gatekeeper/tree/master/demo/basic) directory contains the above examples of simple constraints, templates and configs to play with. + +The [demo/agilebank](https://github.com/open-policy-agent/gatekeeper/tree/master/demo/agilebank) directory contains more complex examples based on a slightly more realistic scenario. Both folders have a handy demo script to step you through the demos. \ No newline at end of file diff --git a/website/versioned_docs/version-v3.17.x/exempt-namespaces.md b/website/versioned_docs/version-v3.17.x/exempt-namespaces.md new file mode 100644 index 00000000000..5273c9cbfc9 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/exempt-namespaces.md @@ -0,0 +1,80 @@ +--- +id: exempt-namespaces +title: Exempting Namespaces +--- + +`Feature State`: The `Config` resource is currently alpha. + +## Exempting Namespaces from Gatekeeper using config resource + +> The "Config" resource must be named `config` for it to be reconciled by Gatekeeper. Gatekeeper will ignore the resource if you do not name it `config`. + +The config resource can be used as follows to exclude namespaces from certain processes for all constraints in the cluster. An asterisk can be used for wildcard matching (e.g. `kube-*`). To exclude namespaces at a constraint level, use `excludedNamespaces` in the [constraint](howto.md#constraints) instead. + +```yaml +apiVersion: config.gatekeeper.sh/v1alpha1 +kind: Config +metadata: + name: config + namespace: "gatekeeper-system" +spec: + match: + - excludedNamespaces: ["kube-*", "my-namespace"] + processes: ["*"] + - excludedNamespaces: ["audit-excluded-ns"] + processes: ["audit"] + - excludedNamespaces: ["audit-webhook-sync-excluded-ns"] + processes: ["audit", "webhook", "sync"] + - excludedNamespaces: ["mutation-excluded-ns"] + processes: ["mutation-webhook"] +... +``` + +Available processes: + +- `audit` process exclusion will exclude resources from specified namespace(s) in audit results. +- `webhook` process exclusion will exclude resources from specified namespace(s) from the admission webhook. +- `sync` process exclusion will exclude resources from specified namespace(s) from being synced into OPA. +- `mutation-webhook` process exclusion will exclude resources from specified namespace(s) from the mutation webhook. +- `*` includes all current processes above and includes any future processes. + +## Exempting Namespaces from the Gatekeeper Admission Webhook using `--exempt-namespace` flag + +Note that the following only exempts resources from the admission webhook. They will still be audited. Editing individual constraints or [config resource](#exempting-namespaces-from-gatekeeper-using-config-resource) is +necessary to exclude them from audit. + +If it becomes necessary to exempt a namespace from Gatekeeper webhook entirely (e.g. you want `kube-system` to bypass admission checks), here's how to do it: + + 1. 
Make sure the validating admission webhook configuration for Gatekeeper has the following namespace selector: + + ```yaml + namespaceSelector: + matchExpressions: + - key: admission.gatekeeper.sh/ignore + operator: DoesNotExist + ``` + + the default Gatekeeper manifest should already have added this. The default name for the + webhook configuration is `gatekeeper-validating-webhook-configuration` and the default + name for the webhook that needs the namespace selector is `validation.gatekeeper.sh` + + 2. Tell Gatekeeper it's okay for the namespace to be ignored by adding a flag to the pod: + `--exempt-namespace=`. This step is necessary because otherwise the + permission to modify a namespace would be equivalent to the permission to exempt everything + in that namespace from policy checks. This way a user must explicitly have permissions + to configure the Gatekeeper pod before they can add exemptions. + + > In order to add the `admission.gatekeeper.sh/ignore` label to a namespace, that namespace must be listed under the gatekeeper `controllerManager.exemptNamespaces` [parameter](https://github.com/open-policy-agent/gatekeeper/blob/master/charts/gatekeeper/README.md#parameters) when installing via Helm. + + 3. Add the `admission.gatekeeper.sh/ignore` label to the namespace. The value attached + to the label is ignored, so it can be used to annotate the reason for the exemption. + +Similarly, you can also enable the exemption of entire groups of namespaces using the `--exempt-namespace-prefix` and `--exempt-namespace-suffix` flags. Using these flags allows the `admission.gatekeeper.sh/ignore` label to be added to any namespace that matches the supplied prefix or suffix. + +## Difference between exclusion using config resource and `--exempt-namespace` flag + +The difference is at what point in the admission process an exemption occurs. + +If you use `--exempt-namespace` flag and `admission.gatekeeper.sh/ignore` label, Gatekeeper's webhook will not be called by the API server for any resource in that namespace. That means that Gatekeeper being down should have no effect on requests for that namespace. + +If you use the config method, Gatekeeper itself evaluates the exemption. The benefit there is that we have more control over the syntax and can be more fine-grained, but it also means that the API server is still calling the webhook, which means downtime can have an impact. diff --git a/website/versioned_docs/version-v3.17.x/expansion.md b/website/versioned_docs/version-v3.17.x/expansion.md new file mode 100644 index 00000000000..43c816f7b09 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/expansion.md @@ -0,0 +1,327 @@ +--- +id: expansion +title: Validating Workload Resources using ExpansionTemplate +--- + +`Feature State:` Gatekeeper version v3.10+ (alpha), version 3.13+ (beta) + +> ❗This feature is in _beta_ stage. It is enabled by default. To disable the feature, +> set the `enable-generator-resource-expansion` flag to false. + +## Motivation + +A workload resource is a resource that creates other resources, such as a +[Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) or [Job](https://kubernetes.io/docs/concepts/workloads/controllers/job/). Gatekeeper can be configured to reject workload resources +that create a resource that violates a constraint. + +## `ExpansionTemplate` explained + +An `ExpansionTemplate` is a custom resource that Gatekeeper will use to create temporary, fake resources and validate the constraints against them. 
We refer to these resources that Gatekeeper creates for validation purposes as `expanded resources`. We refer to the `Deployment` or other workload resource as the `parent resource` and the act of creating those `expanded` resources as `expansion`. + +The `ExpansionTemplate` custom resource specifies: + +- Which workload resource(s) should be expanded, specified by their GVK +- The GVK of the expanded resources +- The "source" as defined in the field `templateSource` on the `parent resource`, which is used as the blueprint for the expanded resource. For example, in a case where a + `Deployment` expands into a `Pod`, `spec.template` would typically be the + source. +- Optionally, an `enforcementAction` override can be used when validating expanded + resources. If this field is set, any violations against the expanded resource + will use this enforcement action. If an enforcement action is not specified by + the `ExpansionTemplate`, the enforcement action set by the Constraint in + violation will be used. + +Here is an example of a `ExpansionTemplate` that specifies that `DaemonSet`, +`Deployment`, `Job`, `ReplicaSet`, `ReplicationController`, and `StatefulSet` + resources should be expanded into a `Pod`. + +```yaml +apiVersion: expansion.gatekeeper.sh/v1alpha1 +kind: ExpansionTemplate +metadata: + name: expand-deployments +spec: + applyTo: + - groups: ["apps"] + kinds: ["DaemonSet", "Deployment", "ReplicaSet", "StatefulSet"] + versions: ["v1"] + - groups: [""] + kinds: ["ReplicationController"] + versions: ["v1"] + - groups: ["batch"] + kinds: ["Job"] + versions: ["v1"] + templateSource: "spec.template" + enforcementAction: "warn" # This will overwrite all constraint enforcement actions for the GVKs below that result from the GVKs above. + generatedGVK: + kind: "Pod" + group: "" + version: "v1" +``` + +With this `ExpansionTemplate`, any constraints that are configured to target +`Pods` will also be evaluated on the `expanded` pods that Gatekeeper creates when a `Deployment` or `ReplicaSet` is +being reviewed. Any violations created against these expanded `Pod`s, and only these expanded `Pod`s, will have their +enforcement action set to `warn`, regardless of the enforcement actions +specified by the Constraint in violation. + +To see how to use Mutators and Constraints to exclusively review expanded resources, see the [Match Source](#match-source) section below. + +### Limitations + +#### Sidecars and Mutators + +It may not always be possible to build an accurate representation of an +expanded resource by looking at the workload resource alone. For example, suppose a +cluster is using [Istio](https://istio.io/), which will inject a sidecar container on specific +resources. This sidecar configuration is not specified in the config of the +workload resource (i.e. Deployment), but rather injected by Istio's webhook. In +order to accurately represent expanded resources modified by controllers or +webhooks, Gatekeeper leverages its +[Mutations](mutation.md) +feature to allow expanded resources to be manipulated into their desired form. In +the Istio example, `Assign` and `ModifySet` mutators could be configured to +mimic Istio sidecar injection. For further details on mutating expanded resources +see the [Match Source](#match-source) section below, or to see a working example, +see the [Mutating Example](#mutating-example) section. + +#### Unknown Data + +Any resources configured for expansion will be expanded by both the validating +webhook and +[Audit](audit.md). 
This +feature will only be enabled if a user creates an `ExpansionTemplate` that +targets any resources that exist on the cluster. + +Note that the accuracy of enforcement depends on how well the expanded resource +resembles the real thing. Mutations can help with this, but 100% accuracy is +impossible because not all fields can be predicted. For instance, Deployments +create pods with random names. Inaccurately expanded resources may lead to over- or under- +enforcement. In the case of under-enforcement, the expanded pod should still be +rejected. Finally, non-state-based policies (those that rely on transient +metadata such as requesting user or time of creation) cannot be enforced +accurately. This is because such metadata would necessarily be different when +creating the expanded resource. For example, a Deployment is created using the +requesting user's account, but the pod creation request comes from the service +account of the Deployment controller. + +## Configuring Expansion + +Expansion behavior is configured through the `ExpansionTemplate` custom +resource. Optionally, users can create `Mutation` custom resources to customize +how resources are expanded. Mutators with the `source: Generated` field will +only be applied when expanding workload resources, and will not mutate real +resources on the cluster. If the `source` field is not set, the `Mutation` will +apply to both expanded resources and real resources on the cluster. + +Users can test their expansion configuration using the +[`gator expand` CLI](gator.md#the-gator-expand-subcommand) +. + +#### Match Source + +The `source` field on the `match` API, present in the Mutation +and `Constraint` kinds, specifies if the config should match Generated ( +i.e. fake) resources, Original resources, or both. The `source` field is +an `enum` which accepts the following values: + +- `Generated` – the config will only apply to expanded resources, **and will not + apply to any real resources on the cluster** +- `Original` – the config will only apply to Original resources, and will not + affect expanded resources +- `All` – the config will apply to both `Generated` and `Original` resources. + This is the default value. + +For example, suppose a cluster's `ReplicaSet` controller adds a default value +for `fooField` when creating Pods that cannot reasonably be added to the +`ReplicaSet`'s `spec.template`. If a constraint relies on these default values, +a user could create a Mutation custom resource that modifies expanded resources, +like so: + +```yaml +apiVersion: mutations.gatekeeper.sh/v1alpha1 +kind: Assign +metadata: + name: assign-foo-field +spec: + applyTo: + - groups: [""] + kinds: ["Pod"] + versions: ["v1"] + location: "spec.containers[name: *].fooField" + parameters: + assign: + value: "Bar" + match: + source: "Generated" + scope: Cluster + kinds: + - apiGroups: [] + kinds: [] +``` + +Similarly, `Constraints` can be configured to only target fake resources by +setting the `Constraint`'s `spec.match.source` field to `Generated`. This can +also be used to define different enforcement actions for expanded resources and +original resources. + +For example, suppose a cluster has a policy that blocks all [standalone pods](https://kubernetes.io/docs/concepts/configuration/overview/#naked-pods-vs-replicasets-deployments-and-jobs), but allows them to be created as part of a workload resource, such as `Deployment`. 
A user could create a `Constraint` that only targets original resources, like so: + +```yaml +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: block-standalone-pods +metadata: + name: block-standalone-pods +spec: + match: + source: Original + kinds: + - apiGroups: [""] + kinds: ["Pod"] +``` + +## Mutating Example + +Suppose a cluster is using Istio, and has a policy configured to ensure +specified Pods have an Istio sidecar. To validate Deployments that would create +Pods which Istio will inject a sidecar into, we need to use mutators to mimic +the sidecar injection. + +What follows is an example of: + +- an `ExpansionTemplate` configured to expand `Deployments` into `Pods` +- an `Assign` mutator to add the Istio sidecar container to `Pods` +- a `ModifySet` mutator to add the `proxy` and `sidecar` args +- an inbound `Deployment`, and the expanded `Pod` + +**Note that the Mutators set the `source: Generated` field, which will cause +them to only be applied when expanding resources specified +by `ExpansionTemplates`. These Mutators will not affect any real resources on +the cluster.** + +```yaml +apiVersion: expansion.gatekeeper.sh/v1alpha1 +kind: ExpansionTemplate +metadata: + name: expand-deployments +spec: + applyTo: + - groups: ["apps"] + kinds: ["Deployment"] + versions: ["v1"] + templateSource: "spec.template" + generatedGVK: + kind: "Pod" + group: "" + version: "v1" +--- +apiVersion: mutations.gatekeeper.sh/v1beta1 +kind: Assign +metadata: + name: add-sidecar +spec: + applyTo: + - groups: [""] + kinds: ["Pod"] + versions: ["v1"] + match: + scope: Namespaced + source: All + kinds: + - apiGroups: ["*"] + kinds: ["Pod"] + location: "spec.containers[name:istio-proxy]" + parameters: + assign: + value: + name: "istio-proxy" + imagePullPolicy: IfNotPresent + image: docker.io/istio/proxyv2:1.15.0 + ports: + - containerPort: 15090 + name: http-envoy-prom + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL +--- +apiVersion: mutations.gatekeeper.sh/v1beta1 +kind: ModifySet +metadata: + name: add-istio-args +spec: + applyTo: + - groups: [""] + kinds: ["Pod"] + versions: ["v1"] + match: + source: All + location: "spec.containers[name:istio-proxy].args" + parameters: + operation: merge + values: + fromList: + - proxy + - sidecar +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginx +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 + args: + - "/bin/sh" +``` + +When expanded, the above configs will produce the following `Pod`: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + labels: + app: nginx +spec: + containers: + - args: + - /bin/sh + image: nginx:1.14.2 + name: nginx + ports: + - containerPort: 80 + - args: + - proxy + - sidecar + image: docker.io/istio/proxyv2:1.15.0 + imagePullPolicy: IfNotPresent + name: istio-proxy + ports: + - containerPort: 15090 + name: http-envoy-prom + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL +``` + + diff --git a/website/versioned_docs/version-v3.17.x/externaldata.md b/website/versioned_docs/version-v3.17.x/externaldata.md new file mode 100644 index 00000000000..b154838485d --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/externaldata.md @@ -0,0 +1,578 @@ +--- +id: externaldata +title: External Data +--- + +`Feature State`: Gatekeeper version v3.11+ 
(beta) + +## Motivation + +Gatekeeper provides various means to mutate and validate Kubernetes resources. However, in many of these scenarios this data is either built-in, static or user defined. With external data feature, we are enabling Gatekeeper to interface with various external data sources, such as image registries, using a provider-based model. + +A similar way to connect with an external data source can be done today using OPA's built-in `http.send` functionality. However, there are limitations to this approach. +- Gatekeeper does not support Rego policies for mutation, which cannot use the OPA `http.send` built-in function. +- Security concerns due to: + - if template authors are not trusted, it will potentially give template authors access to the in-cluster network. + - if template authors are trusted, authors will need to be careful on how rego is written to avoid injection attacks. + +Key benefits provided by the external data solution: +- Addresses security concerns by: + - Restricting which hosts a user can access. + - Providing an interface for making requests, which allows Gatekeeper to better handle things like escaping strings. +- Addresses common patterns with a single provider, e.g. image tag-to-digest mutation, which can be leveraged by multiple scenarios (e.g. validating image signatures or vulnerabilities). +- Provider model creates a common interface for extending Gatekeeper with external data. + - It allows for separation of concerns between the implementation that allows access to external data and the actual policy being evaluated. + - Developers and consumers of data sources can rely on that common protocol to ease authoring of both constraint templates and data sources. + - Makes change management easier as users of an external data provider should be able to tell whether upgrading it will break existing constraint templates. (once external data API is stable, our goal is to have that answer always be "no") +- Performance benefits as Gatekeeper can now directly control caching and which values are significant for caching, which increases the likelihood of cache hits. + - For mutation, we can batch requests via lazy evaluation. + - For validation, we make batching easier via [`external_data`](#external-data-for-Gatekeeper-validating-webhook) Rego function design. + + + +## Providers + +Providers are designed to be in-cluster components that can communicate with external data sources (such as image registries, Active Directory/LDAP directories, etc) and return data in a format that can be processed by Gatekeeper. + +Example provider _template_ can be found at: https://github.com/open-policy-agent/gatekeeper-external-data-provider + +### Providers maintained by the community + +If you have built an external data provider and would like to add it to this list, please submit a PR to update this page. + +If you have any issues with a specific provider, please open an issue in the applicable provider's repository. 
+ +The following external data providers are maintained by the community: + +- [ratify](https://github.com/deislabs/ratify) +- [cosign-gatekeeper-provider](https://github.com/sigstore/cosign-gatekeeper-provider) + +### Sample providers + +The following external data providers are samples and are not supported/maintained by the community: + +- [trivy-provider](https://github.com/sozercan/trivy-provider) +- [tag-to-digest-provider](https://github.com/sozercan/tagToDigest-provider) +- [aad-provider](https://github.com/sozercan/aad-provider) +- [kubernetes-api-provider](https://github.com/nilekhc/k8s-gatekeeper-external-data-provider) + +### API (v1beta1) + +#### `Provider` + +Provider resource defines the provider and the configuration for it. + +```yaml +apiVersion: externaldata.gatekeeper.sh/v1beta1 +kind: Provider +metadata: + name: my-provider +spec: + url: https://.:/ # URL to the external data source (e.g., https://my-provider.my-namespace:8090/validate) + timeout: # timeout value in seconds (e.g., 1). this is the timeout on the Provider custom resource, not the provider implementation. + caBundle: # CA bundle to use for TLS verification. +``` + +#### `ProviderRequest` + +`ProviderRequest` is the API request that is sent to the external data provider. + +```go +// ProviderRequest is the API request for the external data provider. +type ProviderRequest struct { + // APIVersion is the API version of the external data provider. + APIVersion string `json:"apiVersion,omitempty"` + // Kind is kind of the external data provider API call. This can be "ProviderRequest" or "ProviderResponse". + Kind ProviderKind `json:"kind,omitempty"` + // Request contains the request for the external data provider. + Request Request `json:"request,omitempty"` +} + +// Request is the struct that contains the keys to query. +type Request struct { + // Keys is the list of keys to send to the external data provider. + Keys []string `json:"keys,omitempty"` +} +``` + +#### `ProviderResponse` + +`ProviderResponse` is the API response that a provider must return. + +```go +// ProviderResponse is the API response from a provider. +type ProviderResponse struct { + // APIVersion is the API version of the external data provider. + APIVersion string `json:"apiVersion,omitempty"` + // Kind is kind of the external data provider API call. This can be "ProviderRequest" or "ProviderResponse". + Kind ProviderKind `json:"kind,omitempty"` + // Response contains the response from the provider. + Response Response `json:"response,omitempty"` +} + +// Response is the struct that holds the response from a provider. +type Response struct { + // Idempotent indicates that the responses from the provider are idempotent. + // Applies to mutation only and must be true for mutation. + Idempotent bool `json:"idempotent,omitempty"` + // Items contains the key, value and error from the provider. + Items []Item `json:"items,omitempty"` + // SystemError is the system error of the response. + SystemError string `json:"systemError,omitempty"` +} + +// Items is the struct that contains the key, value or error from a provider response. +type Item struct { + // Key is the request from the provider. + Key string `json:"key,omitempty"` + // Value is the response from the provider. + Value interface{} `json:"value,omitempty"` + // Error is the error from the provider. 
+ Error string `json:"error,omitempty"` +} +``` + +### Implementation + +Provider is an HTTP server that listens on a port and responds to [`ProviderRequest`](#providerrequest) with [`ProviderResponse`](#providerresponse). + +As part of [`ProviderResponse`](#providerresponse), the provider can return a list of items. Each item is a JSON object with the following fields: +- `Key`: the key that was sent to the provider +- `Value`: the value that was returned from the provider for that key +- `Error`: an error message if the provider returned an error for that key + +If there is a system error, the provider should return the system error message in the `SystemError` field. + +> 📎 Recommendation is for provider implementations to keep a timeout such as maximum of 1-2 seconds for the provider to respond. + +Example provider implementation: https://github.com/open-policy-agent/gatekeeper/blob/master/test/externaldata/dummy-provider/provider.go + +#### Provider Response Caching +Starting with v3.13+, Gatekeeper supports caching of responses from external data providers for both audit and validating webhook. It caches the response based on the `Key` and `Value` received as part of the [`ProviderResponse`](#providerresponse). By default, the cache is invalidated after 3 minutes, which is the default Time-to-Live (TTL). You can configure the TTL using the `--external-data-provider-response-cache-ttl` flag. Setting the flag to 0 disables this cache. + +## External data for Gatekeeper validating webhook + +External data adds a [custom OPA built-in function](https://www.openpolicyagent.org/docs/latest/extensions/#custom-built-in-functions-in-go) called `external_data` to Rego. This function is used to query external data providers. + +`external_data` is a function that takes a request and returns a response. The request is a JSON object with the following fields: +- `provider`: the name of the provider to query +- `keys`: the list of keys to send to the provider + +e.g., +```rego + # build a list of keys containing images for batching + my_list := [img | img = input.review.object.spec.template.spec.containers[_].image] + + # send external data request + response := external_data({"provider": "my-provider", "keys": my_list}) +``` + +Response example: [[`"my-key"`, `"my-value"`, `""`], [`"another-key"`, `42`, `""`], [`"bad-key"`, `""`, `"error message"`]] + +> 📎 To avoid multiple calls to the same provider, recommendation is to batch the keys list to send a single request. + +Example template: +https://github.com/open-policy-agent/gatekeeper/blob/master/test/externaldata/dummy-provider/policy/template.yaml + +## External data for Gatekeeper mutating webhook + +External data can be used in conjunction with [Gatekeeper mutating webhook](mutation.md). + +### API + +You can specify the details of the external data provider in the `spec.parameters.assign.externalData` field of `AssignMetadata` and `Assign`. + +> Note: `spec.parameters.assign.externalData`, `spec.parameters.assign.value` and `spec.parameters.assign.fromMetadata` are mutually exclusive. + +| Field | Description | +|----------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `provider`
String | The name of the external data provider. |
+| `dataSource`<br/>DataSource | Specifies where to extract the data that will be sent to the external data provider.<br/>- `ValueAtLocation` (default): extracts an array of values from the path that will be modified. See [mutation intent](mutation.md#intent) for more details.<br/>- `Username`: The name of the Kubernetes user who initiated the admission request. |
+| `failurePolicy`<br/>FailurePolicy | The policy to apply when the external data provider returns an error.<br/>- `UseDefault`: use the default value specified in `spec.parameters.assign.externalData.default`.<br/>- `Ignore`: ignore the error and do not perform any mutations.<br/>- `Fail` (default): do not perform any mutations and return the error to the user. |
+| `default`
String | The default value to use when the external data provider returns an error and the failure policy is set to `UseDefault`. | + +### `AssignMetadata` + +```yaml +apiVersion: mutations.gatekeeper.sh/v1beta1 +kind: AssignMetadata +metadata: + name: annotate-owner +spec: + match: + scope: Namespaced + kinds: + - apiGroups: ["*"] + kinds: ["Pod"] + location: "metadata.annotations.owner" + parameters: + assign: + externalData: + provider: my-provider + dataSource: Username +``` + +
+**Provider response:** + +```json +{ + "apiVersion": "externaldata.gatekeeper.sh/v1beta1", + "kind": "ProviderResponse", + "response": { + "idempotent": true, + "items": [ + { + "key": "kubernetes-admin", + "value": "admin@example.com" + } + ] + } +} +``` + +
+ +
+**Mutated object:** + +```yaml +... +metadata: + annotations: + owner: admin@example.com +... +``` + +
+ +### `Assign` + +```yaml +apiVersion: mutations.gatekeeper.sh/v1beta1 +kind: Assign +metadata: + name: mutate-images +spec: + applyTo: + - groups: [""] + kinds: ["Pod"] + versions: ["v1"] + match: + scope: Namespaced + location: "spec.containers[name:*].image" + parameters: + assign: + externalData: + provider: my-provider + dataSource: ValueAtLocation + failurePolicy: UseDefault + default: busybox:latest +``` + +
+**Provider response:** + +```json +{ + "apiVersion": "externaldata.gatekeeper.sh/v1beta1", + "kind": "ProviderResponse", + "response": { + "idempotent": true, + "items": [ + { + "key": "nginx", + "value": "nginx:v1.2.3" + } + ] + } +} +``` + +
+ +
+**Mutated object:** + +```yaml +... +spec: + containers: + - name: nginx + image: nginx:v1.2.3 +... +``` + +
+ +### Limitations + +There are several limitations when using external data with the mutating webhook: + +- Only supports mutation of `string` fields (e.g. `.spec.containers[name:*].image`). +- `AssignMetadata` only supports `dataSource: Username` because `AssignMetadata` only supports creation of `metadata.annotations` and `metadata.labels`. `dataSource: ValueAtLocation` will not return any data. +- `ModifySet` does not support external data. +- Multiple mutations to the same object are applied alphabetically based on the name of the mutation CRDs. If you have an external data mutation and a non-external data mutation with the same `spec.location`, the final result might not be what you expected. Currently, there is no way to enforce custom ordering of mutations but the issue is being tracked [here](https://github.com/open-policy-agent/gatekeeper/issues/1133). + +## TLS and mutual TLS support + +Since external data providers are in-cluster HTTP servers backed by Kubernetes services, communication is not encrypted by default. This can potentially lead to security issues such as request eavesdropping, tampering, and man-in-the-middle attack. + +To further harden the security posture of the external data feature, + +- starting from Gatekeeper v3.9.0, TLS and mutual TLS (mTLS) via HTTPS protocol are supported between Gatekeeper and external data providers +- starting with Gatekeeper v3.11.0, TLS or mutual TLS (mTLS) via HTTPS protocol are _required_ between Gatekeeper and external data providers with a minimum TLS version of 1.3 + +In this section, we will describe the steps required to configure them. + +### Prerequisites + +- A Gatekeeper deployment with version >= v3.9.0. +- The certificate of your certificate authority (CA) in PEM format. +- The certificate of your external data provider in PEM format, signed by the CA above. +- The private key of the external data provider in PEM format. + +### How to generate a self-signed CA and a keypair for the external data provider + +In this section, we will describe how to generate a self-signed CA and a keypair using `openssl`. + +1. Generate a private key for your CA: + +```bash +openssl genrsa -out ca.key 2048 +``` + +2. Generate a self-signed certificate for your CA: + +```bash +openssl req -new -x509 -days 365 -key ca.key -subj "/O=My Org/CN=External Data Provider CA" -out ca.crt +``` + +3. Generate a private key for your external data provider: + +```bash + openssl genrsa -out server.key 2048 +``` + +4. Generate a certificate signing request (CSR) for your external data provider: + +> Replace `` and `` with the correct values. + +```bash +openssl req -newkey rsa:2048 -nodes -keyout server.key -subj "/CN=." -out server.csr +``` + +5. Generate a certificate for your external data provider: + +```bash +openssl x509 -req -extfile <(printf "subjectAltName=DNS:.") -days 365 -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt +``` + +### How Gatekeeper trusts the external data provider (TLS) + +To enable one-way TLS, your external data provider should enable any TLS-related configurations for their HTTP server. 
For example, for Go's built-in [`HTTP server`](https://pkg.go.dev/net/http#Server) implementation, you can use [`ListenAndServeTLS`](https://pkg.go.dev/net/http#ListenAndServeTLS): + +```go +server.ListenAndServeTLS("/etc/ssl/certs/server.crt", "/etc/ssl/certs/server.key") +``` + +In addition, the provider is also responsible for supplying the certificate authority (CA) certificate as part of the Provider spec so that Gatekeeper can verify the authenticity of the external data provider's certificate. + +The CA certificate must be encoded as a base64 string when defining the Provider spec. Run the following command to perform base64 encoding: + +```bash +cat ca.crt | base64 | tr -d '\n' +``` + +With the encoded CA certificate, you can define the Provider spec as follows: + +```yaml +apiVersion: externaldata.gatekeeper.sh/v1beta1 +kind: Provider +metadata: + name: my-provider +spec: + url: https://.:/ # URL to the external data source (e.g., https://my-provider.my-namespace:8090/validate) + timeout: # timeout value in seconds (e.g., 1). this is the timeout on the Provider custom resource, not the provider implementation. + caBundle: # base64 encoded CA certificate. +``` + +### How the external data provider trusts Gatekeeper (mTLS) + +Gatekeeper attaches its certificate as part of the HTTPS request to the external data provider. To verify the authenticity of the Gatekeeper certificate, the external data provider must have access to Gatekeeper's CA certificate. There are several ways to do this: + +1. Deploy your external data provider to the same namespace as your Gatekeeper deployment. By default, [`cert-controller`](https://github.com/open-policy-agent/cert-controller) is used to generate and rotate Gatekeeper's webhook certificate. The content of the certificate is stored as a Kubernetes secret called `gatekeeper-webhook-server-cert` in the Gatekeeper namespace e.g. `gatekeeper-system`. In your external provider deployment, you can access Gatekeeper's certificate by adding the following `volume` and `volumeMount` to the provider deployment so that your server can trust Gatekeeper's CA certificate: + +```yaml +volumeMounts: + - name: gatekeeper-ca-cert + mountPath: /tmp/gatekeeper + readOnly: true +volumes: + - name: gatekeeper-ca-cert + secret: + secretName: gatekeeper-webhook-server-cert + items: + - key: ca.crt + path: ca.crt +``` + +After that, you can attach Gatekeeper's CA certificate in your TLS config and enable any client authentication-related settings. For example: + +```go +caCert, err := ioutil.ReadFile("/tmp/gatekeeper/ca.crt") +if err != nil { + panic(err) +} + +clientCAs := x509.NewCertPool() +clientCAs.AppendCertsFromPEM(caCert) + +server := &http.Server{ + Addr: ":8090", + TLSConfig: &tls.Config{ + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: clientCAs, + MinVersion: tls.VersionTLS13, + }, +} +``` + +2. If `cert-controller` is disabled via the `--disable-cert-rotation` flag, you can use a cluster-wide, well-known CA certificate for Gatekeeper so that your external data provider can trust it without being deployed to the `gatekeeper-system` namespace. + +### Authenticate the API server against Webhook (Self managed K8s cluster only) + +> ⚠️ Two new flags will be introduced in v3.11 because of these changes. And since they are not backward compatible, you may need a clean install to make use of them. + +**Note:** To enable authenticating the API server you have to be able to modify cluster resources. This may not be possible for managed K8s clusters. 
+ +To ensure a request to the Gatekeeper webhook is coming from the API server, Gatekeeper needs to validate the client cert in the request. To enable authenticate API server, the following configuration can be made: + +1. Deploy Gatekeeper with a client CA cert name. Provide name of the client CA with the flag `--client-ca-name`. The same name will be used to read certificate from the webhook secret. The webhook will only authenticate API server requests if client CA name is provided with flag. You can modify gatekeeper deployment to add these flags and enable authentication of API server's requests. For example: + + ```yaml + containers: + - args: + - --port=8443 + - --logtostderr + - --exempt-namespace=gatekeeper-system + - --operation=webhook + - --operation=mutation-webhook + - --disable-opa-builtin={http.send} + - --client-cn-name=my-cn-name + - --client-ca-name=clientca.crt + ``` + +2. You will need to patch the webhook secret manually to attach client ca crt. Update secret `gatekeeper-webhook-server-cert` to include `clientca.crt`. Key name `clientca.crt` should match the name passed with `--client-ca-name` flag. You could generate your own CA for this purpose. + + ```yaml + kind: Secret + apiVersion: v1 + data: + ca.crt: + ca.key: + tls.crt: + tls.key: + clientca.crt: # root certificate generated with the help of commands in step 3 + metadata: + ... + name: + namespace: + type: Opaque + ``` + +3. Generate CA and client certificates signed with CA authority to be attached by the API server while talking to Gatekeeper webhook. Gatekeeper webhook expects the API server to attach the certificate that has CN name as `kube-apiserver`. Use `--client-cn-name` to provide custom cert CN name if using a certificate with other CN name, otherwise webhook will throw the error and not accept the request. + + - Generate private key for CA + + ```bash + openssl genrsa -des3 -out myCA.key 2048 + ``` + + - Generate a root certificate + + ```bash + openssl req -x509 -new -nodes -key myCA.key -sha256 -days 1825 -out myCA.crt + ``` + + - Generate private key for API server + + ```bash + openssl genrsa -out apiserver-client.key 2048 + ``` + + - Generate a CSR for API server + + ```bash + openssl req -new -key apiserver-client.key -out apiserver-client.csr + ``` + + - Generate public key for API server + + ```bash + openssl x509 -req -in apiserver-client.csr -CA myCA.crt -CAkey myCA.key -CAcreateserial -out apiserver-client.crt -days 365 + ``` + + > The client certificate generated by the above command will expire in 365 days, and you must renew it before it expires. Adjust the value of `-day` to set the expiry on the client certificate according to your need. + +4. You will need to make sure the K8s API Server includes the appropriate certificate while sending requests to the webhook, otherwise webhook will not accept these requests and will log an error of `tls client didn't provide a certificate`. To make sure the API server attaches the correct certificate to requests being sent to webhook, you must specify the location of the admission control configuration file via the `--admission-control-config-file` flag while starting the API server. 
Here is an example admission control configuration file: + + ```yaml + apiVersion: apiserver.config.k8s.io/v1 + kind: AdmissionConfiguration + plugins: + - name: ValidatingAdmissionWebhook + configuration: + apiVersion: apiserver.config.k8s.io/v1 + kind: WebhookAdmissionConfiguration + kubeConfigFile: "" + - name: MutatingAdmissionWebhook + configuration: + apiVersion: apiserver.config.k8s.io/v1 + kind: WebhookAdmissionConfiguration + kubeConfigFile: "" + ``` + + KubeConfig file should look something like: + + ```yaml + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: # same value as provided in gatekeeper webhook secret's clientca.crt + server: https://..svc:443 + name: + contexts: + - context: + cluster: + user: api-server + name: + current-context: + kind: Config + users: + - name: api-server + user: + client-certificate-data: + client-key-data: + ``` + + **Note**: Default `gatekeeper-webhook-service-name` is `gatekeeper-webhook-service` and default `gatekeeper-namespace` is `gatekeeper-system`. + + Visit [#authenticate-apiservers](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#authenticate-apiservers) for more details. + +## Disabling external data support + +External data support is enabled by default. If you don't need external data support, you can disable it. + +### YAML + +You can disable external data support by adding `--enable-external-data=false` in gatekeeper audit and controller-manager deployment arguments. + +### Helm + +You can also disable external data by installing or upgrading Helm chart by setting `enableExternalData=false`: + +```sh +helm install gatekeeper/gatekeeper --name-template=gatekeeper --namespace gatekeeper-system --create-namespace \ + --set enableExternalData=false +``` diff --git a/website/versioned_docs/version-v3.17.x/failing-closed.md b/website/versioned_docs/version-v3.17.x/failing-closed.md new file mode 100644 index 00000000000..ba1409a1f90 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/failing-closed.md @@ -0,0 +1,152 @@ +--- +id: failing-closed +title: Failing Closed +--- + +## Admission Webhook Fail-Open by Default + +Currently Gatekeeper is defaulting to using `failurePolicy: Ignore` for admission request webhook errors. The impact of this is that when the webhook is down, or otherwise unreachable, constraints will not be enforced. Audit is expected to pick up any slack in enforcement by highlighting invalid resources that made it into the cluster. + +Here we discuss how to configure Gatekeeper to fail closed and some factors you may want to consider before doing so. + +## How to Fail Closed + +If you installed Gatekeeper via the manifest, the only needed change is to set the `failurePolicy` field of Gatekeeper's `ValidatingWebhookConfiguration` to `Fail`. 
For example: + + +```yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + gatekeeper.sh/system: "yes" + name: gatekeeper-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + caBundle: SOME_CERT + service: + name: gatekeeper-webhook-service + namespace: gatekeeper-system + path: /v1/admit + port: 443 + failurePolicy: Fail + matchPolicy: Exact + name: validation.gatekeeper.sh + namespaceSelector: + matchExpressions: + - key: admission.gatekeeper.sh/ignore + operator: DoesNotExist + rules: + - apiGroups: + - '*' + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: + - '*' + scope: '*' + sideEffects: None + timeoutSeconds: 3 +- admissionReviewVersions: + - v1beta1 + clientConfig: + caBundle: SOME_CERT + service: + name: gatekeeper-webhook-service + namespace: gatekeeper-system + path: /v1/admitlabel + port: 443 + failurePolicy: Fail + matchPolicy: Exact + name: check-ignore-label.gatekeeper.sh + namespaceSelector: {} + objectSelector: {} + rules: + - apiGroups: + - "" + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: + - namespaces + scope: '*' + sideEffects: None + timeoutSeconds: 3 +``` + +If you installed Gatekeeper via any other method (Helm chart, operator), please consult the documentation for that method. + +## Considerations + +Here are some factors you may want to consider before configuring Gatekeeper to fail closed. + +### Admission Deadlock + +#### Example + +It is possible to put the cluster in a state where automatic self-healing is impossible. Imagine you delete every `Node` in your cluster. This will kill all running Gatekeeper servers, which means the webhook will fail. Because a request to add a `Node` is subject to admission validation, it cannot succeed until the webhook can serve. The webhook cannot serve until a `Node` is added. This circular dependency will need to be broken before the cluster's control plane can recover. + +#### Mitigation + +This can normally be mitigated by deleting the `ValidatingWebhookConfiguration`, per the [emergency procedure](emergency.md). + +Note that it should always be possible to modify or delete the `ValidatingWebhookConfiguration` because Kubernetes does not make requests to edit webhook configurations subject to admission webhooks. + +#### Potential Gotchas + +If the existence of the webhook resource is enforced by some external process (such as an operator), that may interfere with the emergency recovery process. If this applies, it would be good to have a plan in place to deal with that scenario. + +### Cluster Control Plane Availability + +Because the webhook is being called for all K8s API server requests (under the default configuration), the availability of K8s's control plane becomes subject to the availability of the webhook. It is important to have an idea of your expected API server availability [SLO](https://en.wikipedia.org/wiki/Service-level_objective) and make sure Gatekeeper is configured to support that. + +Below are some potential ways to do that and their gotchas. + +#### Limit the Gatekeeper Webhook's Scope + +It is possible to exempt certain namespaces from being subject to the webhook, or to only call the webhook for certain kinds. This could be one way to prevent the webhook from interfering with sensitive processes. + +##### Potential Gotchas + +It can be hard to say for certain that all critical resources have been exempted because dependencies can be non-obvious. 
Some examples: + +- Exempting `kube-system` namespace is a good starting place, but what about cluster-scoped resources, like nodes? What about other potentially critical namespaces like `istio-system`? +- Some seemingly innocuous kinds can actually play a critical role in cluster operations. Did you know that a `ConfigMap` is used as the locking resource for some Kubernetes leader elections? + +If you are relying on exempting resources to keep your cluster available, be sure you know all the critical dependencies of your cluster. Unfortunately this is very cluster-specific, so there is no general guidance to be offered here. + +#### Harden Your Deployment + +Gatekeeper attempts to be resilient out-of-the-box by running its webhook in multiple pods. You can take that work and adapt it to your cluster by adding the appropriate node selectors and scaling the number of nodes up or down as desired. + +##### Impact of Scaling Nodes + +Putting hard numbers on the impact scaling resources has on Gatekeeper's availability depends on the specifics of the underlying hardware of your cluster and how Gatekeeper is distributed across it, but there are some general themes: + +- Increasing the number of webhook pods should increase QPS serving capacity +- Increasing the number of webhook pods tends to increase uptime of the service +- Increasing the number of webhook pods may increase the time it takes for a constraint to be enforced by all pods in the system + +##### Potential Gotcha: Failure Domains + +Increasing the number of pods increases the theoretical uptime of a system under the theory that if one pod goes down the other pods continue to serve and pick up the slack. This assumption fails if multiple pods fail at the same time due to the same root cause. This happens when multiple pods are in the same [failure domain](https://en.wikipedia.org/wiki/Failure_domain#:~:text=In%20computing%2C%20a%20failure%20domain,of%20infrastructure%20that%20could%20fail.). + +Here are some common ways for two pods to be in the same failure domain: + +- Running on the same node +- Running on the same physical host (e.g. multiple nodes are VMs backed by the same physical machine) +- Running on different physical hosts with the same network switch +- Running on different physical hosts with the same power supply +- Running on different physical hosts in the same rack + +Different clusters may have different backing physical infrastructures and different risk tolerances. Because of this, there is no definitive list of failure domains or guidance on how that should affect your setup. + +## Why Is This Hard? + +In a nutshell it's because it's a webhook, and because it's self-hosted. All REST servers require enough high-availabily infrastructure to satisfy their SLOs (see cloud availability zones / regions). Self-hosted webhooks create a circular dependency that has the potential to interfere with the self-healing Kubenetes usually provides. Any self-hosted admission webhook would be subject to these same concerns. diff --git a/website/versioned_docs/version-v3.17.x/gator.md b/website/versioned_docs/version-v3.17.x/gator.md new file mode 100644 index 00000000000..62510054e04 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/gator.md @@ -0,0 +1,557 @@ +--- +id: gator +title: The gator CLI +--- + +`Feature State`: Gatekeeper version v3.11+ (beta) + +The `gator` CLI is a tool for evaluating Gatekeeper ConstraintTemplates and +Constraints in a local environment. 
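For a quick sense of the workflow, a typical invocation points `gator test` at a manifest and a directory of policies and prints any violations found (a sketch; `deployment.yaml` and `constraints-and-templates/` are placeholder paths):

```shell
# Evaluate a local manifest against locally stored ConstraintTemplates and
# Constraints. Both paths are hypothetical; --filename may be repeated.
gator test --filename=deployment.yaml --filename=constraints-and-templates/
```

The subcommands below describe this and the other evaluation modes in detail.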
+ +## Installation + +To install `gator`, you may either +[download the binary](https://github.com/open-policy-agent/gatekeeper/releases) +relevant to your system or build it directly from source. On macOS and Linux, +you can also install `gator` using [Homebrew](https://brew.sh). + +To build from source: + +```shell +go install github.com/open-policy-agent/gatekeeper/v3/cmd/gator@master +``` + +:::note +`go install` of `gator` requires Gatekeeper `master` branch or `v3.16.0` and later. +::: + +Install with Homebrew: + +```shell +brew install gator +``` + +## The `gator test` subcommand + +`gator test` allows users to test a set of Kubernetes objects against a set of +Templates and Constraints. The command returns violations when found and +communicates success or failure via its exit status. This command will also +attempt to expand any resources passed in if a supplied `ExpansionTemplate` +matches these resources. + +Note: The `gator verify` command was first called `gator test`. These names were +changed to better align `gator` with other projects in the open-policy-agent +space. + +### Usage + +#### Specifying inputs + +`gator test` supports inputs through the `--filename` and `--image` flags, and +via stdin. The three methods of input can be used in combination or individually. The `--filename` and `--image` flags are repeatable. + +The `--filename` flag can specify a single file or a directory. If a file is +specified, that file must end in one of the following extensions: `.json`, +`.yaml`, `.yml`. Directories will be walked, and any files of extensions other +than the aforementioned three will be skipped. + +For example, to test a manifest (piped via stdin) against a folder of policies: + +```shell +cat my-manifest.yaml | gator test --filename=template-and-constraints/ +``` + +Or you can specify both as flags: + +```shell +gator test -f=my-manifest.yaml -f=templates-and-constraints/ +``` + +> ❗The `--image` flag is in _alpha_ stage. + +The `--image` flag specifies a content addressable OCI artifact containing +policy files. The image(s) will be copied into the local filesystem in a +temporary directory, the location of which can be overridden with +the `--tempdir` +flag. Only files with the aforementioned extensions will be processed. For +information on how to create OCI policy bundles, see +the [Bundling Policy into OCI Artifacts](#bundling-policy-into-oci-artifacts) +section. + +For example, to test a manifest (piped via stdin) against an OCI Artifact +containing policies: + +```shell +cat my-manifest.yaml | gator test --image=localhost:5000/gator/template-library:v1 \ + --image=localhost:5000/gator/constraints:v1 +``` + +#### Exit Codes + +`gator test` will return a `0` exit status when the objects, Templates, and +Constraints are successfully ingested, no errors occur during evaluation, and no +violations are found. + +An error during evaluation, for example a failure to read a file, will result in +a `1` exit status with an error message printed to stderr. + +Policy violations will generate a `1` exit status as well, but violation +information will be printed to stdout. + +##### Enforcement Actions + +While violation data will always be returned when an object is found to be +violating a Constraint, the exit status can vary. A constraint with +`spec.enforcementAction: ""` or `spec.enforcementAction: deny` will produce a +`1` exit code, but other enforcement actions like `dryrun` will not. 
This is +meant to make the exit code of `1` consistent with rejection of the object by +Gatekeeper's webhook. A Constraint set to `warn` would not trigger a rejection +in the webhook, but _would_ produce a violation message. The same is true for +that constraint when used in `gator test`. + +#### Output Formatting + +`gator test` supports a `--output` flag that allows the user to specify a +structured data format for the violation data. This information is printed to +stdout. + +The allowed values are `yaml` and `json`, specified like: + +```shell +gator test --filename=manifests-and-policies/ --output=json +``` + +## The `gator verify` subcommand + +### Writing Test Suites + +`gator verify` organizes tests into three levels: Suites, Tests, and Cases: + +- A Suite is a file which defines Tests. +- A Test declares a ConstraintTemplate, a Constraint, and Cases to test the + Constraint. +- A Case defines an object to validate and whether the object is expected to + pass validation. + +Any file paths declared in a Suite are assumed to be relative to the Suite +itself. Absolute paths are not allowed. Thus, it is possible to move around a +directory containing a Suite, and the files it uses for tests. + +### Suites + +[An example Suite file](https://github.com/open-policy-agent/gatekeeper-library/blob/8765ec11c12a523688ed77485c7a458df84266d6/library/general/allowedrepos/suite.yaml) +. + +To be valid, a Suite file must declare: + +```yaml +kind: Suite +apiVersion: test.gatekeeper.sh/v1alpha1 +``` + +`gator verify` silently ignores files which do not declare these. A Suite may +declare multiple Tests, each containing different Templates and Constraints. +Each Test in a Suite is independent. + +### Tests + +Each Suite contains a list of Tests under the `tests` field. + +A Test compiles a ConstraintTemplate, and instantiates a Constraint for the +ConstraintTemplate. It is an error for the Constraint to have a different type +than that defined in the ConstraintTemplate spec.crd.spec.names.kind, or for the +ConstraintTemplate to not compile. + +### Cases + +Each Test contains a list of Cases under the `cases` field. + +A Case validates an object against a Constraint. The case may specify that the +object is expected to pass or fail validation, and may make assertions about the +returned violations (if any). + +A Case must specify `assertions` and whether it expects violations. The simplest +way to declare this is: + +The Case expects at least one violation: + +```yaml +assertions: +- violations: yes +``` + +The Case expects no violations: + +```yaml +assertions: +- violations: no +``` + +Assertions contain the following fields, acting as conditions for each assertion +to check. + +- `violations` is either "yes", "no", or a non-negative integer. + - If "yes", at least one violation must otherwise match the assertion. + - If "no", then no violation messages must otherwise match the assertion. + - If a nonnegative integer, then exactly that many violations must match. + Defaults to "yes". +- `message` matches violations containing the exact string specified. `message` + is case-sensitive. If not specified or explicitly set to empty string, all + messages returned by the Constraint are considered matching. + +A Case may specify multiple assertions. 
For example: + +```yaml + - name: both-disallowed + object: samples/repo-must-be-openpolicyagent/disallowed_both.yaml + assertions: + - violations: 2 + - message: initContainer + violations: 1 + - message: container + violations: 1 +``` + +This Case specifies: + +- There are exactly two violations. +- There is exactly one violation containing "initContainer". +- There is exactly one violation containing "container". + +It is valid to assert that no violations match a specified message. For example, +the below is valid: + +```yaml +- violations: yes +- violations: no + message: foobar +``` + +This Case specifies that there is at least one violation, and no violations +contain the string "foobar". + +A Case may specify `inventory`, which is a list of paths to files containing +Kubernetes objects to put in `data.inventory` for testing referential +constraints. + +```yaml +inventory: +- samples/data_objects.yaml +``` + +More examples of working `gator verify` suites are available in the +[gatekeeper-library](https://github.com/open-policy-agent/gatekeeper-library/tree/master/library) +repository. + +### Usage + +To run a specific suite: + +``` +gator verify suite.yaml +``` + +To run all suites in the current directory and all child directories recursively + +```shell +gator verify ./... +``` + +To only run tests whose full names contain a match for a regular expression, use +the `run` flag: + +```shell +gator verify path/to/suites/... --run "disallowed" +``` + +### Validating Metadata-Based Constraint Templates + +`gator verify` may be used with an [`AdmissionReview`](https://pkg.go.dev/k8s.io/kubernetes/pkg/apis/admission#AdmissionReview) +object to test your constraints. This can be helpful to simulate a certain operation (`CREATE`, `UPDATE`, `DELETE`, etc.) +or [`UserInfo`](https://pkg.go.dev/k8s.io/kubernetes@v1.25.3/pkg/apis/authentication#UserInfo) metadata. +Recall that the `input.review.user` can be accessed in the Rego code (see [Input Review](howto.md#input-review) for more guidance). 
The `AdmissionReview` object can be specified where you would specify the object under test above: + +```yaml + - name: both-disallowed + object: path/to/test_admission_review.yaml + assertions: + - violations: 1 +``` + +Example for testing the `UserInfo` metadata: + +AdmissionReview, ConstraintTemplate, Constraint: +```yaml +kind: AdmissionReview +apiVersion: admission.k8s.io/v1beta1 +request: + operation: "UPDATE" + userInfo: + username: "system:foo" + object: + kind: Pod + labels: + - app: "bar" +--- +kind: ConstraintTemplate +apiVersion: templates.gatekeeper.sh/v1 +metadata: + name: validateuserinfo +spec: + crd: + spec: + names: + kind: ValidateUserInfo + targets: + - target: admission.k8s.gatekeeper.sh + rego: | + package k8svalidateuserinfo + violation[{"msg": msg}] { + username := input.review.userInfo.username + not startswith(username, "system:") + msg := sprintf("username is not allowed to perform this operation: %v", [username]) + } +--- +kind: ValidateUserInfo +apiVersion: constraints.gatekeeper.sh/v1 +metadata: + name: always-validate +``` + +Gator Suite: +```yaml +apiVersion: test.gatekeeper.sh/v1alpha1 +kind: Suite +tests: +- name: userinfo + template: template.yaml + constraint: constraint.yaml + cases: + - name: system-user + object: admission-review.yaml + assertions: + - violations: no +``` + +Note for `DELETE` operation, the `oldObject` should be the object being deleted: + +```yaml +kind: AdmissionReview +apiVersion: admission.k8s.io/v1beta1 +request: + operation: "DELETE" + userInfo: + username: "system:foo" + oldObject: + kind: Pod + labels: + - app: "bar" +``` + +Note that [`audit`](audit.md) or `gator test` are different enforcement points and they don't have the `AdmissionReview` request metadata. + +Run `gator verify --help` for more information. + +## The `gator expand` subcommand + +`gator expand` allows users to test the behavior of their Expansion configs. The +command accepts a file or directory containing the expansion configs, which +should include the resource(s) under test, the `ExpansionTemplate`(s), and +optionally any Mutation CRs. The command will output a manifest containing the +expanded resources. + +If the mutators or constraints use `spec.match.namespaceSelector`, the namespace the resource +belongs to must be supplied in order to correctly evaluate the match criteria. +If a resource is specified for expansion but its non-default namespace is not +supplied, the command will exit 1. See the [non default namespace example](#non-default-namespace-example) below. + +### Usage + +Similar to `gator test`, `gator expand` expects a `--filename` or `--image` +flag. The flags can be used individually, in combination, and/or repeated. + +```shell +gator expand --filename="manifest.yaml" –filename="expansion-policy/" +``` + +Or, using an OCI Artifact for the expansion configuration: + +```shell +gator expand --filename="my-deployment.yaml" --image=localhost:5000/gator/expansion-policy:v1 +``` + +By default, `gator expand` will output to stdout, but a `–outputfile` flag can be +specified to write the results to a file. + +```shell +gator expand --filename="manifest.yaml" –outputfile="results.yaml" +``` + +`gator expand` can output in `yaml` or `json` (default is `yaml`). + +```shell +gator expand --filename="manifest.yaml" –format="json" +``` + +See `gator expand –help` for more details. `gator expand` will exit 1 if there +is a problem parsing the configs or expanding the resources. 
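Because `gator expand` writes the expanded manifest to stdout by default and `gator test` accepts input on stdin, one possible pattern is to chain the two commands so that the resources produced by expansion are validated directly (a sketch; `manifest.yaml`, `expansion-configs/`, and `policies/` are placeholder paths):

```shell
# Expand workload resources into their generated Pods, then validate the
# expanded output against local policies. With bash, `set -o pipefail`
# makes an expansion failure also fail the pipeline.
gator expand --filename="manifest.yaml" --filename="expansion-configs/" \
  | gator test --filename="policies/"
```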
+ +#### Non default namespace example + +This is an example setup where we include a `namespace` in a `manifest.yaml` that we plan on passing to `gator expand`. + +```yaml +apiVersion: expansion.gatekeeper.sh/v1alpha1 +kind: ExpansionTemplate +metadata: + name: expand-deployments +spec: + applyTo: + - groups: [ "apps" ] + kinds: [ "Deployment" ] + versions: [ "v1" ] + templateSource: "spec.template" + generatedGVK: + kind: "Pod" + group: "" + version: "v1" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + namespace: my-ns + labels: + app: nginx +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 + args: + - "/bin/sh" +--- +apiVersion: mutations.gatekeeper.sh/v1alpha1 +kind: Assign +metadata: + name: always-pull-image +spec: + applyTo: + - groups: [ "" ] + kinds: [ "Pod" ] + versions: [ "v1" ] + location: "spec.containers[name: *].imagePullPolicy" + parameters: + assign: + value: "Always" + match: + source: "Generated" + scope: Namespaced + kinds: + - apiGroups: [ ] + kinds: [ ] + namespaceSelector: + matchExpressions: + - key: admission.gatekeeper.sh/ignore + operator: DoesNotExist +--- +# notice this file is providing the non default namespace `my-ns` +apiVersion: v1 +kind: Namespace +metadata: + name: my-ns +``` + +Calling `gator expand --filename=manifest.yaml` will produce the following output: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + labels: + app: nginx + name: nginx-deployment-pod + namespace: my-ns +spec: + containers: + - args: + - /bin/sh + image: nginx:1.14.2 + imagePullPolicy: Always + name: nginx + ports: + - containerPort: 80 +``` + +However, not including the `namespace` definition in the call to `gator expand` will exit with a status code of 1 and error out with: + +``` +error expanding resources: error expanding resource nginx-deployment: failed to mutate resultant resource nginx-deployment-pod: matching for mutator Assign.mutations.gatekeeper.sh /always-pull-image failed for Pod my-ns nginx-deployment-pod: failed to run Match criteria: namespace selector for namespace-scoped object but missing Namespace +``` + +## Bundling Policy into OCI Artifacts + +It may be useful to bundle policy files into OCI Artifacts for ingestion during +CI/CD workflows. The workflow could perform validation on inbound objects using +`gator test|expand`. + +A policy bundle can be composed of any arbitrary file structure, which `gator` +will walk recursively. Any files that do not end in `json|yaml|yml` will be +ignored. `gator` does not enforce any file schema in the artifacts; it only +requires that all files of the support extensions describe valid Kubernetes +resources. + +We recommend using the [Oras CLI](https://oras.land/cli/) to create OCI +artifacts. For example, to push a bundle containing the 2 local directories +`constraints` and `template_library`: + +```shell +oras push localhost:5000/gator/policy-bundle:v1 ./constraints/:application/vnd.oci.image.layer.v1.tar+gzip \ + ./template_library/:application/vnd.oci.image.layer.v1.tar+gzip +``` + +This expects that the `constraints` and `template_library` directories are at +the path that this command is being run from. + +## Gotchas + +### Duplicate violation messages + +Rego de-duplicates identical violation messages. If you want to be sure that a +test returns multiple violations, use a unique message for each violation. 
+Otherwise, if you specify an exact number of violations, the test may fail. + +### Matching is case-sensitive + +Message declarations are case-sensitive. If a test fails, check that the +expected message's capitalization exactly matches the one in the template. + +### Referential constraints and Namespace-scoped resources + +Gator cannot determine if a type is Namespace-scoped or not, so it does not +assign objects to the default Namespace automatically. Always specify +`metadata.namespace` for Namespace-scoped objects to prevent test failures, or +to keep from specifying templates which will fail in a real cluster. + +## Platform Compatibility + +`gator` is only automatically tested on Linux for each commit. If you want to +use `gator` on other systems, let us know by replying to +[this issue](https://github.com/open-policy-agent/gatekeeper/issues/1655). + +`gator verify` has been manually tested on Windows and works as of +[this commit](https://github.com/open-policy-agent/gatekeeper/commit/b3ed94406583c85f3102c54a32f362d27f76da96) +. Continued functionality is not guaranteed. + +File paths which include backslashes are not portable, so suites using such +paths will not work as intended on Windows. diff --git a/website/versioned_docs/version-v3.17.x/help.md b/website/versioned_docs/version-v3.17.x/help.md new file mode 100644 index 00000000000..a3718a4e268 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/help.md @@ -0,0 +1,119 @@ +--- +id: help +title: How to contribute +--- +Thanks for your interest in contributing to the Gatekeeper project! This document will help answer common questions you may have during your contribution. + +## Where to start? +Join us to help define the direction and implementation of this project! + +- File [GitHub Issues](https://github.com/open-policy-agent/gatekeeper/issues) + to report bugs, request features, or ask questions asynchronously. + +- Ask questions in [OPA Gatekeeper Community Discussions](https://github.com/open-policy-agent/community/discussions/categories/gatekeeper) + +- Join the [`#opa-gatekeeper`](https://openpolicyagent.slack.com/messages/CDTN970AX) + channel on [OPA Slack](https://slack.openpolicyagent.org/) to talk to the maintainers and other contributors asynchronously. + +- Join [weekly meetings](https://docs.google.com/document/d/1A1-Q-1OMw3QODs1wT6eqfLTagcGmgzAJAjJihiO3T48/edit) to discuss development, issues, use cases, etc with maintainers and other contributors. + +- Add a policy to the [Gatekeeper policy library](https://www.github.com/open-policy-agent/gatekeeper-library). + +## Contributing Process + +Please follow these 3 steps for contributions: + +1. Commit changes to a git branch in your fork, making sure to sign-off those changes for the [Developer Certificate of Origin](#developer-certification-of-origin-dco). +1. Create a GitHub Pull Request for your change, following the instructions in the pull request template and use [semantic PR title](https://github.com/zeke/semantic-pull-requests) +1. Perform a [Pull Request Review](#pull-request-review-process) with the project maintainers on the pull request. + +### Developer Certification of Origin (DCO) + +This project requires contributors to sign a DCO (Developer Certificate of Origin) to ensure that the project has the proper rights to use your code. + +The DCO is an attestation attached to every commit made by every developer. 
In the commit message of the contribution, the developer simply adds a Signed-off-by statement and thereby agrees to the DCO, which you can find at . + +#### DCO Sign-Off via the command line + +Configure your local git to sign off your username and email address that is associated with your GitHub user account. + +```sh +$ git config --global user.name "John Doe" +$ git config --global user.email johndoe@example.com +``` + +Then, for every commit, add a signoff statement via the `-s` flag. + +```sh +$ git commit -s -m "This is my commit message" +``` + +If you forget to add the sign-off you can also amend a previous commit with the sign-off by running `git commit --amend -s`. If you've pushed your changes to GitHub already you'll need to force push your branch with `git push -f`. + +### Pull Request Review Process + +Please take a look at [this article](https://help.github.com/articles/about-pull-requests/) if you're not familiar with GitHub Pull Requests. + +Once you open a pull request, project maintainers will review your changes and respond to your pull request with any feedback they might have. + +#### Pull Request Test Requirements + +For code updates, to ensure high quality commits, we require that all pull requests to this project meet these specifications: + +1. **Tests:** We require all the code in Gatekeeper to have at least unit test coverage. +2. **Green CI Tests:** We require these test runs to succeed on every pull request before being merged. + +#### Optional benchmarking of changes + +To ensure that any changes made to the code do not negatively impact its performance, you can run benchmark tests on the changes included in a pull request. To do this, simply comment `/benchmark` on the pull request. This will trigger the benchmark tests to run on both the current HEAD and the code changes in the pull request. The results of the benchmark tests will then be commented on the pull request using [benchstat](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat). + +If you are introducing a new feature, doing a big refactor, or fixing a critical bug, it's especially important to run benchmark tests on the changes you are trying to merge. This will help ensure that the changes do not negatively impact the performance of the code and that it continues to function as expected. 
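If you want a rough local signal before opening a pull request, you can also run the Go benchmarks yourself and compare the runs with `benchstat`. The sketch below uses standard Go tooling; the package path `./pkg/mutation/...` and the branch names are placeholder examples, so adjust them to the packages your change touches:

```sh
# Benchmark the base branch and your branch, repeating each benchmark to reduce noise.
git checkout master
go test -bench=. -benchmem -count=5 ./pkg/mutation/... > old.txt

git checkout my-feature-branch
go test -bench=. -benchmem -count=5 ./pkg/mutation/... > new.txt

# benchstat reports the per-benchmark delta between the two runs.
go run golang.org/x/perf/cmd/benchstat@latest old.txt new.txt
```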
+ +Below is the sample output that will be commented on the pull request: + +``` +name old time/op new time/op delta +pkg:github.com/open-policy-agent/gatekeeper/v3/pkg/mutation goos:linux goarch:amd64 +System_Mutate 1.48µs ± 5% 1.50µs ± 4% ~ (p=0.468 n=10+10) +pkg:github.com/open-policy-agent/gatekeeper/v3/pkg/mutation/mutators/assign goos:linux goarch:amd64 +AssignMutator_Mutate/always_mutate_1-depth 235ns ± 5% 234ns ± 5% ~ (p=0.726 n=10+10) +AssignMutator_Mutate/always_mutate_2-depth 287ns ± 6% 279ns ± 5% ~ (p=0.190 n=10+10) +AssignMutator_Mutate/always_mutate_5-depth 420ns ± 2% 416ns ± 3% ~ (p=0.297 n=9+9) +AssignMutator_Mutate/always_mutate_10-depth 556ns ± 4% 570ns ± 6% ~ (p=0.123 n=10+10) +AssignMutator_Mutate/always_mutate_20-depth 977ns ± 3% 992ns ± 2% ~ (p=0.063 n=10+10) +AssignMutator_Mutate/never_mutate_1-depth 196ns ± 4% 197ns ± 6% ~ (p=0.724 n=10+10) +AssignMutator_Mutate/never_mutate_2-depth 221ns ± 4% 222ns ± 4% ~ (p=0.971 n=10+10) +AssignMutator_Mutate/never_mutate_5-depth 294ns ± 4% 296ns ± 4% ~ (p=0.436 n=10+10) +AssignMutator_Mutate/never_mutate_10-depth 424ns ± 2% 425ns ± 3% ~ (p=0.905 n=9+10) +AssignMutator_Mutate/never_mutate_20-depth 682ns ± 3% 680ns ± 5% ~ (p=0.859 n=9+10) +pkg:github.com/open-policy-agent/gatekeeper/v3/pkg/mutation/mutators/assignimage goos:linux goarch:amd64 +AssignImageMutator_Mutate/always_mutate_1-depth 579ns ± 7% 573ns ± 3% ~ (p=0.650 n=9+9) +AssignImageMutator_Mutate/always_mutate_2-depth 625ns ± 5% 627ns ± 2% ~ (p=0.536 n=10+9) +AssignImageMutator_Mutate/always_mutate_5-depth 758ns ± 5% 768ns ± 6% ~ (p=0.631 n=10+10) +AssignImageMutator_Mutate/always_mutate_10-depth 1.06µs ± 8% 1.08µs ± 5% ~ (p=0.143 n=10+10) +AssignImageMutator_Mutate/always_mutate_20-depth 1.38µs ± 3% 1.42µs ± 3% +2.80% (p=0.003 n=9+10) +AssignImageMutator_Mutate/never_mutate_1-depth 237ns ± 3% 233ns ± 3% ~ (p=0.107 n=10+9) +AssignImageMutator_Mutate/never_mutate_2-depth 266ns ± 4% 266ns ± 3% ~ (p=1.000 n=10+10) +AssignImageMutator_Mutate/never_mutate_5-depth 336ns ± 6% 342ns ± 2% +1.85% (p=0.037 n=10+9) +AssignImageMutator_Mutate/never_mutate_10-depth 463ns ± 3% 479ns ± 5% +3.53% (p=0.013 n=9+10) +AssignImageMutator_Mutate/never_mutate_20-depth 727ns ± 3% 727ns ± 2% ~ (p=0.897 n=10+8) +... +``` + +If a significantly positive increase in the delta occurs, it could suggest that the changes being implemented have a negative impact on the respective package. However, there might be cases where the delta may be higher even without significant changes. In such situations, it is advisable to rerun the benchmarks for more precise and accurate results. + +## Contributing to Docs + +If you want to contribute to docs, Gatekeeper auto-generates versioned docs. If you have any doc changes for a particular version, please update in [website/docs](https://github.com/open-policy-agent/gatekeeper/tree/master/website/docs) as well as in [website/versioned_docs/version-vx.y.z](https://github.com/open-policy-agent/gatekeeper/tree/master/website/versioned_docs) directory. If the change is for next release, please update in [website/docs](https://github.com/open-policy-agent/gatekeeper/tree/master/website/docs), then the change will be part of next versioned doc when we do a new release. + +## Contributing to Helm Chart + +If you want to contribute to Helm chart, Gatekeeper auto-generates versioned Helm charts from static manifests. 
If you have any changes in [charts](https://github.com/open-policy-agent/gatekeeper/tree/master/charts) directory, they will get clobbered when we do a new release. The generator code lives under [cmd/build/helmify](https://github.com/open-policy-agent/gatekeeper/tree/master/cmd/build/helmify). To make modifications to this template, please edit `kustomization.yaml`, `kustomize-for-helm.yaml` and `replacements.go` under that directory and then run `make manifests` to generate changes in the [manifest_staging](https://github.com/open-policy-agent/gatekeeper/tree/master/manifest_staging) directory. You should push all the modified files to your PR. Once it's merged, the changes will be promoted to the root charts directory the next time a Gatekeeper release is cut. + +## Contributing to Code + +If you want to contribute code, check out the [Developers](developers.md) guide to get started. + +## Contributing Templates + +If you'd like to contribute a Constraint Template to the [Gatekeeper Policy Library](https://open-policy-agent.github.io/gatekeeper-library/website/), you can find documentation on how to do that [here in the library's README](https://github.com/open-policy-agent/gatekeeper-library?tab=readme-ov-file#how-to-contribute-to-the-library). \ No newline at end of file diff --git a/website/versioned_docs/version-v3.17.x/howto.md b/website/versioned_docs/version-v3.17.x/howto.md new file mode 100644 index 00000000000..b84da1e8f9e --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/howto.md @@ -0,0 +1,142 @@ +--- +id: howto +title: How to use Gatekeeper +--- + +Gatekeeper uses the [OPA Constraint Framework](https://github.com/open-policy-agent/frameworks/tree/master/constraint) to describe and enforce policy. Look there for more detailed information on their semantics and advanced usage. + +## Constraint Templates + +Before you can define a constraint, you must first define a [`ConstraintTemplate`](constrainttemplates.md), which describes both the [Rego](https://www.openpolicyagent.org/docs/latest/#rego) that enforces the constraint and the schema of the constraint. The schema of the constraint allows an admin to fine-tune the behavior of a constraint, much like arguments to a function. + +Here is an example constraint template that requires all labels described by the constraint to be present: + +```yaml +apiVersion: templates.gatekeeper.sh/v1 +kind: ConstraintTemplate +metadata: + name: k8srequiredlabels +spec: + crd: + spec: + names: + kind: K8sRequiredLabels + validation: + # Schema for the `parameters` field + openAPIV3Schema: + type: object + properties: + labels: + type: array + items: + type: string + targets: + - target: admission.k8s.gatekeeper.sh + rego: | + package k8srequiredlabels + + violation[{"msg": msg, "details": {"missing_labels": missing}}] { + provided := {label | input.review.object.metadata.labels[label]} + required := {label | label := input.parameters.labels[_]} + missing := required - provided + count(missing) > 0 + msg := sprintf("you must provide labels: %v", [missing]) + } +``` + +You can install this ConstraintTemplate with the following command: + +```sh +kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper/master/demo/basic/templates/k8srequiredlabels_template.yaml +``` + +## Constraints + +Constraints are then used to inform Gatekeeper that the admin wants a ConstraintTemplate to be enforced, and how. 
This constraint uses the `K8sRequiredLabels` constraint template above to make sure the `gatekeeper` label is defined on all namespaces: + +```yaml +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: K8sRequiredLabels +metadata: + name: ns-must-have-gk +spec: + match: + kinds: + - apiGroups: [""] + kinds: ["Namespace"] + parameters: + labels: ["gatekeeper"] +``` + +You can install this Constraint with the following command: + +```sh +kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper/master/demo/basic/constraints/all_ns_must_have_gatekeeper.yaml +``` + +### The match field + +The `match` field defines which resources the constraint will be applied to. It supports the following types of matchers: + + * `kinds` accepts a list of objects with `apiGroups` and `kinds` fields that list the groups/kinds of objects to which the constraint will apply. If multiple groups/kinds objects are specified, only one match is needed for the resource to be in scope. + * `scope` determines if cluster-scoped and/or namespace-scoped resources are matched. Accepts `*`, `Cluster`, or `Namespaced`. (defaults to `*`) + * `namespaces` is a list of namespace names. If defined, a constraint only applies to resources in a listed namespace. Namespaces also supports a prefix-based glob. For example, `namespaces: [kube-*]` matches both `kube-system` and `kube-public`. + * `excludedNamespaces` is a list of namespace names. If defined, a constraint only applies to resources not in a listed namespace. ExcludedNamespaces also supports a prefix-based glob. For example, `excludedNamespaces: [kube-*]` matches both `kube-system` and `kube-public`. + * `labelSelector` is the combination of two optional fields: `matchLabels` and `matchExpressions`. These two fields provide different methods of selecting or excluding k8s objects based on the label keys and values included in object metadata. All selection expressions are ANDed to determine if an object meets the cumulative requirements of the selector. + * `namespaceSelector` is a label selector against an object's containing namespace or the object itself, if the object is a namespace. + * `name` is the name of a Kubernetes object. If defined, it matches against objects with the specified name. Name also supports a prefix-based glob. For example, `name: pod-*` matches both `pod-a` and `pod-b`. + +Note that if multiple matchers are specified, a resource must satisfy each top-level matcher (`kinds`, `namespaces`, etc.) to be in scope. Each top-level matcher has its own semantics for what qualifies as a match. An empty matcher (an undefined `match` field) is deemed to be inclusive (matches everything). Also note that `namespaces`, `excludedNamespaces`, and `namespaceSelector` will match cluster-scoped resources, which are not namespaced. To avoid this, set `scope` to `Namespaced`. + +### The parameters field + +The `parameters` field describes the intent of a constraint. It can be referenced as `input.parameters` by the ConstraintTemplate's Rego source code. Gatekeeper populates `input.parameters` with values passed into the `parameters` field in the Constraint.
+ +Example: +```yaml + rego: | + package k8srequiredlabels + + violation[{"msg": msg, "details": {"missing_labels": missing}}] { + provided := {label | input.review.object.metadata.labels[label]} + required := {label | label := input.parameters.labels[_]} + missing := required - provided + count(missing) > 0 + msg := sprintf("you must provide labels: %v", [missing]) + } +``` +The schema for the input Constraint `parameters` is defined in the ConstraintTemplate. The API server will reject a Constraint with an incorrect parameters field if the data types do not match. + +Example: +```shell +# Apply the Constraint with incorrect parameters schema +$ cat << EOF | kubectl apply -f - +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: K8sRequiredLabels +metadata: + name: ns-must-have-gk +spec: + match: + kinds: + - apiGroups: [""] + kinds: ["Namespace"] + parameters: + # Note that "labels" is now an array item, rather than an object + - labels: ["gatekeeper"] +EOF +The K8sRequiredLabels "ns-must-have-gk" is invalid: spec.parameters: Invalid value: "array": spec.parameters in body must be of type object: "array" +``` + +### The enforcementAction field + +The `enforcementAction` field defines the action for handling Constraint violations. By default, `enforcementAction` is set to `deny` as the default behavior is to deny admission requests with any violation. Other supported enforcementActions include `dryrun` and `warn`. Refer to [Handling Constraint Violations](violations.md) for more details. + +### Listing constraints +You can list all constraints in a cluster with the following command: + +```sh +kubectl get constraints +``` + +### Input Review +You can view information on the `input.review` object that Gatekeeper takes as input [here](./input.md) + diff --git a/website/versioned_docs/version-v3.17.x/input.md b/website/versioned_docs/version-v3.17.x/input.md new file mode 100644 index 00000000000..488f1a54bb0 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/input.md @@ -0,0 +1,75 @@ +--- +id: input +title: Admission Review Input +--- + +The data that's passed to Gatekeeper for review is in the form of an `input.review` object that stores the [admission request](https://pkg.go.dev/k8s.io/kubernetes/pkg/apis/admission#AdmissionRequest) under evaluation. It follows a structure that contains the object being created, and in the case of update operations the old object being updated. It has the following fields: +- `dryRun`: Describes if the request was invoked by `kubectl --dry-run`. This cannot be populated by Kubernetes for audit. +- `kind`: The resource `kind`, `group`, `version` of the request object under evaluation. +- `name`: The name of the request object under evaluation. It may be empty if the deployment expects the API server to generate a name for the requested resource. +- `namespace`: The namespace of the request object under evaluation. Empty for cluster scoped objects. +- `object`: The request object under evaluation to be created or modified. +- `oldObject`: The original state of the request object under evaluation. This is only available for UPDATE operations. +- `operation`: The operation for the request (e.g. CREATE, UPDATE). This cannot be populated by Kubernetes for audit. +- `uid`: The request's unique identifier. This cannot be populated by Kubernetes for audit. +- `userInfo`: The request's user's information such as `username`, `uid`, `groups`, `extra`. This cannot be populated by Kubernetes for audit. 
+ +> **_NOTE_** For `input.review` fields above that cannot be populated by Kubernetes for audit reviews, the constraint templates that rely on them are not auditable. It is up to the rego author to handle the case where these fields are unset and empty in order to avoid every matching resource being reported as violating resources. + +You can see an example of the request structure below. + +```json +{ + "apiVersion": "admission.k8s.io/v1", + "kind": "AdmissionReview", + "request": { + "uid": "abc123", + "kind": { + "group": "apps", + "version": "v1", + "kind": "Deployment" + }, + "resource": { + "group": "apps", + "version": "v1", + "resource": "deployments" + }, + "namespace": "default", + "operation": "CREATE", + "userInfo": { + "username": "john_doe", + "groups": ["developers"] + }, + "object": { + // The resource object being created, updated, or deleted + "metadata": { + "name": "my-deployment", + "labels": { + "app": "my-app", + "env": "production" + } + }, + "spec": { + // Specific configuration for the resource + "replicas": 3, + // ... other fields ... + } + }, + "oldObject": { + // For update requests, the previous state of the resource + "metadata": { + "name": "my-deployment", + "labels": { + "app": "my-app", + "env": "staging" + } + }, + "spec": { + // Previous configuration for the resource + "replicas": 2, + // ... other fields ... + } + } + } +} +``` \ No newline at end of file diff --git a/website/versioned_docs/version-v3.17.x/install.md b/website/versioned_docs/version-v3.17.x/install.md new file mode 100644 index 00000000000..7b99e93bc8a --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/install.md @@ -0,0 +1,117 @@ +--- +id: install +title: Installation +--- + +## Prerequisites + +### Minimum Kubernetes Version + +The minimum supported Kubernetes version for Gatekeeper is aligned with the Kubernetes releases listed in the [Kubernetes Supported Versions policy](https://kubernetes.io/releases/version-skew-policy/). For more information, please see [supported Kubernetes versions](https://github.com/open-policy-agent/gatekeeper/blob/master/docs/Release_Management.md#supported-kubernetes-versions). + +**Note:** Gatekeeper requires resources introduced in Kubernetes v1.16. + +### RBAC Permissions + +For either installation method, make sure you have cluster admin permissions: + +```sh + kubectl create clusterrolebinding cluster-admin-binding \ + --clusterrole cluster-admin \ + --user +``` + +## Installation + +### Deploying a Release using Prebuilt Image + +If you want to deploy a released version of Gatekeeper in your cluster with a prebuilt image, then you can run the following command: + +```sh +kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper/v3.17.0/deploy/gatekeeper.yaml +``` + +### Deploying a Release using development image + +If you want to deploy latest development version of Gatekeeper, you can use `openpolicyagent/gatekeeper:dev` tag or `openpolicyagent/gatekeeper:`. + +Images are hosted in [OPA Docker Hub repository](https://hub.docker.com/r/openpolicyagent/gatekeeper/tags). + +### Deploying HEAD Using make + +Currently the most reliable way of installing Gatekeeper is to build and install from HEAD: + + * Make sure that: + * You have [Docker](https://docs.docker.com/engine/install/) version 20.10 or later installed. + * Your kubectl context is set to the desired installation cluster. + * You have a container registry you can write to that is readable by the target cluster. 
+ + * Clone the Gatekeeper repository to your local system: + ```sh + git clone https://github.com/open-policy-agent/gatekeeper.git + ``` + + * `cd` to the repository directory. + + * Build and push Gatekeeper image: + ```sh + export DESTINATION_GATEKEEPER_IMAGE= + make docker-buildx REPOSITORY=$DESTINATION_GATEKEEPER_IMAGE OUTPUT_TYPE=type=registry + ``` + + > If you want to use a local image, don't set OUTPUT_TYPE and it will default to `OUTPUT_TYPE=type=docker`. + + * Finally, deploy: + ```sh + make deploy REPOSITORY=$DESTINATION_GATEKEEPER_IMAGE + ``` + +### Deploying via Helm + +A basic Helm chart exists in `charts/gatekeeper`. If you have Helm installed, you can deploy via the following instructions for Helm v3: + +```sh +helm repo add gatekeeper https://open-policy-agent.github.io/gatekeeper/charts +helm install gatekeeper/gatekeeper --name-template=gatekeeper --namespace gatekeeper-system --create-namespace +``` + +If you are using the older Gatekeeper Helm repo location and Helm v3.3.2+, then use `force-update` to override the default behavior to update the existing repo. + +```sh +helm repo add gatekeeper https://open-policy-agent.github.io/gatekeeper/charts --force-update +``` + +Please note that this chart is compatible with Helm v3 starting with Gatekeeper v3.1.1. When using Helm v3, it is expected to see warnings regarding to `crd-install` hook. This is due to maintaining backwards compatibility with Helm v2 and should not impact the chart deployment. + +You can alter the variables in `charts/gatekeeper/values.yaml` to customize your deployment. To regenerate the base template, run `make manifests`. + +## Uninstallation + +### Using Prebuilt Image + +If you used a prebuilt image to deploy Gatekeeper, then you can delete all the Gatekeeper components with the following command: + + ```sh + kubectl delete -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper/v3.17.0/deploy/gatekeeper.yaml + ``` + +### Using make + +If you used `make` to deploy, then run the following to uninstall Gatekeeper: + + * cd to the repository directory + * run `make uninstall` + +### Using Helm + +If you used `helm` to deploy, then run the following to uninstall Gatekeeper: +```sh +helm delete gatekeeper --namespace gatekeeper-system +``` + +Helm v3 will not cleanup Gatekeeper installed CRDs. Run the following to uninstall Gatekeeper CRDs: +```sh +kubectl delete crd -l gatekeeper.sh/system=yes +``` + +This operation will also delete any user installed config changes, and constraint templates and constraints. diff --git a/website/versioned_docs/version-v3.17.x/intro.md b/website/versioned_docs/version-v3.17.x/intro.md new file mode 100644 index 00000000000..faba9031c23 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/intro.md @@ -0,0 +1,31 @@ +--- +id: intro +title: Introduction +sidebar_label: Introduction +slug: / +--- + +## Goals + +Every organization has policies. Some are essential to meet governance and legal requirements. Others help ensure adherence to best practices and institutional conventions. Attempting to ensure compliance manually would be error-prone and frustrating. Automating policy enforcement ensures consistency, lowers development latency through immediate feedback, and helps with agility by allowing developers to operate independently without sacrificing compliance. 
+ +Kubernetes allows decoupling policy decisions from the inner workings of the API Server by means of [admission controller webhooks](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/), which are executed whenever a resource is created, updated or deleted. Gatekeeper is a validating and mutating webhook that enforces CRD-based policies executed by [Open Policy Agent](https://github.com/open-policy-agent/opa), a policy engine for Cloud Native environments hosted by CNCF as a [graduated project](https://www.cncf.io/projects/open-policy-agent-opa/). + +In addition to the `admission` scenario, Gatekeeper's audit functionality allows administrators to see what resources are currently violating any given policy. + +Finally, Gatekeeper's engine is designed to be portable, allowing administrators to detect and reject non-compliant commits to an infrastructure-as-code system's source-of-truth, further strengthening compliance efforts and preventing bad state from slowing down the organization. + +## Looking for sample policies? + +Please visit Gatekeeper [policy library](https://open-policy-agent.github.io/gatekeeper-library/website/) to find a collection of sample policies. + +## How is Gatekeeper different from OPA? + +Compared to using [OPA with its sidecar kube-mgmt](https://www.openpolicyagent.org/docs/kubernetes-admission-control.html) (aka Gatekeeper v1.0), Gatekeeper introduces the following functionality: + + * An extensible, parameterized [policy library](https://open-policy-agent.github.io/gatekeeper-library/website/) + * Native Kubernetes CRDs for instantiating the policy library (aka "constraints") + * Native Kubernetes CRDs for extending the policy library (aka "constraint templates") + * Native Kubernetes CRDs for [mutation](mutation.md) support + * Audit functionality + * External data support diff --git a/website/versioned_docs/version-v3.17.x/library.md b/website/versioned_docs/version-v3.17.x/library.md new file mode 100644 index 00000000000..f9c39bf51f8 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/library.md @@ -0,0 +1,6 @@ +--- +id: library +title: Policy Library +--- + +See the [Gatekeeper policy library](https://open-policy-agent.github.io/gatekeeper-library/website/) for a collection of constraint templates, sample constraints, and sample mutation policies that you can use with Gatekeeper. \ No newline at end of file diff --git a/website/versioned_docs/version-v3.17.x/metrics.md b/website/versioned_docs/version-v3.17.x/metrics.md new file mode 100644 index 00000000000..fc3f6bf44ec --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/metrics.md @@ -0,0 +1,299 @@ +--- +id: metrics +title: Metrics & Observability +--- +## Observability + +This section covers how to gather more detailed statistics about Gatekeeper's query performance. This can be helpful in diagnosing situations such as identifying a constraint template with a long execution time. Statistics are written to Gatekeeper's stdout logs. + +### Logging Constraint Execution Stats + +- set `--log-stats-audit`. This flag enables logging the stats for the audit process. + +- set `--log-stats-admission`. This flag enables logging the stats for the admission review process. 
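These are ordinary command line flags on the Gatekeeper pods, so enabling them means adding them to the relevant container's arguments. As an illustration only, assuming the default install layout (the `gatekeeper-controller-manager` Deployment in the `gatekeeper-system` namespace, with the webhook container first in the pod spec), you could patch the webhook Deployment like this:

```sh
# Append --log-stats-admission to the first container's args; adjust the
# namespace, deployment name, and container index to match your installation.
kubectl -n gatekeeper-system patch deployment gatekeeper-controller-manager \
  --type=json \
  -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--log-stats-admission"}]'
```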
+ +#### Example Log Line + +To see how long it takes to review a constraint kind at admission time, enable the `--log-stats-admission` flag and watch the logs for a constraint kind `K8sRequiredLabels`, for example: + +```json +{ + "level": "info", + "ts": 1683692576.9093642, + "logger": "webhook", + "msg": "admission review request stats", + "hookType": "validation", + "process": "admission", + "event_type": "review_response_stats", + "resource_group": "", + "resource_api_version": "v1", + "resource_kind": "Namespace", + "resource_namespace": "", + "request_username": "kubernetes-admin", + "execution_stats": [ + { + "scope": "template", + "statsFor": "K8sRequiredLabels", + "stats": [ + { + "name": "templateRunTimeNS", + "value": 762561, + "source": { + "type": "engine", + "value": "Rego" + }, + "description": "the number of nanoseconds it took to evaluate all constraints for a template" + }, + { + "name": "constraintCount", + "value": 1, + "source": { + "type": "engine", + "value": "Rego" + }, + "description": "the number of constraints that were evaluated for the given constraint kind" + } + ], + "labels": [ + { + "name": "TracingEnabled", + "value": false + }, + { + "name": "PrintEnabled", + "value": false + }, + { + "name": "target", + "value": "admission.k8s.gatekeeper.sh" + } + ] + } + ] +} +``` + +In the excerpt above, notice `templateRunTimeNS` and `constraintCount`. The former indicates how long it took to evaluate all constraints of kind `K8sRequiredLabels`, while the latter surfaces how many such constraints were evaluated for this template. Labels provide additional information about the execution environment setup, like whether tracing was enabled (`TracingEnabled`). + +#### Caveats + +The additional log volume from enabling the stats logging can be quite high. + +## Metrics + +> If you are using a Prometheus client library, for counter metrics, the `_total` suffix is recommended and sometimes automatically appended by client libraries to indicate that the metric represents a cumulative total.
+ +Below are the list of metrics provided by Gatekeeper: + +### Constraint + +- Name: `gatekeeper_constraints` + + Description: `Current number of known constraints` + + Tags: + + - `enforcement_action`: [`deny`, `dryrun`, `warn`] + + - `status`: [`active`, `error`] + + Aggregation: `LastValue` + +### Constraint Template + +- Name: `gatekeeper_constraint_templates` + + Description: `Number of observed constraint templates` + + Tags: + + - `status`: [`active`, `error`] + + Aggregation: `LastValue` + +- Name: `gatekeeper_constraint_template_ingestion_count` + + Description: `Total number of constraint template ingestion actions` + + Tags: + + - `status`: [`active`, `error`] + + Aggregation: `Count` + +- Name: `gatekeeper_constraint_template_ingestion_duration_seconds` + + Description: `Distribution of how long it took to ingest a constraint template in seconds` + + Tags: + + - `status`: [`active`, `error`] + + Aggregation: `Distribution` + +### Expansion Template + +- Name: `gatekeeper_expansion_templates` + + Description: `Number of observed expansion templates` + + Tags: + + - `status`: [`active`, `error`] + + Aggregation: `LastValue` + +### Webhook + +- Name: `gatekeeper_validation_request_count` + + Description: `The number of requests that are routed to validation webhook` + + Tags: + + - `admission_status`: [`allow`, `deny`] + + - `admission_dryrun`: [`true`, `false`] + + Aggregation: `Count` + +- Name: `gatekeeper_validation_request_duration_seconds` + + Description: `The validation webhook response time in seconds` + + Tags: + + - `admission_status`: [`allow`, `deny`] + + Aggregation: `Distribution` + +- Name: `gatekeeper_mutation_request_count ` + + Description: `The number of requests that are routed to mutation webhook` + + Tags: + + - `admission_status`: [`allow`, `deny`] + + Aggregation: `Count` + +- Name: `gatekeeper_mutation_request_duration_seconds` + + Description: `The mutation webhook response time in seconds` + + Tags: + + - `admission_status`: [`allow`, `deny`] + + Aggregation: `Distribution` + +### Audit + +- Name: `gatekeeper_violations` + + Description: `Total number of audited violations` + + Tags: + + - `enforcement_action`: [`deny`, `dryrun`, `warn`] + + Aggregation: `LastValue` + +- Name: `gatekeeper_audit_duration_seconds` + + Description: `Latency of audit operation in seconds` + + Aggregation: `Distribution` + +- Name: `gatekeeper_audit_last_run_time` + + Description: `Timestamp of last audit run starting time` + + Aggregation: `LastValue` + +- Name: `gatekeeper_audit_last_run_end_time` + + Description: `Timestamp of last audit run ending time` + + Aggregation: `LastValue` + +### Mutation + +- Name: `gatekeeper_mutator_ingestion_count` + + Description: `Total number of Mutator ingestion actions` + + Tags: + + - `status`: [`active`, `error`] + + Aggregation: `Count` + +- Name: `gatekeeper_mutator_ingestion_duration_seconds` + + Description: `The distribution of Mutator ingestion durations` + + Tags: + + - `status`: [`active`, `error`] + + Aggregation: `Distribution` + +- Name: `gatekeeper_mutators` + + Description: `The current number of Mutator objects` + + Tags: + + - `status`: [`active`, `error`] + + Aggregation: `Count` + +- Name: `gatekeeper_mutator_conflicting_count` + + Description: `The current number of conflicting Mutator objects` + + Tags: + + - `status`: [`active`, `error`] + + Aggregation: `Count` + +### Sync + +- Name: `gatekeeper_sync` + + Description: `Total number of resources of each kind being cached` + + Tags: + + - `status`: [`active`, 
`error`] + + - `kind` (examples: `pod`, `namespace`, ...) + + Aggregation: `LastValue` + +- Name: `gatekeeper_sync_duration_seconds` + + Description: `Latency of sync operation in seconds` + + Aggregation: `Distribution` + +- Name: `gatekeeper_sync_last_run_time` + + Description: `Timestamp of last sync operation` + + Aggregation: `LastValue` + +### Watch + +- Name: `gatekeeper_watch_manager_watched_gvk` + + Description: `Total number of watched GroupVersionKinds` + + Aggregation: `LastValue` + +- Name: `gatekeeper_watch_manager_intended_watch_gvk` + + Description: `Total number of GroupVersionKinds with a registered watch intent` + + Aggregation: `LastValue` diff --git a/website/versioned_docs/version-v3.17.x/mutation-background.md b/website/versioned_docs/version-v3.17.x/mutation-background.md new file mode 100644 index 00000000000..2f90e172ff8 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/mutation-background.md @@ -0,0 +1,70 @@ +--- +id: mutation-background +title: Background Information on Mutation +--- + +Mutation webhooks in Kubernetes are a nuanced concept with many gotchas. This +page explores some of the background of mutation webhooks in Kubernetes, their +operational and syntactical implications, and how Gatekeeper is trying to provide +value on top of the basic Kubernetes webhook ecosystem. + +# Mutation Chaining + +A key difference between mutating webhooks and validating webhooks is that +mutating webhooks are called in series, whereas validating webhooks are called in parallel. + +This makes sense, since validating webhooks can only approve or deny (or warn) for a given +input and have no other side effects. This means that the result of one validating webhook +cannot impact the result of any other validating webhook, and it's trivial to aggregate +all of the validation responses as they come in: reject if at least one deny comes in, return +all warnings and denies that are encountered back to the user. + +Mutation, however, changes what the input resource looks like. This means that the output +of one mutating webhook can have an effect on the output of another mutating webhook. +For example, if one mutating webhook adds a sidecar container, and another webhook sets +`imagePullPolicy` to `Always`, then the new sidecar container means that this second webhook +has one more container to mutate. + +The biggest practical issue with this call-in-sequence behavior is latency. Validating webhooks +(which are called in parallel) have a latency equivalent to the slowest-responding webhook. +Mutation webhooks have a total latency that is the sum of all mutating webhooks to be called. This +makes mutation much more latency-sensitive. + +This can be particularly harmful for something like external data, where a webhook reaches out to +a secondary service to gather necessary information. This extra hop can be expensive, +especially if these external calls are not minimized. Gatekeeper translates external data +references scattered across multiple mutators into a single batched call per external data provider, +and calls each provider in parallel, minimizing latency. + +# Mutation Recursion + +Not only are mutators chained, but they recurse as well. This is not only due to Kubernetes' +[reinvocation policy](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#reinvocation-policy), +but also due to the nature of the Kubernetes control plane itself, since controllers may modify resources periodically.
+Whether because of the reinvocation policy, or because of control plane behavior, mutators are likely to +operate on their own output. This has some operational risk. Consider a mutating webhook that prepends a hostname to a docker +image reference (e.g. prepend `gcr.io/`): if written naively, each successive mutation would add another prefix, leading to results +like `gcr.io/gcr.io/gcr.io/my-favorite-image:latest`. Because of this, Kubernetes requires mutation webhooks to be +[idempotent](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#idempotence). + +This is a good idea, but there is one problem: webhooks that are idempotent in isolation may not be idempotent as a group. +Let's take the above mutator and make it idempotent. We'll give it the following behavior: "if an image reference does +not start with `gcr.io/`, prepend `gcr.io/`". This makes the webhook idempotent, for sure. But what if there is another +team working on the cluster, and they want their own image mutation rule: "if an image reference for the `billing` +namespace does not start with `billing.company.com/`, prepend `billing.company.com/`". Each of these webhooks would +be idempotent in isolation, but when chained together you'll see results like +`billing.company.com/gcr.io/billing.company.com/gcr.io/my-favorite-image:latest`. + +At small scales, with small teams, it's relatively easy to ensure that mutations don't interfere with each other, +but at larger scales, or when multiple non-communicating parties have their own rules that they want to set, it +can be hard or impossible to maintain this requirement of "global idempotence". + +Gatekeeper attempts to make this easier by designing mutation in such a way that "global idempotence" is an +emergent property of all mutators, no matter how they are configured. Here is a [proof](https://docs.google.com/document/d/1mCHHhBABzUwP8FtUuEf_B-FX-HHgh_k4bwZcGUYm7Sw/edit#heading=h.j5thjfnqybpn), where we attempt to show that our language +for expressing mutation always converges on a stable result. + +# Summary + +By using Gatekeeper for mutation, it is possible to reduce the number of mutation webhooks, which should improve latency. +It should also help prevent decoupled management of mutation policies from violating the Kubernetes API +server's requirement of idempotence. \ No newline at end of file diff --git a/website/versioned_docs/version-v3.17.x/mutation.md b/website/versioned_docs/version-v3.17.x/mutation.md new file mode 100644 index 00000000000..a66f7d2c403 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/mutation.md @@ -0,0 +1,428 @@ +--- +id: mutation +title: Mutation +--- + +`Feature State`: Gatekeeper version v3.10+ (stable) + +The mutation feature allows Gatekeeper to modify Kubernetes resources at request time based on customizable mutation policies. + +## Mutation CRDs + +Mutation policies are defined using mutation-specific CRDs, called __mutators__: +- AssignMetadata - defines changes to the metadata section of a resource +- Assign - any change outside the metadata section +- ModifySet - adds or removes entries from a list, such as the arguments to a container +- AssignImage - defines changes to the components of an image string + +The rules for mutating metadata are more strict than for mutating the rest of the resource. The differences are described in more detail below.
+ +Here is an example of a simple AssignMetadata CRD: +```yaml +apiVersion: mutations.gatekeeper.sh/v1 +kind: AssignMetadata +metadata: + name: demo-annotation-owner +spec: + match: + scope: Namespaced + name: nginx-* + kinds: + - apiGroups: ["*"] + kinds: ["Pod"] + location: "metadata.annotations.owner" + parameters: + assign: + value: "admin" +``` + +Each mutation CRD can be divided into 3 distinct sections: +- extent of changes - what is to be modified (kinds, namespaces, ...) +- intent - the path and value of the modification +- conditional - conditions under which the mutation will be applied + +#### Extent of changes + +The extent of changes section describes which resources will be mutated. +It allows selecting resources to be mutated using the same match criteria +as constraints. + +An example of the extent of changes section. +```yaml +applyTo: +- groups: [""] + kinds: ["Pod"] + versions: ["v1"] +match: + scope: Namespaced | Cluster + kinds: + - apiGroups: [] + kinds: [] + labelSelector: [] + namespaces: [] + namespaceSelector: [] + excludedNamespaces: [] +``` + +Note that the `applyTo` field is required for all mutators except `AssignMetadata`, which does not have the `applyTo` field. +`applyTo` allows Gatekeeper to understand the schema of the objects being modified, so that it can detect when two mutators disagree as +to a kind's schema, which can cause non-convergent mutations. Also, the `applyTo` section does not accept globs. + +The `match` section is common to all mutators. It supports the following match criteria: +- scope - the scope (Namespaced | Cluster) of the mutated resource +- kinds - the resource kind, any of the elements listed +- labelSelector - filters resources by resource labels listed +- namespaces - list of allowed namespaces, only resources in listed namespaces will be mutated +- namespaceSelector - filters resources by namespace selector +- excludedNamespaces - list of excluded namespaces, resources in listed namespaces will not be mutated +- name - the name of an object. If defined, it matches against objects with the specified name. Name also supports a prefix-based glob. For example, `name: pod-*` matches both `pod-a` and `pod-b`. + +Note that any empty/undefined match criteria are inclusive: they match any object. + +#### Intent + +This specifies what should be changed in the resource. + +An example of the section is shown below: +```yaml +location: "spec.containers[name: foo].imagePullPolicy" +parameters: + assign: + value: "Always" +``` + +The `location` element specifies the path to be modified. +The `parameters.assign.value` element specifies the value to be set for the element specified in `location`. Note that the value can either be a simple string or a composite value. + +An example of a composite value: +```yaml +location: "spec.containers[name: networking]" +parameters: + assign: + value: + name: "networking" + imagePullPolicy: Always + +``` + +The `location` element can specify either a simple subelement or an element in a list. +For example the location `spec.containers[name:foo].imagePullPolicy` would be parsed as follows: +- ***spec**.containers[name: foo].imagePullPolicy* - the spec element +- *spec.**containers[name: foo]**.imagePullPolicy* - container subelement of spec. The container element is a list. Out of the list chosen, an element with the `name` element having the value `foo`. 
+ - *spec.containers[name: foo].**imagePullPolicy*** - in the element from the list chosen in the previous step the element `imagePullPolicy` is chosen + +The yaml illustrating the above `location`: +```yaml +spec: + containers: + - name: foo + imagePullPolicy: +``` + +Wildcards can be used for list element values: `spec.containers[name: *].imagePullPolicy` + +##### Assigning values from metadata + +*This section does not apply to ModifySet mutators* + +Sometimes it's useful to assign a field's value from metadata. For instance, injecting a deployment's name into its pod template's labels +to use affinity/anti-affinity rules to [keep Pods from the same deployment on different nodes](https://github.com/open-policy-agent/feedback/discussions/15). + +Assign and AssignMetadata can do this via the `fromMetadata` field. Here is an example: + +``` +apiVersion: mutations.gatekeeper.sh/v1 +kind: AssignMetadata +metadata: + name: demo-annotation-owner +spec: + location: "metadata.labels.namespace" + parameters: + assign: + fromMetadata: + field: namespace +``` + +Valid values for `spec.parameters.assign.fromMetadata.field` are `namespace` and `name`. They will inject the namespace's name and the object's name, respectively. + + +##### Conditionals + +The conditions for updating the resource. + +Mutation has path tests, which make it so the resource will only be mutated if the specified path exists/does not exist. +This can be useful for things like setting a default value if a field is undeclared, or for avoiding creating a field +when a parent is missing, such as accidentally creating an empty sidecar named "foo" in the example below: + +```yaml +parameters: + pathTests: + - subPath: "spec.containers[name: foo]" + condition: MustExist + - subPath: "spec.containers[name: foo].securityContext.capabilities" + condition: MustNotExist +``` + + +### AssignMetadata + +AssignMetadata is a mutator that modifies the metadata section of a resource. Note that the metadata of a resource is a very sensitive piece of data, +and certain mutations could result in unintended consequences. An example of this could be changing the name or namespace of a resource. +The AssignMetadata changes have therefore been limited to only the labels and annotations. Furthermore, it is currently only allowed to add a label or annotation. +Pre-existing labels and annotations cannot be modified. + + An example of an AssignMetadata adding a label `owner` set to `admin`: +```yaml +apiVersion: mutations.gatekeeper.sh/v1 +kind: AssignMetadata +metadata: + name: demo-annotation-owner +spec: + match: + scope: Namespaced + location: "metadata.labels.owner" + parameters: + assign: + value: "admin" +``` + +### ModifySet + +ModifySet is a mutator that allows for the adding and removal of items from a list as if that list were a set. +New values are appended to the end of a list. + +For example, the following mutator removes an `--alsologtostderr` argument from all containers in a pod: + +```yaml +apiVersion: mutations.gatekeeper.sh/v1 +kind: ModifySet +metadata: + name: remove-err-logging +spec: + applyTo: + - groups: [""] + kinds: ["Pod"] + versions: ["v1"] + location: "spec.containers[name: *].args" + parameters: + operation: prune + values: + fromList: + - --alsologtostderr +``` + +- `spec.parameters.values.fromList` holds the list of values that will be added or removed. +- `operation` can be `merge` to insert values into the list if missing, or `prune` to remove values from the list. `merge` is default. 
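For comparison, here is an illustrative `merge` counterpart (not taken from the policy library; the `--verbose=2` argument is just an example value). It appends the argument to every container's `args` only when the argument is not already present:

```yaml
apiVersion: mutations.gatekeeper.sh/v1
kind: ModifySet
metadata:
  name: ensure-verbose-flag
spec:
  applyTo:
  - groups: [""]
    kinds: ["Pod"]
    versions: ["v1"]
  location: "spec.containers[name: *].args"
  parameters:
    operation: merge
    values:
      fromList:
      - --verbose=2
```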
+ +### AssignImage + +AssignImage is a mutator specifically for changing the components of an image +string. Suppose you have an image like `my.registry.io:2000/repo/app:latest`. +`my.registry.io:2000` would be the domain, `repo/app` would be the path, and +`:latest` would be the tag. The domain, path, and tag of an image can be changed +separately or in conjunction. + +For example, to change the whole image to `my.registry.io/repo/app@sha256:abcde67890123456789abc345678901a`: + +```yaml +apiVersion: mutations.gatekeeper.sh/v1alpha1 +kind: AssignImage +metadata: + name: assign-container-image +spec: + applyTo: + - groups: [ "" ] + kinds: [ "Pod" ] + versions: [ "v1" ] + location: "spec.containers[name:*].image" + parameters: + assignDomain: "my.registry.io" + assignPath: "repo/app" + assignTag: "@sha256:abcde67890123456789abc345678901a" + match: + source: "All" + scope: Namespaced + kinds: + - apiGroups: [ "*" ] + kinds: [ "Pod" ] +``` + +Only one of `[assignDomain, assignPath, assignTag]` is required. Note that `assignTag` +must start with `:` or `@`. Also, if `assignPath` is set to a value which could potentially +be interpreted as a domain, such as `my.repo.lib/app`, then `assignDomain` must +also be specified. + +### Mutation Annotations + +You can have two recording annotations applied at mutation time by enabling the `--mutation-annotations` flag. More details can be found on the +[customize startup docs page](./customize-startup.md). + +## Examples + +### Adding an annotation + +```yaml +apiVersion: mutations.gatekeeper.sh/v1 +kind: AssignMetadata +metadata: + name: demo-annotation-owner +spec: + match: + scope: Namespaced + location: "metadata.annotations.owner" + parameters: + assign: + value: "admin" +``` + +### Setting security context of a specific container in a Pod in a namespace to be non-privileged + +Set the security context of container named `foo` in a Pod in namespace `bar` to be non-privileged + +```yaml +apiVersion: mutations.gatekeeper.sh/v1 +kind: Assign +metadata: + name: demo-privileged +spec: + applyTo: + - groups: [""] + kinds: ["Pod"] + versions: ["v1"] + match: + scope: Namespaced + kinds: + - apiGroups: ["*"] + kinds: ["Pod"] + namespaces: ["bar"] + location: "spec.containers[name:foo].securityContext.privileged" + parameters: + assign: + value: false + pathTests: + - subPath: "spec.containers[name:foo]" + condition: MustExist +``` + +#### Setting imagePullPolicy of all containers to Always in all namespaces except namespace `system` + +```yaml +apiVersion: mutations.gatekeeper.sh/v1 +kind: Assign +metadata: + name: demo-image-pull-policy +spec: + applyTo: + - groups: [""] + kinds: ["Pod"] + versions: ["v1"] + match: + scope: Namespaced + kinds: + - apiGroups: ["*"] + kinds: ["Pod"] + excludedNamespaces: ["system"] + location: "spec.containers[name:*].imagePullPolicy" + parameters: + assign: + value: Always +``` + +### Adding a `network` sidecar to a Pod + +```yaml +apiVersion: mutations.gatekeeper.sh/v1 +kind: Assign +metadata: + name: demo-sidecar +spec: + applyTo: + - groups: [""] + kinds: ["Pod"] + versions: ["v1"] + match: + scope: Namespaced + kinds: + - apiGroups: ["*"] + kinds: ["Pod"] + location: "spec.containers[name:networking]" + parameters: + assign: + value: + name: "networking" + imagePullPolicy: Always + image: quay.io/foo/bar:latest + command: ["/bin/bash", "-c", "sleep INF"] + +``` + +### Adding dnsPolicy and dnsConfig to a Pod + +```yaml +apiVersion: mutations.gatekeeper.sh/v1 +kind: Assign +metadata: + name: demo-dns-policy +spec: + 
applyTo: + - groups: [""] + kinds: ["Pod"] + versions: ["v1"] + match: + scope: Namespaced + kinds: + - apiGroups: ["*"] + kinds: ["Pod"] + location: "spec.dnsPolicy" + parameters: + assign: + value: None +--- +apiVersion: mutations.gatekeeper.sh/v1 +kind: Assign +metadata: + name: demo-dns-config +spec: + applyTo: + - groups: [""] + kinds: ["Pod"] + versions: ["v1"] + match: + scope: Namespaced + kinds: + - apiGroups: ["*"] + kinds: ["Pod"] + location: "spec.dnsConfig" + parameters: + assign: + value: + nameservers: + - 1.2.3.4 +``` + +### Setting a Pod's container image to use a specific digest: + +```yaml +apiVersion: mutations.gatekeeper.sh/v1alpha1 +kind: AssignImage +metadata: + name: add-nginx-digest +spec: + applyTo: + - groups: [ "" ] + kinds: [ "Pod" ] + versions: [ "v1" ] + location: "spec.containers[name:nginx].image" + parameters: + assignTag: "@sha256:abcde67890123456789abc345678901a" + match: + source: "All" + scope: Namespaced + kinds: + - apiGroups: [ "*" ] + kinds: [ "Pod" ] +``` + +### External Data + +See [External Data For Gatekeeper Mutating Webhook](externaldata.md#external-data-for-gatekeeper-mutating-webhook). diff --git a/website/versioned_docs/version-v3.17.x/opa-versions.md b/website/versioned_docs/version-v3.17.x/opa-versions.md new file mode 100644 index 00000000000..2add1188150 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/opa-versions.md @@ -0,0 +1,34 @@ +--- +id: opa-versions +title: OPA Versions +--- + +Gatekeeper depends on [Open Policy Agent](https://www.openpolicyagent.org/). To see which version of OPA is included in a particular Gatekeeper release, reference the table below. + +| Gatekeeper Version | OPA Version | +| ------------------ | ----------- | +| `v3.15.1` | `v0.60.0` | +| `v3.15.0` | `v0.60.0` | +| `v3.14.0` | `v0.57.1` | +| `v3.13.0` | `v0.54.0` | +| `v3.12.0` | `v0.49.2` | +| `v3.11.1` | `v0.47.2` | +| `v3.11.0` | `v0.47.2` | +| `v3.10.0` | `v0.44.0` | +| `v3.9.2` | `v0.44.0` | +| `v3.9.1` | `v0.44.0` | +| `v3.9.0` | `v0.41.0` | +| `v3.8.1` | `v0.39.0` | +| `v3.8.0` | `v0.39.0` | +| `v3.7.2` | `v0.29.4` | +| `v3.7.1` | `v0.29.4` | +| `v3.7.0` | `v0.29.4` | +| `v3.6.0` | `v0.24.0` | +| `v3.5.2` | `v0.24.0` | +| `v3.5.1` | `v0.24.0` | +| `v3.5.0` | `v0.24.0` | +| `v3.4.1` | `v0.24.0` | +| `v3.4.0` | `v0.24.0` | +| `v3.3.0` | `v0.24.0` | +| `v3.2.0` | `v0.19.1` | +| `v3.1.0` | `v0.19.1` | \ No newline at end of file diff --git a/website/versioned_docs/version-v3.17.x/operations.md b/website/versioned_docs/version-v3.17.x/operations.md new file mode 100644 index 00000000000..7eea4e0fc5f --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/operations.md @@ -0,0 +1,187 @@ +--- +id: operations +title: Operations +--- + +Gatekeeper is flexible in how it can be deployed. If desired, core pieces of functionality can be broken +out to be run in different pods. This allows Gatekeeper to accommodate needs like running in a monolithic pod +in order to avoid overhead, or running each operation in a separate pod to make scaling individual operations +easier and to limit the impact of operational issues with any one operation (e.g. if audit is running in its +own pod, audit running out of memory will not affect the validation webhook). + +Gatekeeper achieves this through the concept of `Operations`, which can be enabled via the `--operation` +command line flag. To enable multiple operations this flag can be defined multiple times. If no `--operation` +flag is provided, all functionality will be enabled by default. 
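For example, a pod that should only handle audit could pass the flag once per desired operation (an illustrative args fragment; the operation keys are described below, and the rest of the Deployment spec is omitted):

```yaml
# Container args for a pod dedicated to audit plus status reporting.
args:
  - --operation=audit
  - --operation=status
```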
+ +# Operations + +Below are the operations Gatekeeper supports + +## Validating Webhook + +__--operation key:__ `webhook` + +This operation serves the validating webhook that Kubernetes' API server calls as part of the admission process. + +### Required Behaviors + +At a high level, this requires: + +* Ingesting constraint templates +* Creating CRDs for a corresponding constraint template +* Ingesting constraints +* Reporting the status of ingested constraints/templates +* Watching and syncing resources specified by the `Config` resource to support referential constraints +* Running the HTTP validating webhook service + * In addition to validating incoming requests against policy, this webhook also validates incoming Gatekeeper resources +* Running the namespace label validating webhook service (required to lock down namespaceSelector-type webhook exemptions) + +### Permissions Required + +* The ability to read all `ConstraintTemplate` objects +* The ability to create CRDs (unfortunately RBAC doesn't have the syntax to scope this down to just CRDs in the `constraints.gatekeeper.sh` group) +* The ability to read all `Constraint` resources (members of the group `constraints.gatekeeper.sh`) +* The ability to create `ConstraintTemplatePodStatus` objects in Gatekeeper's namespace +* The ability to create `ConstraintPodStatus` objects in Gatekeeper's namespace +* The ability to write to the `Config` object in Gatekeeper's namespace +* The ability to read all objects (optionally this can be scoped down to resources listed for syncing in the `Config`) +* If certificates are managed by Gatekeeper's embedded cert controller (which can be disabled), then Gatekeeper will need + write permissions to its `ValidatingWebhookConfiguration` + * It will also need the ability to write to the webhook secret in Gatekeeper's namespace +* If you have events enabled, you will need permissions to create events in Gatekeeper's namespace + + +## Mutating Webhook + +__--operation key:__ `mutation-webhook` + +This operation serves the mutating webhook that Kubernetes' API server calls as part of the admission process. + +### Required Behaviors + +At a high level, this requires: + +* Ingesting Mutator objects +* Reporting the status of ingested mutator objects +* Running the HTTP mutating webhook service + +### Permissions Required + +* The ability to read all objects in the group `mutations.gatekeeper.sh` (mutators) +* The ability to create `MutatorPodStatus` objects in Gatekeeper's namespace +* If certificates are managed by Gatekeeper's embedded cert controller (which can be disabled), then Gatekeeper will need + write permissions to its `MutatingWebhookConfiguration` + * It will also need the ability to write to the webhook secret in Gatekeeper's namespace + +## Audit + +__--operation key:__ `audit` + +This operation runs the audit process, which periodically evaluates existing resources against policy, reporting +any violations it discovers. To limit traffic to the API server and to avoid contention writing audit results +to constraints, audit should run as a singleton pod. 
+ +### Required Behaviors + +At a high level, this requires: + +* Listing all objects on the cluster to scan them for violations +* Ingesting constraint templates +* Creating CRDs for a corresponding constraint template +* Ingesting constraints +* Reporting the status of ingested constraints/templates +* Watching and syncing resources specified by the `Config` resource to support referential constraints +* Writing audit results back to constraints (subject to a cap on # of results per constraint) + +### Permissions Required + +* The ability to read all objects in the cluster (this can be scoped down if you are not interested in auditing/syncing all objects) +* The ability to read all `ConstraintTemplate` objects +* The ability to create CRDs (unfortunately RBAC doesn't have the syntax to scope this down to just CRDs in the `constraints.gatekeeper.sh` group) +* The ability to write to all `Constraint` resources (members of the group `constraints.gatekeeper.sh`) +* The ability to create `ConstraintTemplatePodStatus` objects in Gatekeeper's namespace +* The ability to create `ConstraintPodStatus` objects in Gatekeeper's namespace +* The ability to write to the `Config` object in Gatekeeper's namespace +* If you have events enabled, you will need permissions to create events in Gatekeeper's namespace + +## Status + +__--operation key:__ `status` + +Gatekeeper uses an emergent consensus model, where individual pods do not need to talk with each other +in order to provide its functionality. This allows for scalability, but means we should not write status +to resources directly due to the risk of write contention, which could increase network traffic exponentially +relative to the number of pods. Instead, each pod gets its own, private status resource that it alone writes +to. The Status operation aggregates these status resources and writes them to the status field of the appropriate +object for the user to consume. Without this operation, the `status` field of constraints and constraint templates +would be blank. + +In order to do its job (eliminating write contention) effectively, the Status operation should be run as a +singleton. + +### Required Behaviors + +At a high level, this requires: + +* Reading the Constraint[Template]PodStatus resources +* Writing aggregated results to the `status` fields of constraints/templates + +### Permissions Required + +* The ability to write to all `ConstraintTemplate` objects +* The ability to write to all `Constraint` resources (members of the group `constraints.gatekeeper.sh`) +* The ability to read `ConstraintTemplatePodStatus` objects in Gatekeeper's namespace +* The ability to read `ConstraintPodStatus` objects in Gatekeeper's namespace + +## Mutation Status + +__--operation key:__ `mutation-status` + +Because users may not want to install mutation CRDs if they do not want to use the feature, and because +trying to watch a Kind that doesn't exist would cause errors, Gatekeeper splits mutation status into a +separate operation. It behaves like the Status operation, except it only applies for mutation resources. 
+ +### Required Behaviors + +At a high level, this requires: + +* Reading mutator pod status resources +* Writing aggregated results to the `status` fields of mutators + +### Permissions Required + +* The ability to write to all objects in the group `mutations.gatekeeper.sh` (mutators) +* The ability to read `MutatorPodStatus` objects in Gatekeeper's namespace + +## Mutation Controller + +__--operation key:__ `mutation-controller` + +This operation runs the process responsible for ingesting and registering +mutators. `mutation-controller` is run implicitly with the `mutation-webhook` +and `mutation-status` operations, and is redundant if any of the 2 +aforementioned operations are already specified. + +If the `webhook` or `audit` operation is used in isolation without the `mutation-webhook` +or `mutation-status` operations, then the `mutation-controller` operation is +required for mutation to work with [workload expansion](workload-resources.md). + +### Required Behaviors: + +At a high level, this requires: + +* Ingesting Mutator objects + +### Permissions Required + +* The ability to read all objects in the group `mutations.gatekeeper.sh` (mutators) + +# A Note on Permissions + +"Create" implies the `create` and `delete` permissions in addition to the permissions implied by "Read" and "Write". + +"Write" implies the `update` permission in addition to the permissions implied by "Read". + +"Read" implies the `get`, `list`, and `watch` permissions. In some cases, like scraping audit results, +`watch` is unnecessary, but does not substantially increase the power delegated to the service account +under the theory that a `watch` is simply a more efficient version of polling `list`. diff --git a/website/versioned_docs/version-v3.17.x/performance-tuning.md b/website/versioned_docs/version-v3.17.x/performance-tuning.md new file mode 100644 index 00000000000..2abe5291447 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/performance-tuning.md @@ -0,0 +1,93 @@ +--- +id: performance-tuning +title: Performance Tuning +--- + +Below we go into some of the considerations and options for performance tuning Gatekeeper. + +# General Performance + +## GOMAXPROCS + +[GOMAXPROCS](https://pkg.go.dev/runtime#GOMAXPROCS) sets the number of threads golang uses. +Gatekeeper uses [automaxprocs](https://github.com/uber-go/automaxprocs) to default this value +to the CPU limit set by the linux cgroup (i.e. the limits passed to the Kubernetes container). + +This value can be overridden by setting a `GOMAXPROCS` environment variable. + +Generally speaking, too many threads can lead to CPU throttling, which can increase webhook jitter +and can result in not enough available CPU per operation, which can lead to increased latency. + +# Webhook Performance + +## Max Serving Threads + +The `--max-serving-threads` command line flag caps the number of concurrent goroutines that are +calling out to policy evaluation at any one time. This can be important for two reasons: + +* Excessive numbers of serving goroutines can lead to CPU starvation, which means there is not enough + CPU to go around per goroutine, causing requests to time out. + +* Each serving goroutine can require a non-trivial amount of RAM, which will not be freed until the + request is finished. This can increase the maximum memory used by the process, which can lead to + OOMing. + +By default, the number of webhook threads is capped at the value of `GOMAXPROCS`. If your policies mostly +rely on blocking calls (e.g. 
calling out to external services via `http.send()` or via external data), CPU +starvation is less of a risk, though memory scaling could still be a concern. + +Playing around with this value may help maximize the throughput of Gatekeeper's validating webhook. + +# Audit + +## Audit Interval + +The `--audit-interval` flag is used to configure how often audit runs on the cluster. + +The time it takes for audit to run is dependent on the size of the cluster, any throttling the K8s +API server may do, and the number and complexity of policies to be evaluated. As such, determining +the ideal audit interval is use-case-specific. + +If you have overlapping audits, the following things can happen: + +* There will be parallel calls to the policy evaluation backend, which can result in increased + RAM usage and CPU starvation, leading to OOMs or audit sessions taking longer per-audit than + they otherwise would. + +* More requests to the K8s API server. If throttled, this can increase the time it takes for an audit + to finish. + +* A newer audit run can pre-empt the reporting of audit results of a previous audit run on the `status` field + of individual constraints. This can lead to constraints not having violation results in their `status` field. + Reports via stdout logging should be unaffected by this. + +Ideally, `--audit-interval` should be set long enough that no more than one audit is running at any time, though +occasional overlap should not be harmful. + +## Constraint Violations Limit + +Memory usage will increase/decrease as `--constraint-violations-limit` is increased/decreased. + +## Audit Chunk Size + +The `--audit-chunk-size` flags tells Gatekeeper to request lists of objects from the API server to be paginated +rather than listing all instances at once. Setting this can reduce maximum memory usage, particularly if you +have a cluster with a lot of objects of a specific kind, or a particular kind that has very large objects (say config maps). + +One caveat about `--audit-chunk-size` is that the K8s API server returns a resumption token for list requests. This +token is only valid for a short window (~O(minutes)) and the listing of all objects for a given kind must be completed +before that token expires. Decreasing `--audit-chunk-size` should decrease maximum memory usage, but may also lead +to an increase in requests to the API server. In cases where this leads to throttling, it's possible the resumption token +could expire before object listing has completed. + +## Match Kind Only + +The `--audit-match-kind-only` flag can be helpful in reducing audit runtime, outgoing API requests and memory usage +if your constraints are only matching against a specific subset of kinds, particularly if there are large volumes +of config that can be ignored due to being out-of-scope. Some caveats: + +* If the bulk of the K8s objects are resources that are already in-scope for constraints, the benefit will be mitigated + +* If a constraint is added that matches against all kinds (say a label constraint), the benefit will be eliminated. If + you are relying on this flag, it's important to make sure all constraints added to the cluster have `spec.match.kind` + specified. 
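
For example (a sketch assuming the `K8sRequiredLabels` template used elsewhere in these docs is installed), a constraint that names its kinds explicitly lets `--audit-match-kind-only=true` skip listing everything else:

```yaml
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: K8sRequiredLabels
metadata:
  name: ns-must-have-owner
spec:
  match:
    kinds:
      - apiGroups: [""]
        kinds: ["Namespace"]   # explicit kind match keeps audit's listing scope narrow
  parameters:
    labels: ["owner"]
```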
\ No newline at end of file diff --git a/website/versioned_docs/version-v3.17.x/pubsub-driver-walkthrough.md b/website/versioned_docs/version-v3.17.x/pubsub-driver-walkthrough.md new file mode 100644 index 00000000000..4d598946640 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/pubsub-driver-walkthrough.md @@ -0,0 +1,61 @@ +--- +id: pubsub-driver +title: Pubsub Interface/Driver walkthrough +--- + +This guide provides an overview of the pubsub interface, including details on its structure and functionality. Additionally, it offers instructions on adding a new driver and utilizing providers other than the default provider Dapr. + +## Pubsub interface and Driver walkthrough + +Pubsub's connection interface looks like +```go +// Connection is the interface that wraps pubsub methods. +type Connection interface { + // Publish single message over a specific topic/channel + Publish(ctx context.Context, message interface{}, topic string) error + + // Close connections + CloseConnection() error + + // Update an existing connection with new configuration + UpdateConnection(ctx context.Context, config interface{}) error +} +``` + +As an example, the Dapr driver implements these three methods to publish message, close connection, and update connection respectively. Please refer to [dapr.go](https://github.com/open-policy-agent/gatekeeper/blob/master/pkg/pubsub/dapr/dapr.go) to understand the logic that goes in each of these methods. Additionally, the Dapr driver also implements `func NewConnection(_ context.Context, config interface{}) (connection.Connection, error)` method that returns a new client for dapr. + +### How to add new drivers + +**Note:** For example, if we want to add a new driver to use `foo` instead of Dapr as a tool to publish violations. + +A driver must implement the `Connection` interface and a new `func NewConnection(_ context.Context, config interface{}) (connection.Connection, error)` method that returns a client for the respective tool. + +This newly added driver's `NewConnection` method must be used to create a new `pubSubs` object in [provider.go](https://github.com/open-policy-agent/gatekeeper/blob/master/pkg/pubsub/provider/provider.go). For example, + +```go +var pubSubs = newPubSubSet(map[string]InitiateConnection{ + dapr.Name: dapr.NewConnection, + "foo": foo.NewConnection, +}, +) +``` + +### How to use different providers + +To enable audit to use this driver to publish messages, a connection configMap with appropriate `config` and `provider` is needed. For example, + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: audit + namespace: gatekeeper-system +data: + provider: "foo" + config: | + { + + } +``` + +> The `data.provider` field must exist and must match one of the keys of the `pubSubs` map that was defined earlier to use the corresponding driver. The `data.config` field in the configuration can vary depending on the driver being used. For dapr driver, `data.config` must be `{"component": "pubsub"}`. diff --git a/website/versioned_docs/version-v3.17.x/pubsub.md b/website/versioned_docs/version-v3.17.x/pubsub.md new file mode 100644 index 00000000000..8c1df5fb3c0 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/pubsub.md @@ -0,0 +1,237 @@ +--- +id: pubsub +title: Consuming violations using Pubsub +--- + +`Feature State`: Gatekeeper version v3.13+ (alpha) + +> ❗ This feature is alpha, subject to change (feedback is welcome!). + +## Description + +This feature pushes audit violations to a pubsub service. 
Users can subscribe to pubsub service to consume violations. + +> To gain insights into different methods of obtaining audit violations and the respective trade-offs for each approach, please refer to [Reading Audit Results](audit.md#reading-audit-results). + +## Enabling Gatekeeper to export audit violations + +Install prerequisites such as a pubsub tool, a message broker etc. + +### Setting up audit with pubsub enabled + +In the audit deployment, set the `--enable-pub-sub` flag to `true` to publish audit violations. Additionally, use `--audit-connection` (defaults to `audit-connection`) and `--audit-channel`(defaults to `audit-channel`) flags to allow audit to publish violations using desired connection onto desired channel. `--audit-connection` must be set to the name of the connection config, and `--audit-channel` must be set to name of the channel where violations should get published. + +A ConfigMap that contains `provider` and `config` fields in `data` is required to establish connection for sending violations over the channel. Following is an example ConfigMap to establish a connection that uses Dapr to publish messages: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: audit-connection + namespace: gatekeeper-system +data: + provider: "dapr" + config: | + { + "component": "pubsub" + } +``` + +- `provider` field determines which tool/driver should be used to establish a connection. Valid values are: `dapr` +- `config` field is a json object that configures how the connection is made. E.g. which queue messages should be sent to. + +#### Available Pubsub drivers +Dapr: https://dapr.io/ + +### Quick start with publishing violations using Dapr and Redis + +#### Prerequisites + +1. Install Dapr + + To install Dapr with specific requirements and configuration, please refer to [Dapr docs](https://docs.dapr.io/operations/hosting/kubernetes/kubernetes-deploy/). + > [!IMPORTANT] + > - Make sure to set `SIDECAR_DROP_ALL_CAPABILITIES` environment variable on `dapr-sidecar` injector pod to `true` to avoid getting `PodSecurity violation` errors for the injected sidecar container as Gatekeeper by default requires workloads to run with [restricted](https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted) policy. If using helm charts to install Dapr, you can use `--set dapr_sidecar_injector.sidecarDropALLCapabilities=true`. + > - Additionally, [configure appropriate seccompProfile for sidecar containers](https://docs.dapr.io/operations/hosting/kubernetes/kubernetes-production/#configure-seccompprofile-for-sidecar-containers) injected by Dapr to avoid getting `PodSecurity violation` errors. We are setting required Dapr annotation for audit pod while deploying Gatekeeper later in this quick start to avoid getting `PodSecurity violation` error. + + > Dapr is installed with mtls enabled by default, for more details on the same please refer to [Dapr security](https://docs.dapr.io/operations/security/mtls/#setting-up-mtls-with-the-configuration-resource). + +2. Install Redis + + Please refer to [this](https://docs.dapr.io/getting-started/tutorials/configure-state-pubsub/#step-1-create-a-redis-store) guide to install Redis. + + > Redis is used for example purposes only. Dapr supports [many different state store options](https://docs.dapr.io/reference/components-reference/supported-state-stores/). To install Redis with TLS, please refer to [this](https://docs.bitnami.com/kubernetes/infrastructure/redis-cluster/administration/enable-tls/) doc. 
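
Before moving on, it can help to confirm the prerequisites are in place. Assuming Dapr was installed into its default `dapr-system` namespace and the Redis secret landed in the `default` namespace (which the copy commands in the next steps expect), a quick check looks like:

```shell
# Dapr control-plane pods should be Running
kubectl get pods -n dapr-system
# The redis secret is copied into other namespaces in later steps
kubectl get secret redis -n default
```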
+ +#### Configure a sample subscriber to receive violations + +1. Create `fake-subscriber` namespace and redis secret + + ```shell + kubectl create ns fake-subscriber + # creating redis secret in subscriber namespace to allow Dapr sidecar to connect to redis instance + kubectl get secret redis --namespace=default -o yaml | sed 's/namespace: .*/namespace: fake-subscriber/' | kubectl apply -f - + ``` + +2. Create Dapr pubsub component + + ```shell + kubectl apply -f - < Please use [this guide](https://docs.dapr.io/reference/components-reference/supported-state-stores/setup-redis/) to properly configure Redis pubsub component for Dapr. + +3. Deploy subscriber application + + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: sub + namespace: fake-subscriber + labels: + app: sub + spec: + replicas: 1 + selector: + matchLabels: + app: sub + template: + metadata: + labels: + app: sub + annotations: + dapr.io/enabled: "true" + dapr.io/app-id: "subscriber" + dapr.io/enable-api-logging: "true" + dapr.io/app-port: "6002" + spec: + containers: + - name: go-sub + image: fake-subscriber:latest + imagePullPolicy: Never + ``` + + > [!IMPORTANT] + > Please make sure `fake-subscriber` image is built and available in your cluster. Dockerfile to build image for `fake-subscriber` is under [gatekeeper/test/fake-subscriber](https://github.com/open-policy-agent/gatekeeper/tree/master/test/pubsub/fake-subscriber). + +#### Configure Gatekeeper with Pubsub enabled + +1. Create Gatekeeper namespace, and create Dapr pubsub component and Redis secret in Gatekeeper's namespace (`gatekeeper-system` by default). Please make sure to update `gatekeeper-system` namespace for the next steps if your cluster's Gatekeeper namespace is different. + + ```shell + kubectl create namespace gatekeeper-system + kubectl get secret redis --namespace=default -o yaml | sed 's/namespace: .*/namespace: gatekeeper-system/' | kubectl apply -f - + kubectl apply -f - < /tmp/annotations.yaml + helm upgrade --install gatekeeper gatekeeper/gatekeeper --namespace gatekeeper-system \ + --set audit.enablePubsub=true \ + --set audit.connection=audit-connection \ + --set audit.channel=audit-channel \ + --values /tmp/annotations.yaml + ``` + + **Note:** Verify that after the audit pod is running there is a Dapr sidecar injected and running along side `manager` container. + +3. Create connection config to establish a connection. + + ```shell + kubectl apply -f - < Note: Gatekeeper generates 2 SBOMs. First is for the build stage which includes the builder image and Gatekeeper source code. Second is for the final stage that includes the built Gatekeeper binary (`manager`). 
+ +To retrieve [SBOM](https://docs.docker.com/build/attestations/sbom/) for all architectures, please run: + +```shell +docker buildx imagetools inspect openpolicyagent/gatekeeper:v3.12.0-rc.0 --format '{{ json .SBOM }}' +``` + +For specific architecutes (like `linux/amd64`), please run: +```shell +docker buildx imagetools inspect openpolicyagent/gatekeeper:v3.12.0-rc.0 --format '{{ json .SBOM }}' | jq -r '.["linux/amd64"]' +``` + +## SLSA Provenance + +To retrieve [SLSA provenance](https://docs.docker.com/build/attestations/slsa-provenance/), please run: + +```shell +docker buildx imagetools inspect openpolicyagent/gatekeeper:v3.12.0-rc.0 --format '{{ json .Provenance }}' +``` diff --git a/website/versioned_docs/version-v3.17.x/sync.md b/website/versioned_docs/version-v3.17.x/sync.md new file mode 100644 index 00000000000..e47665908f1 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/sync.md @@ -0,0 +1,88 @@ +--- +id: sync +title: Replicating Data +--- + +## Replicating Data + +Some constraints are impossible to write without access to more state than just the object under test. For example, it is impossible to know if a label is unique across all pods and namespaces unless a ConstraintTemplate has access to all other pods and namespaces. To enable this use case, we provide syncing of data into a data client. + +### Replicating Data with SyncSets (Recommended) + +`Feature State`: Gatekeeper version v3.15+ (alpha) + +Kubernetes data can be replicated into the data client using `SyncSet` resources. Below is an example of a `SyncSet`: + +```yaml +apiVersion: syncset.gatekeeper.sh/v1alpha1 +kind: SyncSet +metadata: + name: syncset-1 +spec: + gvks: + - group: "" + version: "v1" + kind: "Namespace" + - group: "" + version: "v1" + kind: "Pod" +``` + +The resources defined in the `gvks` field of a SyncSet will be eventually synced into the data client. + +#### Working with SyncSet resources + +* Updating a SyncSet's `gvks` field should dynamically update what objects are synced. +* Multiple `SyncSet`s may be defined and those will be reconciled by the Gatekeeper syncset-controller. Notably, the [set union](https://en.wikipedia.org/wiki/Union_(set_theory)) of all SyncSet resources' `gvks` and the [Config](sync#replicating-data-with-config) resource's `syncOnly` will be synced into the data client. +* A resource will continue to be present in the data client so long as a SyncSet or Config still specifies it under the `gvks` or `syncOnly` field. + +### Replicating Data with Config + +`Feature State`: Gatekeeper version v3.6+ (alpha) + +> The "Config" resource must be named `config` for it to be reconciled by Gatekeeper. Gatekeeper will ignore the resource if you do not name it `config`. + +Kubernetes data can also be replicated into the data client via the Config resource. Resources defined in `syncOnly` will be synced into OPA. Below is an example: + +```yaml +apiVersion: config.gatekeeper.sh/v1alpha1 +kind: Config +metadata: + name: config + namespace: "gatekeeper-system" +spec: + sync: + syncOnly: + - group: "" + version: "v1" + kind: "Namespace" + - group: "" + version: "v1" + kind: "Pod" +``` + +You can install this config with the following command: + +```sh +kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper/master/demo/basic/sync.yaml +``` + +#### Working with Config resources + +* Updating a Config's `syncOnly` field should dynamically update what objects are synced. +* The `Config` resource is meant to be a singleton. 
The [set union](https://en.wikipedia.org/wiki/Union_(set_theory)) of all SyncSet resources' `gvks` and the [Config](sync#replicating-data-with-config) resource's `syncOnly` will be synced into the data client. +* A resource will continue to be present in the data client so long as a SyncSet or Config still specifies it under the `gvks` or `syncOnly` field. + +### Accessing replicated data + +Once data is synced, ConstraintTemplates can access the cached data under the `data.inventory` document. + +The `data.inventory` document has the following format: + * For cluster-scoped objects: `data.inventory.cluster[][][]` + * Example referencing the Gatekeeper namespace: `data.inventory.cluster["v1"].Namespace["gatekeeper"]` + * For namespace-scoped objects: `data.inventory.namespace[][groupVersion][][]` + * Example referencing the Gatekeeper pod: `data.inventory.namespace["gatekeeper"]["v1"]["Pod"]["gatekeeper-controller-manager-d4c98b788-j7d92"]` + +### Auditing From Cache + +The [audit](audit.md) feature does not require replication by default. However, when the `audit-from-cache` flag is set to true, the audit informer cache will be used as the source-of-truth for audit queries; thus, an object must first be cached before it can be audited for constraint violations. Kubernetes data can be replicated into the audit cache via one of the resources above. \ No newline at end of file diff --git a/website/versioned_docs/version-v3.17.x/validating-admission-policy.md b/website/versioned_docs/version-v3.17.x/validating-admission-policy.md new file mode 100644 index 00000000000..990ea9fd351 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/validating-admission-policy.md @@ -0,0 +1,173 @@ +--- +id: validating-admission-policy +title: Integration with Kubernetes Validating Admission Policy +--- + +Validating Admission Policy CEL validation in Gatekeeper: +Feature State: Gatekeeper version v3.17 (beta) +❗ This feature is beta, subject to change (feedback is welcome!). It is enabled by default. Set --enable-k8s-native-validation=false` to disable evaluating Validating Admission Policy CEL in constraint templates. + +VAP management through Gatekeeper: +Feature State: Gatekeeper version v3.16 (alpha) +❗ This feature is alpha, subject to change (feedback is welcome!). It is disabled by default unless explicitly enabled via feature flag and/or via constraint template. + +## Description + +This feature allows Gatekeeper to integrate with Kubernetes Validating Admission Policy based on [Common Expression Language (CEL)](https://github.com/google/cel-spec), a declarative, in-process admission control alternative to validating admission webhooks. + +## Motivations + +The Kubernetes Validating Admission Policy feature was introduced as an alpha feature to Kubernetes v1.26, beta in v1.28 (disabled by default), GA in v1.30 (enabled by default). Some of the benefits include: +- in-tree/native in-process +- reduce admission request latency +- improve reliability and availability +- able to fail closed without impacting availability +- avoid the operational burden of webhooks + +To reduce policy fragmentation and simplify the user experience by standardizing the policy experience. We have created an abstraction layer that provides multi-language (e.g. Rego and CEL), multi-target policy enforcement to allow for portable policies and coexistence of numerous policy implementations. 
+ +The [Constraint Framework](https://github.com/open-policy-agent/frameworks/tree/master/constraint) is the library that underlies Gatekeeper. It provides the execution flow Gatekeeper uses to render a decision to the API server. It also provides abstractions that allow us to define constraint templates and constraints: Engine, Enforcement Points, and Targets. + +Together with Gatekeeper and [gator CLI](gator.md), you can get admission, audit, and shift left validations for policies written in both CEL and Rego policy languages, even for clusters that do not support Validating Admission Policy feature yet. For simple policies, you may want admission requests to be handled by the K8s built-in Validating Admission Controller (only supports CEL) instead of the Gatekeeper admission webhook. + +In summary, these are potential options when running Gatekeeper: + +| Policy Language(s) | Enforcement Point | +| ------------------ | ------------------ | +| CEL, Rego | Gatekeeper validating webhook | +| CEL, Rego | Gatekeeper Audit | +| CEL, Rego | Gator CLI | +| CEL | K8s built-in Validating Admission Controller (aka ValidatingAdmissionPolicy) | +| Rego | Gatekeeper validating webhook (referential policies, external data) | +| Rego | Gatekeeper Audit (referential policies, external data) | +| Rego | Gator CLI (referential policies) | + +Find out more about different [enforcement points](enforcement-points.md) + +## Pre-requisites + +- Requires minimum Gatekeeper v3.17.0 (Please refer to the v3.16.0 docs as flags have changed) +- Requires minimum Kubernetes v1.30, when the Kubernetes `Validating Admission Policy` feature GAed +- [optional] Kubernetes version v1.29, need to enable Kubernetes feature gate and runtime config as shown below: + + ```yaml + kind: Cluster + apiVersion: kind.x-k8s.io/v1alpha4 + featureGates: + ValidatingAdmissionPolicy: true + runtimeConfig: + admissionregistration.k8s.io/v1beta1: true + ``` + +## Get started + +## Policy updates to add VAP CEL +To see how it works, check out this [demo](https://github.com/open-policy-agent/gatekeeper/tree/master/demo/k8s-validating-admission-policy) + +Example `K8sRequiredLabels` constraint template using the `K8sNativeValidation` engine and VAP CEL expressions that requires resources to contain specified labels with values matching provided regular expressions. 
A similar policy written in Rego can be seen [here](https://open-policy-agent.github.io/gatekeeper-library/website/validation/requiredlabels) + +```yaml +apiVersion: templates.gatekeeper.sh/v1 +kind: ConstraintTemplate +metadata: + name: k8srequiredlabels +spec: + crd: + spec: + names: + kind: K8sRequiredLabels + validation: + # Schema for the `parameters` field + openAPIV3Schema: + type: object + properties: + message: + type: string + labels: + type: array + items: + type: object + properties: + key: + type: string + allowedRegex: + type: string + targets: + - target: admission.k8s.gatekeeper.sh + code: + - engine: K8sNativeValidation + source: + validations: + - expression: '[object, oldObject].exists(obj, obj != null && has(obj.metadata) && variables.params.labels.all(entry, has(obj.metadata.labels) && entry.key in obj.metadata.labels))' + messageExpression: '"missing required label, requires all of: " + variables.params.labels.map(entry, entry.key).join(", ")' + - expression: '[object, oldObject].exists(obj, obj != null && !variables.params.labels.exists(entry, has(obj.metadata.labels) && entry.key in obj.metadata.labels && !string(obj.metadata.labels[entry.key]).matches(string(entry.allowedRegex))))' + message: "regex mismatch" + rego: | + ... +``` + +With this new engine and source added to the constraint template, now Gatekeeper webhook, audit, and shift-left can validate resources with these new VAP CEL-based rules. + +## Policy updates to generate Validating Admission Policy and Binding resources + +For some policies, you may want admission requests to be handled by the K8s Validating Admission Controller instead of the Gatekeeper admission webhook. + +The K8s Validating Admission Controller requires both the Validating Admission Policy (VAP) and Validating Admission Policy Binding (VAPB) resources to exist to enforce a policy. Gatekeeper can be configured to generate both of these resources. To generate VAP Bindings for all Constraints, ensure the Gatekeeper +`--default-create-vap-binding-for-constraint` flag is set to `true`. To generate VAP as part of all Constraint Templates with the VAP CEL engine `K8sNativeValidation`, ensure the Gatekeeper `--default-create-vap-for-templates=true` flag is set to `true`. By default both flags are set to `false` while the feature is still in alpha. + +To override the `--default-create-vap-for-templates` flag's behavior for a constraint template, set `generateVAP` to `true` explicitly under the K8sNativeValidation engine's `source` in the constraint template. + +```yaml +spec: + targets: + - target: admission.k8s.gatekeeper.sh + code: + - engine: K8sNativeValidation + source: + generateVAP: true + ... +``` + +To override the `--default-create-vap-binding-for-constraints` flag's behavior for a constraint, `spec.scopedEnforcementAction` can be used. Gatekeeper determines the intended enforcement actions for a given enforcement point by evaluating what is provided in `spec.scopedEnforcementActions` and `spec.enforcementAction: scoped` in the constraint. 
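
Conversely (a sketch reusing the `K8sAllowedRepos` kind from the example further below), a constraint that uses scoped enforcement but omits the `vap.k8s.io` enforcement point keeps enforcement in the Gatekeeper webhook only, so no VAPB is generated for it:

```yaml
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: K8sAllowedRepos
metadata:
  name: repo-webhook-only
spec:
  enforcementAction: scoped
  scopedEnforcementActions:
    - action: deny
      enforcementPoints:
        - name: "validation.gatekeeper.sh"   # vap.k8s.io omitted, so no VAPB is generated
  match:
    kinds:
      - apiGroups: [""]
        kinds: ["Pod"]
  parameters:
    repos:
      - "openpolicyagent"
```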
+ +The overall opt-in/opt-out behavior for constraint to generate Validating Admission Policy Binding (VAPB) is as below: + +Constraint with `enforcementAction: scoped`: + +| `vap.k8s.io` in constraint with `spec.scopedEnforcementActions` | generate VAPB | +|----------|----------| +| Not included | Do not generate VAPB | +| Included | Generate VAPB | + +Constraint without `enforcementAction: scoped`: + +| `--default-create-vap-binding-for-constraints` | generate VAPB | +|----------|----------| +| false | Do not generate VAPB | +| true | Generate VAPB | + +:::note +VAP will only get generated for templates with VAP CEL Engine. VAPB will only get generated for constraints that belong to templates with VAP CEL engine. +::: + +:::tip +In the event K8s Validating Admission Controller fails open, Gatekeeper admission webhook can act as a backup when included in constraint. +::: + +Validating Admission Policy Binding for the below constraint will always get generated, assuming the constraint belongs to a template with VAP CEL engine. + +```yaml +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: K8sAllowedRepos +metadata: + name: prod-repo-is-openpolicyagent +spec: +... + enforcementAction: scoped + scopedEnforcementActions: + - action: deny + enforcementPoints: + - name: "vap.k8s.io" + - name: "validation.gatekeeper.sh" +... +``` diff --git a/website/versioned_docs/version-v3.17.x/violations.md b/website/versioned_docs/version-v3.17.x/violations.md new file mode 100644 index 00000000000..7d1b0215526 --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/violations.md @@ -0,0 +1,73 @@ +--- +id: violations +title: Handling Constraint Violations +--- + +## Log denies + +Set the `--log-denies` flag to log all deny, dryrun and warn failures. +This is useful when trying to see what is being denied/fails dry-run and keeping a log to debug cluster problems without having to enable syncing or looking through the status of all constraints. + +## Dry Run enforcement action + +When rolling out new constraints to running clusters, the dry run functionality can be helpful as it enables constraints to be deployed in the cluster without making actual changes. This allows constraints to be tested in a running cluster without enforcing them. Cluster resources that are impacted by the dry run constraint are surfaced as violations in the `status` field of the constraint. + +To use the dry run feature, add `enforcementAction: dryrun` to the constraint spec to ensure no actual changes are made as a result of the constraint. By default, `enforcementAction` is set to `deny` as the default behavior is to deny admission requests with any violation. + +For example: +```yaml +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: K8sRequiredLabels +metadata: + name: ns-must-have-gk +spec: + enforcementAction: dryrun + match: + kinds: + - apiGroups: [""] + kinds: ["Namespace"] + parameters: + labels: ["gatekeeper"] +status: + auditTimestamp: "2019-08-15T01:46:13Z" + enforced: true + violations: + - enforcementAction: dryrun + kind: Namespace + message: 'you must provide labels: {"gatekeeper"}' + name: default + - enforcementAction: dryrun + kind: Namespace + message: 'you must provide labels: {"gatekeeper"}' + name: gatekeeper-system + +``` + +## Warn enforcement action + +Warn enforcement action offers the same benefits as dry run, such as testing constraints without enforcing them. In addition to this, it will also provide immediate feedback on why that constraint would have been denied. 
It is available in Gatekeeper v3.4+ with Kubernetes v1.19+. + +```yaml +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: K8sAllowedRepos +metadata: + name: repo-is-openpolicyagent +spec: + enforcementAction: warn + match: + kinds: + - apiGroups: [""] + kinds: ["Pod"] + parameters: + repos: + - "openpolicyagent" +``` + +```shell +$ kubectl apply pod.yaml +Warning: [prod-repo-is-openpolicyagent] container has an invalid image repo , allowed repos are ["openpolicyagent"] +pod/pause created +``` + + +> NOTE: The supported enforcementActions are [`deny`, `dryrun`, `warn`] for constraints. Update the `--disable-enforcementaction-validation=true` flag if the desire is to disable enforcementAction validation against the list of supported enforcementActions. diff --git a/website/versioned_docs/version-v3.17.x/workload-resources.md b/website/versioned_docs/version-v3.17.x/workload-resources.md new file mode 100644 index 00000000000..527b10da71f --- /dev/null +++ b/website/versioned_docs/version-v3.17.x/workload-resources.md @@ -0,0 +1,88 @@ +--- +id: workload-resources +title: Working with Workload Resources +--- + +## Workload Validation + +[Workload resources](https://kubernetes.io/docs/concepts/workloads/) are Kubernetes resources like Deployments or DaemonSets that create Pods by-way-of a controller. Because many Gatekeeper validation policies are written to enforce against Pods, like those found in the [Gatekeeper policy library](https://www.github.com/open-policy-agent/gatekeeper-library), it is important to recognize that Gatekeeper Pod violation messages will not be directly reported to the user when using the library as those Pods are created from workload resources. + +To reject workload resources that can create a resource that violates a constraint, checkout the [Validation of Workload Resources](expansion.md) feature available in Gatekeeper v3.10+. + +### Example + +```yaml +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: K8sPSPPrivilegedContainer +metadata: + name: psp-privileged-container +spec: + match: + kinds: + - apiGroups: [""] + kinds: ["Pod"] +``` + +The above constraint matches on Pods that pass through the Gatekeeper admission controller. If you create a Deployment with the following PodTemplateSpec, then the Deployment itself will not be blocked, even though the containers in the Deployment violate the constraint. + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: i-wont-be-blocked + name: i-wont-be-blocked +spec: + replicas: 1 + selector: + matchLabels: + app: i-wont-be-blocked + template: + metadata: + labels: + app: i-wont-be-blocked + spec: + containers: + - image: nginx + name: nginx + securityContext: + privileged: true +``` + +Instead, the Pods that are created from the Deployment will be blocked, and the Gatekeeper denial messages will be found in the workload object responsible for creating the Pods (in this case, the ReplicaSet created by the Deployment). The denial message will eventually make its way into the Deployment's status as well. 
+ +```yaml +status: + conditions: + - message: 'admission webhook "validation.gatekeeper.sh" denied the request: [psp-privileged-container] + Privileged container is not allowed: nginx, securityContext: {"privileged":true}' +``` + +Gatekeeper violation messages within statuses can be retrieved using a `kubectl` command like the following: + +```shell +$ kubectl get replicaset i-wont-be-blocked-755547df65 -o jsonpath='{ .status.conditions[].message }' +admission webhook "validation.gatekeeper.sh" denied the request: [psp-privileged-container] Privileged container is not allowed: nginx, securityContext: {"privileged": true} +``` + +```shell +$ kubectl get deploy i-wont-be-blocked -o jsonpath='{ .status.conditions[*].message }' +Deployment does not have minimum availability. admission webhook "validation.gatekeeper.sh" denied the request: [psp-privileged-container] Privileged container is not allowed: nginx, securityContext: {"privileged": true} ReplicaSet "i-wont-be-blocked-755547df65" has timed out progressing. +``` + +Note that adding workload objects to the "kinds" list in the [Gatekeeper policy library constraints](https://www.github.com/open-policy-agent/gatekeeper-library) will not block and alert on workload resources. This is because most of the source Rego code for the library constraints match on the `spec.containers[_]` field instead of the `spec.template.spec.containers[_]` field that is often the format used by the PodTemplateSpec in workload resources. + +``` +input_containers[c] { + c := input.review.object.spec.containers[_] +} +input_containers[c] { + c := input.review.object.spec.initContainers[_] +} +``` + +## Workload Mutation + +Similar to Gatekeeper validation policies, Gatekeeper mutation policies can act on individual Pods or the workload resources that generate those Pods. Mutations on workload resource PodTemplateSpecs will implicitly mutate the Pods that are generated by that workload resource. In contrast, mutations on individual Pods will not bubble up to the parent workload resource's PodTemplateSpec. + +Use the [extent of changes](mutation.md#extent-of-changes) section in mutation policies to granularly specify the scope of a mutation. 
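
For example (a sketch, not taken from the library), an Assign mutator whose `location` points into a Deployment's PodTemplateSpec implicitly mutates the Pods that Deployment creates; the field and value below are illustrative:

```yaml
apiVersion: mutations.gatekeeper.sh/v1
kind: Assign
metadata:
  name: deployment-image-pull-policy
spec:
  applyTo:
    - groups: ["apps"]
      kinds: ["Deployment"]
      versions: ["v1"]
  match:
    scope: Namespaced
    kinds:
      - apiGroups: ["apps"]
        kinds: ["Deployment"]
  location: "spec.template.spec.containers[name: *].imagePullPolicy"
  parameters:
    assign:
      value: "IfNotPresent"
```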
diff --git a/website/versioned_sidebars/version-v3.17.x-sidebars.json b/website/versioned_sidebars/version-v3.17.x-sidebars.json new file mode 100644 index 00000000000..5c668a87177 --- /dev/null +++ b/website/versioned_sidebars/version-v3.17.x-sidebars.json @@ -0,0 +1,73 @@ +{ + "docs": [ + { + "type": "category", + "label": "Getting Started", + "collapsed": false, + "items": [ + "intro", + "install", + "examples" + ] + }, + { + "type": "category", + "label": "How to use Gatekeeper", + "collapsed": false, + "items": [ + "howto", + "audit", + "violations", + "sync", + "exempt-namespaces", + "library", + "customize-startup", + "customize-admission", + "metrics", + "debug", + "emergency", + "vendor-specific", + "failing-closed", + "mutation", + "constrainttemplates", + "externaldata", + "expansion", + "gator", + "workload-resources", + "pubsub", + "validating-admission-policy", + "enforcement-points" + ] + }, + { + "type": "category", + "label": "Architecture", + "collapsed": false, + "items": [ + "operations", + "performance-tuning", + "opa-versions" + ] + }, + { + "type": "category", + "label": "Concepts", + "collapsed": false, + "items": [ + "input", + "mutation-background" + ] + }, + { + "type": "category", + "label": "Contributing", + "collapsed": false, + "items": [ + "developers", + "help", + "security", + "pubsub-driver" + ] + } + ] +} diff --git a/website/versions.json b/website/versions.json index a90e8cba53b..8dbb92357b2 100644 --- a/website/versions.json +++ b/website/versions.json @@ -1,4 +1,5 @@ [ + "v3.17.x", "v3.16.x", "v3.15.x", "v3.14.x",